author    Brian Coca <brian.coca+git@gmail.com>    2016-10-03 13:19:01 -0400
committer Brian Coca <brian.coca+git@gmail.com>    2016-10-03 13:19:01 -0400
commit    99ac4ecc88cac285fa9fe7be4a3f82ed29e38701 (patch)
tree      cffacaf38ea5f528716ce346d7c56bacd63e24e6
parent    4c75a7d290b605e28c127059c9ca25e859931d59 (diff)
parent    df35d324d62e6034ab86db0fb4a56d3ca122d4b2 (diff)
readded extras into main (unified_ansible)
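
This is a merge commit with two parents: the previous mainline head and the tip of the extras tree being re-imported. A minimal sketch of how one might inspect it locally, assuming a clone of the ansible repository that contains this commit:

```
# Show the merge commit's metadata and its combined diffstat
git show --stat 99ac4ecc88cac285fa9fe7be4a3f82ed29e38701

# Diff the merge against each parent separately to see what each side contributed
git diff --stat 4c75a7d290b605e28c127059c9ca25e859931d59 99ac4ecc88cac285fa9fe7be4a3f82ed29e38701
git diff --stat df35d324d62e6034ab86db0fb4a56d3ca122d4b2 99ac4ecc88cac285fa9fe7be4a3f82ed29e38701
```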
-rw-r--r-- lib/ansible/modules/extras/.github/ISSUE_TEMPLATE.md | 55
-rw-r--r-- lib/ansible/modules/extras/.github/PULL_REQUEST_TEMPLATE.md | 28
-rw-r--r-- lib/ansible/modules/extras/.gitignore | 53
-rw-r--r-- lib/ansible/modules/extras/CONTRIBUTING.md | 37
-rw-r--r-- lib/ansible/modules/extras/COPYING | 675
-rw-r--r-- lib/ansible/modules/extras/GUIDELINES.md | 73
-rw-r--r-- lib/ansible/modules/extras/MAINTAINERS.md | 1
-rw-r--r-- lib/ansible/modules/extras/README.md | 28
-rw-r--r-- lib/ansible/modules/extras/REVIEWERS.md | 58
-rw-r--r-- lib/ansible/modules/extras/VERSION | 1
-rw-r--r-- lib/ansible/modules/extras/__init__.py | 0
-rw-r--r-- lib/ansible/modules/extras/cloud/__init__.py | 0
-rw-r--r-- lib/ansible/modules/extras/cloud/amazon/GUIDELINES.md | 264
-rw-r--r-- lib/ansible/modules/extras/cloud/amazon/__init__.py | 0
-rw-r--r-- lib/ansible/modules/extras/cloud/amazon/cloudformation_facts.py | 285
-rw-r--r-- lib/ansible/modules/extras/cloud/amazon/cloudtrail.py | 229
-rw-r--r-- lib/ansible/modules/extras/cloud/amazon/cloudwatchevent_rule.py | 409
-rw-r--r-- lib/ansible/modules/extras/cloud/amazon/dynamodb_table.py | 416
-rw-r--r-- lib/ansible/modules/extras/cloud/amazon/ec2_ami_copy.py | 253
-rw-r--r-- lib/ansible/modules/extras/cloud/amazon/ec2_asg_facts.py | 355
-rw-r--r-- lib/ansible/modules/extras/cloud/amazon/ec2_customer_gateway.py | 267
-rw-r--r-- lib/ansible/modules/extras/cloud/amazon/ec2_elb_facts.py | 245
-rw-r--r-- lib/ansible/modules/extras/cloud/amazon/ec2_eni.py | 568
-rw-r--r-- lib/ansible/modules/extras/cloud/amazon/ec2_eni_facts.py | 177
-rw-r--r-- lib/ansible/modules/extras/cloud/amazon/ec2_lc_find.py | 224
-rw-r--r-- lib/ansible/modules/extras/cloud/amazon/ec2_remote_facts.py | 186
-rw-r--r-- lib/ansible/modules/extras/cloud/amazon/ec2_snapshot_facts.py | 226
-rw-r--r-- lib/ansible/modules/extras/cloud/amazon/ec2_vol_facts.py | 139
-rw-r--r-- lib/ansible/modules/extras/cloud/amazon/ec2_vpc_dhcp_options.py | 385
-rw-r--r-- lib/ansible/modules/extras/cloud/amazon/ec2_vpc_dhcp_options_facts.py | 167
-rw-r--r-- lib/ansible/modules/extras/cloud/amazon/ec2_vpc_igw.py | 159
-rw-r--r-- lib/ansible/modules/extras/cloud/amazon/ec2_vpc_nacl.py | 546
-rw-r--r-- lib/ansible/modules/extras/cloud/amazon/ec2_vpc_nacl_facts.py | 201
-rw-r--r-- lib/ansible/modules/extras/cloud/amazon/ec2_vpc_nat_gateway.py | 1085
-rw-r--r-- lib/ansible/modules/extras/cloud/amazon/ec2_vpc_net_facts.py | 125
-rw-r--r-- lib/ansible/modules/extras/cloud/amazon/ec2_vpc_peer.py | 363
-rw-r--r-- lib/ansible/modules/extras/cloud/amazon/ec2_vpc_route_table.py | 634
-rw-r--r-- lib/ansible/modules/extras/cloud/amazon/ec2_vpc_route_table_facts.py | 125
-rw-r--r-- lib/ansible/modules/extras/cloud/amazon/ec2_vpc_subnet.py | 272
-rw-r--r-- lib/ansible/modules/extras/cloud/amazon/ec2_vpc_subnet_facts.py | 140
-rw-r--r-- lib/ansible/modules/extras/cloud/amazon/ec2_vpc_vgw.py | 598
-rw-r--r-- lib/ansible/modules/extras/cloud/amazon/ec2_win_password.py | 175
-rw-r--r-- lib/ansible/modules/extras/cloud/amazon/ecs_cluster.py | 238
-rw-r--r-- lib/ansible/modules/extras/cloud/amazon/ecs_service.py | 426
-rw-r--r-- lib/ansible/modules/extras/cloud/amazon/ecs_service_facts.py | 236
-rw-r--r-- lib/ansible/modules/extras/cloud/amazon/ecs_task.py | 324
-rw-r--r-- lib/ansible/modules/extras/cloud/amazon/ecs_taskdefinition.py | 221
-rw-r--r-- lib/ansible/modules/extras/cloud/amazon/efs.py | 629
-rw-r--r-- lib/ansible/modules/extras/cloud/amazon/efs_facts.py | 377
-rw-r--r-- lib/ansible/modules/extras/cloud/amazon/execute_lambda.py | 281
-rw-r--r-- lib/ansible/modules/extras/cloud/amazon/iam_mfa_device_facts.py | 118
-rw-r--r-- lib/ansible/modules/extras/cloud/amazon/iam_server_certificate_facts.py | 172
-rw-r--r-- lib/ansible/modules/extras/cloud/amazon/kinesis_stream.py | 1098
-rw-r--r-- lib/ansible/modules/extras/cloud/amazon/lambda.py | 437
-rw-r--r-- lib/ansible/modules/extras/cloud/amazon/lambda_alias.py | 384
-rw-r--r-- lib/ansible/modules/extras/cloud/amazon/lambda_event.py | 422
-rw-r--r-- lib/ansible/modules/extras/cloud/amazon/lambda_facts.py | 408
-rw-r--r-- lib/ansible/modules/extras/cloud/amazon/redshift.py | 497
-rw-r--r-- lib/ansible/modules/extras/cloud/amazon/redshift_subnet_group.py | 182
-rw-r--r-- lib/ansible/modules/extras/cloud/amazon/route53_facts.py | 436
-rw-r--r-- lib/ansible/modules/extras/cloud/amazon/route53_health_check.py | 358
-rw-r--r-- lib/ansible/modules/extras/cloud/amazon/route53_zone.py | 224
-rw-r--r-- lib/ansible/modules/extras/cloud/amazon/s3_bucket.py | 437
-rw-r--r-- lib/ansible/modules/extras/cloud/amazon/s3_lifecycle.py | 434
-rw-r--r-- lib/ansible/modules/extras/cloud/amazon/s3_logging.py | 179
-rw-r--r-- lib/ansible/modules/extras/cloud/amazon/s3_website.py | 293
-rw-r--r-- lib/ansible/modules/extras/cloud/amazon/sns_topic.py | 407
-rw-r--r-- lib/ansible/modules/extras/cloud/amazon/sqs_queue.py | 273
-rw-r--r-- lib/ansible/modules/extras/cloud/amazon/sts_assume_role.py | 154
-rw-r--r-- lib/ansible/modules/extras/cloud/amazon/sts_session_token.py | 159
-rw-r--r-- lib/ansible/modules/extras/cloud/atomic/__init__.py | 0
-rw-r--r-- lib/ansible/modules/extras/cloud/atomic/atomic_host.py | 105
-rw-r--r-- lib/ansible/modules/extras/cloud/atomic/atomic_image.py | 137
-rw-r--r-- lib/ansible/modules/extras/cloud/azure/__init__.py | 0
-rw-r--r-- lib/ansible/modules/extras/cloud/azure/azure_rm_deployment.py | 661
-rw-r--r-- lib/ansible/modules/extras/cloud/centurylink/__init__.py | 1
-rw-r--r-- lib/ansible/modules/extras/cloud/centurylink/clc_aa_policy.py | 351
-rw-r--r-- lib/ansible/modules/extras/cloud/centurylink/clc_alert_policy.py | 537
-rw-r--r-- lib/ansible/modules/extras/cloud/centurylink/clc_blueprint_package.py | 302
-rw-r--r-- lib/ansible/modules/extras/cloud/centurylink/clc_firewall_policy.py | 597
-rw-r--r-- lib/ansible/modules/extras/cloud/centurylink/clc_group.py | 513
-rw-r--r-- lib/ansible/modules/extras/cloud/centurylink/clc_loadbalancer.py | 936
-rw-r--r-- lib/ansible/modules/extras/cloud/centurylink/clc_modify_server.py | 977
-rw-r--r-- lib/ansible/modules/extras/cloud/centurylink/clc_publicip.py | 363
-rw-r--r-- lib/ansible/modules/extras/cloud/centurylink/clc_server.py | 1585
-rw-r--r-- lib/ansible/modules/extras/cloud/centurylink/clc_server_snapshot.py | 413
-rw-r--r-- lib/ansible/modules/extras/cloud/cloudstack/__init__.py | 0
-rw-r--r-- lib/ansible/modules/extras/cloud/cloudstack/cs_account.py | 381
-rw-r--r-- lib/ansible/modules/extras/cloud/cloudstack/cs_affinitygroup.py | 251
-rw-r--r-- lib/ansible/modules/extras/cloud/cloudstack/cs_cluster.py | 417
-rw-r--r-- lib/ansible/modules/extras/cloud/cloudstack/cs_configuration.py | 288
-rw-r--r-- lib/ansible/modules/extras/cloud/cloudstack/cs_domain.py | 270
-rw-r--r-- lib/ansible/modules/extras/cloud/cloudstack/cs_facts.py | 222
-rw-r--r-- lib/ansible/modules/extras/cloud/cloudstack/cs_firewall.py | 429
-rw-r--r-- lib/ansible/modules/extras/cloud/cloudstack/cs_instance.py | 1002
-rw-r--r-- lib/ansible/modules/extras/cloud/cloudstack/cs_instance_facts.py | 273
-rw-r--r-- lib/ansible/modules/extras/cloud/cloudstack/cs_instancegroup.py | 201
-rw-r--r-- lib/ansible/modules/extras/cloud/cloudstack/cs_ip_address.py | 267
-rw-r--r-- lib/ansible/modules/extras/cloud/cloudstack/cs_iso.py | 335
-rw-r--r-- lib/ansible/modules/extras/cloud/cloudstack/cs_loadbalancer_rule.py | 380
-rw-r--r-- lib/ansible/modules/extras/cloud/cloudstack/cs_loadbalancer_rule_member.py | 360
-rw-r--r-- lib/ansible/modules/extras/cloud/cloudstack/cs_network.py | 580
-rw-r--r-- lib/ansible/modules/extras/cloud/cloudstack/cs_pod.py | 301
-rw-r--r-- lib/ansible/modules/extras/cloud/cloudstack/cs_portforward.py | 379
-rw-r--r-- lib/ansible/modules/extras/cloud/cloudstack/cs_project.py | 307
-rw-r--r-- lib/ansible/modules/extras/cloud/cloudstack/cs_resourcelimit.py | 216
-rw-r--r-- lib/ansible/modules/extras/cloud/cloudstack/cs_router.py | 374
-rw-r--r-- lib/ansible/modules/extras/cloud/cloudstack/cs_securitygroup.py | 219
-rw-r--r-- lib/ansible/modules/extras/cloud/cloudstack/cs_securitygroup_rule.py | 421
-rw-r--r-- lib/ansible/modules/extras/cloud/cloudstack/cs_snapshot_policy.py | 321
-rw-r--r-- lib/ansible/modules/extras/cloud/cloudstack/cs_sshkeypair.py | 242
-rw-r--r-- lib/ansible/modules/extras/cloud/cloudstack/cs_staticnat.py | 271
-rw-r--r-- lib/ansible/modules/extras/cloud/cloudstack/cs_template.py | 668
-rw-r--r-- lib/ansible/modules/extras/cloud/cloudstack/cs_user.py | 451
-rw-r--r-- lib/ansible/modules/extras/cloud/cloudstack/cs_vmsnapshot.py | 300
-rw-r--r-- lib/ansible/modules/extras/cloud/cloudstack/cs_volume.py | 492
-rw-r--r-- lib/ansible/modules/extras/cloud/cloudstack/cs_zone.py | 402
-rw-r--r-- lib/ansible/modules/extras/cloud/cloudstack/cs_zone_facts.py | 200
-rw-r--r-- lib/ansible/modules/extras/cloud/google/__init__.py | 0
-rw-r--r-- lib/ansible/modules/extras/cloud/google/gcdns_record.py | 790
-rw-r--r-- lib/ansible/modules/extras/cloud/google/gcdns_zone.py | 381
-rw-r--r-- lib/ansible/modules/extras/cloud/google/gce_img.py | 230
-rw-r--r-- lib/ansible/modules/extras/cloud/google/gce_tag.py | 229
-rw-r--r-- lib/ansible/modules/extras/cloud/lxc/__init__.py | 0
-rw-r--r-- lib/ansible/modules/extras/cloud/lxc/lxc_container.py | 1759
-rw-r--r-- lib/ansible/modules/extras/cloud/lxd/__init__.py | 0
-rw-r--r-- lib/ansible/modules/extras/cloud/lxd/lxd_container.py | 611
-rw-r--r-- lib/ansible/modules/extras/cloud/lxd/lxd_profile.py | 374
-rw-r--r-- lib/ansible/modules/extras/cloud/misc/__init__.py | 0
-rw-r--r-- lib/ansible/modules/extras/cloud/misc/ovirt.py | 521
-rw-r--r-- lib/ansible/modules/extras/cloud/misc/proxmox.py | 468
-rw-r--r-- lib/ansible/modules/extras/cloud/misc/proxmox_template.py | 232
-rw-r--r-- lib/ansible/modules/extras/cloud/misc/rhevm.py | 1530
-rw-r--r-- lib/ansible/modules/extras/cloud/misc/virt.py | 524
-rwxr-xr-x lib/ansible/modules/extras/cloud/misc/virt_net.py | 594
-rwxr-xr-x lib/ansible/modules/extras/cloud/misc/virt_pool.py | 690
-rw-r--r-- lib/ansible/modules/extras/cloud/openstack/__init__.py | 0
-rw-r--r-- lib/ansible/modules/extras/cloud/openstack/os_flavor_facts.py | 225
-rw-r--r-- lib/ansible/modules/extras/cloud/openstack/os_group.py | 167
-rw-r--r-- lib/ansible/modules/extras/cloud/openstack/os_ironic_inspect.py | 169
-rw-r--r-- lib/ansible/modules/extras/cloud/openstack/os_keystone_domain.py | 191
-rw-r--r-- lib/ansible/modules/extras/cloud/openstack/os_keystone_domain_facts.py | 137
-rw-r--r-- lib/ansible/modules/extras/cloud/openstack/os_keystone_role.py | 136
-rw-r--r-- lib/ansible/modules/extras/cloud/openstack/os_keystone_service.py | 210
-rw-r--r-- lib/ansible/modules/extras/cloud/openstack/os_port_facts.py | 225
-rw-r--r-- lib/ansible/modules/extras/cloud/openstack/os_project.py | 228
-rw-r--r-- lib/ansible/modules/extras/cloud/openstack/os_project_facts.py | 163
-rw-r--r-- lib/ansible/modules/extras/cloud/openstack/os_recordset.py | 242
-rw-r--r-- lib/ansible/modules/extras/cloud/openstack/os_server_group.py | 182
-rw-r--r-- lib/ansible/modules/extras/cloud/openstack/os_stack.py | 262
-rw-r--r-- lib/ansible/modules/extras/cloud/openstack/os_user_facts.py | 172
-rw-r--r-- lib/ansible/modules/extras/cloud/openstack/os_user_role.py | 212
-rw-r--r-- lib/ansible/modules/extras/cloud/openstack/os_zone.py | 237
-rw-r--r-- lib/ansible/modules/extras/cloud/ovh/__init__.py | 0
-rw-r--r-- lib/ansible/modules/extras/cloud/ovh/ovh_ip_loadbalancing_backend.py | 312
-rw-r--r-- lib/ansible/modules/extras/cloud/ovirt/__init__.py | 0
-rw-r--r-- lib/ansible/modules/extras/cloud/ovirt/ovirt_auth.py | 233
-rw-r--r-- lib/ansible/modules/extras/cloud/ovirt/ovirt_disks.py | 316
-rw-r--r-- lib/ansible/modules/extras/cloud/ovirt/ovirt_vms.py | 806
-rw-r--r-- lib/ansible/modules/extras/cloud/profitbricks/__init__.py | 0
-rw-r--r-- lib/ansible/modules/extras/cloud/profitbricks/profitbricks.py | 662
-rw-r--r-- lib/ansible/modules/extras/cloud/profitbricks/profitbricks_datacenter.py | 258
-rw-r--r-- lib/ansible/modules/extras/cloud/profitbricks/profitbricks_nic.py | 290
-rw-r--r-- lib/ansible/modules/extras/cloud/profitbricks/profitbricks_volume.py | 423
-rw-r--r-- lib/ansible/modules/extras/cloud/profitbricks/profitbricks_volume_attachments.py | 262
-rw-r--r-- lib/ansible/modules/extras/cloud/rackspace/__init__.py | 0
-rw-r--r-- lib/ansible/modules/extras/cloud/rackspace/rax_clb_ssl.py | 269
-rw-r--r-- lib/ansible/modules/extras/cloud/rackspace/rax_mon_alarm.py | 227
-rw-r--r-- lib/ansible/modules/extras/cloud/rackspace/rax_mon_check.py | 313
-rw-r--r-- lib/ansible/modules/extras/cloud/rackspace/rax_mon_entity.py | 192
-rw-r--r-- lib/ansible/modules/extras/cloud/rackspace/rax_mon_notification.py | 176
-rw-r--r-- lib/ansible/modules/extras/cloud/rackspace/rax_mon_notification_plan.py | 181
-rw-r--r-- lib/ansible/modules/extras/cloud/smartos/__init__.py | 0
-rw-r--r-- lib/ansible/modules/extras/cloud/smartos/smartos_image_facts.py | 117
-rw-r--r-- lib/ansible/modules/extras/cloud/softlayer/__init__.py | 0
-rw-r--r-- lib/ansible/modules/extras/cloud/softlayer/sl_vm.py | 360
-rw-r--r-- lib/ansible/modules/extras/cloud/vmware/__init__.py | 0
-rw-r--r-- lib/ansible/modules/extras/cloud/vmware/vca_fw.py | 245
-rw-r--r-- lib/ansible/modules/extras/cloud/vmware/vca_nat.py | 215
-rw-r--r-- lib/ansible/modules/extras/cloud/vmware/vca_vapp.py | 282
-rw-r--r-- lib/ansible/modules/extras/cloud/vmware/vmware_cluster.py | 251
-rw-r--r-- lib/ansible/modules/extras/cloud/vmware/vmware_datacenter.py | 160
-rw-r--r-- lib/ansible/modules/extras/cloud/vmware/vmware_dns_config.py | 130
-rw-r--r-- lib/ansible/modules/extras/cloud/vmware/vmware_dvs_host.py | 249
-rw-r--r-- lib/ansible/modules/extras/cloud/vmware/vmware_dvs_portgroup.py | 198
-rw-r--r-- lib/ansible/modules/extras/cloud/vmware/vmware_dvswitch.py | 209
-rw-r--r-- lib/ansible/modules/extras/cloud/vmware/vmware_guest.py | 959
-rw-r--r-- lib/ansible/modules/extras/cloud/vmware/vmware_host.py | 225
-rw-r--r-- lib/ansible/modules/extras/cloud/vmware/vmware_local_user_manager.py | 191
-rw-r--r-- lib/ansible/modules/extras/cloud/vmware/vmware_maintenancemode.py | 212
-rw-r--r-- lib/ansible/modules/extras/cloud/vmware/vmware_migrate_vmk.py | 196
-rw-r--r-- lib/ansible/modules/extras/cloud/vmware/vmware_portgroup.py | 163
-rw-r--r-- lib/ansible/modules/extras/cloud/vmware/vmware_target_canonical_facts.py | 95
-rw-r--r-- lib/ansible/modules/extras/cloud/vmware/vmware_vm_facts.py | 101
-rw-r--r-- lib/ansible/modules/extras/cloud/vmware/vmware_vm_shell.py | 186
-rw-r--r-- lib/ansible/modules/extras/cloud/vmware/vmware_vm_vss_dvs_migrate.py | 158
-rw-r--r-- lib/ansible/modules/extras/cloud/vmware/vmware_vmkernel.py | 208
-rw-r--r-- lib/ansible/modules/extras/cloud/vmware/vmware_vmkernel_ip_config.py | 123
-rw-r--r-- lib/ansible/modules/extras/cloud/vmware/vmware_vmotion.py | 150
-rw-r--r-- lib/ansible/modules/extras/cloud/vmware/vmware_vsan_cluster.py | 130
-rw-r--r-- lib/ansible/modules/extras/cloud/vmware/vmware_vswitch.py | 199
-rw-r--r-- lib/ansible/modules/extras/cloud/vmware/vsphere_copy.py | 177
-rw-r--r-- lib/ansible/modules/extras/cloud/webfaction/__init__.py | 0
-rw-r--r-- lib/ansible/modules/extras/cloud/webfaction/webfaction_app.py | 199
-rw-r--r-- lib/ansible/modules/extras/cloud/webfaction/webfaction_db.py | 200
-rw-r--r-- lib/ansible/modules/extras/cloud/webfaction/webfaction_domain.py | 171
-rw-r--r-- lib/ansible/modules/extras/cloud/webfaction/webfaction_mailbox.py | 139
-rw-r--r-- lib/ansible/modules/extras/cloud/webfaction/webfaction_site.py | 210
-rw-r--r-- lib/ansible/modules/extras/cloud/xenserver_facts.py | 204
-rw-r--r-- lib/ansible/modules/extras/clustering/__init__.py | 0
-rw-r--r-- lib/ansible/modules/extras/clustering/consul.py | 572
-rw-r--r-- lib/ansible/modules/extras/clustering/consul_acl.py | 363
-rw-r--r-- lib/ansible/modules/extras/clustering/consul_kv.py | 289
-rw-r--r-- lib/ansible/modules/extras/clustering/consul_session.py | 282
-rw-r--r-- lib/ansible/modules/extras/clustering/kubernetes.py | 399
-rw-r--r-- lib/ansible/modules/extras/clustering/znode.py | 234
-rw-r--r-- lib/ansible/modules/extras/commands/__init__.py | 0
-rw-r--r-- lib/ansible/modules/extras/commands/expect.py | 234
-rw-r--r-- lib/ansible/modules/extras/database/__init__.py | 0
-rw-r--r-- lib/ansible/modules/extras/database/influxdb/__init__.py | 0
-rw-r--r-- lib/ansible/modules/extras/database/influxdb/influxdb_database.py | 194
-rw-r--r-- lib/ansible/modules/extras/database/influxdb/influxdb_retention_policy.py | 237
-rw-r--r-- lib/ansible/modules/extras/database/misc/__init__.py | 0
-rw-r--r-- lib/ansible/modules/extras/database/misc/mongodb_parameter.py | 228
-rw-r--r-- lib/ansible/modules/extras/database/misc/mongodb_user.py | 410
-rw-r--r-- lib/ansible/modules/extras/database/misc/redis.py | 322
-rw-r--r-- lib/ansible/modules/extras/database/misc/riak.py | 262
-rw-r--r-- lib/ansible/modules/extras/database/mssql/__init__.py | 0
-rw-r--r-- lib/ansible/modules/extras/database/mssql/mssql_db.py | 232
-rw-r--r-- lib/ansible/modules/extras/database/mysql/__init__.py | 0
-rw-r--r-- lib/ansible/modules/extras/database/mysql/mysql_replication.py | 362
-rw-r--r-- lib/ansible/modules/extras/database/postgresql/__init__.py | 0
-rw-r--r-- lib/ansible/modules/extras/database/postgresql/postgresql_ext.py | 187
-rw-r--r-- lib/ansible/modules/extras/database/postgresql/postgresql_lang.py | 270
-rw-r--r-- lib/ansible/modules/extras/database/vertica/__init__.py | 0
-rw-r--r-- lib/ansible/modules/extras/database/vertica/vertica_configuration.py | 194
-rw-r--r-- lib/ansible/modules/extras/database/vertica/vertica_facts.py | 276
-rw-r--r-- lib/ansible/modules/extras/database/vertica/vertica_role.py | 243
-rw-r--r-- lib/ansible/modules/extras/database/vertica/vertica_schema.py | 317
-rw-r--r-- lib/ansible/modules/extras/database/vertica/vertica_user.py | 388
-rw-r--r-- lib/ansible/modules/extras/files/__init__.py | 0
-rw-r--r-- lib/ansible/modules/extras/files/archive.py | 401
-rwxr-xr-x lib/ansible/modules/extras/files/blockinfile.py | 320
-rw-r--r-- lib/ansible/modules/extras/files/patch.py | 196
-rw-r--r-- lib/ansible/modules/extras/identity/__init__.py | 0
-rw-r--r-- lib/ansible/modules/extras/identity/opendj/__init__.py | 0
-rw-r--r-- lib/ansible/modules/extras/identity/opendj/opendj_backendprop.py | 217
-rw-r--r-- lib/ansible/modules/extras/messaging/__init__.py | 0
-rw-r--r-- lib/ansible/modules/extras/messaging/rabbitmq_binding.py | 219
-rw-r--r-- lib/ansible/modules/extras/messaging/rabbitmq_exchange.py | 218
-rw-r--r-- lib/ansible/modules/extras/messaging/rabbitmq_parameter.py | 159
-rw-r--r-- lib/ansible/modules/extras/messaging/rabbitmq_plugin.py | 150
-rw-r--r-- lib/ansible/modules/extras/messaging/rabbitmq_policy.py | 168
-rw-r--r-- lib/ansible/modules/extras/messaging/rabbitmq_queue.py | 263
-rw-r--r-- lib/ansible/modules/extras/messaging/rabbitmq_user.py | 302
-rw-r--r-- lib/ansible/modules/extras/messaging/rabbitmq_vhost.py | 147
-rw-r--r-- lib/ansible/modules/extras/monitoring/__init__.py | 0
-rw-r--r-- lib/ansible/modules/extras/monitoring/airbrake_deployment.py | 131
-rw-r--r-- lib/ansible/modules/extras/monitoring/bigpanda.py | 180
-rw-r--r-- lib/ansible/modules/extras/monitoring/boundary_meter.py | 264
-rw-r--r-- lib/ansible/modules/extras/monitoring/circonus_annotation.py | 147
-rw-r--r-- lib/ansible/modules/extras/monitoring/datadog_event.py | 165
-rw-r--r-- lib/ansible/modules/extras/monitoring/datadog_monitor.py | 310
-rw-r--r-- lib/ansible/modules/extras/monitoring/honeybadger_deployment.py | 141
-rw-r--r-- lib/ansible/modules/extras/monitoring/librato_annotation.py | 161
-rw-r--r-- lib/ansible/modules/extras/monitoring/logentries.py | 147
-rw-r--r-- lib/ansible/modules/extras/monitoring/logicmonitor.py | 2169
-rw-r--r-- lib/ansible/modules/extras/monitoring/logicmonitor_facts.py | 632
-rw-r--r-- lib/ansible/modules/extras/monitoring/monit.py | 185
-rw-r--r-- lib/ansible/modules/extras/monitoring/nagios.py | 1030
-rw-r--r-- lib/ansible/modules/extras/monitoring/newrelic_deployment.py | 147
-rw-r--r-- lib/ansible/modules/extras/monitoring/pagerduty.py | 299
-rw-r--r-- lib/ansible/modules/extras/monitoring/pagerduty_alert.py | 213
-rw-r--r-- lib/ansible/modules/extras/monitoring/pingdom.py | 152
-rw-r--r-- lib/ansible/modules/extras/monitoring/rollbar_deployment.py | 134
-rw-r--r-- lib/ansible/modules/extras/monitoring/sensu_check.py | 384
-rw-r--r-- lib/ansible/modules/extras/monitoring/sensu_subscription.py | 161
-rw-r--r-- lib/ansible/modules/extras/monitoring/stackdriver.py | 216
-rw-r--r-- lib/ansible/modules/extras/monitoring/statusio_maintenance.py | 480
-rw-r--r-- lib/ansible/modules/extras/monitoring/uptimerobot.py | 168
-rw-r--r-- lib/ansible/modules/extras/monitoring/zabbix_group.py | 225
-rw-r--r-- lib/ansible/modules/extras/monitoring/zabbix_host.py | 562
-rw-r--r-- lib/ansible/modules/extras/monitoring/zabbix_hostmacro.py | 243
-rw-r--r-- lib/ansible/modules/extras/monitoring/zabbix_maintenance.py | 377
-rw-r--r-- lib/ansible/modules/extras/monitoring/zabbix_screen.py | 435
-rw-r--r-- lib/ansible/modules/extras/network/__init__.py | 0
-rw-r--r-- lib/ansible/modules/extras/network/a10/__init__.py | 0
-rw-r--r-- lib/ansible/modules/extras/network/a10/a10_server.py | 294
-rw-r--r-- lib/ansible/modules/extras/network/a10/a10_service_group.py | 341
-rw-r--r-- lib/ansible/modules/extras/network/a10/a10_virtual_server.py | 297
-rw-r--r-- lib/ansible/modules/extras/network/asa/__init__.py | 0
-rw-r--r-- lib/ansible/modules/extras/network/asa/asa_acl.py | 230
-rw-r--r-- lib/ansible/modules/extras/network/asa/asa_command.py | 224
-rw-r--r-- lib/ansible/modules/extras/network/asa/asa_config.py | 327
-rw-r--r-- lib/ansible/modules/extras/network/citrix/__init__.py | 0
-rw-r--r-- lib/ansible/modules/extras/network/citrix/netscaler.py | 189
-rw-r--r-- lib/ansible/modules/extras/network/cloudflare_dns.py | 644
-rw-r--r-- lib/ansible/modules/extras/network/dnsimple.py | 305
-rw-r--r-- lib/ansible/modules/extras/network/dnsmadeeasy.py | 368
-rw-r--r-- lib/ansible/modules/extras/network/exoscale/__init__.py | 0
-rw-r--r-- lib/ansible/modules/extras/network/exoscale/exo_dns_domain.py | 255
-rw-r--r-- lib/ansible/modules/extras/network/exoscale/exo_dns_record.py | 391
-rw-r--r-- lib/ansible/modules/extras/network/f5/__init__.py | 0
-rw-r--r-- lib/ansible/modules/extras/network/f5/bigip_device_dns.py | 397
-rw-r--r-- lib/ansible/modules/extras/network/f5/bigip_device_ntp.py | 257
-rw-r--r-- lib/ansible/modules/extras/network/f5/bigip_device_sshd.py | 344
-rw-r--r-- lib/ansible/modules/extras/network/f5/bigip_facts.py | 1724
-rw-r--r-- lib/ansible/modules/extras/network/f5/bigip_gtm_datacenter.py | 366
-rw-r--r-- lib/ansible/modules/extras/network/f5/bigip_gtm_virtual_server.py | 235
-rw-r--r-- lib/ansible/modules/extras/network/f5/bigip_gtm_wide_ip.py | 158
-rw-r--r-- lib/ansible/modules/extras/network/f5/bigip_irule.py | 385
-rw-r--r-- lib/ansible/modules/extras/network/f5/bigip_monitor_http.py | 443
-rw-r--r-- lib/ansible/modules/extras/network/f5/bigip_monitor_tcp.py | 485
-rw-r--r-- lib/ansible/modules/extras/network/f5/bigip_node.py | 463
-rw-r--r-- lib/ansible/modules/extras/network/f5/bigip_pool.py | 561
-rw-r--r-- lib/ansible/modules/extras/network/f5/bigip_pool_member.py | 505
-rw-r--r-- lib/ansible/modules/extras/network/f5/bigip_routedomain.py | 523
-rw-r--r-- lib/ansible/modules/extras/network/f5/bigip_selfip.py | 659
-rw-r--r-- lib/ansible/modules/extras/network/f5/bigip_ssl_certificate.py | 516
-rw-r--r-- lib/ansible/modules/extras/network/f5/bigip_sys_db.py | 221
-rw-r--r-- lib/ansible/modules/extras/network/f5/bigip_virtual_server.py | 614
-rw-r--r-- lib/ansible/modules/extras/network/f5/bigip_vlan.py | 445
-rw-r--r-- lib/ansible/modules/extras/network/haproxy.py | 351
-rw-r--r-- lib/ansible/modules/extras/network/illumos/__init__.py | 0
-rw-r--r-- lib/ansible/modules/extras/network/illumos/dladm_etherstub.py | 171
-rw-r--r-- lib/ansible/modules/extras/network/illumos/dladm_vnic.py | 258
-rw-r--r-- lib/ansible/modules/extras/network/illumos/flowadm.py | 503
-rw-r--r-- lib/ansible/modules/extras/network/illumos/ipadm_if.py | 222
-rw-r--r-- lib/ansible/modules/extras/network/illumos/ipadm_prop.py | 264
-rw-r--r-- lib/ansible/modules/extras/network/ipify_facts.py | 100
-rw-r--r-- lib/ansible/modules/extras/network/lldp.py | 86
-rw-r--r-- lib/ansible/modules/extras/network/netconf/__init__.py | 0
-rwxr-xr-x lib/ansible/modules/extras/network/netconf/netconf_config.py | 221
-rw-r--r-- lib/ansible/modules/extras/network/nmcli.py | 1086
-rw-r--r-- lib/ansible/modules/extras/network/openvswitch_bridge.py | 270
-rw-r--r-- lib/ansible/modules/extras/network/openvswitch_db.py | 132
-rw-r--r-- lib/ansible/modules/extras/network/openvswitch_port.py | 272
-rw-r--r-- lib/ansible/modules/extras/network/snmp_facts.py | 367
-rw-r--r-- lib/ansible/modules/extras/network/wakeonlan.py | 126
-rw-r--r-- lib/ansible/modules/extras/notification/__init__.py | 0
-rw-r--r-- lib/ansible/modules/extras/notification/campfire.py | 138
-rw-r--r-- lib/ansible/modules/extras/notification/flowdock.py | 193
-rw-r--r-- lib/ansible/modules/extras/notification/grove.py | 117
-rwxr-xr-x lib/ansible/modules/extras/notification/hall.py | 97
-rw-r--r-- lib/ansible/modules/extras/notification/hipchat.py | 219
-rw-r--r-- lib/ansible/modules/extras/notification/irc.py | 300
-rw-r--r-- lib/ansible/modules/extras/notification/jabber.py | 165
-rw-r--r-- lib/ansible/modules/extras/notification/mail.py | 302
-rw-r--r-- lib/ansible/modules/extras/notification/mqtt.py | 166
-rw-r--r-- lib/ansible/modules/extras/notification/nexmo.py | 141
-rw-r--r-- lib/ansible/modules/extras/notification/osx_say.py | 76
-rw-r--r-- lib/ansible/modules/extras/notification/pushbullet.py | 187
-rw-r--r-- lib/ansible/modules/extras/notification/pushover.py | 116
-rw-r--r-- lib/ansible/modules/extras/notification/rocketchat.py | 251
-rw-r--r-- lib/ansible/modules/extras/notification/sendgrid.py | 271
-rw-r--r-- lib/ansible/modules/extras/notification/slack.py | 259
-rw-r--r-- lib/ansible/modules/extras/notification/sns.py | 201
-rw-r--r-- lib/ansible/modules/extras/notification/telegram.py | 103
-rw-r--r-- lib/ansible/modules/extras/notification/twilio.py | 177
-rw-r--r-- lib/ansible/modules/extras/notification/typetalk.py | 137
-rw-r--r-- lib/ansible/modules/extras/packaging/__init__.py | 0
-rw-r--r-- lib/ansible/modules/extras/packaging/dpkg_selections.py | 76
-rw-r--r-- lib/ansible/modules/extras/packaging/elasticsearch_plugin.py | 208
-rw-r--r-- lib/ansible/modules/extras/packaging/kibana_plugin.py | 237
-rw-r--r-- lib/ansible/modules/extras/packaging/language/__init__.py | 0
-rw-r--r-- lib/ansible/modules/extras/packaging/language/bower.py | 227
-rw-r--r-- lib/ansible/modules/extras/packaging/language/bundler.py | 211
-rw-r--r-- lib/ansible/modules/extras/packaging/language/composer.py | 233
-rw-r--r-- lib/ansible/modules/extras/packaging/language/cpanm.py | 220
-rw-r--r-- lib/ansible/modules/extras/packaging/language/maven_artifact.py | 390
-rw-r--r-- lib/ansible/modules/extras/packaging/language/npm.py | 271
-rw-r--r-- lib/ansible/modules/extras/packaging/language/pear.py | 227
-rw-r--r-- lib/ansible/modules/extras/packaging/os/__init__.py | 0
-rw-r--r-- lib/ansible/modules/extras/packaging/os/apk.py | 248
-rw-r--r-- lib/ansible/modules/extras/packaging/os/dnf.py | 355
-rwxr-xr-x lib/ansible/modules/extras/packaging/os/homebrew.py | 870
-rwxr-xr-x lib/ansible/modules/extras/packaging/os/homebrew_cask.py | 586
-rw-r--r-- lib/ansible/modules/extras/packaging/os/homebrew_tap.py | 250
-rw-r--r-- lib/ansible/modules/extras/packaging/os/layman.py | 261
-rw-r--r-- lib/ansible/modules/extras/packaging/os/macports.py | 217
-rw-r--r-- lib/ansible/modules/extras/packaging/os/openbsd_pkg.py | 522
-rw-r--r-- lib/ansible/modules/extras/packaging/os/opkg.py | 169
-rw-r--r-- lib/ansible/modules/extras/packaging/os/pacman.py | 350
-rw-r--r-- lib/ansible/modules/extras/packaging/os/pkg5.py | 168
-rw-r--r-- lib/ansible/modules/extras/packaging/os/pkg5_publisher.py | 201
-rwxr-xr-x lib/ansible/modules/extras/packaging/os/pkgin.py | 371
-rw-r--r-- lib/ansible/modules/extras/packaging/os/pkgng.py | 353
-rw-r--r-- lib/ansible/modules/extras/packaging/os/pkgutil.py | 223
-rw-r--r-- lib/ansible/modules/extras/packaging/os/portage.py | 457
-rw-r--r-- lib/ansible/modules/extras/packaging/os/portinstall.py | 206
-rw-r--r-- lib/ansible/modules/extras/packaging/os/slackpkg.py | 199
-rw-r--r-- lib/ansible/modules/extras/packaging/os/svr4pkg.py | 245
-rw-r--r-- lib/ansible/modules/extras/packaging/os/swdepot.py | 196
-rw-r--r-- lib/ansible/modules/extras/packaging/os/urpmi.py | 200
-rw-r--r-- lib/ansible/modules/extras/packaging/os/yum_repository.py | 754
-rw-r--r-- lib/ansible/modules/extras/packaging/os/zypper.py | 448
-rw-r--r-- lib/ansible/modules/extras/packaging/os/zypper_repository.py | 387
-rw-r--r-- lib/ansible/modules/extras/remote_management/__init__.py | 0
-rw-r--r-- lib/ansible/modules/extras/remote_management/ipmi/__init__.py | 0
-rw-r--r-- lib/ansible/modules/extras/remote_management/ipmi/ipmi_boot.py | 186
-rw-r--r-- lib/ansible/modules/extras/remote_management/ipmi/ipmi_power.py | 138
-rw-r--r-- lib/ansible/modules/extras/shippable.yml | 65
-rw-r--r-- lib/ansible/modules/extras/source_control/__init__.py | 0
-rw-r--r-- lib/ansible/modules/extras/source_control/bzr.py | 199
-rw-r--r-- lib/ansible/modules/extras/source_control/git_config.py | 219
-rw-r--r-- lib/ansible/modules/extras/source_control/github_hooks.py | 194
-rw-r--r-- lib/ansible/modules/extras/source_control/github_key.py | 243
-rw-r--r-- lib/ansible/modules/extras/source_control/github_release.py | 121
-rw-r--r-- lib/ansible/modules/extras/source_control/gitlab_group.py | 218
-rw-r--r-- lib/ansible/modules/extras/source_control/gitlab_project.py | 401
-rw-r--r-- lib/ansible/modules/extras/source_control/gitlab_user.py | 351
-rw-r--r-- lib/ansible/modules/extras/storage/__init__.py | 0
-rw-r--r-- lib/ansible/modules/extras/storage/netapp/README.md | 454
-rw-r--r-- lib/ansible/modules/extras/storage/netapp/__init__.py | 0
-rw-r--r-- lib/ansible/modules/extras/storage/netapp/netapp_e_amg.py | 328
-rw-r--r-- lib/ansible/modules/extras/storage/netapp/netapp_e_amg_role.py | 239
-rw-r--r-- lib/ansible/modules/extras/storage/netapp/netapp_e_amg_sync.py | 269
-rw-r--r-- lib/ansible/modules/extras/storage/netapp/netapp_e_auth.py | 269
-rw-r--r-- lib/ansible/modules/extras/storage/netapp/netapp_e_facts.py | 201
-rw-r--r-- lib/ansible/modules/extras/storage/netapp/netapp_e_flashcache.py | 420
-rw-r--r-- lib/ansible/modules/extras/storage/netapp/netapp_e_host.py | 425
-rw-r--r-- lib/ansible/modules/extras/storage/netapp/netapp_e_hostgroup.py | 413
-rw-r--r-- lib/ansible/modules/extras/storage/netapp/netapp_e_lun_mapping.py | 350
-rw-r--r-- lib/ansible/modules/extras/storage/netapp/netapp_e_snapshot_group.py | 382
-rw-r--r-- lib/ansible/modules/extras/storage/netapp/netapp_e_snapshot_images.py | 250
-rw-r--r-- lib/ansible/modules/extras/storage/netapp/netapp_e_snapshot_volume.py | 287
-rw-r--r-- lib/ansible/modules/extras/storage/netapp/netapp_e_storage_system.py | 306
-rw-r--r-- lib/ansible/modules/extras/storage/netapp/netapp_e_storagepool.py | 884
-rw-r--r-- lib/ansible/modules/extras/storage/netapp/netapp_e_volume.py | 618
-rw-r--r-- lib/ansible/modules/extras/storage/netapp/netapp_e_volume_copy.py | 439
-rw-r--r-- lib/ansible/modules/extras/system/__init__.py | 0
-rw-r--r-- lib/ansible/modules/extras/system/alternatives.py | 158
-rw-r--r-- lib/ansible/modules/extras/system/at.py | 200
-rw-r--r-- lib/ansible/modules/extras/system/capabilities.py | 186
-rw-r--r-- lib/ansible/modules/extras/system/cronvar.py | 433
-rw-r--r-- lib/ansible/modules/extras/system/crypttab.py | 365
-rw-r--r-- lib/ansible/modules/extras/system/debconf.py | 178
-rw-r--r-- lib/ansible/modules/extras/system/facter.py | 61
-rw-r--r-- lib/ansible/modules/extras/system/filesystem.py | 256
-rw-r--r-- lib/ansible/modules/extras/system/firewalld.py | 641
-rw-r--r-- lib/ansible/modules/extras/system/getent.py | 145
-rw-r--r-- lib/ansible/modules/extras/system/gluster_volume.py | 492
-rw-r--r-- lib/ansible/modules/extras/system/iptables.py | 536
-rw-r--r-- lib/ansible/modules/extras/system/kernel_blacklist.py | 141
-rw-r--r-- lib/ansible/modules/extras/system/known_hosts.py | 283
-rw-r--r-- lib/ansible/modules/extras/system/locale_gen.py | 239
-rw-r--r-- lib/ansible/modules/extras/system/lvg.py | 255
-rw-r--r-- lib/ansible/modules/extras/system/lvol.py | 434
-rw-r--r-- lib/ansible/modules/extras/system/make.py | 127
-rw-r--r-- lib/ansible/modules/extras/system/modprobe.py | 124
-rw-r--r-- lib/ansible/modules/extras/system/ohai.py | 56
-rw-r--r-- lib/ansible/modules/extras/system/open_iscsi.py | 375
-rw-r--r-- lib/ansible/modules/extras/system/osx_defaults.py | 386
-rw-r--r-- lib/ansible/modules/extras/system/pam_limits.py | 270
-rw-r--r-- lib/ansible/modules/extras/system/puppet.py | 281
-rw-r--r-- lib/ansible/modules/extras/system/sefcontext.py | 246
-rw-r--r-- lib/ansible/modules/extras/system/selinux_permissive.py | 133
-rw-r--r-- lib/ansible/modules/extras/system/seport.py | 305
-rw-r--r-- lib/ansible/modules/extras/system/solaris_zone.py | 456
-rwxr-xr-x lib/ansible/modules/extras/system/svc.py | 302
-rw-r--r-- lib/ansible/modules/extras/system/timezone.py | 462
-rw-r--r-- lib/ansible/modules/extras/system/ufw.py | 289
-rw-r--r-- lib/ansible/modules/extras/system/zfs.py | 259
-rw-r--r-- lib/ansible/modules/extras/test/integrations/group_vars/all.yml | 1
-rw-r--r-- lib/ansible/modules/extras/test/integrations/roles/ec2_vpc_nat_gateway/tasks/main.yml | 76
-rw-r--r-- lib/ansible/modules/extras/test/integrations/site.yml | 3
-rw-r--r-- lib/ansible/modules/extras/test/unit/cloud/amazon/test_ec2_vpc_nat_gateway.py | 486
-rw-r--r-- lib/ansible/modules/extras/test/unit/cloud/amazon/test_kinesis_stream.py | 285
-rwxr-xr-x lib/ansible/modules/extras/test/utils/shippable/ci.sh | 7
-rw-r--r-- lib/ansible/modules/extras/test/utils/shippable/docs-requirements.txt | 2
-rwxr-xr-x lib/ansible/modules/extras/test/utils/shippable/docs.sh | 62
-rwxr-xr-x lib/ansible/modules/extras/test/utils/shippable/integration.sh | 55
-rw-r--r-- lib/ansible/modules/extras/test/utils/shippable/sanity-skip-python24.txt | 14
-rw-r--r-- lib/ansible/modules/extras/test/utils/shippable/sanity-skip-python3.txt | 85
-rw-r--r-- lib/ansible/modules/extras/test/utils/shippable/sanity-test-python24.txt | 0
-rwxr-xr-x lib/ansible/modules/extras/test/utils/shippable/sanity.sh | 30
-rw-r--r-- lib/ansible/modules/extras/univention/__init__.py | 0
-rw-r--r-- lib/ansible/modules/extras/univention/udm_dns_record.py | 182
-rw-r--r-- lib/ansible/modules/extras/univention/udm_dns_zone.py | 240
-rw-r--r-- lib/ansible/modules/extras/univention/udm_group.py | 176
-rw-r--r-- lib/ansible/modules/extras/univention/udm_share.py | 617
-rw-r--r-- lib/ansible/modules/extras/univention/udm_user.py | 591
-rw-r--r-- lib/ansible/modules/extras/web_infrastructure/__init__.py | 0
-rw-r--r-- lib/ansible/modules/extras/web_infrastructure/apache2_mod_proxy.py | 429
-rw-r--r-- lib/ansible/modules/extras/web_infrastructure/deploy_helper.py | 478
-rw-r--r-- lib/ansible/modules/extras/web_infrastructure/ejabberd_user.py | 219
-rw-r--r-- lib/ansible/modules/extras/web_infrastructure/jboss.py | 140
-rw-r--r-- lib/ansible/modules/extras/web_infrastructure/jenkins_job.py | 358
-rw-r--r-- lib/ansible/modules/extras/web_infrastructure/jenkins_plugin.py | 830
-rwxr-xr-x lib/ansible/modules/extras/web_infrastructure/jira.py | 359
-rw-r--r-- lib/ansible/modules/extras/web_infrastructure/letsencrypt.py | 795
-rw-r--r-- lib/ansible/modules/extras/web_infrastructure/taiga_issue.py | 313
-rw-r--r-- lib/ansible/modules/extras/windows/__init__.py | 0
-rw-r--r-- lib/ansible/modules/extras/windows/win_acl.ps1 | 183
-rw-r--r-- lib/ansible/modules/extras/windows/win_acl.py | 141
-rw-r--r-- lib/ansible/modules/extras/windows/win_acl_inheritance.ps1 | 86
-rw-r--r-- lib/ansible/modules/extras/windows/win_acl_inheritance.py | 79
-rw-r--r-- lib/ansible/modules/extras/windows/win_chocolatey.ps1 | 371
-rw-r--r-- lib/ansible/modules/extras/windows/win_chocolatey.py | 116
-rw-r--r-- lib/ansible/modules/extras/windows/win_dotnet_ngen.ps1 | 69
-rw-r--r-- lib/ansible/modules/extras/windows/win_dotnet_ngen.py | 44
-rw-r--r-- lib/ansible/modules/extras/windows/win_environment.ps1 | 53
-rw-r--r-- lib/ansible/modules/extras/windows/win_environment.py | 86
-rw-r--r-- lib/ansible/modules/extras/windows/win_file_version.ps1 | 78
-rw-r--r-- lib/ansible/modules/extras/windows/win_file_version.py | 85
-rw-r--r-- lib/ansible/modules/extras/windows/win_firewall_rule.ps1 | 362
-rw-r--r-- lib/ansible/modules/extras/windows/win_firewall_rule.py | 123
-rw-r--r-- lib/ansible/modules/extras/windows/win_iis_virtualdirectory.ps1 | 132
-rw-r--r-- lib/ansible/modules/extras/windows/win_iis_virtualdirectory.py | 67
-rw-r--r-- lib/ansible/modules/extras/windows/win_iis_webapplication.ps1 | 132
-rw-r--r-- lib/ansible/modules/extras/windows/win_iis_webapplication.py | 68
-rw-r--r-- lib/ansible/modules/extras/windows/win_iis_webapppool.ps1 | 123
-rw-r--r-- lib/ansible/modules/extras/windows/win_iis_webapppool.py | 112
-rw-r--r-- lib/ansible/modules/extras/windows/win_iis_webbinding.ps1 | 131
-rw-r--r-- lib/ansible/modules/extras/windows/win_iis_webbinding.py | 137
-rw-r--r-- lib/ansible/modules/extras/windows/win_iis_website.ps1 | 196
-rw-r--r-- lib/ansible/modules/extras/windows/win_iis_website.py | 139
-rw-r--r-- lib/ansible/modules/extras/windows/win_nssm.ps1 | 630
-rw-r--r-- lib/ansible/modules/extras/windows/win_nssm.py | 174
-rw-r--r-- lib/ansible/modules/extras/windows/win_owner.ps1 | 136
-rw-r--r-- lib/ansible/modules/extras/windows/win_owner.py | 69
-rw-r--r-- lib/ansible/modules/extras/windows/win_package.ps1 | 1326
-rw-r--r-- lib/ansible/modules/extras/windows/win_package.py | 95
-rw-r--r-- lib/ansible/modules/extras/windows/win_regedit.ps1 | 237
-rw-r--r-- lib/ansible/modules/extras/windows/win_regedit.py | 147
-rw-r--r-- lib/ansible/modules/extras/windows/win_regmerge.ps1 | 100
-rw-r--r-- lib/ansible/modules/extras/windows/win_regmerge.py | 87
-rw-r--r-- lib/ansible/modules/extras/windows/win_robocopy.ps1 | 147
-rw-r--r-- lib/ansible/modules/extras/windows/win_robocopy.py | 143
-rw-r--r-- lib/ansible/modules/extras/windows/win_scheduled_task.ps1 | 164
-rw-r--r-- lib/ansible/modules/extras/windows/win_scheduled_task.py | 89
-rw-r--r-- lib/ansible/modules/extras/windows/win_share.ps1 | 251
-rw-r--r-- lib/ansible/modules/extras/windows/win_share.py | 117
-rw-r--r-- lib/ansible/modules/extras/windows/win_timezone.ps1 | 71
-rw-r--r-- lib/ansible/modules/extras/windows/win_timezone.py | 49
-rw-r--r-- lib/ansible/modules/extras/windows/win_unzip.ps1 | 142
-rw-r--r-- lib/ansible/modules/extras/windows/win_unzip.py | 106
-rw-r--r-- lib/ansible/modules/extras/windows/win_updates.ps1 | 424
-rw-r--r-- lib/ansible/modules/extras/windows/win_updates.py | 137
-rw-r--r-- lib/ansible/modules/extras/windows/win_uri.ps1 | 81
-rw-r--r-- lib/ansible/modules/extras/windows/win_uri.py | 148
-rw-r--r-- lib/ansible/modules/extras/windows/win_webpicmd.ps1 | 132
-rw-r--r-- lib/ansible/modules/extras/windows/win_webpicmd.py | 47
543 files changed, 146750 insertions, 0 deletions
diff --git a/lib/ansible/modules/extras/.github/ISSUE_TEMPLATE.md b/lib/ansible/modules/extras/.github/ISSUE_TEMPLATE.md
new file mode 100644
index 0000000000..7cc5b86027
--- /dev/null
+++ b/lib/ansible/modules/extras/.github/ISSUE_TEMPLATE.md
@@ -0,0 +1,55 @@
+<!--- Verify first that your issue/request is not already reported in GitHub -->
+
+##### ISSUE TYPE
+<!--- Pick one below and delete the rest: -->
+ - Bug Report
+ - Feature Idea
+ - Documentation Report
+
+##### COMPONENT NAME
+<!--- Name of the plugin/module/task -->
+
+##### ANSIBLE VERSION
+<!--- Paste verbatim output from “ansible --version” between quotes below -->
+```
+
+```
+
+##### CONFIGURATION
+<!---
+Mention any settings you have changed/added/removed in ansible.cfg
+(or using the ANSIBLE_* environment variables).
+-->
+
+##### OS / ENVIRONMENT
+<!---
+Mention the OS you are running Ansible from, and the OS you are
+managing, or say “N/A” for anything that is not platform-specific.
+-->
+
+##### SUMMARY
+<!--- Explain the problem briefly -->
+
+##### STEPS TO REPRODUCE
+<!---
+For bugs, show exactly how to reproduce the problem.
+For new features, show how the feature would be used.
+-->
+
+<!--- Paste example playbooks or commands between quotes below -->
+```
+
+```
+
+<!--- You can also paste gist.github.com links for larger files -->
+
+##### EXPECTED RESULTS
+<!--- What did you expect to happen when running the steps above? -->
+
+##### ACTUAL RESULTS
+<!--- What actually happened? If possible run with high verbosity (-vvvv) -->
+
+<!--- Paste verbatim command output between quotes below -->
+```
+
+```
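
The two fenced sections in this template expect verbatim command output. A hedged sketch of the commands a reporter would typically run to fill them in (the playbook name below is a placeholder, not something from this commit):

```
# Output for the ANSIBLE VERSION section
ansible --version

# Re-run the failing play at maximum verbosity for the ACTUAL RESULTS section
ansible-playbook -vvvv site.yml
```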
diff --git a/lib/ansible/modules/extras/.github/PULL_REQUEST_TEMPLATE.md b/lib/ansible/modules/extras/.github/PULL_REQUEST_TEMPLATE.md
new file mode 100644
index 0000000000..5cfd027103
--- /dev/null
+++ b/lib/ansible/modules/extras/.github/PULL_REQUEST_TEMPLATE.md
@@ -0,0 +1,28 @@
+##### ISSUE TYPE
+<!--- Pick one below and delete the rest: -->
+ - Feature Pull Request
+ - New Module Pull Request
+ - Bugfix Pull Request
+ - Docs Pull Request
+
+##### COMPONENT NAME
+<!--- Name of the plugin/module/task -->
+
+##### ANSIBLE VERSION
+<!--- Paste verbatim output from “ansible --version” between quotes below -->
+```
+
+```
+
+##### SUMMARY
+<!--- Describe the change, including rationale and design decisions -->
+
+<!---
+If you are fixing an existing issue, please include "Fixes #nnnn" in your commit
+message and your description; but you should still explain what the change does.
+-->
+
+<!--- Paste verbatim command output below, e.g. before and after your change -->
+```
+
+```
diff --git a/lib/ansible/modules/extras/.gitignore b/lib/ansible/modules/extras/.gitignore
new file mode 100644
index 0000000000..5fe1d994e3
--- /dev/null
+++ b/lib/ansible/modules/extras/.gitignore
@@ -0,0 +1,53 @@
+# build products...
+*.py[co]
+build
+AUTHORS.TXT
+# Emacs backup files...
+*~
+.\#*
+# RPM stuff...
+MANIFEST
+dist
+rpm-build
+# Eclipse/PyDev stuff...
+.project
+.pydevproject
+# PyCharm stuff...
+.idea
+#IntelliJ IDEA stuff..
+*.iml
+# Mac OS X stuff...
+.DS_Store
+# manpage build stuff...
+docs/man/man3/*
+# Sublime stuff
+*.sublime-project
+*.sublime-workspace
+# docsite stuff...
+docsite/rst/modules_by_category.rst
+docsite/rst/list_of_*.rst
+docsite/rst/*_module.rst
+docsite/*.html
+docsite/_static/*.gif
+docsite/_static/*.png
+docsite/_static/websupport.js
+docsite/searchindex.js
+docsite/htmlout
+# deb building stuff...
+debian/
+deb-build
+# Vim swap files
+*.swp
+*.swo
+credentials.yml
+# test output
+.coverage
+results.xml
+coverage.xml
+/test/units/cover-html
+# Development
+/test/develop
+venv
+Vagrantfile
+.vagrant
+ansible.egg-info/
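
Since this .gitignore mixes broad patterns (build, *.py[co]) with path-specific ones, git's check-ignore command can confirm which rule matches a given file. A small sketch; the paths below are hypothetical examples, not files from this commit:

```
# -v prints the ignore file, line number, and pattern that matched each path
git check-ignore -v build/module.pyc docsite/htmlout/index.html .DS_Store
```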
diff --git a/lib/ansible/modules/extras/CONTRIBUTING.md b/lib/ansible/modules/extras/CONTRIBUTING.md
new file mode 100644
index 0000000000..60e850d6ed
--- /dev/null
+++ b/lib/ansible/modules/extras/CONTRIBUTING.md
@@ -0,0 +1,37 @@
+Contributing to ansible-modules-extras
+======================================
+
+The Ansible Extras Modules are written and maintained by the Ansible community, according to the following contribution guidelines.
+
+If you'd like to contribute code
+================================
+
+Please see [this web page](http://docs.ansible.com/community.html) for information about the contribution process. Important license agreement information is also included on that page.
+
+If you'd like to contribute code to an existing module
+======================================================
+Each module in Extras is maintained by the owner of that module; each module's owner is indicated in the documentation section of the module itself. Any pull request for a module that is given a "shipit" by the owner in the comments will be merged by the Ansible team.
+
+If you'd like to contribute a new module
+========================================
+Ansible welcomes new modules. Please be certain that you've read the [module maintainer guide and standards](./GUIDELINES.md) thoroughly before submitting your module.
+
+The Ansible community reviews new modules as often as possible, but please be patient; there are a lot of new module submissions in the pipeline, and it takes time to evaluate a new module for its adherence to module standards.
+
+Once your module is accepted, you become responsible for maintenance of that module, which means responding to pull requests and issues in a reasonably timely manner.
+
+If you'd like to ask a question
+===============================
+
+Please see [this web page](http://docs.ansible.com/community.html) for community information, which includes pointers on how to ask questions on the [mailing lists](http://docs.ansible.com/community.html#mailing-list-information) and IRC.
+
+The GitHub issue tracker is not the best place for questions, for various reasons; IRC and the mailing list are much more helpful venues for them, and the page above has pointers to both.
+
+If you'd like to file a bug
+===========================
+
+Read the community page above, but in particular, make sure you copy [this issue template](https://github.com/ansible/ansible-modules-extras/blob/devel/.github/ISSUE_TEMPLATE.md) into your ticket description. We have a friendly neighborhood bot that will remind you if you forget :) This template helps us organize tickets faster and saves us from asking the same follow-up questions repeatedly, so it's very helpful to us and we appreciate your help with it.
+
+Also please make sure you are testing on the latest released version of Ansible or the development branch.
+
+Thanks!
diff --git a/lib/ansible/modules/extras/COPYING b/lib/ansible/modules/extras/COPYING
new file mode 100644
index 0000000000..10926e87f1
--- /dev/null
+++ b/lib/ansible/modules/extras/COPYING
@@ -0,0 +1,675 @@
+ GNU GENERAL PUBLIC LICENSE
+ Version 3, 29 June 2007
+
+ Copyright (C) 2007 Free Software Foundation, Inc. <http://fsf.org/>
+ Everyone is permitted to copy and distribute verbatim copies
+ of this license document, but changing it is not allowed.
+
+ Preamble
+
+ The GNU General Public License is a free, copyleft license for
+software and other kinds of works.
+
+ The licenses for most software and other practical works are designed
+to take away your freedom to share and change the works. By contrast,
+the GNU General Public License is intended to guarantee your freedom to
+share and change all versions of a program--to make sure it remains free
+software for all its users. We, the Free Software Foundation, use the
+GNU General Public License for most of our software; it applies also to
+any other work released this way by its authors. You can apply it to
+your programs, too.
+
+ When we speak of free software, we are referring to freedom, not
+price. Our General Public Licenses are designed to make sure that you
+have the freedom to distribute copies of free software (and charge for
+them if you wish), that you receive source code or can get it if you
+want it, that you can change the software or use pieces of it in new
+free programs, and that you know you can do these things.
+
+ To protect your rights, we need to prevent others from denying you
+these rights or asking you to surrender the rights. Therefore, you have
+certain responsibilities if you distribute copies of the software, or if
+you modify it: responsibilities to respect the freedom of others.
+
+ For example, if you distribute copies of such a program, whether
+gratis or for a fee, you must pass on to the recipients the same
+freedoms that you received. You must make sure that they, too, receive
+or can get the source code. And you must show them these terms so they
+know their rights.
+
+ Developers that use the GNU GPL protect your rights with two steps:
+(1) assert copyright on the software, and (2) offer you this License
+giving you legal permission to copy, distribute and/or modify it.
+
+ For the developers' and authors' protection, the GPL clearly explains
+that there is no warranty for this free software. For both users' and
+authors' sake, the GPL requires that modified versions be marked as
+changed, so that their problems will not be attributed erroneously to
+authors of previous versions.
+
+ Some devices are designed to deny users access to install or run
+modified versions of the software inside them, although the manufacturer
+can do so. This is fundamentally incompatible with the aim of
+protecting users' freedom to change the software. The systematic
+pattern of such abuse occurs in the area of products for individuals to
+use, which is precisely where it is most unacceptable. Therefore, we
+have designed this version of the GPL to prohibit the practice for those
+products. If such problems arise substantially in other domains, we
+stand ready to extend this provision to those domains in future versions
+of the GPL, as needed to protect the freedom of users.
+
+ Finally, every program is threatened constantly by software patents.
+States should not allow patents to restrict development and use of
+software on general-purpose computers, but in those that do, we wish to
+avoid the special danger that patents applied to a free program could
+make it effectively proprietary. To prevent this, the GPL assures that
+patents cannot be used to render the program non-free.
+
+ The precise terms and conditions for copying, distribution and
+modification follow.
+
+ TERMS AND CONDITIONS
+
+ 0. Definitions.
+
+ "This License" refers to version 3 of the GNU General Public License.
+
+ "Copyright" also means copyright-like laws that apply to other kinds of
+works, such as semiconductor masks.
+
+ "The Program" refers to any copyrightable work licensed under this
+License. Each licensee is addressed as "you". "Licensees" and
+"recipients" may be individuals or organizations.
+
+ To "modify" a work means to copy from or adapt all or part of the work
+in a fashion requiring copyright permission, other than the making of an
+exact copy. The resulting work is called a "modified version" of the
+earlier work or a work "based on" the earlier work.
+
+ A "covered work" means either the unmodified Program or a work based
+on the Program.
+
+ To "propagate" a work means to do anything with it that, without
+permission, would make you directly or secondarily liable for
+infringement under applicable copyright law, except executing it on a
+computer or modifying a private copy. Propagation includes copying,
+distribution (with or without modification), making available to the
+public, and in some countries other activities as well.
+
+ To "convey" a work means any kind of propagation that enables other
+parties to make or receive copies. Mere interaction with a user through
+a computer network, with no transfer of a copy, is not conveying.
+
+ An interactive user interface displays "Appropriate Legal Notices"
+to the extent that it includes a convenient and prominently visible
+feature that (1) displays an appropriate copyright notice, and (2)
+tells the user that there is no warranty for the work (except to the
+extent that warranties are provided), that licensees may convey the
+work under this License, and how to view a copy of this License. If
+the interface presents a list of user commands or options, such as a
+menu, a prominent item in the list meets this criterion.
+
+ 1. Source Code.
+
+ The "source code" for a work means the preferred form of the work
+for making modifications to it. "Object code" means any non-source
+form of a work.
+
+ A "Standard Interface" means an interface that either is an official
+standard defined by a recognized standards body, or, in the case of
+interfaces specified for a particular programming language, one that
+is widely used among developers working in that language.
+
+ The "System Libraries" of an executable work include anything, other
+than the work as a whole, that (a) is included in the normal form of
+packaging a Major Component, but which is not part of that Major
+Component, and (b) serves only to enable use of the work with that
+Major Component, or to implement a Standard Interface for which an
+implementation is available to the public in source code form. A
+"Major Component", in this context, means a major essential component
+(kernel, window system, and so on) of the specific operating system
+(if any) on which the executable work runs, or a compiler used to
+produce the work, or an object code interpreter used to run it.
+
+ The "Corresponding Source" for a work in object code form means all
+the source code needed to generate, install, and (for an executable
+work) run the object code and to modify the work, including scripts to
+control those activities. However, it does not include the work's
+System Libraries, or general-purpose tools or generally available free
+programs which are used unmodified in performing those activities but
+which are not part of the work. For example, Corresponding Source
+includes interface definition files associated with source files for
+the work, and the source code for shared libraries and dynamically
+linked subprograms that the work is specifically designed to require,
+such as by intimate data communication or control flow between those
+subprograms and other parts of the work.
+
+ The Corresponding Source need not include anything that users
+can regenerate automatically from other parts of the Corresponding
+Source.
+
+ The Corresponding Source for a work in source code form is that
+same work.
+
+ 2. Basic Permissions.
+
+ All rights granted under this License are granted for the term of
+copyright on the Program, and are irrevocable provided the stated
+conditions are met. This License explicitly affirms your unlimited
+permission to run the unmodified Program. The output from running a
+covered work is covered by this License only if the output, given its
+content, constitutes a covered work. This License acknowledges your
+rights of fair use or other equivalent, as provided by copyright law.
+
+ You may make, run and propagate covered works that you do not
+convey, without conditions so long as your license otherwise remains
+in force. You may convey covered works to others for the sole purpose
+of having them make modifications exclusively for you, or provide you
+with facilities for running those works, provided that you comply with
+the terms of this License in conveying all material for which you do
+not control copyright. Those thus making or running the covered works
+for you must do so exclusively on your behalf, under your direction
+and control, on terms that prohibit them from making any copies of
+your copyrighted material outside their relationship with you.
+
+ Conveying under any other circumstances is permitted solely under
+the conditions stated below. Sublicensing is not allowed; section 10
+makes it unnecessary.
+
+ 3. Protecting Users' Legal Rights From Anti-Circumvention Law.
+
+ No covered work shall be deemed part of an effective technological
+measure under any applicable law fulfilling obligations under article
+11 of the WIPO copyright treaty adopted on 20 December 1996, or
+similar laws prohibiting or restricting circumvention of such
+measures.
+
+ When you convey a covered work, you waive any legal power to forbid
+circumvention of technological measures to the extent such circumvention
+is effected by exercising rights under this License with respect to
+the covered work, and you disclaim any intention to limit operation or
+modification of the work as a means of enforcing, against the work's
+users, your or third parties' legal rights to forbid circumvention of
+technological measures.
+
+ 4. Conveying Verbatim Copies.
+
+ You may convey verbatim copies of the Program's source code as you
+receive it, in any medium, provided that you conspicuously and
+appropriately publish on each copy an appropriate copyright notice;
+keep intact all notices stating that this License and any
+non-permissive terms added in accord with section 7 apply to the code;
+keep intact all notices of the absence of any warranty; and give all
+recipients a copy of this License along with the Program.
+
+ You may charge any price or no price for each copy that you convey,
+and you may offer support or warranty protection for a fee.
+
+ 5. Conveying Modified Source Versions.
+
+ You may convey a work based on the Program, or the modifications to
+produce it from the Program, in the form of source code under the
+terms of section 4, provided that you also meet all of these conditions:
+
+ a) The work must carry prominent notices stating that you modified
+ it, and giving a relevant date.
+
+ b) The work must carry prominent notices stating that it is
+ released under this License and any conditions added under section
+ 7. This requirement modifies the requirement in section 4 to
+ "keep intact all notices".
+
+ c) You must license the entire work, as a whole, under this
+ License to anyone who comes into possession of a copy. This
+ License will therefore apply, along with any applicable section 7
+ additional terms, to the whole of the work, and all its parts,
+ regardless of how they are packaged. This License gives no
+ permission to license the work in any other way, but it does not
+ invalidate such permission if you have separately received it.
+
+ d) If the work has interactive user interfaces, each must display
+ Appropriate Legal Notices; however, if the Program has interactive
+ interfaces that do not display Appropriate Legal Notices, your
+ work need not make them do so.
+
+ A compilation of a covered work with other separate and independent
+works, which are not by their nature extensions of the covered work,
+and which are not combined with it such as to form a larger program,
+in or on a volume of a storage or distribution medium, is called an
+"aggregate" if the compilation and its resulting copyright are not
+used to limit the access or legal rights of the compilation's users
+beyond what the individual works permit. Inclusion of a covered work
+in an aggregate does not cause this License to apply to the other
+parts of the aggregate.
+
+ 6. Conveying Non-Source Forms.
+
+ You may convey a covered work in object code form under the terms
+of sections 4 and 5, provided that you also convey the
+machine-readable Corresponding Source under the terms of this License,
+in one of these ways:
+
+ a) Convey the object code in, or embodied in, a physical product
+ (including a physical distribution medium), accompanied by the
+ Corresponding Source fixed on a durable physical medium
+ customarily used for software interchange.
+
+ b) Convey the object code in, or embodied in, a physical product
+ (including a physical distribution medium), accompanied by a
+ written offer, valid for at least three years and valid for as
+ long as you offer spare parts or customer support for that product
+ model, to give anyone who possesses the object code either (1) a
+ copy of the Corresponding Source for all the software in the
+ product that is covered by this License, on a durable physical
+ medium customarily used for software interchange, for a price no
+ more than your reasonable cost of physically performing this
+ conveying of source, or (2) access to copy the
+ Corresponding Source from a network server at no charge.
+
+ c) Convey individual copies of the object code with a copy of the
+ written offer to provide the Corresponding Source. This
+ alternative is allowed only occasionally and noncommercially, and
+ only if you received the object code with such an offer, in accord
+ with subsection 6b.
+
+ d) Convey the object code by offering access from a designated
+ place (gratis or for a charge), and offer equivalent access to the
+ Corresponding Source in the same way through the same place at no
+ further charge. You need not require recipients to copy the
+ Corresponding Source along with the object code. If the place to
+ copy the object code is a network server, the Corresponding Source
+ may be on a different server (operated by you or a third party)
+ that supports equivalent copying facilities, provided you maintain
+ clear directions next to the object code saying where to find the
+ Corresponding Source. Regardless of what server hosts the
+ Corresponding Source, you remain obligated to ensure that it is
+ available for as long as needed to satisfy these requirements.
+
+ e) Convey the object code using peer-to-peer transmission, provided
+ you inform other peers where the object code and Corresponding
+ Source of the work are being offered to the general public at no
+ charge under subsection 6d.
+
+ A separable portion of the object code, whose source code is excluded
+from the Corresponding Source as a System Library, need not be
+included in conveying the object code work.
+
+ A "User Product" is either (1) a "consumer product", which means any
+tangible personal property which is normally used for personal, family,
+or household purposes, or (2) anything designed or sold for incorporation
+into a dwelling. In determining whether a product is a consumer product,
+doubtful cases shall be resolved in favor of coverage. For a particular
+product received by a particular user, "normally used" refers to a
+typical or common use of that class of product, regardless of the status
+of the particular user or of the way in which the particular user
+actually uses, or expects or is expected to use, the product. A product
+is a consumer product regardless of whether the product has substantial
+commercial, industrial or non-consumer uses, unless such uses represent
+the only significant mode of use of the product.
+
+ "Installation Information" for a User Product means any methods,
+procedures, authorization keys, or other information required to install
+and execute modified versions of a covered work in that User Product from
+a modified version of its Corresponding Source. The information must
+suffice to ensure that the continued functioning of the modified object
+code is in no case prevented or interfered with solely because
+modification has been made.
+
+ If you convey an object code work under this section in, or with, or
+specifically for use in, a User Product, and the conveying occurs as
+part of a transaction in which the right of possession and use of the
+User Product is transferred to the recipient in perpetuity or for a
+fixed term (regardless of how the transaction is characterized), the
+Corresponding Source conveyed under this section must be accompanied
+by the Installation Information. But this requirement does not apply
+if neither you nor any third party retains the ability to install
+modified object code on the User Product (for example, the work has
+been installed in ROM).
+
+ The requirement to provide Installation Information does not include a
+requirement to continue to provide support service, warranty, or updates
+for a work that has been modified or installed by the recipient, or for
+the User Product in which it has been modified or installed. Access to a
+network may be denied when the modification itself materially and
+adversely affects the operation of the network or violates the rules and
+protocols for communication across the network.
+
+ Corresponding Source conveyed, and Installation Information provided,
+in accord with this section must be in a format that is publicly
+documented (and with an implementation available to the public in
+source code form), and must require no special password or key for
+unpacking, reading or copying.
+
+ 7. Additional Terms.
+
+ "Additional permissions" are terms that supplement the terms of this
+License by making exceptions from one or more of its conditions.
+Additional permissions that are applicable to the entire Program shall
+be treated as though they were included in this License, to the extent
+that they are valid under applicable law. If additional permissions
+apply only to part of the Program, that part may be used separately
+under those permissions, but the entire Program remains governed by
+this License without regard to the additional permissions.
+
+ When you convey a copy of a covered work, you may at your option
+remove any additional permissions from that copy, or from any part of
+it. (Additional permissions may be written to require their own
+removal in certain cases when you modify the work.) You may place
+additional permissions on material, added by you to a covered work,
+for which you have or can give appropriate copyright permission.
+
+ Notwithstanding any other provision of this License, for material you
+add to a covered work, you may (if authorized by the copyright holders of
+that material) supplement the terms of this License with terms:
+
+ a) Disclaiming warranty or limiting liability differently from the
+ terms of sections 15 and 16 of this License; or
+
+ b) Requiring preservation of specified reasonable legal notices or
+ author attributions in that material or in the Appropriate Legal
+ Notices displayed by works containing it; or
+
+ c) Prohibiting misrepresentation of the origin of that material, or
+ requiring that modified versions of such material be marked in
+ reasonable ways as different from the original version; or
+
+ d) Limiting the use for publicity purposes of names of licensors or
+ authors of the material; or
+
+ e) Declining to grant rights under trademark law for use of some
+ trade names, trademarks, or service marks; or
+
+ f) Requiring indemnification of licensors and authors of that
+ material by anyone who conveys the material (or modified versions of
+ it) with contractual assumptions of liability to the recipient, for
+ any liability that these contractual assumptions directly impose on
+ those licensors and authors.
+
+ All other non-permissive additional terms are considered "further
+restrictions" within the meaning of section 10. If the Program as you
+received it, or any part of it, contains a notice stating that it is
+governed by this License along with a term that is a further
+restriction, you may remove that term. If a license document contains
+a further restriction but permits relicensing or conveying under this
+License, you may add to a covered work material governed by the terms
+of that license document, provided that the further restriction does
+not survive such relicensing or conveying.
+
+ If you add terms to a covered work in accord with this section, you
+must place, in the relevant source files, a statement of the
+additional terms that apply to those files, or a notice indicating
+where to find the applicable terms.
+
+ Additional terms, permissive or non-permissive, may be stated in the
+form of a separately written license, or stated as exceptions;
+the above requirements apply either way.
+
+ 8. Termination.
+
+ You may not propagate or modify a covered work except as expressly
+provided under this License. Any attempt otherwise to propagate or
+modify it is void, and will automatically terminate your rights under
+this License (including any patent licenses granted under the third
+paragraph of section 11).
+
+ However, if you cease all violation of this License, then your
+license from a particular copyright holder is reinstated (a)
+provisionally, unless and until the copyright holder explicitly and
+finally terminates your license, and (b) permanently, if the copyright
+holder fails to notify you of the violation by some reasonable means
+prior to 60 days after the cessation.
+
+ Moreover, your license from a particular copyright holder is
+reinstated permanently if the copyright holder notifies you of the
+violation by some reasonable means, this is the first time you have
+received notice of violation of this License (for any work) from that
+copyright holder, and you cure the violation prior to 30 days after
+your receipt of the notice.
+
+ Termination of your rights under this section does not terminate the
+licenses of parties who have received copies or rights from you under
+this License. If your rights have been terminated and not permanently
+reinstated, you do not qualify to receive new licenses for the same
+material under section 10.
+
+ 9. Acceptance Not Required for Having Copies.
+
+ You are not required to accept this License in order to receive or
+run a copy of the Program. Ancillary propagation of a covered work
+occurring solely as a consequence of using peer-to-peer transmission
+to receive a copy likewise does not require acceptance. However,
+nothing other than this License grants you permission to propagate or
+modify any covered work. These actions infringe copyright if you do
+not accept this License. Therefore, by modifying or propagating a
+covered work, you indicate your acceptance of this License to do so.
+
+ 10. Automatic Licensing of Downstream Recipients.
+
+ Each time you convey a covered work, the recipient automatically
+receives a license from the original licensors, to run, modify and
+propagate that work, subject to this License. You are not responsible
+for enforcing compliance by third parties with this License.
+
+ An "entity transaction" is a transaction transferring control of an
+organization, or substantially all assets of one, or subdividing an
+organization, or merging organizations. If propagation of a covered
+work results from an entity transaction, each party to that
+transaction who receives a copy of the work also receives whatever
+licenses to the work the party's predecessor in interest had or could
+give under the previous paragraph, plus a right to possession of the
+Corresponding Source of the work from the predecessor in interest, if
+the predecessor has it or can get it with reasonable efforts.
+
+ You may not impose any further restrictions on the exercise of the
+rights granted or affirmed under this License. For example, you may
+not impose a license fee, royalty, or other charge for exercise of
+rights granted under this License, and you may not initiate litigation
+(including a cross-claim or counterclaim in a lawsuit) alleging that
+any patent claim is infringed by making, using, selling, offering for
+sale, or importing the Program or any portion of it.
+
+ 11. Patents.
+
+ A "contributor" is a copyright holder who authorizes use under this
+License of the Program or a work on which the Program is based. The
+work thus licensed is called the contributor's "contributor version".
+
+ A contributor's "essential patent claims" are all patent claims
+owned or controlled by the contributor, whether already acquired or
+hereafter acquired, that would be infringed by some manner, permitted
+by this License, of making, using, or selling its contributor version,
+but do not include claims that would be infringed only as a
+consequence of further modification of the contributor version. For
+purposes of this definition, "control" includes the right to grant
+patent sublicenses in a manner consistent with the requirements of
+this License.
+
+ Each contributor grants you a non-exclusive, worldwide, royalty-free
+patent license under the contributor's essential patent claims, to
+make, use, sell, offer for sale, import and otherwise run, modify and
+propagate the contents of its contributor version.
+
+ In the following three paragraphs, a "patent license" is any express
+agreement or commitment, however denominated, not to enforce a patent
+(such as an express permission to practice a patent or covenant not to
+sue for patent infringement). To "grant" such a patent license to a
+party means to make such an agreement or commitment not to enforce a
+patent against the party.
+
+ If you convey a covered work, knowingly relying on a patent license,
+and the Corresponding Source of the work is not available for anyone
+to copy, free of charge and under the terms of this License, through a
+publicly available network server or other readily accessible means,
+then you must either (1) cause the Corresponding Source to be so
+available, or (2) arrange to deprive yourself of the benefit of the
+patent license for this particular work, or (3) arrange, in a manner
+consistent with the requirements of this License, to extend the patent
+license to downstream recipients. "Knowingly relying" means you have
+actual knowledge that, but for the patent license, your conveying the
+covered work in a country, or your recipient's use of the covered work
+in a country, would infringe one or more identifiable patents in that
+country that you have reason to believe are valid.
+
+ If, pursuant to or in connection with a single transaction or
+arrangement, you convey, or propagate by procuring conveyance of, a
+covered work, and grant a patent license to some of the parties
+receiving the covered work authorizing them to use, propagate, modify
+or convey a specific copy of the covered work, then the patent license
+you grant is automatically extended to all recipients of the covered
+work and works based on it.
+
+ A patent license is "discriminatory" if it does not include within
+the scope of its coverage, prohibits the exercise of, or is
+conditioned on the non-exercise of one or more of the rights that are
+specifically granted under this License. You may not convey a covered
+work if you are a party to an arrangement with a third party that is
+in the business of distributing software, under which you make payment
+to the third party based on the extent of your activity of conveying
+the work, and under which the third party grants, to any of the
+parties who would receive the covered work from you, a discriminatory
+patent license (a) in connection with copies of the covered work
+conveyed by you (or copies made from those copies), or (b) primarily
+for and in connection with specific products or compilations that
+contain the covered work, unless you entered into that arrangement,
+or that patent license was granted, prior to 28 March 2007.
+
+ Nothing in this License shall be construed as excluding or limiting
+any implied license or other defenses to infringement that may
+otherwise be available to you under applicable patent law.
+
+ 12. No Surrender of Others' Freedom.
+
+ If conditions are imposed on you (whether by court order, agreement or
+otherwise) that contradict the conditions of this License, they do not
+excuse you from the conditions of this License. If you cannot convey a
+covered work so as to satisfy simultaneously your obligations under this
+License and any other pertinent obligations, then as a consequence you may
+not convey it at all. For example, if you agree to terms that obligate you
+to collect a royalty for further conveying from those to whom you convey
+the Program, the only way you could satisfy both those terms and this
+License would be to refrain entirely from conveying the Program.
+
+ 13. Use with the GNU Affero General Public License.
+
+ Notwithstanding any other provision of this License, you have
+permission to link or combine any covered work with a work licensed
+under version 3 of the GNU Affero General Public License into a single
+combined work, and to convey the resulting work. The terms of this
+License will continue to apply to the part which is the covered work,
+but the special requirements of the GNU Affero General Public License,
+section 13, concerning interaction through a network will apply to the
+combination as such.
+
+ 14. Revised Versions of this License.
+
+ The Free Software Foundation may publish revised and/or new versions of
+the GNU General Public License from time to time. Such new versions will
+be similar in spirit to the present version, but may differ in detail to
+address new problems or concerns.
+
+ Each version is given a distinguishing version number. If the
+Program specifies that a certain numbered version of the GNU General
+Public License "or any later version" applies to it, you have the
+option of following the terms and conditions either of that numbered
+version or of any later version published by the Free Software
+Foundation. If the Program does not specify a version number of the
+GNU General Public License, you may choose any version ever published
+by the Free Software Foundation.
+
+ If the Program specifies that a proxy can decide which future
+versions of the GNU General Public License can be used, that proxy's
+public statement of acceptance of a version permanently authorizes you
+to choose that version for the Program.
+
+ Later license versions may give you additional or different
+permissions. However, no additional obligations are imposed on any
+author or copyright holder as a result of your choosing to follow a
+later version.
+
+ 15. Disclaimer of Warranty.
+
+ THERE IS NO WARRANTY FOR THE PROGRAM, TO THE EXTENT PERMITTED BY
+APPLICABLE LAW. EXCEPT WHEN OTHERWISE STATED IN WRITING THE COPYRIGHT
+HOLDERS AND/OR OTHER PARTIES PROVIDE THE PROGRAM "AS IS" WITHOUT WARRANTY
+OF ANY KIND, EITHER EXPRESSED OR IMPLIED, INCLUDING, BUT NOT LIMITED TO,
+THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR
+PURPOSE. THE ENTIRE RISK AS TO THE QUALITY AND PERFORMANCE OF THE PROGRAM
+IS WITH YOU. SHOULD THE PROGRAM PROVE DEFECTIVE, YOU ASSUME THE COST OF
+ALL NECESSARY SERVICING, REPAIR OR CORRECTION.
+
+ 16. Limitation of Liability.
+
+ IN NO EVENT UNLESS REQUIRED BY APPLICABLE LAW OR AGREED TO IN WRITING
+WILL ANY COPYRIGHT HOLDER, OR ANY OTHER PARTY WHO MODIFIES AND/OR CONVEYS
+THE PROGRAM AS PERMITTED ABOVE, BE LIABLE TO YOU FOR DAMAGES, INCLUDING ANY
+GENERAL, SPECIAL, INCIDENTAL OR CONSEQUENTIAL DAMAGES ARISING OUT OF THE
+USE OR INABILITY TO USE THE PROGRAM (INCLUDING BUT NOT LIMITED TO LOSS OF
+DATA OR DATA BEING RENDERED INACCURATE OR LOSSES SUSTAINED BY YOU OR THIRD
+PARTIES OR A FAILURE OF THE PROGRAM TO OPERATE WITH ANY OTHER PROGRAMS),
+EVEN IF SUCH HOLDER OR OTHER PARTY HAS BEEN ADVISED OF THE POSSIBILITY OF
+SUCH DAMAGES.
+
+ 17. Interpretation of Sections 15 and 16.
+
+ If the disclaimer of warranty and limitation of liability provided
+above cannot be given local legal effect according to their terms,
+reviewing courts shall apply local law that most closely approximates
+an absolute waiver of all civil liability in connection with the
+Program, unless a warranty or assumption of liability accompanies a
+copy of the Program in return for a fee.
+
+ END OF TERMS AND CONDITIONS
+
+ How to Apply These Terms to Your New Programs
+
+ If you develop a new program, and you want it to be of the greatest
+possible use to the public, the best way to achieve this is to make it
+free software which everyone can redistribute and change under these terms.
+
+ To do so, attach the following notices to the program. It is safest
+to attach them to the start of each source file to most effectively
+state the exclusion of warranty; and each file should have at least
+the "copyright" line and a pointer to where the full notice is found.
+
+ <one line to give the program's name and a brief idea of what it does.>
+ Copyright (C) <year> <name of author>
+
+ This program is free software: you can redistribute it and/or modify
+ it under the terms of the GNU General Public License as published by
+ the Free Software Foundation, either version 3 of the License, or
+ (at your option) any later version.
+
+ This program is distributed in the hope that it will be useful,
+ but WITHOUT ANY WARRANTY; without even the implied warranty of
+ MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+ GNU General Public License for more details.
+
+ You should have received a copy of the GNU General Public License
+ along with this program. If not, see <http://www.gnu.org/licenses/>.
+
+Also add information on how to contact you by electronic and paper mail.
+
+ If the program does terminal interaction, make it output a short
+notice like this when it starts in an interactive mode:
+
+ <program> Copyright (C) <year> <name of author>
+ This program comes with ABSOLUTELY NO WARRANTY; for details type `show w'.
+ This is free software, and you are welcome to redistribute it
+ under certain conditions; type `show c' for details.
+
+The hypothetical commands `show w' and `show c' should show the appropriate
+parts of the General Public License. Of course, your program's commands
+might be different; for a GUI interface, you would use an "about box".
+
+ You should also get your employer (if you work as a programmer) or school,
+if any, to sign a "copyright disclaimer" for the program, if necessary.
+For more information on this, and how to apply and follow the GNU GPL, see
+<http://www.gnu.org/licenses/>.
+
+ The GNU General Public License does not permit incorporating your program
+into proprietary programs. If your program is a subroutine library, you
+may consider it more useful to permit linking proprietary applications with
+the library. If this is what you want to do, use the GNU Lesser General
+Public License instead of this License. But first, please read
+<http://www.gnu.org/philosophy/why-not-lgpl.html>.
+
diff --git a/lib/ansible/modules/extras/GUIDELINES.md b/lib/ansible/modules/extras/GUIDELINES.md
new file mode 100644
index 0000000000..2589a730d4
--- /dev/null
+++ b/lib/ansible/modules/extras/GUIDELINES.md
@@ -0,0 +1,73 @@
+# Module Maintainer Guidelines
+
+Thank you for being a maintainer of one of the modules in ansible-modules-extras! This guide provides module maintainers with an overview of their responsibilities, along with resources for additional information and links to helpful tools.
+
+In addition to the information below, module maintainers should be familiar with:
+* General Ansible community development practices (http://docs.ansible.com/ansible/community.html)
+* Documentation on module development (http://docs.ansible.com/ansible/developing_modules.html)
+* Any namespace-specific module guidelines (identified as GUIDELINES.md in the appropriate file tree).
+
+***
+
+# Maintainer Responsibilities
+
+When you contribute a new module to the ansible-modules-extras repository, you become the maintainer for that module once it has been merged. Maintainership empowers you with the authority to accept, reject, or request revisions to pull requests on your module -- but as they say, "with great power comes great responsibility."
+
+Maintainers of Ansible modules are expected to provide feedback, responses, or actions on pull requests or issues for the module(s) they maintain in a reasonably timely manner.
+
+It is also recommended that you occasionally revisit the [contribution guidelines](https://github.com/alikins/ansible-modules-extras/commit/c87795da5b0c95c67fea1608a5a2a4ec54cb3905), as they are continually refined. Occasionally, you may be requested to update your module to move it closer to the generally accepted standard requirements; we hope this will be infrequent, and any such request will always come with a fair amount of lead time (i.e., not by tomorrow!).
+
+Finally, following the ansible-devel mailing list can be a great way to participate in the broader Ansible community, and a place where you can influence the overall direction, quality, and goals of the Extras modules repository. If you're not on this relatively low-volume list, please join us here: https://groups.google.com/forum/#!forum/ansible-devel
+
+The Ansible community hopes that you will find that maintaining your module is as rewarding for you as having the module is for the wider community.
+
+***
+
+# Pull Requests, Issues, and Workflow
+
+## Pull Requests
+
+Module pull requests are located in the [ansible-modules-extras repository](https://github.com/ansible/ansible-modules-extras/pulls).
+
+Because of the high volume of pull requests, notifications of PRs to specific modules are routed by an automated bot to the appropriate maintainer for handling. It is recommended that you set up a notification process so that you receive messages which mention your GitHub ID.
+
+## Issues
+
+Issues for modules, including bug reports, documentation bug reports, and feature requests, are tracked in the [ansible-modules-extras repository](https://github.com/ansible/ansible-modules-extras/issues).
+
+Issues for modules are routed to their maintainers via an automated process. This process is still being refined, and it currently depends on the issue creator providing adequate details (specifically, the proper module name) in order to route the issue correctly. If you are the maintainer of a specific module, it is recommended that you periodically search module issues for mentions of your module's name (or variations on that name), and that you set up notifications for mentions of your GitHub ID.
+
+## PR Workflow
+
+Automated routing of pull requests is handled by a tool called [Ansibullbot](https://github.com/ansible/ansibullbot). (You could say that he moooo-ves things around.)
+
+Being moderately familiar with how the workflow behind the bot operates can be helpful to you, and -- should things go awry -- your feedback can be helpful to the folks who continually help Ansibullbot evolve.
+
+A detailed explanation of the PR workflow can be seen here: https://github.com/ansible/community/blob/master/PR-FLOW.md
+
+***
+
+# Extras maintainers list
+
+The full list of maintainers for modules in ansible-modules-extras is located here:
+https://github.com/ansible/ansibullbot/blob/master/MAINTAINERS-EXTRAS.txt
+
+## Changing Maintainership
+
+Communities change over time, and no one maintains a module forever. If you'd like to propose an additional maintainer for your module, please submit a PR to the maintainers file with the GitHub ID of the new maintainer.
+
+If you'd like to step down as a maintainer, please submit a PR to the maintainers file removing your GitHub ID from the module in question. If that would leave the module with no maintainers, put "ansible" as the maintainer. This indicates that the module is temporarily without a maintainer, and the Ansible community team will search for a new one.
+
+***
+
+# Tools and other Resources
+
+## Useful tools
+* https://ansible.sivel.net/pr/byfile.html -- a full list of all open Pull Requests, organized by file.
+* https://github.com/sivel/ansible-testing -- these are the tests that run on Shippable against all PRs for extras modules, so it's a good idea to run these tests locally first.
+
+## Other Resources
+
+* Module maintainer list: https://github.com/ansible/ansibullbot/blob/master/MAINTAINERS-EXTRAS.txt
+* Ansibullbot: https://github.com/ansible/ansibullbot
+* Triage / pull request workflow and information, including definitions for Labels in GitHub: https://github.com/ansible/community/blob/master/PR-FLOW.md
diff --git a/lib/ansible/modules/extras/MAINTAINERS.md b/lib/ansible/modules/extras/MAINTAINERS.md
new file mode 100644
index 0000000000..c4370110e9
--- /dev/null
+++ b/lib/ansible/modules/extras/MAINTAINERS.md
@@ -0,0 +1 @@
+Please refer to [GUIDELINES.md](./GUIDELINES.md) for the updated contributor guidelines.
diff --git a/lib/ansible/modules/extras/README.md b/lib/ansible/modules/extras/README.md
new file mode 100644
index 0000000000..7b860ba714
--- /dev/null
+++ b/lib/ansible/modules/extras/README.md
@@ -0,0 +1,28 @@
+[![Build Status](https://api.shippable.com/projects/573f79d02a8192902e20e34f/badge?branch=devel)](https://app.shippable.com/projects/573f79d02a8192902e20e34f)
+
+ansible-modules-extras
+======================
+
+This repo contains a subset of ansible-modules with slightly lower use or priority than "core" modules.
+
+All new modules should be submitted here, and have a chance to be promoted to core over time.
+
+Reporting bugs
+==============
+
+Take care to submit tickets to the appropriate repo where modules are contained. The repo is mentioned at the bottom of each module's documentation page at [docs.ansible.com](http://docs.ansible.com/).
+
+Testing modules
+===============
+
+The Ansible [module development guide](http://docs.ansible.com/developing_modules.html#testing-modules) contains the latest information on testing modules.
+
+License
+=======
+
+As with Ansible itself, modules distributed with Ansible are GPLv3 licensed. User-generated modules that are not part of this project can be of any license.
+
+Installation
+============
+
+There should be no need to install this repo separately, as it is included in any Ansible install performed using the official documented methods.
diff --git a/lib/ansible/modules/extras/REVIEWERS.md b/lib/ansible/modules/extras/REVIEWERS.md
new file mode 100644
index 0000000000..fe7392d7f0
--- /dev/null
+++ b/lib/ansible/modules/extras/REVIEWERS.md
@@ -0,0 +1,58 @@
+Ansible Extras Reviewers
+====================
+The Ansible Extras Modules are written and maintained by the Ansible community, and are included in Extras through a community-driven approval process.
+
+Expectations
+=======
+
+1. New modules will be tested in good faith by users who care about them.
+2. New modules will adhere to the module guidelines, located here: http://docs.ansible.com/ansible/developing_modules.html#module-checklist
+3. The submitter of the module is willing and able to maintain the module over time.
+
+New Modules
+=======
+
+New modules are subject to review by anyone in the Ansible community. For inclusion of a new module into Ansible Extras, a pull request must receive at least one approval from a fellow community member on each of the following criteria:
+
+* One "worksforme" approval from someone who has thoroughly tested the module, including all parameters and switches.
+* One "passes_guidelines" approval from someone who has vetted the code according to the module guidelines.
+
+Either of these approvals can be given, in a comment, by anybody (except the submitter).
+
+Any module that has both of these, and no "needs_revision" votes (which can also be given by anybody) will be approved for inclusion.
+
+The core team will continue to be the point of escalation for any issues that may arise (duplicate modules, disagreements over guidelines, etc.).
+
+Existing Modules
+=======
+
+PRs made against existing modules in Extras are subject to review by the module author or current maintainer.
+
+Unmaintained Modules
+=======
+
+If modules in Extras go unmaintained, we will seek new maintainers, and if we don't find new
+maintainers, we will ultimately deprecate them.
+
+Subject Matter Experts
+=======
+
+Subject matter experts are groups of acknowledged community members who have expertise and experience in particular modules. Pull requests for existing or new modules are sometimes referred to these wider groups during triage, for expedience or escalation.
+
+Openstack: @emonty @shrews @dguerri @juliakreger @j2sol @rcarrillocruz
+
+Windows: @trondhindenes @petemounce @elventear @smadam813 @jhawkesworth @angstwad @sivel @chrishoffman @cchurch
+
+AWS: @jsmartin @scicoin-project @tombamford @garethr @jarv @jsdalton @silviud @adq @zbal @zeekin @willthames @lwade @carsongee @defionscode @tastychutney @bpennypacker @loia
+
+Docker: @cove @joshuaconner @softzilla @smashwilson
+
+Red Hat Network: @barnabycourt @vritant @flossware
+
+Zabbix: @cove @harrisongu @abulimov
+
+PR Process
+=======
+
+A full view of the pull request process for Extras can be seen here:
+![here](http://gregdek.org/extras_PR_process_2015_09.png)
diff --git a/lib/ansible/modules/extras/VERSION b/lib/ansible/modules/extras/VERSION
new file mode 100644
index 0000000000..47c909bbc5
--- /dev/null
+++ b/lib/ansible/modules/extras/VERSION
@@ -0,0 +1 @@
+2.0.0-0.5.beta3
diff --git a/lib/ansible/modules/extras/__init__.py b/lib/ansible/modules/extras/__init__.py
new file mode 100644
index 0000000000..e69de29bb2
--- /dev/null
+++ b/lib/ansible/modules/extras/__init__.py
diff --git a/lib/ansible/modules/extras/cloud/__init__.py b/lib/ansible/modules/extras/cloud/__init__.py
new file mode 100644
index 0000000000..e69de29bb2
--- /dev/null
+++ b/lib/ansible/modules/extras/cloud/__init__.py
diff --git a/lib/ansible/modules/extras/cloud/amazon/GUIDELINES.md b/lib/ansible/modules/extras/cloud/amazon/GUIDELINES.md
new file mode 100644
index 0000000000..b8ca836b79
--- /dev/null
+++ b/lib/ansible/modules/extras/cloud/amazon/GUIDELINES.md
@@ -0,0 +1,264 @@
+# Guidelines for AWS modules
+
+## Getting Started
+
+Since Ansible 2.0, it is a requirement that all new AWS modules are written to use boto3.
+
+Prior to 2.0, modules may have been written in either boto or boto3. Modules written using boto can continue to be extended using boto.
+
+Backward compatibility of older modules must be maintained.
+
+## Bug fixing
+
+If you are writing a bugfix for a module that uses boto, you should continue to use boto to maintain backward compatibility.
+
+If you are adding new functionality to an existing module that uses boto but the new functionality requires boto3, you
+must maintain backward compatibility of the module and ensure the module still works without boto3.
+
+## Naming your module
+
+Base the name of the module on the part of AWS that
+you actually use. (A good rule of thumb is to take
+whatever module you use with boto as a starting point).
+
+Don't further abbreviate names - if something is a well-known
+abbreviation because it is a major component of AWS (e.g. VPC,
+ELB), that's fine, but don't create new abbreviations
+independently.
+
+## Adding new features
+
+Try to keep backward compatibility with relatively recent
+versions of boto. That means that if you want to implement some
+functionality that uses a new feature of boto, it should only
+fail if that feature actually needs to be run, with a message
+saying which version of boto is needed.
+
+Use feature testing (e.g. `hasattr(boto.module, 'shiny_new_method')`)
+to check whether boto supports a feature rather than version checking.
+
+e.g. from the `ec2` module:
+```python
+if boto_supports_profile_name_arg(ec2):
+ params['instance_profile_name'] = instance_profile_name
+else:
+ if instance_profile_name is not None:
+ module.fail_json(msg="instance_profile_name parameter requires boto version 2.5.0 or higher")
+```
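+
+As a hedged sketch (the connection object and method name are placeholders,
+not real boto APIs), such a feature-test helper can be as simple as:
+
+```python
+def boto_supports_shiny_new_method(connection):
+    # Probe for the method itself rather than comparing boto version strings;
+    # this keeps the module working on older boto until the feature is used.
+    return hasattr(connection, 'shiny_new_method')
+```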
+
+## Using boto and boto3
+
+### Importing
+
+Wrap import statements in a try block and fail the module later if the import fails.
+
+#### boto
+
+```python
+try:
+ import boto.ec2
+ from boto.exception import BotoServerError
+ HAS_BOTO = True
+except ImportError:
+ HAS_BOTO = False
+
+def main():
+
+ if not HAS_BOTO:
+ module.fail_json(msg='boto required for this module')
+```
+
+#### boto3
+
+```python
+try:
+ import boto3
+ HAS_BOTO3 = True
+except ImportError:
+ HAS_BOTO3 = False
+
+def main():
+
+ if not HAS_BOTO3:
+ module.fail_json(msg='boto3 required for this module')
+```
+
+#### boto and boto3 combined
+
+If you want to add boto3 functionality to a module written using boto, you must maintain backward compatibility.
+Ensure that you clearly document if a new parameter requires boto3. Import boto3 at the top of the
+module as normal and then use the HAS_BOTO3 bool when necessary, before the new feature.
+
+```python
+try:
+ import boto
+ HAS_BOTO = True
+except ImportError:
+ HAS_BOTO = False
+
+try:
+ import boto3
+ HAS_BOTO3 = True
+except ImportError:
+ HAS_BOTO3 = False
+
+if my_new_feature_parameter_is_set:
+    if HAS_BOTO3:
+        pass  # implement the boto3-only feature here
+    else:
+        module.fail_json(msg="boto3 is required for this feature")
+```
+
+### Connecting to AWS
+
+To connect to AWS, you should use `get_aws_connection_info` and then
+`connect_to_aws`.
+
+The reason for using `get_aws_connection_info` and `connect_to_aws` rather than doing it
+yourself is that they handle some of the more esoteric connection
+options such as security tokens and boto profiles.
+
+Some boto services require a region to be specified. You should check for the region parameter where required.
+
+#### boto
+
+An example of connecting to ec2:
+
+```python
+region, ec2_url, aws_connect_params = get_aws_connection_info(module)
+if region:
+ try:
+ connection = connect_to_aws(boto.ec2, region, **aws_connect_params)
+ except (boto.exception.NoAuthHandlerFound, AnsibleAWSError), e:
+ module.fail_json(msg=str(e))
+else:
+ module.fail_json(msg="region must be specified")
+```
+
+#### boto3
+
+An example of connecting to ec2 is shown below. Note that, unlike boto, there is no 'NoAuthHandlerFound' exception handling here.
+Instead, an AuthFailure exception will be thrown when you use 'connection'. See the Exception Handling section below.
+
+```python
+region, ec2_url, aws_connect_params = get_aws_connection_info(module, boto3=True)
+if region:
+ connection = boto3_conn(module, conn_type='client', resource='ec2', region=region, endpoint=ec2_url, **aws_connect_params)
+else:
+ module.fail_json(msg="region must be specified")
+```
+
+### Exception Handling
+
+You should wrap any boto call in a try block. If an exception is thrown, it is up to you to decide how to handle it,
+but usually calling fail_json with the error message will suffice.
+
+#### boto
+
+```python
+# Import BotoServerError
+try:
+ import boto.ec2
+ from boto.exception import BotoServerError
+ HAS_BOTO = True
+except ImportError:
+ HAS_BOTO = False
+
+# Connect to AWS
+...
+
+# Make a call to AWS
+try:
+ result = connection.aws_call()
+except BotoServerError, e:
+ module.fail_json(msg=e.message)
+```
+
+#### boto3
+
+For more information on botocore exception handling, see <http://botocore.readthedocs.org/en/latest/client_upgrades.html#error-handling>.
+
+Boto3 provides a lot of useful information when an exception is thrown, so pass this to the user along with the message.
+
+```python
+# Import ClientError from botocore
+try:
+ from botocore.exceptions import ClientError
+ HAS_BOTO3 = True
+except ImportError:
+ HAS_BOTO3 = False
+
+# Connect to AWS
+...
+
+# Make a call to AWS
+try:
+ result = connection.aws_call()
+except ClientError, e:
+ module.fail_json(msg=e.message, **camel_dict_to_snake_dict(e.response))
+```
+
+If you need to perform an action based on the error boto3 returned, use the error code.
+
+```python
+# Make a call to AWS
+try:
+ result = connection.aws_call()
+except ClientError, e:
+ if e.response['Error']['Code'] == 'NoSuchEntity':
+ return None
+ else:
+ module.fail_json(msg=e.message, **camel_dict_to_snake_dict(e.response))
+```
+
+### Returning Values
+
+When you make a call using boto3, you will probably get back some useful information that you should return in the module.
+
+As well as information related to the call itself, you will also have some response metadata. It is OK to return this to
+the user as well, since they may find it useful.
+
+Boto3 returns all values CamelCased. Ansible follows Python standards for variable names and uses snake_case. There is a
+helper function in module_utils/ec2.py called `camel_dict_to_snake_dict` that allows you to easily convert the boto3
+response to snake_case.
+
+You should use this helper function and avoid changing the names of values returned by Boto3. For example, if boto3 returns a
+value called 'SecretAccessKey', do not change it to 'AccessKey'.
+
+```python
+# Make a call to AWS
+result = connection.aws_call()
+
+# Return the result to the user
+module.exit_json(changed=True, **camel_dict_to_snake_dict(result))
+```
+
+### Helper functions
+
+Along with the connection functions in Ansible's module_utils/ec2.py, there are some other useful functions, detailed below.
+
+#### camel_dict_to_snake_dict
+
+boto3 returns results in a dict. The keys of the dict are in CamelCase format. In keeping
+with Ansible format, this function will convert the keys to snake_case.
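+
+For instance (an illustrative sketch; the input dict is made up):
+
+```python
+from ansible.module_utils.ec2 import camel_dict_to_snake_dict
+
+result = {'StackName': 'demo', 'StackStatus': 'CREATE_COMPLETE'}
+print(camel_dict_to_snake_dict(result))
+# {'stack_name': 'demo', 'stack_status': 'CREATE_COMPLETE'}
+```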
+
+#### ansible_dict_to_boto3_filter_list
+
+Converts an Ansible list of filters to a boto3-friendly list of dicts. This is useful for
+any boto3 `_facts` modules.
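+
+A minimal sketch (the filter names and values are only examples):
+
+```python
+from ansible.module_utils.ec2 import ansible_dict_to_boto3_filter_list
+
+filters = {'vpc-id': 'vpc-abcd1234', 'state': 'available'}
+print(ansible_dict_to_boto3_filter_list(filters))
+# [{'Name': 'vpc-id', 'Values': ['vpc-abcd1234']},
+#  {'Name': 'state', 'Values': ['available']}]  (order may vary)
+```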
+
+#### boto3_tag_list_to_ansible_dict
+
+Converts a boto3 tag list to an Ansible dict. Boto3 returns tags as a list of dicts containing keys called
+'Key' and 'Value'. This function converts this list into a single dict where the dict key is the tag
+key and the dict value is the tag value.
+
+#### ansible_dict_to_boto3_tag_list
+
+Opposite of above. Converts an Ansible dict to a boto3 tag list of dicts.
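+
+A short sketch showing both directions (the tag values are made up):
+
+```python
+from ansible.module_utils.ec2 import (ansible_dict_to_boto3_tag_list,
+                                      boto3_tag_list_to_ansible_dict)
+
+tag_list = [{'Key': 'Name', 'Value': 'web-01'}, {'Key': 'env', 'Value': 'dev'}]
+print(boto3_tag_list_to_ansible_dict(tag_list))
+# {'Name': 'web-01', 'env': 'dev'}
+print(ansible_dict_to_boto3_tag_list({'Name': 'web-01', 'env': 'dev'}))
+# [{'Key': 'Name', 'Value': 'web-01'}, {'Key': 'env', 'Value': 'dev'}]  (order may vary)
+```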
+
+#### get_ec2_security_group_ids_from_names
+
+Pass this function a list of security group names, or a combination of security group names and IDs, and it will
+return a list of IDs. You should also pass the VPC ID if known, because security group names are not necessarily
+unique across VPCs.
\ No newline at end of file
diff --git a/lib/ansible/modules/extras/cloud/amazon/__init__.py b/lib/ansible/modules/extras/cloud/amazon/__init__.py
new file mode 100644
index 0000000000..e69de29bb2
--- /dev/null
+++ b/lib/ansible/modules/extras/cloud/amazon/__init__.py
diff --git a/lib/ansible/modules/extras/cloud/amazon/cloudformation_facts.py b/lib/ansible/modules/extras/cloud/amazon/cloudformation_facts.py
new file mode 100644
index 0000000000..381e479fe6
--- /dev/null
+++ b/lib/ansible/modules/extras/cloud/amazon/cloudformation_facts.py
@@ -0,0 +1,285 @@
+#!/usr/bin/python
+# This file is part of Ansible
+#
+# Ansible is free software: you can redistribute it and/or modify
+# it under the terms of the GNU General Public License as published by
+# the Free Software Foundation, either version 3 of the License, or
+# (at your option) any later version.
+#
+# Ansible is distributed in the hope that it will be useful,
+# but WITHOUT ANY WARRANTY; without even the implied warranty of
+# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+# GNU General Public License for more details.
+#
+# You should have received a copy of the GNU General Public License
+# along with Ansible. If not, see <http://www.gnu.org/licenses/>.
+
+DOCUMENTATION = '''
+---
+module: cloudformation_facts
+short_description: Obtain facts about an AWS CloudFormation stack
+description:
+ - Gets information about an AWS CloudFormation stack
+requirements:
+ - boto3 >= 1.0.0
+ - python >= 2.6
+version_added: "2.2"
+author: Justin Menga (@jmenga)
+options:
+ stack_name:
+ description:
+ - The name or id of the CloudFormation stack
+ required: true
+ all_facts:
+ description:
+ - Get all stack information for the stack
+ required: false
+ default: false
+ stack_events:
+ description:
+ - Get stack events for the stack
+ required: false
+ default: false
+ stack_template:
+ description:
+ - Get stack template body for the stack
+ required: false
+ default: false
+ stack_resources:
+ description:
+ - Get stack resources for the stack
+ required: false
+ default: false
+ stack_policy:
+ description:
+ - Get stack policy for the stack
+ required: false
+ default: false
+extends_documentation_fragment:
+ - aws
+ - ec2
+'''
+
+EXAMPLES = '''
+# Note: These examples do not set authentication details, see the AWS Guide for details.
+
+# Get summary information about a stack
+- cloudformation_facts:
+ stack_name: my-cloudformation-stack
+
+# Facts are published in ansible_facts['cloudformation'][<stack_name>]
+- debug: msg={{ ansible_facts['cloudformation']['my-cloudformation-stack'] }}
+
+# Get all stack information about a stack
+- cloudformation_facts:
+ stack_name: my-cloudformation-stack
+ all_facts: true
+
+# Get stack resource and stack policy information about a stack
+- cloudformation_facts:
+ stack_name: my-cloudformation-stack
+ stack_resources: true
+ stack_policy: true
+
+# Example dictionary outputs for stack_outputs, stack_parameters and stack_resources:
+"stack_outputs": {
+ "ApplicationDatabaseName": "dazvlpr01xj55a.ap-southeast-2.rds.amazonaws.com",
+ ...
+},
+"stack_parameters": {
+ "DatabaseEngine": "mysql",
+ "DatabasePassword": "****",
+ ...
+},
+"stack_resources": {
+ "AutoscalingGroup": "dev-someapp-AutoscalingGroup-1SKEXXBCAN0S7",
+ "AutoscalingSecurityGroup": "sg-abcd1234",
+ "ApplicationDatabase": "dazvlpr01xj55a",
+ "EcsTaskDefinition": "arn:aws:ecs:ap-southeast-2:123456789:task-definition/dev-someapp-EcsTaskDefinition-1F2VM9QB0I7K9:1"
+ ...
+}
+'''
+
+RETURN = '''
+stack_description:
+ description: Summary facts about the stack
+ returned: always
+ type: dict
+stack_outputs:
+ description: Dictionary of stack outputs keyed by the value of each output 'OutputKey' parameter and corresponding value of each output 'OutputValue' parameter
+ returned: always
+ type: dict
+stack_parameters:
+ description: Dictionary of stack parameters keyed by the value of each parameter 'ParameterKey' parameter and corresponding value of each parameter 'ParameterValue' parameter
+ returned: always
+ type: dict
+stack_events:
+ description: All stack events for the stack
+ returned: only if all_facts or stack_events is true
+ type: list of events
+stack_policy:
+ description: Describes the stack policy for the stack
+ returned: only if all_facts or stack_policy is true
+ type: dict
+stack_template:
+ description: Describes the stack template for the stack
+ returned: only if all_facts or stack_template is true
+ type: dict
+stack_resource_list:
+ description: Describes stack resources for the stack
+  returned: only if all_facts or stack_resources is true
+ type: list of resources
+stack_resources:
+ description: Dictionary of stack resources keyed by the value of each resource 'LogicalResourceId' parameter and corresponding value of each resource 'PhysicalResourceId' parameter
+  returned: only if all_facts or stack_resources is true
+ type: dict
+'''
+
+try:
+ import boto3
+ import botocore
+ HAS_BOTO3 = True
+except ImportError:
+ HAS_BOTO3 = False
+
+from ansible.module_utils.ec2 import get_aws_connection_info, ec2_argument_spec, boto3_conn, camel_dict_to_snake_dict
+from ansible.module_utils.basic import AnsibleModule
+from functools import partial
+import json
+import traceback
+
+class CloudFormationServiceManager:
+ """Handles CloudFormation Services"""
+
+ def __init__(self, module):
+ self.module = module
+
+ try:
+ region, ec2_url, aws_connect_kwargs = get_aws_connection_info(module, boto3=True)
+ self.client = boto3_conn(module, conn_type='client',
+ resource='cloudformation', region=region,
+ endpoint=ec2_url, **aws_connect_kwargs)
+ except botocore.exceptions.NoRegionError:
+ self.module.fail_json(msg="Region must be specified as a parameter, in AWS_DEFAULT_REGION environment variable or in boto configuration file")
+ except Exception as e:
+ self.module.fail_json(msg="Can't establish connection - " + str(e), exception=traceback.format_exc(e))
+
+ def describe_stack(self, stack_name):
+ try:
+ func = partial(self.client.describe_stacks,StackName=stack_name)
+ response = self.paginated_response(func, 'Stacks')
+ if response:
+ return response[0]
+ self.module.fail_json(msg="Error describing stack - an empty response was returned")
+ except Exception as e:
+ self.module.fail_json(msg="Error describing stack - " + str(e), exception=traceback.format_exc(e))
+
+ def list_stack_resources(self, stack_name):
+ try:
+ func = partial(self.client.list_stack_resources,StackName=stack_name)
+ return self.paginated_response(func, 'StackResourceSummaries')
+ except Exception as e:
+ self.module.fail_json(msg="Error listing stack resources - " + str(e), exception=traceback.format_exc(e))
+
+ def describe_stack_events(self, stack_name):
+ try:
+ func = partial(self.client.describe_stack_events,StackName=stack_name)
+ return self.paginated_response(func, 'StackEvents')
+ except Exception as e:
+ self.module.fail_json(msg="Error describing stack events - " + str(e), exception=traceback.format_exc(e))
+
+ def get_stack_policy(self, stack_name):
+ try:
+ response = self.client.get_stack_policy(StackName=stack_name)
+ stack_policy = response.get('StackPolicyBody')
+ if stack_policy:
+ return json.loads(stack_policy)
+ return dict()
+ except Exception as e:
+ self.module.fail_json(msg="Error getting stack policy - " + str(e), exception=traceback.format_exc(e))
+
+ def get_template(self, stack_name):
+ try:
+ response = self.client.get_template(StackName=stack_name)
+ return response.get('TemplateBody')
+ except Exception as e:
+ self.module.fail_json(msg="Error getting stack template - " + str(e), exception=traceback.format_exc(e))
+
+ def paginated_response(self, func, result_key, next_token=None):
+ '''
+ Returns expanded response for paginated operations.
+ The 'result_key' is used to define the concatenated results that are combined from each paginated response.
+ '''
+ args=dict()
+ if next_token:
+ args['NextToken'] = next_token
+ response = func(**args)
+ result = response.get(result_key)
+ next_token = response.get('NextToken')
+ if not next_token:
+ return result
+ return result + self.paginated_response(func, result_key, next_token)
+
+def to_dict(items, key, value):
+ ''' Transforms a list of items to a Key/Value dictionary '''
+ if items:
+ return dict(zip([i[key] for i in items], [i[value] for i in items]))
+ else:
+ return dict()
+
+def main():
+ argument_spec = ec2_argument_spec()
+ argument_spec.update(dict(
+ stack_name=dict(required=True, type='str' ),
+ all_facts=dict(required=False, default=False, type='bool'),
+ stack_policy=dict(required=False, default=False, type='bool'),
+ stack_events=dict(required=False, default=False, type='bool'),
+ stack_resources=dict(required=False, default=False, type='bool'),
+ stack_template=dict(required=False, default=False, type='bool'),
+ ))
+
+ module = AnsibleModule(argument_spec=argument_spec, supports_check_mode=False)
+
+ if not HAS_BOTO3:
+ module.fail_json(msg='boto3 is required.')
+
+ # Describe the stack
+ service_mgr = CloudFormationServiceManager(module)
+ stack_name = module.params.get('stack_name')
+ result = {
+ 'ansible_facts': { 'cloudformation': { stack_name:{} } }
+ }
+ facts = result['ansible_facts']['cloudformation'][stack_name]
+ facts['stack_description'] = service_mgr.describe_stack(stack_name)
+
+ # Create stack output and stack parameter dictionaries
+ if facts['stack_description']:
+ facts['stack_outputs'] = to_dict(facts['stack_description'].get('Outputs'), 'OutputKey', 'OutputValue')
+ facts['stack_parameters'] = to_dict(facts['stack_description'].get('Parameters'), 'ParameterKey', 'ParameterValue')
+
+ # normalize stack description API output
+ facts['stack_description'] = camel_dict_to_snake_dict(facts['stack_description'])
+ # camel2snake doesn't handle NotificationARNs properly, so let's fix that
+ facts['stack_description']['notification_arns'] = facts['stack_description'].pop('notification_ar_ns', [])
+
+ # Create optional stack outputs
+ all_facts = module.params.get('all_facts')
+ if all_facts or module.params.get('stack_resources'):
+ facts['stack_resource_list'] = service_mgr.list_stack_resources(stack_name)
+ facts['stack_resources'] = to_dict(facts.get('stack_resource_list'), 'LogicalResourceId', 'PhysicalResourceId')
+ if all_facts or module.params.get('stack_template'):
+ facts['stack_template'] = service_mgr.get_template(stack_name)
+ if all_facts or module.params.get('stack_policy'):
+ facts['stack_policy'] = service_mgr.get_stack_policy(stack_name)
+ if all_facts or module.params.get('stack_events'):
+ facts['stack_events'] = service_mgr.describe_stack_events(stack_name)
+
+ result['changed'] = False
+ module.exit_json(**result)
+
+# import module snippets
+from ansible.module_utils.basic import *
+from ansible.module_utils.ec2 import *
+
+if __name__ == '__main__':
+ main()
diff --git a/lib/ansible/modules/extras/cloud/amazon/cloudtrail.py b/lib/ansible/modules/extras/cloud/amazon/cloudtrail.py
new file mode 100644
index 0000000000..557f2ebaae
--- /dev/null
+++ b/lib/ansible/modules/extras/cloud/amazon/cloudtrail.py
@@ -0,0 +1,229 @@
+#!/usr/bin/python
+# This file is part of Ansible
+#
+# Ansible is free software: you can redistribute it and/or modify
+# it under the terms of the GNU General Public License as published by
+# the Free Software Foundation, either version 3 of the License, or
+# (at your option) any later version.
+#
+# Ansible is distributed in the hope that it will be useful,
+# but WITHOUT ANY WARRANTY; without even the implied warranty of
+# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+# GNU General Public License for more details.
+#
+# You should have received a copy of the GNU General Public License
+# along with Ansible. If not, see <http://www.gnu.org/licenses/>.
+
+DOCUMENTATION = """
+---
+module: cloudtrail
+short_description: manage CloudTrail creation and deletion
+description:
+ - Creates or deletes CloudTrail configuration. Ensures logging is also enabled.
+version_added: "2.0"
+author:
+ - "Ansible Core Team"
+ - "Ted Timmons"
+requirements:
+ - "boto >= 2.21"
+options:
+ state:
+ description:
+ - add or remove CloudTrail configuration.
+ required: true
+ choices: ['enabled', 'disabled']
+ name:
+ description:
+ - name for given CloudTrail configuration.
+ - This is a primary key and is used to identify the configuration.
+  s3_bucket_name:
+ description:
+ - bucket to place CloudTrail in.
+ - this bucket should exist and have the proper policy. See U(http://docs.aws.amazon.com/awscloudtrail/latest/userguide/aggregating_logs_regions_bucket_policy.html)
+ - required when state=enabled.
+ required: false
+ s3_key_prefix:
+ description:
+ - prefix to keys in bucket. A trailing slash is not necessary and will be removed.
+ required: false
+ include_global_events:
+ description:
+      - record API calls from global services such as IAM and STS.
+ required: false
+    default: true
+ choices: ["true", "false"]
+
+ aws_secret_key:
+ description:
+ - AWS secret key. If not set then the value of the AWS_SECRET_KEY environment variable is used.
+ required: false
+ default: null
+ aliases: [ 'ec2_secret_key', 'secret_key' ]
+ version_added: "1.5"
+ aws_access_key:
+ description:
+ - AWS access key. If not set then the value of the AWS_ACCESS_KEY environment variable is used.
+ required: false
+ default: null
+ aliases: [ 'ec2_access_key', 'access_key' ]
+ version_added: "1.5"
+ region:
+ description:
+ - The AWS region to use. If not specified then the value of the EC2_REGION environment variable, if any, is used.
+ required: false
+ aliases: ['aws_region', 'ec2_region']
+ version_added: "1.5"
+
+extends_documentation_fragment: aws
+"""
+
+EXAMPLES = """
+ - name: enable cloudtrail
+ local_action: cloudtrail
+ state=enabled name=main s3_bucket_name=ourbucket
+ s3_key_prefix=cloudtrail region=us-east-1
+
+ - name: enable cloudtrail with different configuration
+ local_action: cloudtrail
+ state=enabled name=main s3_bucket_name=ourbucket2
+ s3_key_prefix='' region=us-east-1
+
+ - name: remove cloudtrail
+ local_action: cloudtrail state=disabled name=main region=us-east-1
+"""
+
+HAS_BOTO = False
+try:
+ import boto
+ import boto.cloudtrail
+ from boto.regioninfo import RegionInfo
+ HAS_BOTO = True
+except ImportError:
+ HAS_BOTO = False
+
+class CloudTrailManager:
+ """Handles cloudtrail configuration"""
+
+ def __init__(self, module, region=None, **aws_connect_params):
+ self.module = module
+ self.region = region
+ self.aws_connect_params = aws_connect_params
+ self.changed = False
+
+ try:
+ self.conn = connect_to_aws(boto.cloudtrail, self.region, **self.aws_connect_params)
+ except boto.exception.NoAuthHandlerFound, e:
+ self.module.fail_json(msg=str(e))
+
+ def view_status(self, name):
+ return self.conn.get_trail_status(name)
+
+ def view(self, name):
+ ret = self.conn.describe_trails(trail_name_list=[name])
+ trailList = ret.get('trailList', [])
+ if len(trailList) == 1:
+ return trailList[0]
+ return None
+
+ def exists(self, name=None):
+ ret = self.view(name)
+ if ret:
+ return True
+ return False
+
+ def enable_logging(self, name):
+ '''Turn on logging for a cloudtrail that already exists. Throws Exception on error.'''
+ self.conn.start_logging(name)
+
+
+ def enable(self, **create_args):
+ return self.conn.create_trail(**create_args)
+
+ def update(self, **create_args):
+ return self.conn.update_trail(**create_args)
+
+ def delete(self, name):
+ '''Delete a given cloudtrail configuration. Throws Exception on error.'''
+ self.conn.delete_trail(name)
+
+
+
+def main():
+
+ argument_spec = ec2_argument_spec()
+ argument_spec.update(dict(
+ state={'required': True, 'choices': ['enabled', 'disabled'] },
+ name={'required': True, 'type': 'str' },
+ s3_bucket_name={'required': False, 'type': 'str' },
+ s3_key_prefix={'default':'', 'required': False, 'type': 'str' },
+ include_global_events={'default':True, 'required': False, 'type': 'bool' },
+ ))
+    # s3_bucket_name is only needed when enabling a trail, per the option docs above.
+    required_if = [('state', 'enabled', ['s3_bucket_name'])]
+
+    module = AnsibleModule(argument_spec=argument_spec, supports_check_mode=True, required_if=required_if)
+
+ if not HAS_BOTO:
+ module.fail_json(msg='boto is required.')
+
+ ec2_url, access_key, secret_key, region = get_ec2_creds(module)
+ aws_connect_params = dict(aws_access_key_id=access_key,
+ aws_secret_access_key=secret_key)
+
+ if not region:
+ module.fail_json(msg="Region must be specified as a parameter, in EC2_REGION or AWS_REGION environment variables or in boto configuration file")
+
+ ct_name = module.params['name']
+ s3_bucket_name = module.params['s3_bucket_name']
+ # remove trailing slash from the key prefix, really messes up the key structure.
+ s3_key_prefix = module.params['s3_key_prefix'].rstrip('/')
+ include_global_events = module.params['include_global_events']
+
+
+ cf_man = CloudTrailManager(module, region=region, **aws_connect_params)
+
+ results = { 'changed': False }
+ if module.params['state'] == 'enabled':
+ results['exists'] = cf_man.exists(name=ct_name)
+ if results['exists']:
+ results['view'] = cf_man.view(ct_name)
+ # only update if the values have changed.
+ if results['view']['S3BucketName'] != s3_bucket_name or \
+ results['view']['S3KeyPrefix'] != s3_key_prefix or \
+ results['view']['IncludeGlobalServiceEvents'] != include_global_events:
+ if not module.check_mode:
+ results['update'] = cf_man.update(name=ct_name, s3_bucket_name=s3_bucket_name, s3_key_prefix=s3_key_prefix, include_global_service_events=include_global_events)
+ results['changed'] = True
+ else:
+ if not module.check_mode:
+ # doesn't exist. create it.
+ results['enable'] = cf_man.enable(name=ct_name, s3_bucket_name=s3_bucket_name, s3_key_prefix=s3_key_prefix, include_global_service_events=include_global_events)
+ results['changed'] = True
+
+ # given cloudtrail should exist now. Enable the logging.
+ results['view_status'] = cf_man.view_status(ct_name)
+ results['was_logging_enabled'] = results['view_status'].get('IsLogging', False)
+ if not results['was_logging_enabled']:
+ if not module.check_mode:
+ cf_man.enable_logging(ct_name)
+ results['logging_enabled'] = True
+ results['changed'] = True
+
+ # delete the cloudtrail
+ elif module.params['state'] == 'disabled':
+ # check to see if it exists before deleting.
+ results['exists'] = cf_man.exists(name=ct_name)
+ if results['exists']:
+ # it exists, so we should delete it and mark changed.
+ if not module.check_mode:
+ cf_man.delete(ct_name)
+ results['changed'] = True
+
+ module.exit_json(**results)
+
+# import module snippets
+from ansible.module_utils.basic import *
+from ansible.module_utils.ec2 import *
+
+if __name__ == '__main__':
+    main()
diff --git a/lib/ansible/modules/extras/cloud/amazon/cloudwatchevent_rule.py b/lib/ansible/modules/extras/cloud/amazon/cloudwatchevent_rule.py
new file mode 100644
index 0000000000..8fda1c125a
--- /dev/null
+++ b/lib/ansible/modules/extras/cloud/amazon/cloudwatchevent_rule.py
@@ -0,0 +1,409 @@
+#!/usr/bin/python
+# This file is part of Ansible
+#
+# Ansible is free software: you can redistribute it and/or modify
+# it under the terms of the GNU General Public License as published by
+# the Free Software Foundation, either version 3 of the License, or
+# (at your option) any later version.
+#
+# Ansible is distributed in the hope that it will be useful,
+# but WITHOUT ANY WARRANTY; without even the implied warranty of
+# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+# GNU General Public License for more details.
+#
+# You should have received a copy of the GNU General Public License
+# along with Ansible. If not, see <http://www.gnu.org/licenses/>.
+
+DOCUMENTATION = '''
+---
+module: cloudwatchevent_rule
+short_description: Manage CloudWatch Event rules and targets
+description:
+ - This module creates and manages CloudWatch event rules and targets.
+version_added: "2.2"
+extends_documentation_fragment:
+ - aws
+author: "Jim Dalton (@jsdalton) <jim.dalton@gmail.com>"
+requirements:
+ - python >= 2.6
+ - boto3
+notes:
+ - A rule must contain at least an I(event_pattern) or I(schedule_expression). A
+ rule can have both an I(event_pattern) and a I(schedule_expression), in which
+ case the rule will trigger on matching events as well as on a schedule.
+ - When specifying targets, I(input) and I(input_path) are mutually-exclusive
+ and optional parameters.
+options:
+ name:
+ description:
+ - The name of the rule you are creating, updating or deleting. No spaces
+ or special characters allowed (i.e. must match C([\.\-_A-Za-z0-9]+))
+ required: true
+ schedule_expression:
+ description:
+ - A cron or rate expression that defines the schedule the rule will
+ trigger on. For example, C(cron(0 20 * * ? *)), C(rate(5 minutes))
+ required: false
+ event_pattern:
+ description:
+ - A string pattern (in valid JSON format) that is used to match against
+ incoming events to determine if the rule should be triggered
+ required: false
+ state:
+ description:
+ - Whether the rule is present (and enabled), disabled, or absent
+ choices: ["present", "disabled", "absent"]
+ default: present
+ required: false
+ description:
+ description:
+ - A description of the rule
+ required: false
+ role_arn:
+ description:
+ - The Amazon Resource Name (ARN) of the IAM role associated with the rule
+ required: false
+ targets:
+ description:
+ - "A dictionary array of targets to add to or update for the rule, in the
+ form C({ id: [string], arn: [string], input: [valid JSON string], input_path: [valid JSONPath string] }).
+ I(id) [required] is the unique target assignment ID. I(arn) (required)
+ is the Amazon Resource Name associated with the target. I(input)
+ (optional) is a JSON object that will override the event data when
+ passed to the target. I(input_path) (optional) is a JSONPath string
+ (e.g. C($.detail)) that specifies the part of the event data to be
+ passed to the target. If neither I(input) nor I(input_path) is
+ specified, then the entire event is passed to the target in JSON form."
+ required: false
+'''
+
+EXAMPLES = '''
+- cloudwatchevent_rule:
+ name: MyCronTask
+ schedule_expression: "cron(0 20 * * ? *)"
+ description: Run my scheduled task
+ targets:
+ - id: MyTargetId
+ arn: arn:aws:lambda:us-east-1:123456789012:function:MyFunction
+
+- cloudwatchevent_rule:
+ name: MyDisabledCronTask
+ schedule_expression: "cron(5 minutes)"
+ description: Run my disabled scheduled task
+ state: disabled
+ targets:
+ - id: MyOtherTargetId
+ arn: arn:aws:lambda:us-east-1:123456789012:function:MyFunction
+ input: '{"foo": "bar"}'
+
+- cloudwatchevent_rule: name=MyCronTask state=absent
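+
+# A sketch of the input_path target option described in the notes above; the
+# rule name and target id are illustrative, and $.detail is the JSONPath
+# example from the option documentation.
+- cloudwatchevent_rule:
+    name: MyInputPathTask
+    schedule_expression: "rate(5 minutes)"
+    targets:
+      - id: MyPathTargetId
+        arn: arn:aws:lambda:us-east-1:123456789012:function:MyFunction
+        input_path: "$.detail"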
+'''
+
+RETURN = '''
+rule:
+ description: CloudWatch Event rule data
+ returned: success
+ type: dict
+ sample: "{ 'arn': 'arn:aws:events:us-east-1:123456789012:rule/MyCronTask', 'description': 'Run my scheduled task', 'name': 'MyCronTask', 'schedule_expression': 'cron(0 20 * * ? *)', 'state': 'ENABLED' }"
+targets:
+ description: CloudWatch Event target(s) assigned to the rule
+ returned: success
+ type: list
+ sample: "[{ 'arn': 'arn:aws:lambda:us-east-1:123456789012:function:MyFunction', 'id': 'MyTargetId' }]"
+'''
+
+# boto3/botocore are also pulled in indirectly via module_utils.ec2, but import
+# them explicitly here since botocore.exceptions is referenced below.
+try:
+    import boto3
+    import botocore
+    HAS_BOTO3 = True
+except ImportError:
+    HAS_BOTO3 = False
+
+class CloudWatchEventRule(object):
+ def __init__(self, module, name, client, schedule_expression=None,
+ event_pattern=None, description=None, role_arn=None):
+ self.name = name
+ self.client = client
+ self.changed = False
+ self.schedule_expression = schedule_expression
+ self.event_pattern = event_pattern
+ self.description = description
+ self.role_arn = role_arn
+
+ def describe(self):
+ """Returns the existing details of the rule in AWS"""
+ try:
+ rule_info = self.client.describe_rule(Name=self.name)
+ except botocore.exceptions.ClientError as e:
+ error_code = e.response.get('Error', {}).get('Code')
+ if error_code == 'ResourceNotFoundException':
+ return {}
+ raise
+ return self._snakify(rule_info)
+
+ def put(self, enabled=True):
+ """Creates or updates the rule in AWS"""
+ request = {
+ 'Name': self.name,
+ 'State': "ENABLED" if enabled else "DISABLED",
+ }
+ if self.schedule_expression:
+ request['ScheduleExpression'] = self.schedule_expression
+ if self.event_pattern:
+ request['EventPattern'] = self.event_pattern
+ if self.description:
+ request['Description'] = self.description
+ if self.role_arn:
+ request['RoleArn'] = self.role_arn
+ response = self.client.put_rule(**request)
+ self.changed = True
+ return response
+
+ def delete(self):
+ """Deletes the rule in AWS"""
+ self.remove_all_targets()
+ response = self.client.delete_rule(Name=self.name)
+ self.changed = True
+ return response
+
+ def enable(self):
+ """Enables the rule in AWS"""
+ response = self.client.enable_rule(Name=self.name)
+ self.changed = True
+ return response
+
+ def disable(self):
+ """Disables the rule in AWS"""
+ response = self.client.disable_rule(Name=self.name)
+ self.changed = True
+ return response
+
+ def list_targets(self):
+ """Lists the existing targets for the rule in AWS"""
+ try:
+ targets = self.client.list_targets_by_rule(Rule=self.name)
+ except botocore.exceptions.ClientError as e:
+ error_code = e.response.get('Error', {}).get('Code')
+ if error_code == 'ResourceNotFoundException':
+ return []
+ raise
+ return self._snakify(targets)['targets']
+
+ def put_targets(self, targets):
+ """Creates or updates the provided targets on the rule in AWS"""
+ if not targets:
+ return
+ request = {
+ 'Rule': self.name,
+ 'Targets': self._targets_request(targets),
+ }
+ response = self.client.put_targets(**request)
+ self.changed = True
+ return response
+
+ def remove_targets(self, target_ids):
+ """Removes the provided targets from the rule in AWS"""
+ if not target_ids:
+ return
+ request = {
+ 'Rule': self.name,
+ 'Ids': target_ids
+ }
+ response = self.client.remove_targets(**request)
+ self.changed = True
+ return response
+
+ def remove_all_targets(self):
+ """Removes all targets on rule"""
+ targets = self.list_targets()
+ return self.remove_targets([t['id'] for t in targets])
+
+ def _targets_request(self, targets):
+ """Formats each target for the request"""
+ targets_request = []
+ for target in targets:
+ target_request = {
+ 'Id': target['id'],
+ 'Arn': target['arn']
+ }
+ if 'input' in target:
+ target_request['Input'] = target['input']
+ if 'input_path' in target:
+ target_request['InputPath'] = target['input_path']
+ targets_request.append(target_request)
+ return targets_request
+
+    def _snakify(self, attrs):
+        """Converts camel case keys to snake case"""
+        return camel_dict_to_snake_dict(attrs)
+
+
+class CloudWatchEventRuleManager(object):
+ RULE_FIELDS = ['name', 'event_pattern', 'schedule_expression', 'description', 'role_arn']
+
+ def __init__(self, rule, targets):
+ self.rule = rule
+ self.targets = targets
+
+ def ensure_present(self, enabled=True):
+ """Ensures the rule and targets are present and synced"""
+ rule_description = self.rule.describe()
+ if rule_description:
+ # Rule exists so update rule, targets and state
+ self._sync_rule(enabled)
+ self._sync_targets()
+ self._sync_state(enabled)
+ else:
+ # Rule does not exist, so create new rule and targets
+ self._create(enabled)
+
+ def ensure_disabled(self):
+ """Ensures the rule and targets are present, but disabled, and synced"""
+ self.ensure_present(enabled=False)
+
+ def ensure_absent(self):
+ """Ensures the rule and targets are absent"""
+ rule_description = self.rule.describe()
+ if not rule_description:
+ # Rule doesn't exist so don't need to delete
+ return
+ self.rule.delete()
+
+ def fetch_aws_state(self):
+ """Retrieves rule and target state from AWS"""
+ aws_state = {
+ 'rule': {},
+ 'targets': [],
+ 'changed': self.rule.changed
+ }
+ rule_description = self.rule.describe()
+ if not rule_description:
+ return aws_state
+
+ # Don't need to include response metadata noise in response
+ del rule_description['response_metadata']
+
+ aws_state['rule'] = rule_description
+ aws_state['targets'].extend(self.rule.list_targets())
+ return aws_state
+
+ def _sync_rule(self, enabled=True):
+ """Syncs local rule state with AWS"""
+ if not self._rule_matches_aws():
+ self.rule.put(enabled)
+
+ def _sync_targets(self):
+ """Syncs local targets with AWS"""
+ # Identify and remove extraneous targets on AWS
+ target_ids_to_remove = self._remote_target_ids_to_remove()
+ if target_ids_to_remove:
+ self.rule.remove_targets(target_ids_to_remove)
+
+ # Identify targets that need to be added or updated on AWS
+ targets_to_put = self._targets_to_put()
+ if targets_to_put:
+ self.rule.put_targets(targets_to_put)
+
+ def _sync_state(self, enabled=True):
+ """Syncs local rule state with AWS"""
+ remote_state = self._remote_state()
+ if enabled and remote_state != 'ENABLED':
+ self.rule.enable()
+ elif not enabled and remote_state != 'DISABLED':
+ self.rule.disable()
+
+ def _create(self, enabled=True):
+ """Creates rule and targets on AWS"""
+ self.rule.put(enabled)
+ self.rule.put_targets(self.targets)
+
+ def _rule_matches_aws(self):
+ """Checks if the local rule data matches AWS"""
+ aws_rule_data = self.rule.describe()
+
+ # The rule matches AWS only if all rule data fields are equal
+ # to their corresponding local value defined in the task
+ return all([
+ getattr(self.rule, field) == aws_rule_data.get(field, None)
+ for field in self.RULE_FIELDS
+ ])
+
+ def _targets_to_put(self):
+ """Returns a list of targets that need to be updated or added remotely"""
+ remote_targets = self.rule.list_targets()
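+        # Note: this is a plain dict comparison, so a remote target that carries
+        # keys not present in the task's target definition will not match and
+        # will be re-put.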
+ return [t for t in self.targets if t not in remote_targets]
+
+ def _remote_target_ids_to_remove(self):
+ """Returns a list of targets that need to be removed remotely"""
+ target_ids = [t['id'] for t in self.targets]
+ remote_targets = self.rule.list_targets()
+ return [
+ rt['id'] for rt in remote_targets if rt['id'] not in target_ids
+ ]
+
+ def _remote_state(self):
+ """Returns the remote state from AWS"""
+ description = self.rule.describe()
+ if not description:
+ return
+ return description['state']
+
+
+def get_cloudwatchevents_client(module):
+ """Returns a boto3 client for accessing CloudWatch Events"""
+ try:
+ region, ec2_url, aws_conn_kwargs = get_aws_connection_info(module,
+ boto3=True)
+ if not region:
+ module.fail_json(msg="Region must be specified as a parameter, in \
+ EC2_REGION or AWS_REGION environment variables \
+ or in boto configuration file")
+ return boto3_conn(module, conn_type='client',
+ resource='events',
+ region=region, endpoint=ec2_url,
+ **aws_conn_kwargs)
+    except botocore.exceptions.NoCredentialsError as e:
+ module.fail_json(msg=str(e))
+
+
+def main():
+ argument_spec = ec2_argument_spec()
+ argument_spec.update(dict(
+        name=dict(required=True),
+        schedule_expression=dict(),
+        event_pattern=dict(),
+        state=dict(choices=['present', 'disabled', 'absent'],
+                   default='present'),
+        description=dict(),
+        role_arn=dict(),
+        targets=dict(type='list', default=[]),
+ ))
+ module = AnsibleModule(argument_spec=argument_spec)
+
+ if not HAS_BOTO3:
+ module.fail_json(msg='boto3 required for this module')
+
+ rule_data = dict(
+ [(rf, module.params.get(rf)) for rf in CloudWatchEventRuleManager.RULE_FIELDS]
+ )
+ targets = module.params.get('targets')
+ state = module.params.get('state')
+
+ cwe_rule = CloudWatchEventRule(module,
+ client=get_cloudwatchevents_client(module),
+ **rule_data)
+ cwe_rule_manager = CloudWatchEventRuleManager(cwe_rule, targets)
+
+ if state == 'present':
+ cwe_rule_manager.ensure_present()
+ elif state == 'disabled':
+ cwe_rule_manager.ensure_disabled()
+ elif state == 'absent':
+ cwe_rule_manager.ensure_absent()
+ else:
+ module.fail_json(msg="Invalid state '{0}' provided".format(state))
+
+ module.exit_json(**cwe_rule_manager.fetch_aws_state())
+
+
+# import module snippets
+from ansible.module_utils.basic import *
+from ansible.module_utils.ec2 import *
+
+
+if __name__ == '__main__':
+ main()
diff --git a/lib/ansible/modules/extras/cloud/amazon/dynamodb_table.py b/lib/ansible/modules/extras/cloud/amazon/dynamodb_table.py
new file mode 100644
index 0000000000..ceafbdea9b
--- /dev/null
+++ b/lib/ansible/modules/extras/cloud/amazon/dynamodb_table.py
@@ -0,0 +1,416 @@
+#!/usr/bin/python
+# This file is part of Ansible
+#
+# Ansible is free software: you can redistribute it and/or modify
+# it under the terms of the GNU General Public License as published by
+# the Free Software Foundation, either version 3 of the License, or
+# (at your option) any later version.
+#
+# Ansible is distributed in the hope that it will be useful,
+# but WITHOUT ANY WARRANTY; without even the implied warranty of
+# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+# GNU General Public License for more details.
+#
+# You should have received a copy of the GNU General Public License
+# along with Ansible. If not, see <http://www.gnu.org/licenses/>.
+
+DOCUMENTATION = """
+---
+module: dynamodb_table
+short_description: Create, update or delete AWS DynamoDB tables.
+version_added: "2.0"
+description:
+ - Create or delete AWS DynamoDB tables.
+ - Can update the provisioned throughput on existing tables.
+ - Returns the status of the specified table.
+author: Alan Loi (@loia)
+requirements:
+ - "boto >= 2.37.0"
+options:
+ state:
+ description:
+ - Create or delete the table
+ required: false
+ choices: ['present', 'absent']
+ default: 'present'
+ name:
+ description:
+ - Name of the table.
+ required: true
+ hash_key_name:
+ description:
+ - Name of the hash key.
+ - Required when C(state=present).
+ required: false
+ default: null
+ hash_key_type:
+ description:
+ - Type of the hash key.
+ required: false
+ choices: ['STRING', 'NUMBER', 'BINARY']
+ default: 'STRING'
+ range_key_name:
+ description:
+ - Name of the range key.
+ required: false
+ default: null
+ range_key_type:
+ description:
+ - Type of the range key.
+ required: false
+ choices: ['STRING', 'NUMBER', 'BINARY']
+ default: 'STRING'
+ read_capacity:
+ description:
+ - Read throughput capacity (units) to provision.
+ required: false
+ default: 1
+ write_capacity:
+ description:
+ - Write throughput capacity (units) to provision.
+ required: false
+ default: 1
+ indexes:
+ description:
+ - List of dictionaries describing indexes to add to the table. Global indexes can be updated; local indexes cannot be updated and do not have separate throughput settings.
+ - "required options: ['name', 'type', 'hash_key_name']"
+ - "valid types: ['all', 'global_all', 'global_include', 'global_keys_only', 'include', 'keys_only']"
+ - "other options: ['hash_key_type', 'range_key_name', 'range_key_type', 'includes', 'read_capacity', 'write_capacity']"
+ required: false
+ default: []
+ version_added: "2.1"
+extends_documentation_fragment:
+ - aws
+ - ec2
+"""
+
+EXAMPLES = '''
+# Create dynamo table with hash and range primary key
+- dynamodb_table:
+ name: my-table
+ region: us-east-1
+ hash_key_name: id
+ hash_key_type: STRING
+ range_key_name: create_time
+ range_key_type: NUMBER
+ read_capacity: 2
+ write_capacity: 2
+
+# Update capacity on existing dynamo table
+- dynamodb_table:
+ name: my-table
+ region: us-east-1
+ read_capacity: 10
+ write_capacity: 10
+
+# set index on existing dynamo table
+- dynamodb_table:
+ name: my-table
+ region: us-east-1
+ indexes:
+ - name: NamedIndex
+ type: global_include
+ hash_key_name: id
+ range_key_name: create_time
+ includes:
+ - other_field
+ - other_field2
+ read_capacity: 10
+ write_capacity: 10
+
+# Delete dynamo table
+- dynamodb_table:
+ name: my-table
+ region: us-east-1
+ state: absent
+'''
+
+RETURN = '''
+table_status:
+ description: The current status of the table.
+ returned: success
+ type: string
+ sample: ACTIVE
+'''
+
+try:
+ import boto
+ import boto.dynamodb2
+ from boto.dynamodb2.table import Table
+ from boto.dynamodb2.fields import HashKey, RangeKey, AllIndex, GlobalAllIndex, GlobalIncludeIndex, GlobalKeysOnlyIndex, IncludeIndex, KeysOnlyIndex
+ from boto.dynamodb2.types import STRING, NUMBER, BINARY
+ from boto.exception import BotoServerError, NoAuthHandlerFound, JSONResponseError
+ from boto.dynamodb2.exceptions import ValidationException
+ HAS_BOTO = True
+
+ DYNAMO_TYPE_MAP = {
+ 'STRING': STRING,
+ 'NUMBER': NUMBER,
+ 'BINARY': BINARY
+ }
+
+except ImportError:
+ HAS_BOTO = False
+
+DYNAMO_TYPE_DEFAULT = 'STRING'
+INDEX_REQUIRED_OPTIONS = ['name', 'type', 'hash_key_name']
+INDEX_OPTIONS = INDEX_REQUIRED_OPTIONS + ['hash_key_type', 'range_key_name', 'range_key_type', 'includes', 'read_capacity', 'write_capacity']
+INDEX_TYPE_OPTIONS = ['all', 'global_all', 'global_include', 'global_keys_only', 'include', 'keys_only']
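+# The index 'type' values above map onto the boto.dynamodb2.fields classes
+# imported earlier (e.g. 'global_all' -> GlobalAllIndex, 'keys_only' ->
+# KeysOnlyIndex); see get_indexes() below for the full mapping.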
+
+
+def create_or_update_dynamo_table(connection, module):
+ table_name = module.params.get('name')
+ hash_key_name = module.params.get('hash_key_name')
+ hash_key_type = module.params.get('hash_key_type')
+ range_key_name = module.params.get('range_key_name')
+ range_key_type = module.params.get('range_key_type')
+ read_capacity = module.params.get('read_capacity')
+ write_capacity = module.params.get('write_capacity')
+ all_indexes = module.params.get('indexes')
+
+    # hash_key_name is documented as required when state=present; fail early
+    # rather than attempting to build a schema without it.
+    if not hash_key_name:
+        module.fail_json(msg="hash_key_name is required when state is 'present'")
+
+    for index in all_indexes:
+ validate_index(index, module)
+
+ schema = get_schema_param(hash_key_name, hash_key_type, range_key_name, range_key_type)
+
+ throughput = {
+ 'read': read_capacity,
+ 'write': write_capacity
+ }
+
+ indexes, global_indexes = get_indexes(all_indexes)
+
+ result = dict(
+ region=module.params.get('region'),
+ table_name=table_name,
+ hash_key_name=hash_key_name,
+ hash_key_type=hash_key_type,
+ range_key_name=range_key_name,
+ range_key_type=range_key_type,
+ read_capacity=read_capacity,
+ write_capacity=write_capacity,
+ indexes=all_indexes,
+ )
+
+ try:
+ table = Table(table_name, connection=connection)
+
+
+ if dynamo_table_exists(table):
+ result['changed'] = update_dynamo_table(table, throughput=throughput, check_mode=module.check_mode, global_indexes=global_indexes)
+ else:
+ if not module.check_mode:
+ Table.create(table_name, connection=connection, schema=schema, throughput=throughput, indexes=indexes, global_indexes=global_indexes)
+ result['changed'] = True
+
+ if not module.check_mode:
+ result['table_status'] = table.describe()['Table']['TableStatus']
+
+ except BotoServerError:
+ result['msg'] = 'Failed to create/update dynamo table due to error: ' + traceback.format_exc()
+ module.fail_json(**result)
+ else:
+ module.exit_json(**result)
+
+
+def delete_dynamo_table(connection, module):
+ table_name = module.params.get('name')
+
+ result = dict(
+ region=module.params.get('region'),
+ table_name=table_name,
+ )
+
+ try:
+ table = Table(table_name, connection=connection)
+
+ if dynamo_table_exists(table):
+ if not module.check_mode:
+ table.delete()
+ result['changed'] = True
+
+ else:
+ result['changed'] = False
+
+ except BotoServerError:
+ result['msg'] = 'Failed to delete dynamo table due to error: ' + traceback.format_exc()
+ module.fail_json(**result)
+ else:
+ module.exit_json(**result)
+
+
+def dynamo_table_exists(table):
+ try:
+ table.describe()
+ return True
+
+    except JSONResponseError as e:
+        if e.message and e.message.startswith('Requested resource not found'):
+            return False
+        else:
+            raise
+
+
+def update_dynamo_table(table, throughput=None, check_mode=False, global_indexes=None):
+ table.describe() # populate table details
+ throughput_changed = False
+ global_indexes_changed = False
+ if has_throughput_changed(table, throughput):
+ if not check_mode:
+ throughput_changed = table.update(throughput=throughput)
+ else:
+ throughput_changed = True
+
+ removed_indexes, added_indexes, index_throughput_changes = get_changed_global_indexes(table, global_indexes)
+ if removed_indexes:
+ if not check_mode:
+ for name, index in removed_indexes.items():
+ global_indexes_changed = table.delete_global_secondary_index(name) or global_indexes_changed
+ else:
+ global_indexes_changed = True
+
+ if added_indexes:
+ if not check_mode:
+ for name, index in added_indexes.items():
+ global_indexes_changed = table.create_global_secondary_index(global_index=index) or global_indexes_changed
+ else:
+ global_indexes_changed = True
+
+ if index_throughput_changes:
+ if not check_mode:
+ # todo: remove try once boto has https://github.com/boto/boto/pull/3447 fixed
+ try:
+ global_indexes_changed = table.update_global_secondary_index(global_indexes=index_throughput_changes) or global_indexes_changed
+ except ValidationException:
+ pass
+ else:
+ global_indexes_changed = True
+
+ return throughput_changed or global_indexes_changed
+
+
+def has_throughput_changed(table, new_throughput):
+ if not new_throughput:
+ return False
+
+ return new_throughput['read'] != table.throughput['read'] or \
+ new_throughput['write'] != table.throughput['write']
+
+
+def get_schema_param(hash_key_name, hash_key_type, range_key_name, range_key_type):
+ if range_key_name:
+ schema = [
+ HashKey(hash_key_name, DYNAMO_TYPE_MAP.get(hash_key_type, DYNAMO_TYPE_MAP[DYNAMO_TYPE_DEFAULT])),
+ RangeKey(range_key_name, DYNAMO_TYPE_MAP.get(range_key_type, DYNAMO_TYPE_MAP[DYNAMO_TYPE_DEFAULT]))
+ ]
+ else:
+ schema = [
+ HashKey(hash_key_name, DYNAMO_TYPE_MAP.get(hash_key_type, DYNAMO_TYPE_MAP[DYNAMO_TYPE_DEFAULT]))
+ ]
+ return schema
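+# For example, get_schema_param('id', 'STRING', 'create_time', 'NUMBER') returns
+# [HashKey('id', STRING), RangeKey('create_time', NUMBER)].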
+
+
+def get_changed_global_indexes(table, global_indexes):
+ table.describe()
+
+ table_index_info = dict((index.name, index.schema()) for index in table.global_indexes)
+ table_index_objects = dict((index.name, index) for index in table.global_indexes)
+ set_index_info = dict((index.name, index.schema()) for index in global_indexes)
+ set_index_objects = dict((index.name, index) for index in global_indexes)
+
+    removed_indexes = dict((name, index) for name, index in table_index_info.items() if name not in set_index_info)
+    added_indexes = dict((name, set_index_objects[name]) for name, index in set_index_info.items() if name not in table_index_info)
+    # todo: uncomment once boto has https://github.com/boto/boto/pull/3447 fixed
+    # index_throughput_changes = dict((name, index.throughput) for name, index in set_index_objects.items() if name not in added_indexes and (index.throughput['read'] != str(table_index_objects[name].throughput['read']) or index.throughput['write'] != str(table_index_objects[name].throughput['write'])))
+    # todo: remove once boto has https://github.com/boto/boto/pull/3447 fixed
+    index_throughput_changes = dict((name, index.throughput) for name, index in set_index_objects.items() if name not in added_indexes)
+
+ return removed_indexes, added_indexes, index_throughput_changes
+
+
+def validate_index(index, module):
+ for key in index:
+ if key not in INDEX_OPTIONS:
+ module.fail_json(msg='%s is not a valid option for an index' % key)
+ for required_option in INDEX_REQUIRED_OPTIONS:
+ if required_option not in index:
+ module.fail_json(msg='%s is a required option for an index' % required_option)
+ if index['type'] not in INDEX_TYPE_OPTIONS:
+ module.fail_json(msg='%s is not a valid index type, must be one of %s' % (index['type'], INDEX_TYPE_OPTIONS))
+
+def get_indexes(all_indexes):
+ indexes = []
+ global_indexes = []
+ for index in all_indexes:
+ name = index['name']
+ schema = get_schema_param(index.get('hash_key_name'), index.get('hash_key_type'), index.get('range_key_name'), index.get('range_key_type'))
+ throughput = {
+ 'read': index.get('read_capacity', 1),
+ 'write': index.get('write_capacity', 1)
+ }
+
+ if index['type'] == 'all':
+ indexes.append(AllIndex(name, parts=schema))
+
+ elif index['type'] == 'global_all':
+ global_indexes.append(GlobalAllIndex(name, parts=schema, throughput=throughput))
+
+ elif index['type'] == 'global_include':
+ global_indexes.append(GlobalIncludeIndex(name, parts=schema, throughput=throughput, includes=index['includes']))
+
+ elif index['type'] == 'global_keys_only':
+ global_indexes.append(GlobalKeysOnlyIndex(name, parts=schema, throughput=throughput))
+
+ elif index['type'] == 'include':
+ indexes.append(IncludeIndex(name, parts=schema, includes=index['includes']))
+
+ elif index['type'] == 'keys_only':
+ indexes.append(KeysOnlyIndex(name, parts=schema))
+
+ return indexes, global_indexes
+
+
+
+def main():
+ argument_spec = ec2_argument_spec()
+ argument_spec.update(dict(
+ state=dict(default='present', choices=['present', 'absent']),
+ name=dict(required=True, type='str'),
+ hash_key_name=dict(required=False, type='str'),  # only needed when state=present (validated above)
+ hash_key_type=dict(default='STRING', type='str', choices=['STRING', 'NUMBER', 'BINARY']),
+ range_key_name=dict(type='str'),
+ range_key_type=dict(default='STRING', type='str', choices=['STRING', 'NUMBER', 'BINARY']),
+ read_capacity=dict(default=1, type='int'),
+ write_capacity=dict(default=1, type='int'),
+ indexes=dict(default=[], type='list'),
+ ))
+
+ module = AnsibleModule(
+ argument_spec=argument_spec,
+ supports_check_mode=True)
+
+ if not HAS_BOTO:
+ module.fail_json(msg='boto required for this module')
+
+ region, ec2_url, aws_connect_params = get_aws_connection_info(module)
+ if not region:
+ module.fail_json(msg='region must be specified')
+
+ try:
+ connection = connect_to_aws(boto.dynamodb2, region, **aws_connect_params)
+ except (NoAuthHandlerFound, AnsibleAWSError) as e:
+ module.fail_json(msg=str(e))
+
+ state = module.params.get('state')
+ if state == 'present':
+ create_or_update_dynamo_table(connection, module)
+ elif state == 'absent':
+ delete_dynamo_table(connection, module)
+
+
+# import module snippets
+from ansible.module_utils.basic import *
+from ansible.module_utils.ec2 import *
+
+if __name__ == '__main__':
+ main()
diff --git a/lib/ansible/modules/extras/cloud/amazon/ec2_ami_copy.py b/lib/ansible/modules/extras/cloud/amazon/ec2_ami_copy.py
new file mode 100644
index 0000000000..88eba11089
--- /dev/null
+++ b/lib/ansible/modules/extras/cloud/amazon/ec2_ami_copy.py
@@ -0,0 +1,253 @@
+#!/usr/bin/python
+# -*- coding: utf-8 -*-
+# This file is part of Ansible
+#
+# Ansible is free software: you can redistribute it and/or modify
+# it under the terms of the GNU General Public License as published by
+# the Free Software Foundation, either version 3 of the License, or
+# (at your option) any later version.
+#
+# Ansible is distributed in the hope that it will be useful,
+# but WITHOUT ANY WARRANTY; without even the implied warranty of
+# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+# GNU General Public License for more details.
+#
+# You should have received a copy of the GNU General Public License
+# along with Ansible. If not, see <http://www.gnu.org/licenses/>.
+
+DOCUMENTATION = '''
+---
+module: ec2_ami_copy
+short_description: copies AMI between AWS regions and returns the new image id
+description:
+ - Copies AMI from a source region to a destination region. This module has a dependency on python-boto >= 2.5
+version_added: "2.0"
+options:
+  source_region:
+    description:
+      - The source region the AMI should be copied from.
+    required: true
+  source_image_id:
+    description:
+      - The ID of the AMI in the source region that should be copied.
+    required: true
+ name:
+ description:
+ - The name for the new copied AMI.
+ required: false
+ default: null
+ description:
+ description:
+ - An optional human-readable string describing the contents and purpose of the new AMI.
+ required: false
+ default: null
+ encrypted:
+ description:
+ - Whether or not to encrypt the target image
+ required: false
+ default: null
+ version_added: "2.2"
+ kms_key_id:
+ description:
+ - KMS key id used to encrypt image. If not specified, uses default EBS Customer Master Key (CMK) for your account.
+ required: false
+ default: null
+ version_added: "2.2"
+ wait:
+ description:
+ - wait for the copied AMI to be in state 'available' before returning.
+ required: false
+ default: "no"
+ choices: [ "yes", "no" ]
+ wait_timeout:
+ description:
+ - how long before wait gives up, in seconds
+ required: false
+ default: 1200
+ tags:
+ description:
+ - a hash/dictionary of tags to add to the new copied AMI; '{"key":"value"}' and '{"key":"value","key":"value"}'
+ required: false
+ default: null
+
+author: Amir Moulavi <amir.moulavi@gmail.com>
+extends_documentation_fragment:
+ - aws
+ - ec2
+'''
+
+EXAMPLES = '''
+# Basic AMI Copy
+- ec2_ami_copy:
+ source_region: us-east-1
+ region: eu-west-1
+ source_image_id: ami-xxxxxxx
+
+# AMI copy wait until available
+- ec2_ami_copy:
+ source_region: us-east-1
+ region: eu-west-1
+ source_image_id: ami-xxxxxxx
+ wait: yes
+ register: image_id
+
+# Named AMI copy
+- ec2_ami_copy:
+ source_region: us-east-1
+ region: eu-west-1
+ source_image_id: ami-xxxxxxx
+ name: My-Awesome-AMI
+ description: latest patch
+
+# Tagged AMI copy
+- ec2_ami_copy:
+ source_region: us-east-1
+ region: eu-west-1
+ source_image_id: ami-xxxxxxx
+ tags:
+ Name: My-Super-AMI
+ Patch: 1.2.3
+
+# Encrypted AMI copy
+- ec2_ami_copy:
+ source_region: us-east-1
+ region: eu-west-1
+ source_image_id: ami-xxxxxxx
+ encrypted: yes
+
+# Encrypted AMI copy with specified key
+- ec2_ami_copy:
+ source_region: us-east-1
+ region: eu-west-1
+ source_image_id: ami-xxxxxxx
+ encrypted: yes
+ kms_key_id: arn:aws:kms:us-east-1:XXXXXXXXXXXX:key/746de6ea-50a4-4bcb-8fbc-e3b29f2d367b
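+
+# A sketch combining wait with a custom wait_timeout (in seconds), both
+# documented above; the timeout value is illustrative.
+- ec2_ami_copy:
+    source_region: us-east-1
+    region: eu-west-1
+    source_image_id: ami-xxxxxxx
+    wait: yes
+    wait_timeout: 1800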
+'''
+
+try:
+ import boto
+ import boto.ec2
+ HAS_BOTO = True
+except ImportError:
+ HAS_BOTO = False
+
+
+def copy_image(module, ec2):
+ """
+ Copies an AMI
+
+ module : AnsibleModule object
+ ec2: authenticated ec2 connection object
+ """
+
+ source_region = module.params.get('source_region')
+ source_image_id = module.params.get('source_image_id')
+ name = module.params.get('name')
+ description = module.params.get('description')
+ encrypted = module.params.get('encrypted')
+ kms_key_id = module.params.get('kms_key_id')
+ tags = module.params.get('tags')
+ wait_timeout = int(module.params.get('wait_timeout'))
+ wait = module.params.get('wait')
+
+ try:
+ params = {'source_region': source_region,
+ 'source_image_id': source_image_id,
+ 'name': name,
+ 'description': description,
+ 'encrypted': encrypted,
+ 'kms_key_id': kms_key_id
+ }
+
+ image_id = ec2.copy_image(**params).image_id
+ except boto.exception.BotoServerError as e:
+ module.fail_json(msg="%s: %s" % (e.error_code, e.error_message))
+
+ img = wait_until_image_is_recognized(module, ec2, wait_timeout, image_id, wait)
+
+ img = wait_until_image_is_copied(module, ec2, wait_timeout, img, image_id, wait)
+
+ register_tags_if_any(module, ec2, tags, image_id)
+
+ module.exit_json(msg="AMI copy operation complete", image_id=image_id, state=img.state, changed=True)
+
+
+# register tags to the copied AMI
+def register_tags_if_any(module, ec2, tags, image_id):
+ if tags:
+ try:
+ ec2.create_tags([image_id], tags)
+ except Exception as e:
+ module.fail_json(msg=str(e))
+
+
+# wait here until the image is copied (i.e. the state becomes 'available')
+def wait_until_image_is_copied(module, ec2, wait_timeout, img, image_id, wait):
+ wait_timeout = time.time() + wait_timeout
+ while wait and wait_timeout > time.time() and (img is None or img.state != 'available'):
+ img = ec2.get_image(image_id)
+ time.sleep(3)
+ if wait and wait_timeout <= time.time():
+ # waiting took too long
+ module.fail_json(msg="timed out waiting for image to be copied")
+ return img
+
+
+# wait until the image is recognized.
+def wait_until_image_is_recognized(module, ec2, wait_timeout, image_id, wait):
+ for i in range(wait_timeout):
+ try:
+ return ec2.get_image(image_id)
+ except boto.exception.EC2ResponseError as e:
+ # This exception we expect initially right after registering the copy with EC2 API
+ if 'InvalidAMIID.NotFound' in e.error_code and wait:
+ time.sleep(1)
+ else:
+ # On any other exception we should fail
+ module.fail_json(
+ msg="Error while trying to find the new image. Using wait=yes and/or a longer wait_timeout may help: " + str(
+ e))
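+    # for/else: reached only if the polling loop exhausts wait_timeout
+    # iterations without returning an image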
+ else:
+ module.fail_json(msg="timed out waiting for image to be recognized")
+
+
+def main():
+ argument_spec = ec2_argument_spec()
+ argument_spec.update(dict(
+ source_region=dict(required=True),
+ source_image_id=dict(required=True),
+ name=dict(),
+ description=dict(default=""),
+ encrypted=dict(type='bool', required=False),
+ kms_key_id=dict(type='str', required=False),
+ wait=dict(type='bool', default=False),
+ wait_timeout=dict(default=1200),
+ tags=dict(type='dict')))
+
+ module = AnsibleModule(argument_spec=argument_spec)
+
+ if not HAS_BOTO:
+ module.fail_json(msg='boto required for this module')
+
+ try:
+ ec2 = ec2_connect(module)
+ except boto.exception.NoAuthHandlerFound as e:
+ module.fail_json(msg=str(e))
+
+ try:
+ region, ec2_url, boto_params = get_aws_connection_info(module)
+ except boto.exception.NoAuthHandlerFound as e:
+ module.fail_json(msg=str(e))
+
+ if not region:
+ module.fail_json(msg="region must be specified")
+
+ copy_image(module, ec2)
+
+
+# import module snippets
+from ansible.module_utils.basic import *
+from ansible.module_utils.ec2 import *
+
+if __name__ == '__main__':
+    main()
diff --git a/lib/ansible/modules/extras/cloud/amazon/ec2_asg_facts.py b/lib/ansible/modules/extras/cloud/amazon/ec2_asg_facts.py
new file mode 100644
index 0000000000..d6eb1dc611
--- /dev/null
+++ b/lib/ansible/modules/extras/cloud/amazon/ec2_asg_facts.py
@@ -0,0 +1,355 @@
+#!/usr/bin/python
+# This file is part of Ansible
+#
+# Ansible is free software: you can redistribute it and/or modify
+# it under the terms of the GNU General Public License as published by
+# the Free Software Foundation, either version 3 of the License, or
+# (at your option) any later version.
+#
+# Ansible is distributed in the hope that it will be useful,
+# but WITHOUT ANY WARRANTY; without even the implied warranty of
+# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+# GNU General Public License for more details.
+#
+# You should have received a copy of the GNU General Public License
+# along with Ansible. If not, see <http://www.gnu.org/licenses/>.
+
+DOCUMENTATION = '''
+---
+module: ec2_asg_facts
+short_description: Gather facts about ec2 Auto Scaling Groups (ASGs) in AWS
+description:
+ - Gather facts about ec2 Auto Scaling Groups (ASGs) in AWS
+version_added: "2.2"
+author: "Rob White (@wimnat)"
+options:
+ name:
+ description:
+ - The prefix or name of the auto scaling group(s) you are searching for.
+ - "Note: This is a regular expression match with implicit '^' (beginning of string). Append '$' for a complete name match."
+ required: false
+ tags:
+ description:
+ - "A dictionary/hash of tags in the format { tag1_name: 'tag1_value', tag2_name: 'tag2_value' } to match against the auto scaling group(s) you are searching for."
+ required: false
+extends_documentation_fragment:
+ - aws
+ - ec2
+'''
+
+EXAMPLES = '''
+# Note: These examples do not set authentication details, see the AWS Guide for details.
+
+# Find all groups
+- ec2_asg_facts:
+ register: asgs
+
+# Find a group with matching name/prefix
+- ec2_asg_facts:
+ name: public-webserver-asg
+ register: asgs
+
+# Find a group with matching tags
+- ec2_asg_facts:
+ tags:
+ project: webapp
+ env: production
+ register: asgs
+
+# Find a group with matching name/prefix and tags
+- ec2_asg_facts:
+ name: myproject
+ tags:
+ env: production
+ register: asgs
+
+# Fail if no groups are found
+- ec2_asg_facts:
+ name: public-webserver-asg
+ register: asgs
+ failed_when: "{{ asgs.results | length == 0 }}"
+
+# Fail if more than 1 group is found
+- ec2_asg_facts:
+ name: public-webserver-asg
+ register: asgs
+ failed_when: "{{ asgs.results | length > 1 }}"
+'''
+
+RETURN = '''
+---
+auto_scaling_group_arn:
+ description: The Amazon Resource Name of the ASG
+ returned: success
+ type: string
+ sample: "arn:aws:autoscaling:us-west-2:1234567890:autoScalingGroup:10787c52-0bcb-427d-82ba-c8e4b008ed2e:autoScalingGroupName/public-webapp-production-1"
+auto_scaling_group_name:
+ description: Name of autoscaling group
+ returned: success
+ type: str
+ sample: "public-webapp-production-1"
+availability_zones:
+ description: List of Availability Zones that are enabled for this ASG.
+ returned: success
+ type: list
+ sample: ["us-west-2a", "us-west-2b", "us-west-2a"]
+created_time:
+ description: The date and time this ASG was created, in ISO 8601 format.
+ returned: success
+ type: string
+ sample: "2015-11-25T00:05:36.309Z"
+default_cooldown:
+ description: The default cooldown time in seconds.
+ returned: success
+ type: int
+ sample: 300
+desired_capacity:
+ description: The number of EC2 instances that should be running in this group.
+ returned: success
+ type: int
+ sample: 3
+health_check_grace_period:
+ description: Length of time in seconds after a new EC2 instance comes into service that Auto Scaling starts checking its health.
+ returned: success
+ type: int
+ sample: 30
+health_check_type:
+ description: The service you want the health status from, one of "EC2" or "ELB".
+ returned: success
+ type: str
+ sample: "ELB"
+instances:
+ description: List of EC2 instances and their status as it relates to the ASG.
+ returned: success
+ type: list
+ sample: [
+ {
+ "availability_zone": "us-west-2a",
+ "health_status": "Healthy",
+ "instance_id": "i-es22ad25",
+ "launch_configuration_name": "public-webapp-production-1",
+ "lifecycle_state": "InService",
+ "protected_from_scale_in": "false"
+ }
+ ]
+launch_configuration_name:
+ description: Name of launch configuration associated with the ASG.
+ returned: success
+ type: str
+ sample: "public-webapp-production-1"
+load_balancer_names:
+ description: List of load balancers names attached to the ASG.
+ returned: success
+ type: list
+ sample: ["elb-webapp-prod"]
+max_size:
+ description: Maximum size of group
+ returned: success
+ type: int
+ sample: 3
+min_size:
+ description: Minimum size of group
+ returned: success
+ type: int
+ sample: 1
+new_instances_protected_from_scale_in:
+ description: Whether or not new instances are protected from automatic scale-in.
+ returned: success
+ type: boolean
+ sample: "false"
+placement_group:
+ description: Placement group into which instances are launched, if any.
+ returned: success
+ type: str
+ sample: None
+status:
+ description: The current state of the group when DeleteAutoScalingGroup is in progress.
+ returned: success
+ type: str
+ sample: None
+tags:
+ description: List of tags for the ASG, and whether or not each tag propagates to instances at launch.
+ returned: success
+ type: list
+ sample: [
+ {
+ "key": "Name",
+ "value": "public-webapp-production-1",
+ "resource_id": "public-webapp-production-1",
+ "resource_type": "auto-scaling-group",
+ "propagate_at_launch": "true"
+ },
+ {
+ "key": "env",
+ "value": "production",
+ "resource_id": "public-webapp-production-1",
+ "resource_type": "auto-scaling-group",
+ "propagate_at_launch": "true"
+ }
+ ]
+termination_policies:
+ description: A list of termination policies for the group.
+ returned: success
+ type: list
+ sample: ["Default"]
+'''
+
+try:
+ import boto3
+ from botocore.exceptions import ClientError
+ HAS_BOTO3 = True
+except ImportError:
+ HAS_BOTO3 = False
+
+def match_asg_tags(tags_to_match, asg):
+    for key, value in tags_to_match.items():
+        for tag in asg['Tags']:
+            if key == tag['Key'] and value == tag['Value']:
+                break
+        else:
+            # for/else: no tag on the ASG matched this key/value pair
+            return False
+    return True
+
+def find_asgs(conn, module, name=None, tags=None):
+ """
+ Args:
+        conn (boto3.AutoScaling.Client): Valid Boto3 ASG client.
+        module (AnsibleModule): Initialized Ansible module object, used for failure reporting.
+        name (str): Optional name of the ASG you are looking for.
+        tags (dict): Optional dictionary of tags and values to search for.
+
+ Basic Usage:
+ >>> name = 'public-webapp-production'
+ >>> tags = { 'env': 'production' }
+ >>> conn = boto3.client('autoscaling', region_name='us-west-2')
+ >>> results = find_asgs(name, conn)
+
+ Returns:
+ List
+ [
+ {
+ "auto_scaling_group_arn": "arn:aws:autoscaling:us-west-2:275977225706:autoScalingGroup:58abc686-9783-4528-b338-3ad6f1cbbbaf:autoScalingGroupName/public-webapp-production",
+ "auto_scaling_group_name": "public-webapp-production",
+ "availability_zones": ["us-west-2c", "us-west-2b", "us-west-2a"],
+ "created_time": "2016-02-02T23:28:42.481000+00:00",
+ "default_cooldown": 300,
+ "desired_capacity": 2,
+ "enabled_metrics": [],
+ "health_check_grace_period": 300,
+ "health_check_type": "ELB",
+ "instances":
+ [
+ {
+ "availability_zone": "us-west-2c",
+ "health_status": "Healthy",
+ "instance_id": "i-047a12cb",
+ "launch_configuration_name": "public-webapp-production-1",
+ "lifecycle_state": "InService",
+ "protected_from_scale_in": false
+ },
+ {
+ "availability_zone": "us-west-2a",
+ "health_status": "Healthy",
+ "instance_id": "i-7a29df2c",
+ "launch_configuration_name": "public-webapp-production-1",
+ "lifecycle_state": "InService",
+ "protected_from_scale_in": false
+ }
+ ],
+ "launch_configuration_name": "public-webapp-production-1",
+ "load_balancer_names": ["public-webapp-production-lb"],
+ "max_size": 4,
+ "min_size": 2,
+ "new_instances_protected_from_scale_in": false,
+ "placement_group": None,
+ "status": None,
+ "suspended_processes": [],
+ "tags":
+ [
+ {
+ "key": "Name",
+ "propagate_at_launch": true,
+ "resource_id": "public-webapp-production",
+ "resource_type": "auto-scaling-group",
+ "value": "public-webapp-production"
+ },
+ {
+ "key": "env",
+ "propagate_at_launch": true,
+ "resource_id": "public-webapp-production",
+ "resource_type": "auto-scaling-group",
+ "value": "production"
+ }
+ ],
+ "termination_policies":
+ [
+ "Default"
+ ],
+ "vpc_zone_identifier":
+ [
+ "subnet-a1b1c1d1",
+ "subnet-a2b2c2d2",
+ "subnet-a3b3c3d3"
+ ]
+ }
+ ]
+ """
+
+ try:
+ asgs = conn.describe_auto_scaling_groups()
+ except ClientError as e:
+ module.fail_json(msg=e.message, **camel_dict_to_snake_dict(e.response))
+
+ matched_asgs = []
+
+    if name is not None:
+        # the user specified a name; build an anchored prefix regex from it
+        name_prog = re.compile(r'^' + name)
+
+ for asg in asgs['AutoScalingGroups']:
+ if name:
+ matched_name = name_prog.search(asg['AutoScalingGroupName'])
+ else:
+ matched_name = True
+
+ if tags:
+ matched_tags = match_asg_tags(tags, asg)
+ else:
+ matched_tags = True
+
+ if matched_name and matched_tags:
+ matched_asgs.append(camel_dict_to_snake_dict(asg))
+
+ return matched_asgs
+
+
+def main():
+
+ argument_spec = ec2_argument_spec()
+ argument_spec.update(
+ dict(
+ name=dict(type='str'),
+ tags=dict(type='dict'),
+ )
+ )
+ module = AnsibleModule(argument_spec=argument_spec)
+
+ if not HAS_BOTO3:
+ module.fail_json(msg='boto3 required for this module')
+
+ asg_name = module.params.get('name')
+ asg_tags = module.params.get('tags')
+
+ try:
+ region, ec2_url, aws_connect_kwargs = get_aws_connection_info(module, boto3=True)
+ autoscaling = boto3_conn(module, conn_type='client', resource='autoscaling', region=region, endpoint=ec2_url, **aws_connect_kwargs)
+ except ClientError as e:
+ module.fail_json(msg=e.message, **camel_dict_to_snake_dict(e.response))
+
+ results = find_asgs(autoscaling, module, name=asg_name, tags=asg_tags)
+ module.exit_json(results=results)
+
+# import module snippets
+from ansible.module_utils.basic import *
+from ansible.module_utils.ec2 import *
+
+if __name__ == '__main__':
+ main()
diff --git a/lib/ansible/modules/extras/cloud/amazon/ec2_customer_gateway.py b/lib/ansible/modules/extras/cloud/amazon/ec2_customer_gateway.py
new file mode 100644
index 0000000000..4e02523a70
--- /dev/null
+++ b/lib/ansible/modules/extras/cloud/amazon/ec2_customer_gateway.py
@@ -0,0 +1,267 @@
+#!/usr/bin/python
+#
+# This is a free software: you can redistribute it and/or modify
+# it under the terms of the GNU General Public License as published by
+# the Free Software Foundation, either version 3 of the License, or
+# (at your option) any later version.
+#
+# This Ansible library is distributed in the hope that it will be useful,
+# but WITHOUT ANY WARRANTY; without even the implied warranty of
+# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+# GNU General Public License for more details.
+#
+# You should have received a copy of the GNU General Public License
+# along with this library. If not, see <http://www.gnu.org/licenses/>.
+
+DOCUMENTATION = '''
+---
+module: ec2_customer_gateway
+short_description: Manage an AWS customer gateway
+description:
+ - Manage an AWS customer gateway
+version_added: "2.2"
+author: Michael Baydoun (@MichaelBaydoun)
+requirements: [ botocore, boto3 ]
+notes:
+ - You cannot create more than one customer gateway with the same IP address. If you run an identical request more than one time, the first request creates the customer gateway, and subsequent requests return information about the existing customer gateway. The subsequent requests do not create new customer gateway resources.
+ - Return values contain customer_gateway and customer_gateways keys which are identical dicts. You should use
+ customer_gateway. See U(https://github.com/ansible/ansible-modules-extras/issues/2773) for details.
+options:
+ bgp_asn:
+ description:
+ - Border Gateway Protocol (BGP) Autonomous System Number (ASN), required when state=present.
+ required: false
+ default: null
+ ip_address:
+ description:
+ - Internet-routable IP address for the customer gateway; must be a static address.
+ required: true
+ name:
+ description:
+ - Name of the customer gateway.
+ required: true
+ state:
+ description:
+ - Create or terminate the Customer Gateway.
+ required: false
+ default: present
+ choices: [ 'present', 'absent' ]
+extends_documentation_fragment:
+ - aws
+ - ec2
+'''
+
+EXAMPLES = '''
+
+# Create Customer Gateway
+- ec2_customer_gateway:
+ bgp_asn: 12345
+ ip_address: 1.2.3.4
+ name: IndianapolisOffice
+ region: us-east-1
+ register: cgw
+
+# Delete Customer Gateway
+- ec2_customer_gateway:
+ ip_address: 1.2.3.4
+ name: IndianapolisOffice
+ state: absent
+ region: us-east-1
+ register: cgw
+'''
+
+RETURN = '''
+gateway.customer_gateways:
+ description: details about the gateway that was created.
+ returned: success
+ type: complex
+ contains:
+ bgp_asn:
+ description: The Border Gateway Autonomous System Number.
+ returned: when exists and gateway is available.
+ sample: 65123
+ type: string
+ customer_gateway_id:
+ description: gateway id assigned by amazon.
+ returned: when exists and gateway is available.
+ sample: cgw-cb6386a2
+ type: string
+ ip_address:
+ description: ip address of your gateway device.
+ returned: when exists and gateway is available.
+ sample: 1.2.3.4
+ type: string
+ state:
+ description: state of gateway.
+ returned: when gateway exists and is available.
+ sample: available
+ type: string
+    tags:
+      description: any tags on the gateway.
+      returned: when gateway exists and is available, and when tags exist.
+      type: list
+ type:
+ description: encryption type.
+ returned: when gateway exists and is available.
+ sample: ipsec.1
+ type: string
+'''
+
+try:
+ from botocore.exceptions import ClientError
+ HAS_BOTOCORE = True
+except ImportError:
+ HAS_BOTOCORE = False
+
+try:
+ import boto3
+ HAS_BOTO3 = True
+except ImportError:
+ HAS_BOTO3 = False
+
+class Ec2CustomerGatewayManager(object):
+
+ def __init__(self, module):
+ self.module = module
+
+ try:
+ region, ec2_url, aws_connect_kwargs = get_aws_connection_info(module, boto3=True)
+ if not region:
+ module.fail_json(msg="Region must be specified as a parameter, in EC2_REGION or AWS_REGION environment variables or in boto configuration file")
+ self.ec2 = boto3_conn(module, conn_type='client', resource='ec2', region=region, endpoint=ec2_url, **aws_connect_kwargs)
+ except ClientError as e:
+ module.fail_json(msg=e.message)
+
+ def ensure_cgw_absent(self, gw_id):
+ response = self.ec2.delete_customer_gateway(
+ DryRun=False,
+ CustomerGatewayId=gw_id
+ )
+ return response
+
+ def ensure_cgw_present(self, bgp_asn, ip_address):
+ response = self.ec2.create_customer_gateway(
+ DryRun=False,
+ Type='ipsec.1',
+ PublicIp=ip_address,
+ BgpAsn=bgp_asn,
+ )
+ return response
+
+ def tag_cgw_name(self, gw_id, name):
+ response = self.ec2.create_tags(
+ DryRun=False,
+ Resources=[
+ gw_id,
+ ],
+ Tags=[
+ {
+ 'Key': 'Name',
+ 'Value': name
+ },
+ ]
+ )
+ return response
+
+ def describe_gateways(self, ip_address):
+ response = self.ec2.describe_customer_gateways(
+ DryRun=False,
+ Filters=[
+ {
+ 'Name': 'state',
+ 'Values': [
+ 'available',
+ ]
+ },
+ {
+ 'Name': 'ip-address',
+ 'Values': [
+ ip_address,
+ ]
+ }
+ ]
+ )
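+        # Only gateways in the 'available' state are matched by the filter above,
+        # so gateways previously deleted at the same IP address are ignored.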
+ return response
+
+
+def main():
+ argument_spec = ec2_argument_spec()
+ argument_spec.update(
+ dict(
+ bgp_asn=dict(required=False, type='int'),
+ ip_address=dict(required=True),
+ name=dict(required=True),
+ state=dict(default='present', choices=['present', 'absent']),
+ )
+ )
+
+ module = AnsibleModule(argument_spec=argument_spec,
+ supports_check_mode=True,
+ required_if=[
+ ('state', 'present', ['bgp_asn'])
+ ]
+ )
+
+ if not HAS_BOTOCORE:
+ module.fail_json(msg='botocore is required.')
+
+ if not HAS_BOTO3:
+ module.fail_json(msg='boto3 is required.')
+
+ gw_mgr = Ec2CustomerGatewayManager(module)
+
+ bgp_asn = module.params.get('bgp_asn')
+ ip_address = module.params.get('ip_address')
+ name = module.params.get('name')
+
+ existing = gw_mgr.describe_gateways(module.params['ip_address'])
+    # describe_customer_gateways returns a key of CustomerGateways, whereas
+    # create_customer_gateway returns a key of CustomerGateway. For consistency,
+    # normalize to CustomerGateway here.
+ existing['CustomerGateway'] = existing['CustomerGateways']
+
+ results = dict(changed=False)
+ if module.params['state'] == 'present':
+ if existing['CustomerGateway']:
+ results['gateway'] = existing
+ if existing['CustomerGateway'][0]['Tags']:
+ tag_array = existing['CustomerGateway'][0]['Tags']
+                for tag in tag_array:
+                    if tag['Key'] == 'Name':
+                        current_name = tag['Value']
+ if current_name != name:
+ results['name'] = gw_mgr.tag_cgw_name(
+ results['gateway']['CustomerGateway'][0]['CustomerGatewayId'],
+ module.params['name'],
+ )
+ results['changed'] = True
+ else:
+ if not module.check_mode:
+ results['gateway'] = gw_mgr.ensure_cgw_present(
+ module.params['bgp_asn'],
+ module.params['ip_address'],
+ )
+ results['name'] = gw_mgr.tag_cgw_name(
+ results['gateway']['CustomerGateway']['CustomerGatewayId'],
+ module.params['name'],
+ )
+ results['changed'] = True
+
+ elif module.params['state'] == 'absent':
+ if existing['CustomerGateway']:
+ results['gateway'] = existing
+ if not module.check_mode:
+ results['gateway'] = gw_mgr.ensure_cgw_absent(
+ existing['CustomerGateway'][0]['CustomerGatewayId']
+ )
+ results['changed'] = True
+
+ pretty_results = camel_dict_to_snake_dict(results)
+ module.exit_json(**pretty_results)
+
+# import module methods
+from ansible.module_utils.basic import *
+from ansible.module_utils.ec2 import *
+
+if __name__ == '__main__':
+ main()
diff --git a/lib/ansible/modules/extras/cloud/amazon/ec2_elb_facts.py b/lib/ansible/modules/extras/cloud/amazon/ec2_elb_facts.py
new file mode 100644
index 0000000000..e386439d1d
--- /dev/null
+++ b/lib/ansible/modules/extras/cloud/amazon/ec2_elb_facts.py
@@ -0,0 +1,245 @@
+#!/usr/bin/python
+#
+# This is a free software: you can redistribute it and/or modify
+# it under the terms of the GNU General Public License as published by
+# the Free Software Foundation, either version 3 of the License, or
+# (at your option) any later version.
+#
+# This Ansible library is distributed in the hope that it will be useful,
+# but WITHOUT ANY WARRANTY; without even the implied warranty of
+# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+# GNU General Public License for more details.
+#
+# You should have received a copy of the GNU General Public License
+# along with this library. If not, see <http://www.gnu.org/licenses/>.
+
+DOCUMENTATION = '''
+---
+module: ec2_elb_facts
+short_description: Gather facts about EC2 Elastic Load Balancers in AWS
+description:
+ - Gather facts about EC2 Elastic Load Balancers in AWS
+version_added: "2.0"
+author:
+ - "Michael Schultz (github.com/mjschultz)"
+ - "Fernando Jose Pando (@nand0p)"
+options:
+ names:
+ description:
+ - List of ELB names to gather facts about. Pass this option to gather facts about a set of ELBs, otherwise, all ELBs are returned.
+ required: false
+ default: null
+ aliases: ['elb_ids', 'ec2_elbs']
+extends_documentation_fragment:
+ - aws
+ - ec2
+'''
+
+EXAMPLES = '''
+# Note: These examples do not set authentication details, see the AWS Guide for details.
+# Output format tries to match ec2_elb_lb module input parameters
+
+# Gather facts about all ELBs
+- action:
+ module: ec2_elb_facts
+ register: elb_facts
+
+- action:
+ module: debug
+ msg: "{{ item.dns_name }}"
+ with_items: elb_facts.elbs
+
+# Gather facts about a particular ELB
+- action:
+ module: ec2_elb_facts
+ names: frontend-prod-elb
+ register: elb_facts
+
+- action:
+ module: debug
+ msg: "{{ elb_facts.elbs.0.dns_name }}"
+
+# Gather facts about a set of ELBs
+- action:
+ module: ec2_elb_facts
+ names:
+ - frontend-prod-elb
+ - backend-prod-elb
+ register: elb_facts
+
+- action:
+ module: debug
+ msg: "{{ item.dns_name }}"
+ with_items: elb_facts.elbs
+
+'''
+
+try:
+ import boto.ec2.elb
+ from boto.ec2.tag import Tag
+ from boto.exception import BotoServerError
+ HAS_BOTO = True
+except ImportError:
+ HAS_BOTO = False
+
+class ElbInformation(object):
+ """ Handles ELB information """
+
+ def __init__(self,
+ module,
+ names,
+ region,
+ **aws_connect_params):
+
+ self.module = module
+ self.names = names
+ self.region = region
+ self.aws_connect_params = aws_connect_params
+ self.connection = self._get_elb_connection()
+
+ def _get_tags(self, elbname):
+ params = {'LoadBalancerNames.member.1': elbname}
+ try:
+ elb_tags = self.connection.get_list('DescribeTags', params, [('member', Tag)])
+ return dict((tag.Key, tag.Value) for tag in elb_tags if hasattr(tag, 'Key'))
+        except Exception:
+ return {}
+
+ def _get_elb_connection(self):
+ try:
+ return connect_to_aws(boto.ec2.elb, self.region, **self.aws_connect_params)
+ except BotoServerError as err:
+ self.module.fail_json(msg=err.message)
+
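+    # Listeners index like tuples: 0=load_balancer_port, 1=instance_port,
+    # 2=protocol, and index 4 (when present) is the SSL certificate id,
+    # hence the IndexError guard below.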
+ def _get_elb_listeners(self, listeners):
+ listener_list = []
+
+ for listener in listeners:
+ listener_dict = {
+ 'load_balancer_port': listener[0],
+ 'instance_port': listener[1],
+ 'protocol': listener[2],
+ }
+
+ try:
+ ssl_certificate_id = listener[4]
+ except IndexError:
+ pass
+ else:
+ if ssl_certificate_id:
+ listener_dict['ssl_certificate_id'] = ssl_certificate_id
+
+ listener_list.append(listener_dict)
+
+ return listener_list
+
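+    # Health check targets look like "TCP:443" or "HTTP:80/index.html";
+    # the latter parses to ping_protocol='http', ping_port=80 and
+    # ping_path='/index.html'.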
+ def _get_health_check(self, health_check):
+ protocol, port_path = health_check.target.split(':')
+ try:
+ port, path = port_path.split('/', 1)
+ path = '/{}'.format(path)
+ except ValueError:
+ port = port_path
+ path = None
+
+ health_check_dict = {
+ 'ping_protocol': protocol.lower(),
+ 'ping_port': int(port),
+ 'response_timeout': health_check.timeout,
+ 'interval': health_check.interval,
+ 'unhealthy_threshold': health_check.unhealthy_threshold,
+ 'healthy_threshold': health_check.healthy_threshold,
+ }
+
+ if path:
+ health_check_dict['ping_path'] = path
+ return health_check_dict
+
+ def _get_elb_info(self, elb):
+ elb_info = {
+ 'name': elb.name,
+ 'zones': elb.availability_zones,
+ 'dns_name': elb.dns_name,
+ 'canonical_hosted_zone_name': elb.canonical_hosted_zone_name,
+ 'canonical_hosted_zone_name_id': elb.canonical_hosted_zone_name_id,
+ 'hosted_zone_name': elb.canonical_hosted_zone_name,
+ 'hosted_zone_id': elb.canonical_hosted_zone_name_id,
+ 'instances': [instance.id for instance in elb.instances],
+ 'listeners': self._get_elb_listeners(elb.listeners),
+ 'scheme': elb.scheme,
+ 'security_groups': elb.security_groups,
+ 'health_check': self._get_health_check(elb.health_check),
+ 'subnets': elb.subnets,
+ 'instances_inservice': [],
+ 'instances_inservice_count': 0,
+ 'instances_outofservice': [],
+ 'instances_outofservice_count': 0,
+ 'instances_inservice_percent': 0.0,
+ 'tags': self._get_tags(elb.name)
+ }
+
+ if elb.vpc_id:
+ elb_info['vpc_id'] = elb.vpc_id
+
+ if elb.instances:
+ try:
+ instance_health = self.connection.describe_instance_health(elb.name)
+ except BotoServerError as err:
+ self.module.fail_json(msg=err.message)
+ elb_info['instances_inservice'] = [inst.instance_id for inst in instance_health if inst.state == 'InService']
+ elb_info['instances_inservice_count'] = len(elb_info['instances_inservice'])
+ elb_info['instances_outofservice'] = [inst.instance_id for inst in instance_health if inst.state == 'OutOfService']
+ elb_info['instances_outofservice_count'] = len(elb_info['instances_outofservice'])
+ elb_info['instances_inservice_percent'] = float(elb_info['instances_inservice_count'])/(
+ float(elb_info['instances_inservice_count']) +
+ float(elb_info['instances_outofservice_count']))*100
+ return elb_info
+
+
+ def list_elbs(self):
+ elb_array = []
+
+ try:
+ all_elbs = self.connection.get_all_load_balancers()
+ except BotoServerError as err:
+ self.module.fail_json(msg = "%s: %s" % (err.error_code, err.error_message))
+
+ if all_elbs:
+ for existing_lb in all_elbs:
+                if not self.names or existing_lb.name in self.names:
+ elb_array.append(self._get_elb_info(existing_lb))
+
+ return elb_array
+
+def main():
+ argument_spec = ec2_argument_spec()
+ argument_spec.update(dict(
+            names=dict(default=[], type='list', aliases=['elb_ids', 'ec2_elbs'])
+ )
+ )
+ module = AnsibleModule(argument_spec=argument_spec)
+
+ if not HAS_BOTO:
+ module.fail_json(msg='boto required for this module')
+
+ region, ec2_url, aws_connect_params = get_aws_connection_info(module)
+
+ if not region:
+ module.fail_json(msg="region must be specified")
+
+ names = module.params['names']
+ elb_information = ElbInformation(module,
+ names,
+ region,
+ **aws_connect_params)
+
+ ec2_facts_result = dict(changed=False,
+ elbs=elb_information.list_elbs())
+
+ module.exit_json(**ec2_facts_result)
+
+from ansible.module_utils.basic import *
+from ansible.module_utils.ec2 import *
+
+if __name__ == '__main__':
+ main()
diff --git a/lib/ansible/modules/extras/cloud/amazon/ec2_eni.py b/lib/ansible/modules/extras/cloud/amazon/ec2_eni.py
new file mode 100644
index 0000000000..79d44f9d46
--- /dev/null
+++ b/lib/ansible/modules/extras/cloud/amazon/ec2_eni.py
@@ -0,0 +1,568 @@
+#!/usr/bin/python
+#
+# This is free software: you can redistribute it and/or modify
+# it under the terms of the GNU General Public License as published by
+# the Free Software Foundation, either version 3 of the License, or
+# (at your option) any later version.
+#
+# This Ansible library is distributed in the hope that it will be useful,
+# but WITHOUT ANY WARRANTY; without even the implied warranty of
+# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+# GNU General Public License for more details.
+#
+# You should have received a copy of the GNU General Public License
+# along with this library. If not, see <http://www.gnu.org/licenses/>.
+
+DOCUMENTATION = '''
+---
+module: ec2_eni
+short_description: Create and optionally attach an Elastic Network Interface (ENI) to an instance
+description:
+ - Create and optionally attach an Elastic Network Interface (ENI) to an instance. If an ENI ID or private_ip is \
+ provided, the existing ENI (if any) will be modified. The 'attached' parameter controls the attachment status \
+ of the network interface.
+version_added: "2.0"
+author: "Rob White (@wimnat)"
+options:
+ eni_id:
+ description:
+ - The ID of the ENI
+ required: false
+ default: null
+ instance_id:
+ description:
+ - Instance ID that you wish to attach ENI to. Since version 2.2, use the 'attached' parameter to attach or \
+ detach an ENI. Prior to 2.2, to detach an ENI from an instance, use 'None'.
+ required: false
+ default: null
+ private_ip_address:
+ description:
+ - Private IP address.
+ required: false
+ default: null
+ subnet_id:
+ description:
+ - ID of subnet in which to create the ENI. Only required when state=present.
+    required: false
+ description:
+ description:
+ - Optional description of the ENI.
+ required: false
+ default: null
+ security_groups:
+ description:
+ - List of security groups associated with the interface. Only used when state=present. Since version 2.2, you \
+ can specify security groups by ID or by name or a combination of both. Prior to 2.2, you can specify only by ID.
+ required: false
+ default: null
+ state:
+ description:
+ - Create or delete ENI
+ required: false
+ default: present
+ choices: [ 'present', 'absent' ]
+ device_index:
+ description:
+ - The index of the device for the network interface attachment on the instance.
+ required: false
+ default: 0
+ attached:
+ description:
+      - Specifies if the network interface should be attached to or detached from the instance. If omitted, the attachment status \
+        won't change.
+    required: false
+    default: null
+ version_added: 2.2
+ force_detach:
+ description:
+ - Force detachment of the interface. This applies either when explicitly detaching the interface by setting instance_id to None or when deleting an interface with state=absent.
+ required: false
+ default: no
+ delete_on_termination:
+ description:
+ - Delete the interface when the instance it is attached to is terminated. You can only specify this flag when the interface is being modified, not on creation.
+ required: false
+ source_dest_check:
+ description:
+ - By default, interfaces perform source/destination checks. NAT instances however need this check to be disabled. You can only specify this flag when the interface is being modified, not on creation.
+ required: false
+ secondary_private_ip_addresses:
+ description:
+ - A list of IP addresses to assign as secondary IP addresses to the network interface. This option is mutually exclusive of secondary_private_ip_address_count
+ required: false
+ version_added: 2.2
+ secondary_private_ip_address_count:
+ description:
+ - The number of secondary IP addresses to assign to the network interface. This option is mutually exclusive of secondary_private_ip_addresses
+ required: false
+ version_added: 2.2
+extends_documentation_fragment:
+ - aws
+ - ec2
+'''
+
+EXAMPLES = '''
+# Note: These examples do not set authentication details, see the AWS Guide for details.
+
+# Create an ENI. As no security group is defined, the ENI will be created in the default security group
+- ec2_eni:
+ private_ip_address: 172.31.0.20
+ subnet_id: subnet-xxxxxxxx
+ state: present
+
+# Create an ENI and attach it to an instance
+- ec2_eni:
+ instance_id: i-xxxxxxx
+ device_index: 1
+ private_ip_address: 172.31.0.20
+ subnet_id: subnet-xxxxxxxx
+ state: present
+
+# Create an ENI with two secondary addresses
+- ec2_eni:
+ subnet_id: subnet-xxxxxxxx
+ state: present
+ secondary_private_ip_address_count: 2
+
+# Assign a secondary IP address to an existing ENI
+# This will purge any existing IPs
+- ec2_eni:
+ subnet_id: subnet-xxxxxxxx
+ eni_id: eni-yyyyyyyy
+ state: present
+ secondary_private_ip_addresses:
+ - 172.16.1.1
+
+# Remove any secondary IP addresses from an existing ENI
+- ec2_eni:
+ subnet_id: subnet-xxxxxxxx
+ eni_id: eni-yyyyyyyy
+ state: present
+    secondary_private_ip_addresses: []
+
+# Destroy an ENI, detaching it from any instance if necessary
+- ec2_eni:
+ eni_id: eni-xxxxxxx
+ force_detach: yes
+ state: absent
+
+# Update an ENI
+- ec2_eni:
+ eni_id: eni-xxxxxxx
+ description: "My new description"
+ state: present
+
+# Detach an ENI from an instance
+- ec2_eni:
+ eni_id: eni-xxxxxxx
+ instance_id: None
+ state: present
+
+### Delete an interface on termination
+# First create the interface
+- ec2_eni:
+ instance_id: i-xxxxxxx
+ device_index: 1
+ private_ip_address: 172.31.0.20
+ subnet_id: subnet-xxxxxxxx
+ state: present
+ register: eni
+
+# Modify the interface to enable the delete_on_termination flag
+- ec2_eni:
+    eni_id: "{{ eni.interface.id }}"
+ delete_on_termination: true
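+
+# Detach an ENI from its instance but keep it, via the 'attached'
+# parameter added in 2.2 (ids here are placeholders)
+- ec2_eni:
+    eni_id: eni-xxxxxxx
+    subnet_id: subnet-xxxxxxxx
+    attached: no
+    state: present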
+
+'''
+
+
+RETURN = '''
+interface:
+ description: Network interface attributes
+ returned: when state != absent
+ type: dictionary
+ contains:
+ description:
+ description: interface description
+ type: string
+ sample: Firewall network interface
+ groups:
+ description: list of security groups
+ type: list of dictionaries
+ sample: [ { "sg-f8a8a9da": "default" } ]
+ id:
+ description: network interface id
+ type: string
+ sample: "eni-1d889198"
+ mac_address:
+ description: interface's physical address
+ type: string
+ sample: "00:00:5E:00:53:23"
+ owner_id:
+ description: aws account id
+ type: string
+ sample: 812381371
+ private_ip_address:
+ description: primary ip address of this interface
+ type: string
+ sample: 10.20.30.40
+ private_ip_addresses:
+ description: list of all private ip addresses associated to this interface
+ type: list of dictionaries
+ sample: [ { "primary_address": true, "private_ip_address": "10.20.30.40" } ]
+ source_dest_check:
+ description: value of source/dest check flag
+ type: boolean
+ sample: True
+ status:
+ description: network interface status
+ type: string
+ sample: "pending"
+ subnet_id:
+ description: which vpc subnet the interface is bound
+ type: string
+ sample: subnet-b0a0393c
+ vpc_id:
+ description: which vpc this network interface is bound
+ type: string
+ sample: vpc-9a9a9da
+
+'''
+
+import time
+import re
+
+try:
+ import boto.ec2
+ import boto.vpc
+ from boto.exception import BotoServerError
+ HAS_BOTO = True
+except ImportError:
+ HAS_BOTO = False
+
+def get_eni_info(interface):
+
+ # Private addresses
+ private_addresses = []
+ for ip in interface.private_ip_addresses:
+ private_addresses.append({ 'private_ip_address': ip.private_ip_address, 'primary_address': ip.primary })
+
+ interface_info = {'id': interface.id,
+ 'subnet_id': interface.subnet_id,
+ 'vpc_id': interface.vpc_id,
+ 'description': interface.description,
+ 'owner_id': interface.owner_id,
+ 'status': interface.status,
+ 'mac_address': interface.mac_address,
+ 'private_ip_address': interface.private_ip_address,
+ 'source_dest_check': interface.source_dest_check,
+ 'groups': dict((group.id, group.name) for group in interface.groups),
+ 'private_ip_addresses': private_addresses
+ }
+
+ if interface.attachment is not None:
+ interface_info['attachment'] = {'attachment_id': interface.attachment.id,
+ 'instance_id': interface.attachment.instance_id,
+ 'device_index': interface.attachment.device_index,
+ 'status': interface.attachment.status,
+ 'attach_time': interface.attachment.attach_time,
+ 'delete_on_termination': interface.attachment.delete_on_termination,
+ }
+
+ return interface_info
+
+
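+# Poll the ENI every 3 seconds until it reaches the requested attachment
+# status; note there is no timeout, so this blocks until AWS converges.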
+def wait_for_eni(eni, status):
+
+ while True:
+ time.sleep(3)
+ eni.update()
+ # If the status is detached we just need attachment to disappear
+ if eni.attachment is None:
+ if status == "detached":
+ break
+ else:
+ if status == "attached" and eni.attachment.status == "attached":
+ break
+
+
+def create_eni(connection, vpc_id, module):
+
+ instance_id = module.params.get("instance_id")
+ attached = module.params.get("attached")
+ if instance_id == 'None':
+ instance_id = None
+ device_index = module.params.get("device_index")
+ subnet_id = module.params.get('subnet_id')
+ private_ip_address = module.params.get('private_ip_address')
+ description = module.params.get('description')
+ security_groups = get_ec2_security_group_ids_from_names(module.params.get('security_groups'), connection, vpc_id=vpc_id, boto3=False)
+ secondary_private_ip_addresses = module.params.get("secondary_private_ip_addresses")
+ secondary_private_ip_address_count = module.params.get("secondary_private_ip_address_count")
+ changed = False
+
+ try:
+ eni = find_eni(connection, module)
+ if eni is None:
+ eni = connection.create_network_interface(subnet_id, private_ip_address, description, security_groups)
+            if attached is True and instance_id is not None:
+ try:
+ eni.attach(instance_id, device_index)
+ except BotoServerError:
+ eni.delete()
+ raise
+ # Wait to allow creation / attachment to finish
+ wait_for_eni(eni, "attached")
+ eni.update()
+
+ if secondary_private_ip_address_count is not None:
+ try:
+ connection.assign_private_ip_addresses(network_interface_id=eni.id, secondary_private_ip_address_count=secondary_private_ip_address_count)
+ except BotoServerError:
+ eni.delete()
+ raise
+
+ if secondary_private_ip_addresses is not None:
+ try:
+ connection.assign_private_ip_addresses(network_interface_id=eni.id, private_ip_addresses=secondary_private_ip_addresses)
+ except BotoServerError:
+ eni.delete()
+ raise
+
+ changed = True
+
+ except BotoServerError as e:
+ module.fail_json(msg=e.message)
+
+ module.exit_json(changed=changed, interface=get_eni_info(eni))
+
+
+def modify_eni(connection, vpc_id, module, eni):
+
+ instance_id = module.params.get("instance_id")
+ attached = module.params.get("attached")
+ device_index = module.params.get("device_index")
+ description = module.params.get('description')
+ security_groups = module.params.get('security_groups')
+ force_detach = module.params.get("force_detach")
+ source_dest_check = module.params.get("source_dest_check")
+ delete_on_termination = module.params.get("delete_on_termination")
+ secondary_private_ip_addresses = module.params.get("secondary_private_ip_addresses")
+ secondary_private_ip_address_count = module.params.get("secondary_private_ip_address_count")
+ changed = False
+
+ try:
+ if description is not None:
+ if eni.description != description:
+ connection.modify_network_interface_attribute(eni.id, "description", description)
+ changed = True
+ if len(security_groups) > 0:
+ groups = get_ec2_security_group_ids_from_names(security_groups, connection, vpc_id=vpc_id, boto3=False)
+ if sorted(get_sec_group_list(eni.groups)) != sorted(groups):
+ connection.modify_network_interface_attribute(eni.id, "groupSet", groups)
+ changed = True
+ if source_dest_check is not None:
+ if eni.source_dest_check != source_dest_check:
+ connection.modify_network_interface_attribute(eni.id, "sourceDestCheck", source_dest_check)
+ changed = True
+ if delete_on_termination is not None and eni.attachment is not None:
+ if eni.attachment.delete_on_termination is not delete_on_termination:
+ connection.modify_network_interface_attribute(eni.id, "deleteOnTermination", delete_on_termination, eni.attachment.id)
+ changed = True
+
+ current_secondary_addresses = [i.private_ip_address for i in eni.private_ip_addresses if not i.primary]
+ if secondary_private_ip_addresses is not None:
+            secondary_addresses_to_remove = list(set(current_secondary_addresses) - set(secondary_private_ip_addresses))
+            if secondary_addresses_to_remove:
+                connection.unassign_private_ip_addresses(network_interface_id=eni.id, private_ip_addresses=secondary_addresses_to_remove, dry_run=False)
+                changed = True
+            secondary_addresses_to_add = list(set(secondary_private_ip_addresses) - set(current_secondary_addresses))
+            if secondary_addresses_to_add:
+                connection.assign_private_ip_addresses(network_interface_id=eni.id, private_ip_addresses=secondary_addresses_to_add, secondary_private_ip_address_count=None, allow_reassignment=False, dry_run=False)
+                changed = True
+ if secondary_private_ip_address_count is not None:
+ current_secondary_address_count = len(current_secondary_addresses)
+
+ if secondary_private_ip_address_count > current_secondary_address_count:
+ connection.assign_private_ip_addresses(network_interface_id=eni.id, private_ip_addresses=None, secondary_private_ip_address_count=(secondary_private_ip_address_count - current_secondary_address_count), allow_reassignment=False, dry_run=False)
+ changed = True
+ elif secondary_private_ip_address_count < current_secondary_address_count:
+ # How many of these addresses do we want to remove
+                secondary_addresses_to_remove_count = current_secondary_address_count - secondary_private_ip_address_count
+                connection.unassign_private_ip_addresses(network_interface_id=eni.id, private_ip_addresses=current_secondary_addresses[:secondary_addresses_to_remove_count], dry_run=False)
+                changed = True
+
+        if attached is True:
+ if eni.attachment and eni.attachment.instance_id != instance_id:
+ detach_eni(eni, module)
+ if eni.attachment is None:
+ eni.attach(instance_id, device_index)
+ wait_for_eni(eni, "attached")
+ changed = True
+        elif attached is False:
+ detach_eni(eni, module)
+
+ except BotoServerError as e:
+ module.fail_json(msg=e.message)
+
+ eni.update()
+ module.exit_json(changed=changed, interface=get_eni_info(eni))
+
+
+def delete_eni(connection, module):
+
+ eni_id = module.params.get("eni_id")
+ force_detach = module.params.get("force_detach")
+
+ try:
+ eni_result_set = connection.get_all_network_interfaces(eni_id)
+ eni = eni_result_set[0]
+
+ if force_detach is True:
+ if eni.attachment is not None:
+ eni.detach(force_detach)
+ # Wait to allow detachment to finish
+ wait_for_eni(eni, "detached")
+ eni.update()
+ eni.delete()
+ changed = True
+ else:
+ eni.delete()
+ changed = True
+
+ module.exit_json(changed=changed)
+ except BotoServerError as e:
+ regex = re.compile('The networkInterface ID \'.*\' does not exist')
+ if regex.search(e.message) is not None:
+ module.exit_json(changed=False)
+ else:
+ module.fail_json(msg=e.message)
+
+
+def detach_eni(eni, module):
+
+ force_detach = module.params.get("force_detach")
+ if eni.attachment is not None:
+ eni.detach(force_detach)
+ wait_for_eni(eni, "detached")
+ eni.update()
+ module.exit_json(changed=True, interface=get_eni_info(eni))
+ else:
+ module.exit_json(changed=False, interface=get_eni_info(eni))
+
+
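+# Find an existing ENI by its id and/or filters: subnet plus private IP
+# when one is given, otherwise the attachment's instance id and device
+# index. Returns the first match, or None if nothing matches.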
+def find_eni(connection, module):
+
+ eni_id = module.params.get("eni_id")
+ subnet_id = module.params.get('subnet_id')
+ private_ip_address = module.params.get('private_ip_address')
+ instance_id = module.params.get('instance_id')
+ device_index = module.params.get('device_index')
+
+ try:
+ filters = {}
+ if subnet_id:
+ filters['subnet-id'] = subnet_id
+ if private_ip_address:
+ filters['private-ip-address'] = private_ip_address
+ else:
+ if instance_id:
+ filters['attachment.instance-id'] = instance_id
+ if device_index:
+ filters['attachment.device-index'] = device_index
+
+ eni_result = connection.get_all_network_interfaces(eni_id, filters=filters)
+ if len(eni_result) > 0:
+ return eni_result[0]
+ else:
+ return None
+
+ except BotoServerError as e:
+ module.fail_json(msg=e.message)
+
+ return None
+
+
+def get_sec_group_list(groups):
+
+ # Build list of remote security groups
+ remote_security_groups = []
+ for group in groups:
+ remote_security_groups.append(group.id.encode())
+
+ return remote_security_groups
+
+
+def _get_vpc_id(connection, module, subnet_id):
+
+ try:
+ return connection.get_all_subnets(subnet_ids=[subnet_id])[0].vpc_id
+ except BotoServerError as e:
+ module.fail_json(msg=e.message)
+
+
+def main():
+ argument_spec = ec2_argument_spec()
+ argument_spec.update(
+ dict(
+ eni_id=dict(default=None, type='str'),
+ instance_id=dict(default=None, type='str'),
+ private_ip_address=dict(type='str'),
+ subnet_id=dict(type='str'),
+ description=dict(type='str'),
+ security_groups=dict(default=[], type='list'),
+ device_index=dict(default=0, type='int'),
+ state=dict(default='present', choices=['present', 'absent']),
+ force_detach=dict(default='no', type='bool'),
+ source_dest_check=dict(default=None, type='bool'),
+ delete_on_termination=dict(default=None, type='bool'),
+ secondary_private_ip_addresses=dict(default=None, type='list'),
+ secondary_private_ip_address_count=dict(default=None, type='int'),
+ attached=dict(default=None, type='bool')
+ )
+ )
+
+ module = AnsibleModule(argument_spec=argument_spec,
+ mutually_exclusive=[
+ ['secondary_private_ip_addresses', 'secondary_private_ip_address_count']
+ ],
+ required_if=([
+ ('state', 'present', ['subnet_id']),
+ ('state', 'absent', ['eni_id']),
+ ('attached', True, ['instance_id'])
+ ])
+ )
+
+ if not HAS_BOTO:
+ module.fail_json(msg='boto required for this module')
+
+ region, ec2_url, aws_connect_params = get_aws_connection_info(module)
+
+ if region:
+ try:
+ connection = connect_to_aws(boto.ec2, region, **aws_connect_params)
+ vpc_connection = connect_to_aws(boto.vpc, region, **aws_connect_params)
+        except (boto.exception.NoAuthHandlerFound, AnsibleAWSError) as e:
+ module.fail_json(msg=str(e))
+ else:
+ module.fail_json(msg="region must be specified")
+
+ state = module.params.get("state")
+ eni_id = module.params.get("eni_id")
+ private_ip_address = module.params.get('private_ip_address')
+
+ if state == 'present':
+ subnet_id = module.params.get("subnet_id")
+ vpc_id = _get_vpc_id(vpc_connection, module, subnet_id)
+
+ eni = find_eni(connection, module)
+ if eni is None:
+ create_eni(connection, vpc_id, module)
+ else:
+ modify_eni(connection, vpc_id, module, eni)
+
+ elif state == 'absent':
+ delete_eni(connection, module)
+
+from ansible.module_utils.basic import *
+from ansible.module_utils.ec2 import *
+
+if __name__ == '__main__':
+ main()
diff --git a/lib/ansible/modules/extras/cloud/amazon/ec2_eni_facts.py b/lib/ansible/modules/extras/cloud/amazon/ec2_eni_facts.py
new file mode 100644
index 0000000000..8b385dabc8
--- /dev/null
+++ b/lib/ansible/modules/extras/cloud/amazon/ec2_eni_facts.py
@@ -0,0 +1,177 @@
+#!/usr/bin/python
+#
+# This is free software: you can redistribute it and/or modify
+# it under the terms of the GNU General Public License as published by
+# the Free Software Foundation, either version 3 of the License, or
+# (at your option) any later version.
+#
+# This Ansible library is distributed in the hope that it will be useful,
+# but WITHOUT ANY WARRANTY; without even the implied warranty of
+# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+# GNU General Public License for more details.
+#
+# You should have received a copy of the GNU General Public License
+# along with this library. If not, see <http://www.gnu.org/licenses/>.
+
+DOCUMENTATION = '''
+---
+module: ec2_eni_facts
+short_description: Gather facts about ec2 ENI interfaces in AWS
+description:
+ - Gather facts about ec2 ENI interfaces in AWS
+version_added: "2.0"
+author: "Rob White (@wimnat)"
+options:
+ filters:
+ description:
+ - A dict of filters to apply. Each dict item consists of a filter key and a filter value. See U(http://docs.aws.amazon.com/AWSEC2/latest/APIReference/API_DescribeNetworkInterfaces.html) for possible filters.
+ required: false
+ default: null
+
+extends_documentation_fragment:
+ - aws
+ - ec2
+'''
+
+EXAMPLES = '''
+# Note: These examples do not set authentication details, see the AWS Guide for details.
+
+# Gather facts about all ENIs
+- ec2_eni_facts:
+
+# Gather facts about a particular ENI
+- ec2_eni_facts:
+ filters:
+ network-interface-id: eni-xxxxxxx
+
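+# Gather facts about all ENIs in a given subnet; filter names follow the
+# EC2 DescribeNetworkInterfaces API (the subnet id is a placeholder)
+- ec2_eni_facts:
+    filters:
+      subnet-id: subnet-xxxxxxxx
+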
+'''
+
+try:
+ import boto.ec2
+ from boto.exception import BotoServerError
+ HAS_BOTO = True
+except ImportError:
+ HAS_BOTO = False
+
+try:
+ import boto3
+ from botocore.exceptions import ClientError, NoCredentialsError
+ HAS_BOTO3 = True
+except ImportError:
+ HAS_BOTO3 = False
+
+
+def list_eni_boto3(connection, module):
+
+ if module.params.get("filters") is None:
+ filters = []
+ else:
+ filters = ansible_dict_to_boto3_filter_list(module.params.get("filters"))
+
+ try:
+ network_interfaces_result = connection.describe_network_interfaces(Filters=filters)
+ except (ClientError, NoCredentialsError) as e:
+ module.fail_json(msg=e.message)
+
+ # Turn the boto3 result in to ansible_friendly_snaked_names
+ snaked_network_interfaces_result = camel_dict_to_snake_dict(network_interfaces_result)
+ for network_interfaces in snaked_network_interfaces_result['network_interfaces']:
+ network_interfaces['tag_set'] = boto3_tag_list_to_ansible_dict(network_interfaces['tag_set'])
+
+ module.exit_json(**snaked_network_interfaces_result)
+
+
+def get_eni_info(interface):
+
+ # Private addresses
+ private_addresses = []
+ for ip in interface.private_ip_addresses:
+ private_addresses.append({ 'private_ip_address': ip.private_ip_address, 'primary_address': ip.primary })
+
+ interface_info = {'id': interface.id,
+ 'subnet_id': interface.subnet_id,
+ 'vpc_id': interface.vpc_id,
+ 'description': interface.description,
+ 'owner_id': interface.owner_id,
+ 'status': interface.status,
+ 'mac_address': interface.mac_address,
+ 'private_ip_address': interface.private_ip_address,
+ 'source_dest_check': interface.source_dest_check,
+ 'groups': dict((group.id, group.name) for group in interface.groups),
+ 'private_ip_addresses': private_addresses
+ }
+
+ if hasattr(interface, 'publicDnsName'):
+ interface_info['association'] = {'public_ip_address': interface.publicIp,
+ 'public_dns_name': interface.publicDnsName,
+ 'ip_owner_id': interface.ipOwnerId
+ }
+
+ if interface.attachment is not None:
+ interface_info['attachment'] = {'attachment_id': interface.attachment.id,
+ 'instance_id': interface.attachment.instance_id,
+ 'device_index': interface.attachment.device_index,
+ 'status': interface.attachment.status,
+ 'attach_time': interface.attachment.attach_time,
+ 'delete_on_termination': interface.attachment.delete_on_termination,
+ }
+
+ return interface_info
+
+
+def list_eni(connection, module):
+
+ filters = module.params.get("filters")
+ interface_dict_array = []
+
+ try:
+ all_eni = connection.get_all_network_interfaces(filters=filters)
+ except BotoServerError as e:
+ module.fail_json(msg=e.message)
+
+ for interface in all_eni:
+ interface_dict_array.append(get_eni_info(interface))
+
+ module.exit_json(interfaces=interface_dict_array)
+
+
+def main():
+ argument_spec = ec2_argument_spec()
+ argument_spec.update(
+ dict(
+            filters=dict(default=None, type='dict')
+ )
+ )
+
+ module = AnsibleModule(argument_spec=argument_spec)
+
+ if not HAS_BOTO:
+ module.fail_json(msg='boto required for this module')
+
+ if HAS_BOTO3:
+ region, ec2_url, aws_connect_params = get_aws_connection_info(module, boto3=True)
+
+ if region:
+ connection = boto3_conn(module, conn_type='client', resource='ec2', region=region, endpoint=ec2_url, **aws_connect_params)
+ else:
+ module.fail_json(msg="region must be specified")
+
+        list_eni_boto3(connection, module)
+ else:
+ region, ec2_url, aws_connect_params = get_aws_connection_info(module)
+
+ if region:
+ try:
+ connection = connect_to_aws(boto.ec2, region, **aws_connect_params)
+            except (boto.exception.NoAuthHandlerFound, AnsibleAWSError) as e:
+ module.fail_json(msg=str(e))
+ else:
+ module.fail_json(msg="region must be specified")
+
+ list_eni(connection, module)
+
+from ansible.module_utils.basic import *
+from ansible.module_utils.ec2 import *
+
+if __name__ == '__main__':
+ main()
diff --git a/lib/ansible/modules/extras/cloud/amazon/ec2_lc_find.py b/lib/ansible/modules/extras/cloud/amazon/ec2_lc_find.py
new file mode 100644
index 0000000000..32e0d0eb3a
--- /dev/null
+++ b/lib/ansible/modules/extras/cloud/amazon/ec2_lc_find.py
@@ -0,0 +1,224 @@
+#!/usr/bin/python
+# encoding: utf-8
+
+# (c) 2015, Jose Armesto <jose@armesto.net>
+#
+# This file is part of Ansible
+#
+# This module is free software: you can redistribute it and/or modify
+# it under the terms of the GNU General Public License as published by
+# the Free Software Foundation, either version 3 of the License, or
+# (at your option) any later version.
+#
+# This software is distributed in the hope that it will be useful,
+# but WITHOUT ANY WARRANTY; without even the implied warranty of
+# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+# GNU General Public License for more details.
+#
+# You should have received a copy of the GNU General Public License
+# along with this software. If not, see <http://www.gnu.org/licenses/>.
+
+DOCUMENTATION = """
+---
+module: ec2_lc_find
+short_description: Find AWS Autoscaling Launch Configurations
+description:
+ - Returns list of matching Launch Configurations for a given name, along with other useful information
+ - Results can be sorted and sliced
+  - It depends on boto3
+ - Based on the work by Tom Bamford (https://github.com/tombamford)
+
+version_added: "2.2"
+author: "Jose Armesto (@fiunchinho)"
+options:
+ region:
+ description:
+ - The AWS region to use.
+ required: true
+ aliases: ['aws_region', 'ec2_region']
+ name_regex:
+ description:
+      - A Launch Configuration name to match.
+      - It will be compiled as a regular expression.
+ required: True
+ sort_order:
+ description:
+ - Order in which to sort results.
+ choices: ['ascending', 'descending']
+ default: 'ascending'
+ required: false
+ limit:
+ description:
+ - How many results to show.
+ - Corresponds to Python slice notation like list[:limit].
+ default: null
+ required: false
+requirements:
+ - "python >= 2.6"
+ - boto3
+"""
+
+EXAMPLES = '''
+# Note: These examples do not set authentication details, see the AWS Guide for details.
+
+# Search for the Launch Configurations that start with "app"
+- ec2_lc_find:
+ name_regex: app.*
+ sort_order: descending
+ limit: 2
+'''
+
+RETURN = '''
+image_id:
+ description: AMI id
+ returned: when Launch Configuration was found
+ type: string
+ sample: "ami-0d75df7e"
+user_data:
+ description: User data used to start instance
+ returned: when Launch Configuration was found
+ type: string
+    sample: "ZXhwb3J0IENMT1VE"
+name:
+    description: Name of the Launch Configuration
+ returned: when Launch Configuration was found
+ type: string
+ sample: "myapp-v123"
+arn:
+    description: ARN of the Launch Configuration
+ returned: when Launch Configuration was found
+ type: string
+ sample: "arn:aws:autoscaling:eu-west-1:12345:launchConfiguration:d82f050e-e315:launchConfigurationName/yourproject"
+instance_type:
+ description: Type of ec2 instance
+ returned: when Launch Configuration was found
+ type: string
+ sample: "t2.small"
+created_time:
+ description: When it was created
+ returned: when Launch Configuration was found
+ type: string
+ sample: "2016-06-29T14:59:22.222000+00:00"
+ebs_optimized:
+ description: Launch Configuration EBS optimized property
+ returned: when Launch Configuration was found
+ type: boolean
+ sample: False
+instance_monitoring:
+ description: Launch Configuration instance monitoring property
+ returned: when Launch Configuration was found
+ type: string
+ sample: {"Enabled": false}
+classic_link_vpc_security_groups:
+ description: Launch Configuration classic link vpc security groups property
+ returned: when Launch Configuration was found
+ type: list
+ sample: []
+block_device_mappings:
+ description: Launch Configuration block device mappings property
+ returned: when Launch Configuration was found
+ type: list
+ sample: []
+keyname:
+ description: Launch Configuration ssh key
+ returned: when Launch Configuration was found
+ type: string
+ sample: mykey
+security_groups:
+ description: Launch Configuration security groups
+ returned: when Launch Configuration was found
+ type: list
+ sample: []
+kernel_id:
+ description: Launch Configuration kernel to use
+ returned: when Launch Configuration was found
+ type: string
+ sample: ''
+ram_disk_id:
+ description: Launch Configuration ram disk property
+ returned: when Launch Configuration was found
+ type: string
+ sample: ''
+associate_public_address:
+ description: Assign public address or not
+ returned: when Launch Configuration was found
+ type: boolean
+ sample: True
+...
+'''
+
+import re
+
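+# describe_launch_configurations is paginated; the boto3 paginator below
+# walks every page, fetching up to MaxItems results, PageSize per call.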
+def find_launch_configs(client, module):
+ name_regex = module.params.get('name_regex')
+ sort_order = module.params.get('sort_order')
+ limit = module.params.get('limit')
+
+ paginator = client.get_paginator('describe_launch_configurations')
+
+ response_iterator = paginator.paginate(
+ PaginationConfig={
+ 'MaxItems': 1000,
+ 'PageSize': 100
+ }
+ )
+
+    results = []
+    for response in response_iterator:
+        # wrap filter() in list() so this also works on Python 3, and keep
+        # accumulating matches across pages rather than only the last page
+        response['LaunchConfigurations'] = list(filter(lambda lc: re.compile(name_regex).match(lc['LaunchConfigurationName']),
+                                                       response['LaunchConfigurations']))
+
+        for lc in response['LaunchConfigurations']:
+ data = {
+ 'name': lc['LaunchConfigurationName'],
+ 'arn': lc['LaunchConfigurationARN'],
+ 'created_time': lc['CreatedTime'],
+ 'user_data': lc['UserData'],
+ 'instance_type': lc['InstanceType'],
+ 'image_id': lc['ImageId'],
+ 'ebs_optimized': lc['EbsOptimized'],
+ 'instance_monitoring': lc['InstanceMonitoring'],
+ 'classic_link_vpc_security_groups': lc['ClassicLinkVPCSecurityGroups'],
+ 'block_device_mappings': lc['BlockDeviceMappings'],
+ 'keyname': lc['KeyName'],
+ 'security_groups': lc['SecurityGroups'],
+ 'kernel_id': lc['KernelId'],
+ 'ram_disk_id': lc['RamdiskId'],
+ 'associate_public_address': lc['AssociatePublicIpAddress'],
+ }
+
+ results.append(data)
+
+ results.sort(key=lambda e: e['name'], reverse=(sort_order == 'descending'))
+
+ if limit:
+ results = results[:int(limit)]
+
+ module.exit_json(changed=False, results=results)
+
+
+def main():
+ argument_spec = ec2_argument_spec()
+ argument_spec.update(dict(
+ region=dict(required=True, aliases=['aws_region', 'ec2_region']),
+ name_regex=dict(required=True),
+ sort_order=dict(required=False, default='ascending', choices=['ascending', 'descending']),
+ limit=dict(required=False, type='int'),
+ )
+ )
+
+ module = AnsibleModule(
+ argument_spec=argument_spec,
+ )
+
+ region, ec2_url, aws_connect_params = get_aws_connection_info(module, True)
+
+ client = boto3_conn(module=module, conn_type='client', resource='autoscaling', region=region, **aws_connect_params)
+ find_launch_configs(client, module)
+
+
+# import module snippets
+from ansible.module_utils.basic import *
+from ansible.module_utils.ec2 import *
+
+if __name__ == '__main__':
+ main()
diff --git a/lib/ansible/modules/extras/cloud/amazon/ec2_remote_facts.py b/lib/ansible/modules/extras/cloud/amazon/ec2_remote_facts.py
new file mode 100644
index 0000000000..5b3f909976
--- /dev/null
+++ b/lib/ansible/modules/extras/cloud/amazon/ec2_remote_facts.py
@@ -0,0 +1,186 @@
+#!/usr/bin/python
+#
+# This is free software: you can redistribute it and/or modify
+# it under the terms of the GNU General Public License as published by
+# the Free Software Foundation, either version 3 of the License, or
+# (at your option) any later version.
+#
+# This Ansible library is distributed in the hope that it will be useful,
+# but WITHOUT ANY WARRANTY; without even the implied warranty of
+# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+# GNU General Public License for more details.
+#
+# You should have received a copy of the GNU General Public License
+# along with this library. If not, see <http://www.gnu.org/licenses/>.
+
+DOCUMENTATION = '''
+---
+module: ec2_remote_facts
+short_description: Gather facts about ec2 instances in AWS
+description:
+ - Gather facts about ec2 instances in AWS
+version_added: "2.0"
+options:
+ filters:
+ description:
+ - A dict of filters to apply. Each dict item consists of a filter key and a filter value. See U(http://docs.aws.amazon.com/AWSEC2/latest/APIReference/API_DescribeInstances.html) for possible filters.
+ required: false
+ default: null
+author:
+ - "Michael Schuett (@michaeljs1990)"
+extends_documentation_fragment:
+ - aws
+ - ec2
+'''
+
+EXAMPLES = '''
+# Note: These examples do not set authentication details, see the AWS Guide for details.
+
+# Gather facts about all ec2 instances
+- ec2_remote_facts:
+
+# Gather facts about all running ec2 instances with a tag of Name:Example
+- ec2_remote_facts:
+ filters:
+ instance-state-name: running
+ "tag:Name": Example
+
+# Gather facts about instance i-123456
+- ec2_remote_facts:
+ filters:
+ instance-id: i-123456
+
+# Gather facts about all instances in vpc-123456 that are t2.small type
+- ec2_remote_facts:
+ filters:
+ vpc-id: vpc-123456
+ instance-type: t2.small
+
+'''
+
+try:
+ import boto.ec2
+ from boto.exception import BotoServerError
+ HAS_BOTO = True
+except ImportError:
+ HAS_BOTO = False
+
+def get_instance_info(instance):
+
+ # Get groups
+ groups = []
+ for group in instance.groups:
+ groups.append({ 'id': group.id, 'name': group.name }.copy())
+
+ # Get interfaces
+ interfaces = []
+ for interface in instance.interfaces:
+ interfaces.append({ 'id': interface.id, 'mac_address': interface.mac_address }.copy())
+
+ # If an instance is terminated, sourceDestCheck is no longer returned
+ try:
+ source_dest_check = instance.sourceDestCheck
+ except AttributeError:
+ source_dest_check = None
+
+ # Get block device mapping
+ try:
+ bdm_dict = []
+ bdm = getattr(instance, 'block_device_mapping')
+ for device_name in bdm.keys():
+ bdm_dict.append({
+ 'device_name': device_name,
+ 'status': bdm[device_name].status,
+ 'volume_id': bdm[device_name].volume_id,
+ 'delete_on_termination': bdm[device_name].delete_on_termination,
+ 'attach_time': bdm[device_name].attach_time
+ })
+ except AttributeError:
+ pass
+
+ instance_info = { 'id': instance.id,
+ 'kernel': instance.kernel,
+ 'instance_profile': instance.instance_profile,
+ 'root_device_type': instance.root_device_type,
+ 'private_dns_name': instance.private_dns_name,
+ 'public_dns_name': instance.public_dns_name,
+ 'ebs_optimized': instance.ebs_optimized,
+ 'client_token': instance.client_token,
+ 'virtualization_type': instance.virtualization_type,
+ 'architecture': instance.architecture,
+ 'ramdisk': instance.ramdisk,
+ 'tags': instance.tags,
+ 'key_name': instance.key_name,
+ 'source_destination_check': source_dest_check,
+ 'image_id': instance.image_id,
+ 'groups': groups,
+ 'interfaces': interfaces,
+ 'spot_instance_request_id': instance.spot_instance_request_id,
+ 'requester_id': instance.requester_id,
+ 'monitoring_state': instance.monitoring_state,
+ 'placement': {
+ 'tenancy': instance._placement.tenancy,
+ 'zone': instance._placement.zone
+ },
+ 'ami_launch_index': instance.ami_launch_index,
+ 'launch_time': instance.launch_time,
+ 'hypervisor': instance.hypervisor,
+ 'region': instance.region.name,
+ 'persistent': instance.persistent,
+ 'private_ip_address': instance.private_ip_address,
+ 'public_ip_address': instance.ip_address,
+ 'state': instance._state.name,
+ 'vpc_id': instance.vpc_id,
+ 'block_device_mapping': bdm_dict,
+ }
+
+ return instance_info
+
+
+def list_ec2_instances(connection, module):
+
+ filters = module.params.get("filters")
+ instance_dict_array = []
+
+ try:
+ all_instances = connection.get_only_instances(filters=filters)
+ except BotoServerError as e:
+ module.fail_json(msg=e.message)
+
+ for instance in all_instances:
+ instance_dict_array.append(get_instance_info(instance))
+
+ module.exit_json(instances=instance_dict_array)
+
+
+def main():
+ argument_spec = ec2_argument_spec()
+ argument_spec.update(
+ dict(
+            filters=dict(default=None, type='dict')
+ )
+ )
+
+ module = AnsibleModule(argument_spec=argument_spec)
+
+ if not HAS_BOTO:
+ module.fail_json(msg='boto required for this module')
+
+ region, ec2_url, aws_connect_params = get_aws_connection_info(module)
+
+ if region:
+ try:
+ connection = connect_to_aws(boto.ec2, region, **aws_connect_params)
+        except (boto.exception.NoAuthHandlerFound, AnsibleAWSError) as e:
+ module.fail_json(msg=str(e))
+ else:
+ module.fail_json(msg="region must be specified")
+
+ list_ec2_instances(connection, module)
+
+# import module snippets
+from ansible.module_utils.basic import *
+from ansible.module_utils.ec2 import *
+
+if __name__ == '__main__':
+ main()
diff --git a/lib/ansible/modules/extras/cloud/amazon/ec2_snapshot_facts.py b/lib/ansible/modules/extras/cloud/amazon/ec2_snapshot_facts.py
new file mode 100644
index 0000000000..9904eb8591
--- /dev/null
+++ b/lib/ansible/modules/extras/cloud/amazon/ec2_snapshot_facts.py
@@ -0,0 +1,226 @@
+#!/usr/bin/python
+# This file is part of Ansible
+#
+# Ansible is free software: you can redistribute it and/or modify
+# it under the terms of the GNU General Public License as published by
+# the Free Software Foundation, either version 3 of the License, or
+# (at your option) any later version.
+#
+# Ansible is distributed in the hope that it will be useful,
+# but WITHOUT ANY WARRANTY; without even the implied warranty of
+# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+# GNU General Public License for more details.
+#
+# You should have received a copy of the GNU General Public License
+# along with Ansible. If not, see <http://www.gnu.org/licenses/>.
+
+DOCUMENTATION = '''
+---
+module: ec2_snapshot_facts
+short_description: Gather facts about ec2 volume snapshots in AWS
+description:
+ - Gather facts about ec2 volume snapshots in AWS
+version_added: "2.1"
+author: "Rob White (@wimnat)"
+options:
+ snapshot_ids:
+ description:
+ - If you specify one or more snapshot IDs, only snapshots that have the specified IDs are returned.
+ required: false
+ default: []
+ owner_ids:
+ description:
+ - If you specify one or more snapshot owners, only snapshots from the specified owners and for which you have \
+ access are returned.
+ required: false
+ default: []
+ restorable_by_user_ids:
+ description:
+ - If you specify a list of restorable users, only snapshots with create snapshot permissions for those users are \
+ returned.
+ required: false
+ default: []
+ filters:
+ description:
+ - A dict of filters to apply. Each dict item consists of a filter key and a filter value. See \
+ U(http://docs.aws.amazon.com/AWSEC2/latest/APIReference/API_DescribeSnapshots.html) for possible filters. Filter \
+ names and values are case sensitive.
+ required: false
+ default: {}
+notes:
+  - By default, the module will return all snapshots, including public ones. To limit results to snapshots owned by \
+    the account, use the filter 'owner-id'.
+
+extends_documentation_fragment:
+ - aws
+ - ec2
+'''
+
+EXAMPLES = '''
+# Note: These examples do not set authentication details, see the AWS Guide for details.
+
+# Gather facts about all snapshots, including public ones
+- ec2_snapshot_facts:
+
+# Gather facts about all snapshots owned by the account 0123456789
+- ec2_snapshot_facts:
+ filters:
+ owner-id: 0123456789
+
+# Or alternatively...
+- ec2_snapshot_facts:
+ owner_ids:
+ - 0123456789
+
+# Gather facts about a particular snapshot using ID
+- ec2_snapshot_facts:
+ filters:
+ snapshot-id: snap-00112233
+
+# Or alternatively...
+- ec2_snapshot_facts:
+ snapshot_ids:
+ - snap-00112233
+
+# Gather facts about any snapshot with a tag key Name and value Example
+- ec2_snapshot_facts:
+ filters:
+ "tag:Name": Example
+
+# Gather facts about any snapshot with an error status
+- ec2_snapshot_facts:
+ filters:
+ status: error
+
+'''
+
+RETURN = '''
+snapshot_id:
+ description: The ID of the snapshot. Each snapshot receives a unique identifier when it is created.
+ type: string
+ sample: snap-01234567
+volume_id:
+ description: The ID of the volume that was used to create the snapshot.
+ type: string
+ sample: vol-01234567
+state:
+ description: The snapshot state (completed, pending or error).
+ type: string
+ sample: completed
+state_message:
+ description: Encrypted Amazon EBS snapshots are copied asynchronously. If a snapshot copy operation fails (for example, if the proper AWS Key Management Service (AWS KMS) permissions are not obtained) this field displays error state details to help you diagnose why the error occurred.
+ type: string
+ sample:
+start_time:
+ description: The time stamp when the snapshot was initiated.
+ type: datetime
+ sample: 2015-02-12T02:14:02+00:00
+progress:
+ description: The progress of the snapshot, as a percentage.
+ type: string
+ sample: 100%
+owner_id:
+ description: The AWS account ID of the EBS snapshot owner.
+ type: string
+ sample: 099720109477
+description:
+ description: The description for the snapshot.
+ type: string
+ sample: My important backup
+volume_size:
+ description: The size of the volume, in GiB.
+ type: integer
+ sample: 8
+owner_alias:
+ description: The AWS account alias (for example, amazon, self) or AWS account ID that owns the snapshot.
+ type: string
+ sample: 033440102211
+tags:
+ description: Any tags assigned to the snapshot.
+    type: dict
+ sample: "{ 'my_tag_key': 'my_tag_value' }"
+encrypted:
+ description: Indicates whether the snapshot is encrypted.
+ type: boolean
+ sample: True
+kms_key_id:
+ description: The full ARN of the AWS Key Management Service (AWS KMS) customer master key (CMK) that was used to \
+ protect the volume encryption key for the parent volume.
+ type: string
+ sample: 74c9742a-a1b2-45cb-b3fe-abcdef123456
+data_encryption_key_id:
+ description: The data encryption key identifier for the snapshot. This value is a unique identifier that \
+ corresponds to the data encryption key that was used to encrypt the original volume or snapshot copy.
+ type: string
+ sample: "arn:aws:kms:ap-southeast-2:012345678900:key/74c9742a-a1b2-45cb-b3fe-abcdef123456"
+
+'''
+
+try:
+ import boto3
+ from botocore.exceptions import ClientError, NoCredentialsError
+ HAS_BOTO3 = True
+except ImportError:
+ HAS_BOTO3 = False
+
+
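+# describe_snapshots returns a dict with a 'Snapshots' list; each entry is
+# snake_cased below so the keys match the documented RETURN values.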
+def list_ec2_snapshots(connection, module):
+
+ snapshot_ids = module.params.get("snapshot_ids")
+ owner_ids = module.params.get("owner_ids")
+ restorable_by_user_ids = module.params.get("restorable_by_user_ids")
+ filters = ansible_dict_to_boto3_filter_list(module.params.get("filters"))
+
+ try:
+ snapshots = connection.describe_snapshots(SnapshotIds=snapshot_ids, OwnerIds=owner_ids, RestorableByUserIds=restorable_by_user_ids, Filters=filters)
+    except ClientError as e:
+ module.fail_json(msg=e.message)
+
+ # Turn the boto3 result in to ansible_friendly_snaked_names
+ snaked_snapshots = []
+ for snapshot in snapshots['Snapshots']:
+ snaked_snapshots.append(camel_dict_to_snake_dict(snapshot))
+
+ # Turn the boto3 result in to ansible friendly tag dictionary
+ for snapshot in snaked_snapshots:
+ if 'tags' in snapshot:
+ snapshot['tags'] = boto3_tag_list_to_ansible_dict(snapshot['tags'])
+
+ module.exit_json(snapshots=snaked_snapshots)
+
+
+def main():
+
+ argument_spec = ec2_argument_spec()
+ argument_spec.update(
+ dict(
+ snapshot_ids=dict(default=[], type='list'),
+ owner_ids=dict(default=[], type='list'),
+ restorable_by_user_ids=dict(default=[], type='list'),
+ filters=dict(default={}, type='dict')
+ )
+ )
+
+ module = AnsibleModule(argument_spec=argument_spec,
+ mutually_exclusive=[
+ ['snapshot_ids', 'owner_ids', 'restorable_by_user_ids', 'filters']
+ ]
+ )
+
+ if not HAS_BOTO3:
+ module.fail_json(msg='boto3 required for this module')
+
+ region, ec2_url, aws_connect_params = get_aws_connection_info(module, boto3=True)
+
+ if region:
+ connection = boto3_conn(module, conn_type='client', resource='ec2', region=region, endpoint=ec2_url, **aws_connect_params)
+ else:
+ module.fail_json(msg="region must be specified")
+
+ list_ec2_snapshots(connection, module)
+
+from ansible.module_utils.basic import *
+from ansible.module_utils.ec2 import *
+
+if __name__ == '__main__':
+ main()
diff --git a/lib/ansible/modules/extras/cloud/amazon/ec2_vol_facts.py b/lib/ansible/modules/extras/cloud/amazon/ec2_vol_facts.py
new file mode 100644
index 0000000000..e053a772d7
--- /dev/null
+++ b/lib/ansible/modules/extras/cloud/amazon/ec2_vol_facts.py
@@ -0,0 +1,139 @@
+#!/usr/bin/python
+#
+# This is free software: you can redistribute it and/or modify
+# it under the terms of the GNU General Public License as published by
+# the Free Software Foundation, either version 3 of the License, or
+# (at your option) any later version.
+#
+# This Ansible library is distributed in the hope that it will be useful,
+# but WITHOUT ANY WARRANTY; without even the implied warranty of
+# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+# GNU General Public License for more details.
+#
+# You should have received a copy of the GNU General Public License
+# along with this library. If not, see <http://www.gnu.org/licenses/>.
+
+DOCUMENTATION = '''
+---
+module: ec2_vol_facts
+short_description: Gather facts about ec2 volumes in AWS
+description:
+ - Gather facts about ec2 volumes in AWS
+version_added: "2.1"
+author: "Rob White (@wimnat)"
+options:
+ filters:
+ description:
+ - A dict of filters to apply. Each dict item consists of a filter key and a filter value. See U(http://docs.aws.amazon.com/AWSEC2/latest/APIReference/API_DescribeVolumes.html) for possible filters.
+ required: false
+ default: null
+extends_documentation_fragment:
+ - aws
+ - ec2
+'''
+
+EXAMPLES = '''
+# Note: These examples do not set authentication details, see the AWS Guide for details.
+
+# Gather facts about all volumes
+- ec2_vol_facts:
+
+# Gather facts about a particular volume using volume ID
+- ec2_vol_facts:
+ filters:
+ volume-id: vol-00112233
+
+# Gather facts about any volume with a tag key Name and value Example
+- ec2_vol_facts:
+ filters:
+ "tag:Name": Example
+
+# Gather facts about any volume that is attached
+- ec2_vol_facts:
+ filters:
+ attachment.status: attached
+
+'''
+
+# TODO: Disabled the RETURN as it was breaking docs building. Someone needs to
+# fix this
+RETURN = '''# '''
+
+try:
+ import boto.ec2
+ from boto.exception import BotoServerError
+ HAS_BOTO = True
+except ImportError:
+ HAS_BOTO = False
+
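+# Note: boto returns an attach_data object even for unattached volumes,
+# in which case the attachment_set fields below are simply None.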
+def get_volume_info(volume):
+
+ attachment = volume.attach_data
+
+ volume_info = {
+ 'create_time': volume.create_time,
+ 'id': volume.id,
+ 'iops': volume.iops,
+ 'size': volume.size,
+ 'snapshot_id': volume.snapshot_id,
+ 'status': volume.status,
+ 'type': volume.type,
+ 'zone': volume.zone,
+ 'region': volume.region.name,
+ 'attachment_set': {
+ 'attach_time': attachment.attach_time,
+ 'device': attachment.device,
+ 'instance_id': attachment.instance_id,
+ 'status': attachment.status
+ },
+ 'tags': volume.tags
+ }
+
+ return volume_info
+
+def list_ec2_volumes(connection, module):
+
+ filters = module.params.get("filters")
+ volume_dict_array = []
+
+ try:
+ all_volumes = connection.get_all_volumes(filters=filters)
+ except BotoServerError as e:
+ module.fail_json(msg=e.message)
+
+ for volume in all_volumes:
+ volume_dict_array.append(get_volume_info(volume))
+
+ module.exit_json(volumes=volume_dict_array)
+
+
+def main():
+ argument_spec = ec2_argument_spec()
+ argument_spec.update(
+ dict(
+            filters=dict(default=None, type='dict')
+ )
+ )
+
+ module = AnsibleModule(argument_spec=argument_spec)
+
+ if not HAS_BOTO:
+ module.fail_json(msg='boto required for this module')
+
+ region, ec2_url, aws_connect_params = get_aws_connection_info(module)
+
+ if region:
+ try:
+ connection = connect_to_aws(boto.ec2, region, **aws_connect_params)
+        except (boto.exception.NoAuthHandlerFound, AnsibleAWSError) as e:
+ module.fail_json(msg=str(e))
+ else:
+ module.fail_json(msg="region must be specified")
+
+ list_ec2_volumes(connection, module)
+
+from ansible.module_utils.basic import *
+from ansible.module_utils.ec2 import *
+
+if __name__ == '__main__':
+ main()
diff --git a/lib/ansible/modules/extras/cloud/amazon/ec2_vpc_dhcp_options.py b/lib/ansible/modules/extras/cloud/amazon/ec2_vpc_dhcp_options.py
new file mode 100644
index 0000000000..198d063717
--- /dev/null
+++ b/lib/ansible/modules/extras/cloud/amazon/ec2_vpc_dhcp_options.py
@@ -0,0 +1,385 @@
+#!/usr/bin/python
+
+# Ansible is free software: you can redistribute it and/or modify
+# it under the terms of the GNU General Public License as published by
+# the Free Software Foundation, either version 3 of the License, or
+# (at your option) any later version.
+#
+# Ansible is distributed in the hope that it will be useful,
+# but WITHOUT ANY WARRANTY; without even the implied warranty of
+# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+# GNU General Public License for more details.
+#
+# You should have received a copy of the GNU General Public License
+# along with Ansible. If not, see <http://www.gnu.org/licenses/>.
+
+DOCUMENTATION = """
+---
+module: ec2_vpc_dhcp_options
+short_description: Manages DHCP option sets, and can ensure the DHCP options for the given VPC match what's requested
+description:
+  - This module removes or creates DHCP option sets, and can associate them with a VPC.
+    Optionally, a new DHCP Options set can be created that converges a VPC's existing
+    DHCP option set with values provided.
+    When dhcp_options_id is provided, the module will
+    1. remove it (if state='absent'),
+    2. ensure tags are applied (if state='present' and tags are provided), and
+    3. attach it to a VPC (if state='present' and a vpc_id is provided).
+    Any optional values not supplied are either inherited from what already
+    exists for the VPC or reset to empty, depending on inherit_existing.
+    To remove existing options while inheriting, supply an empty value
+    (e.g. set ntp_servers to [] if you want to remove them from the VPC's options).
+    Most of the options should be self-explanatory.
+author: "Joel Thompson (@joelthompson)"
+version_added: 2.1
+options:
+ domain_name:
+ description:
+ - The domain name to set in the DHCP option sets
+ required: false
+ default: None
+ dns_servers:
+ description:
+ - A list of hosts to set the DNS servers for the VPC to. (Should be a
+ list of IP addresses rather than host names.)
+ required: false
+ default: None
+ ntp_servers:
+ description:
+ - List of hosts to advertise as NTP servers for the VPC.
+ required: false
+ default: None
+ netbios_name_servers:
+ description:
+ - List of hosts to advertise as NetBIOS servers.
+ required: false
+ default: None
+ netbios_node_type:
+ description:
+ - NetBIOS node type to advertise in the DHCP options.
+ The AWS recommendation is to use 2 (when using netbios name services)
+ http://docs.aws.amazon.com/AmazonVPC/latest/UserGuide/VPC_DHCP_Options.html
+ required: false
+ default: None
+ vpc_id:
+ description:
+ - VPC ID to associate with the requested DHCP option set.
+        If no vpc_id is provided and no matching option set is found,
+        a new DHCP option set is created.
+ required: false
+ default: None
+ delete_old:
+ description:
+ - Whether to delete the old VPC DHCP option set when associating a new one.
+ This is primarily useful for debugging/development purposes when you
+ want to quickly roll back to the old option set. Note that this setting
+ will be ignored, and the old DHCP option set will be preserved, if it
+ is in use by any other VPC. (Otherwise, AWS will return an error.)
+ required: false
+ default: true
+ inherit_existing:
+ description:
+ - For any DHCP options not specified in these parameters, whether to
+ inherit them from the options set already applied to vpc_id, or to
+ reset them to be empty.
+ required: false
+ default: false
+ tags:
+ description:
+ - Tags to be applied to a VPC options set if a new one is created, or
+ if the resource_id is provided. (options must match)
+ required: False
+ default: None
+ aliases: [ 'resource_tags']
+ version_added: "2.1"
+ dhcp_options_id:
+ description:
+ - The resource_id of an existing DHCP options set.
+ If this is specified, then it will override other settings, except tags
+ (which will be updated to match)
+ required: False
+ default: None
+ version_added: "2.1"
+ state:
+ description:
+ - create/assign or remove the DHCP options.
+        If state is set to absent, then a DHCP options set matched either
+        by id or by tags and options will be removed if possible.
+ required: False
+ default: present
+ choices: [ 'absent', 'present' ]
+ version_added: "2.1"
+extends_documentation_fragment: aws
+requirements:
+ - boto
+"""
+
+RETURN = """
+new_options:
+ description: The DHCP options created, associated or found
+ returned: when appropriate
+ type: dict
+ sample:
+ domain-name-servers:
+ - 10.0.0.1
+ - 10.0.1.1
+    netbios-name-servers:
+ - 10.0.0.1
+ - 10.0.1.1
+ netbios-node-type: 2
+ domain-name: "my.example.com"
+dhcp_options_id:
+  description: The AWS resource ID of the primary DHCP options set created, found or removed
+ type: string
+ returned: when available
+changed:
+ description: Whether the dhcp options were changed
+ type: bool
+ returned: always
+"""
+
+EXAMPLES = """
+# Completely overrides the VPC DHCP options associated with VPC vpc-123456 and deletes any existing
+# DHCP option set that may have been attached to that VPC.
+- ec2_vpc_dhcp_options:
+ domain_name: "foo.example.com"
+ region: us-east-1
+ dns_servers:
+ - 10.0.0.1
+ - 10.0.1.1
+ ntp_servers:
+ - 10.0.0.2
+ - 10.0.1.2
+ netbios_name_servers:
+ - 10.0.0.1
+ - 10.0.1.1
+ netbios_node_type: 2
+ vpc_id: vpc-123456
+ delete_old: True
+ inherit_existing: False
+
+
+# Ensure the DHCP option set for the VPC has 10.0.0.4 and 10.0.1.4 as the specified DNS servers, but
+# keep any other existing settings. Also, keep the old DHCP option set around.
+- ec2_vpc_dhcp_options:
+ region: us-east-1
+ dns_servers:
+ - "{{groups['dns-primary']}}"
+ - "{{groups['dns-secondary']}}"
+ vpc_id: vpc-123456
+ inherit_existing: True
+ delete_old: False
+
+
+## Create a DHCP option set with 4.4.4.4 and 8.8.8.8 as the specified DNS servers, with tags
+## but do not assign to a VPC
+- ec2_vpc_dhcp_options:
+ region: us-east-1
+ dns_servers:
+ - 4.4.4.4
+ - 8.8.8.8
+ tags:
+ Name: google servers
+ Environment: Test
+
+## Delete a DHCP options set that matches the tags and options specified
+- ec2_vpc_dhcp_options:
+ region: us-east-1
+ dns_servers:
+ - 4.4.4.4
+ - 8.8.8.8
+ tags:
+ Name: google servers
+ Environment: Test
+ state: absent
+
+## Associate a DHCP options set with a VPC by ID
+- ec2_vpc_dhcp_options:
+ region: us-east-1
+ dhcp_options_id: dopt-12345678
+ vpc_id: vpc-123456
+
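+## Clear the VPC's NTP servers while inheriting everything else
+## (a hedged sketch of the behaviour described above: an explicit
+## empty list removes just that option from the VPC's option set)
+- ec2_vpc_dhcp_options:
+    region: us-east-1
+    vpc_id: vpc-123456
+    ntp_servers: []
+    inherit_existing: True
+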
+"""
+
+import boto.vpc
+import boto.ec2
+from boto.exception import EC2ResponseError
+import socket
+import collections
+
+def get_resource_tags(vpc_conn, resource_id):
+ return dict((t.name, t.value) for t in vpc_conn.get_all_tags(filters={'resource-id': resource_id}))
+
+def ensure_tags(vpc_conn, resource_id, tags, add_only, module):
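+    """Ensure the tags on resource_id match the requested tags, adding any
+    missing tags and (unless add_only is set) removing extraneous ones."""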
+ try:
+ cur_tags = get_resource_tags(vpc_conn, resource_id)
+ if tags == cur_tags:
+ return {'changed': False, 'tags': cur_tags}
+
+ to_delete = dict((k, cur_tags[k]) for k in cur_tags if k not in tags)
+ if to_delete and not add_only:
+            vpc_conn.delete_tags(resource_id, to_delete, dry_run=module.check_mode)
+
+ to_add = dict((k, tags[k]) for k in tags if k not in cur_tags)
+ if to_add:
+            vpc_conn.create_tags(resource_id, to_add, dry_run=module.check_mode)
+
+ latest_tags = get_resource_tags(vpc_conn, resource_id)
+ return {'changed': True, 'tags': latest_tags}
+ except EC2ResponseError as e:
+        module.fail_json(msg=str(e))
+
+def fetch_dhcp_options_for_vpc(vpc_conn, vpc_id):
+ """
+ Returns the DHCP options object currently associated with the requested VPC ID using the VPC
+ connection variable.
+ """
+ vpcs = vpc_conn.get_all_vpcs(vpc_ids=[vpc_id])
+ if len(vpcs) != 1 or vpcs[0].dhcp_options_id == "default":
+ return None
+ dhcp_options = vpc_conn.get_all_dhcp_options(dhcp_options_ids=[vpcs[0].dhcp_options_id])
+ if len(dhcp_options) != 1:
+ return None
+ return dhcp_options[0]
+
+def match_dhcp_options(vpc_conn, tags=None, options=None):
+ """
+ Finds a DHCP Options object that optionally matches the tags and options provided
+ """
+ dhcp_options = vpc_conn.get_all_dhcp_options()
+ for dopts in dhcp_options:
+ if (not tags) or get_resource_tags(vpc_conn, dopts.id) == tags:
+ if (not options) or dopts.options == options:
+                return (True, dopts)
+    return (False, None)
+
+def remove_dhcp_options_by_id(vpc_conn, dhcp_options_id):
+ associations = vpc_conn.get_all_vpcs(filters={'dhcpOptionsId': dhcp_options_id})
+ if len(associations) > 0:
+ return False
+ else:
+ vpc_conn.delete_dhcp_options(dhcp_options_id)
+ return True
+
+def main():
+ argument_spec = ec2_argument_spec()
+ argument_spec.update(dict(
+ dhcp_options_id=dict(type='str', default=None),
+ domain_name=dict(type='str', default=None),
+ dns_servers=dict(type='list', default=None),
+ ntp_servers=dict(type='list', default=None),
+ netbios_name_servers=dict(type='list', default=None),
+ netbios_node_type=dict(type='int', default=None),
+ vpc_id=dict(type='str', default=None),
+ delete_old=dict(type='bool', default=True),
+ inherit_existing=dict(type='bool', default=False),
+ tags=dict(type='dict', default=None, aliases=['resource_tags']),
+ state=dict(type='str', default='present', choices=['present', 'absent'])
+ )
+ )
+
+ module = AnsibleModule(argument_spec=argument_spec, supports_check_mode=True)
+ params = module.params
+ found = False
+ changed = False
+ new_options = collections.defaultdict(lambda: None)
+
+
+ region, ec2_url, boto_params = get_aws_connection_info(module)
+ connection = connect_to_aws(boto.vpc, region, **boto_params)
+
+ existing_options = None
+
+ # First check if we were given a dhcp_options_id
+ if not params['dhcp_options_id']:
+ # No, so create new_options from the parameters
+        if params['dns_servers'] is not None:
+            new_options['domain-name-servers'] = params['dns_servers']
+        if params['netbios_name_servers'] is not None:
+            new_options['netbios-name-servers'] = params['netbios_name_servers']
+        if params['ntp_servers'] is not None:
+            new_options['ntp-servers'] = params['ntp_servers']
+        if params['domain_name'] is not None:
+            # needs to be a list for comparison with boto objects later
+            new_options['domain-name'] = [params['domain_name']]
+        if params['netbios_node_type'] is not None:
+            # needs to be a list for comparison with boto objects later
+            new_options['netbios-node-type'] = [str(params['netbios_node_type'])]
+ # If we were given a vpc_id then we need to look at the options on that
+ if params['vpc_id']:
+ existing_options = fetch_dhcp_options_for_vpc(connection, params['vpc_id'])
+ # if we've been asked to inherit existing options, do that now
+ if params['inherit_existing']:
+ if existing_options:
+ for option in [ 'domain-name-servers', 'netbios-name-servers', 'ntp-servers', 'domain-name', 'netbios-node-type']:
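+                        # Only inherit an option the caller left unset; an explicit
+                        # empty list means "clear this option on the VPC".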
+ if existing_options.options.get(option) and new_options[option] != [] and (not new_options[option] or [''] == new_options[option]):
+ new_options[option] = existing_options.options.get(option)
+
+        # Do the VPC's DHCP options already match what's requested? If so, we are done.
+ if existing_options and new_options == existing_options.options:
+ module.exit_json(changed=changed, new_options=new_options, dhcp_options_id=existing_options.id)
+
+ # If no vpc_id was given, or the options don't match then look for an existing set using tags
+ found, dhcp_option = match_dhcp_options(connection, params['tags'], new_options)
+
+ # Now let's cover the case where there are existing options that we were told about by id
+ # If a dhcp_options_id was supplied we don't look at options inside, just set tags (if given)
+ else:
+ supplied_options = connection.get_all_dhcp_options(filters={'dhcp-options-id':params['dhcp_options_id']})
+ if len(supplied_options) != 1:
+ if params['state'] != 'absent':
+ module.fail_json(msg=" a dhcp_options_id was supplied, but does not exist")
+ else:
+ found = True
+ dhcp_option = supplied_options[0]
+ if params['state'] != 'absent' and params['tags']:
+                ensure_tags(connection, dhcp_option.id, params['tags'], False, module)
+
+    # Now that we have the DHCP options set, do what's necessary
+
+ # if we found options we were asked to remove then try to do so
+    if params['state'] == 'absent':
+        if found:
+            if module.check_mode:
+                module.exit_json(changed=True, new_options={})
+            changed = remove_dhcp_options_by_id(connection, dhcp_option.id)
+        module.exit_json(changed=changed, new_options={})
+
+ # otherwise if we haven't found the required options we have something to do
+    elif not module.check_mode and not found:
+        # we weren't able to use an existing option set, so create a new one
+        # Convert netbios-node-type and domain-name back to strings
+        if new_options['netbios-node-type']:
+            new_options['netbios-node-type'] = new_options['netbios-node-type'][0]
+        if new_options['domain-name']:
+            new_options['domain-name'] = new_options['domain-name'][0]
+
+        # create the new dhcp options set requested
+        dhcp_option = connection.create_dhcp_options(
+            new_options['domain-name'],
+            new_options['domain-name-servers'],
+            new_options['ntp-servers'],
+            new_options['netbios-name-servers'],
+            new_options['netbios-node-type'])
+        changed = True
+        if params['tags']:
+            ensure_tags(connection, dhcp_option.id, params['tags'], False, module)
+
+ # If we were given a vpc_id, then attach the options we now have to that before we finish
+ if params['vpc_id'] and not module.check_mode:
+ changed = True
+ connection.associate_dhcp_options(dhcp_option.id, params['vpc_id'])
+ # and remove old ones if that was requested
+ if params['delete_old'] and existing_options:
+ remove_dhcp_options_by_id(connection, existing_options.id)
+
+ module.exit_json(changed=changed, new_options=new_options, dhcp_options_id=dhcp_option.id)
+
+
+from ansible.module_utils.basic import *
+from ansible.module_utils.ec2 import *
+
+if __name__ == "__main__":
+ main()
diff --git a/lib/ansible/modules/extras/cloud/amazon/ec2_vpc_dhcp_options_facts.py b/lib/ansible/modules/extras/cloud/amazon/ec2_vpc_dhcp_options_facts.py
new file mode 100644
index 0000000000..a60a210489
--- /dev/null
+++ b/lib/ansible/modules/extras/cloud/amazon/ec2_vpc_dhcp_options_facts.py
@@ -0,0 +1,167 @@
+#!/usr/bin/python
+#
+# This is free software: you can redistribute it and/or modify
+# it under the terms of the GNU General Public License as published by
+# the Free Software Foundation, either version 3 of the License, or
+# (at your option) any later version.
+#
+# This Ansible library is distributed in the hope that it will be useful,
+# but WITHOUT ANY WARRANTY; without even the implied warranty of
+# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+# GNU General Public License for more details.
+#
+# You should have received a copy of the GNU General Public License
+# along with this library. If not, see <http://www.gnu.org/licenses/>.
+
+DOCUMENTATION = '''
+---
+module: ec2_vpc_dhcp_options_facts
+short_description: Gather facts about dhcp options sets in AWS
+description:
+ - Gather facts about dhcp options sets in AWS
+version_added: "2.2"
+requirements: [ boto3 ]
+author: "Nick Aslanidis (@naslanidis)"
+options:
+ filters:
+ description:
+ - A dict of filters to apply. Each dict item consists of a filter key and a filter value. See U(http://docs.aws.amazon.com/AWSEC2/latest/APIReference/API_DescribeRouteTables.html) for possible filters.
+ required: false
+ default: null
+  DhcpOptionsIds:
+    description:
+      - Get details of a specific DHCP options set by its ID.
+      - Provide this value as a list.
+    required: false
+    default: None
+extends_documentation_fragment:
+ - aws
+ - ec2
+'''
+
+EXAMPLES = '''
+# Note: These examples do not set authentication details, see the AWS Guide for details.
+
+- name: Gather facts about all DHCP Option sets for an account or profile
+ ec2_vpc_dhcp_options_facts:
+ region: ap-southeast-2
+ profile: production
+ register: dhcp_facts
+
+- name: Gather facts about a filtered list of DHCP Option sets
+ ec2_vpc_dhcp_options_facts:
+ region: ap-southeast-2
+ profile: production
+ filters:
+ "tag:Name": "abc-123"
+ register: dhcp_facts
+
+- name: Gather facts about a specific DHCP Option set by DhcpOptionId
+ ec2_vpc_dhcp_options_facts:
+ region: ap-southeast-2
+ profile: production
+ DhcpOptionsIds: dopt-123fece2
+ register: dhcp_facts
+
+'''
+
+RETURN = '''
+dhcp_options:
+ description: The dhcp option sets for the account
+ returned: always
+ type: list
+
+changed:
+ description: True if listing the dhcp options succeeds
+ type: bool
+ returned: always
+'''
+
+import json
+
+try:
+ import botocore
+ import boto3
+ HAS_BOTO3 = True
+except ImportError:
+ HAS_BOTO3 = False
+
+
+def get_dhcp_options_info(dhcp_option):
+ dhcp_option_info = {'DhcpOptionsId': dhcp_option['DhcpOptionsId'],
+ 'DhcpConfigurations': dhcp_option['DhcpConfigurations'],
+ 'Tags': dhcp_option['Tags']
+ }
+ return dhcp_option_info
+
+
+def list_dhcp_options(client, module):
+ all_dhcp_options_array = []
+ params = dict()
+
+ if module.params.get('filters'):
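+        # Convert the module's {name: value} filters dict into the
+        # [{'Name': ..., 'Values': [...]}] list form that boto3 expects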
+ params['Filters'] = []
+ for key, value in module.params.get('filters').iteritems():
+ temp_dict = dict()
+ temp_dict['Name'] = key
+ if isinstance(value, basestring):
+ temp_dict['Values'] = [value]
+ else:
+ temp_dict['Values'] = value
+ params['Filters'].append(temp_dict)
+
+ if module.params.get("DryRun"):
+ params['DryRun'] = module.params.get("DryRun")
+
+ if module.params.get("DhcpOptionsIds"):
+ params['DhcpOptionsIds'] = module.params.get("DhcpOptionsIds")
+
+ try:
+ all_dhcp_options = client.describe_dhcp_options(**params)
+ except botocore.exceptions.ClientError as e:
+ module.fail_json(msg=str(e))
+
+ for dhcp_option in all_dhcp_options['DhcpOptions']:
+ all_dhcp_options_array.append(get_dhcp_options_info(dhcp_option))
+
+ snaked_dhcp_options_array = []
+ for dhcp_option in all_dhcp_options_array:
+ snaked_dhcp_options_array.append(camel_dict_to_snake_dict(dhcp_option))
+
+ module.exit_json(dhcp_options=snaked_dhcp_options_array)
+
+
+def main():
+ argument_spec = ec2_argument_spec()
+ argument_spec.update(
+ dict(
+ filters = dict(type='dict', default=None, ),
+ DryRun = dict(type='bool', default=False),
+ DhcpOptionsIds = dict(type='list', default=None)
+ )
+ )
+
+ module = AnsibleModule(argument_spec=argument_spec)
+
+ # Validate Requirements
+ if not HAS_BOTO3:
+        module.fail_json(msg='json, botocore and boto3 are required.')
+
+ try:
+ region, ec2_url, aws_connect_kwargs = get_aws_connection_info(module, boto3=True)
+ connection = boto3_conn(module, conn_type='client', resource='ec2', region=region, endpoint=ec2_url, **aws_connect_kwargs)
+ except botocore.exceptions.NoCredentialsError as e:
+ module.fail_json(msg="Can't authorize connection - "+str(e))
+
+    # list_dhcp_options exits the module itself with the results
+    list_dhcp_options(connection, module)
+
+# import module snippets
+from ansible.module_utils.basic import *
+from ansible.module_utils.ec2 import *
+
+if __name__ == '__main__':
+ main()
diff --git a/lib/ansible/modules/extras/cloud/amazon/ec2_vpc_igw.py b/lib/ansible/modules/extras/cloud/amazon/ec2_vpc_igw.py
new file mode 100644
index 0000000000..a4e58faac8
--- /dev/null
+++ b/lib/ansible/modules/extras/cloud/amazon/ec2_vpc_igw.py
@@ -0,0 +1,159 @@
+#!/usr/bin/python
+#
+# This is free software: you can redistribute it and/or modify
+# it under the terms of the GNU General Public License as published by
+# the Free Software Foundation, either version 3 of the License, or
+# (at your option) any later version.
+#
+# This Ansible library is distributed in the hope that it will be useful,
+# but WITHOUT ANY WARRANTY; without even the implied warranty of
+# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+# GNU General Public License for more details.
+#
+# You should have received a copy of the GNU General Public License
+# along with this library. If not, see <http://www.gnu.org/licenses/>.
+
+DOCUMENTATION = '''
+---
+module: ec2_vpc_igw
+short_description: Manage an AWS VPC Internet gateway
+description:
+ - Manage an AWS VPC Internet gateway
+version_added: "2.0"
+author: Robert Estelle (@erydo)
+options:
+  vpc_id:
+    description:
+      - The ID of the VPC in which to manage the Internet Gateway.
+    required: true
+ state:
+ description:
+ - Create or terminate the IGW
+ required: false
+ default: present
+ choices: [ 'present', 'absent' ]
+extends_documentation_fragment:
+ - aws
+ - ec2
+'''
+
+EXAMPLES = '''
+# Note: These examples do not set authentication details, see the AWS Guide for details.
+
+# Ensure that the VPC has an Internet Gateway.
+# The Internet Gateway ID can be accessed via {{igw.gateway_id}} for use in setting up NATs etc.
+ec2_vpc_igw:
+ vpc_id: vpc-abcdefgh
+ state: present
+register: igw
+
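+# Ensure the VPC has no Internet Gateway (a minimal sketch; assumes the same VPC as above).
+ec2_vpc_igw:
+  vpc_id: vpc-abcdefgh
+  state: absent
+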
+'''
+
+import sys # noqa
+
+try:
+ import boto.ec2
+ import boto.vpc
+ from boto.exception import EC2ResponseError
+ HAS_BOTO = True
+except ImportError:
+ HAS_BOTO = False
+ if __name__ != '__main__':
+ raise
+
+
+class AnsibleIGWException(Exception):
+ pass
+
+
+def ensure_igw_absent(vpc_conn, vpc_id, check_mode):
+ igws = vpc_conn.get_all_internet_gateways(
+ filters={'attachment.vpc-id': vpc_id})
+
+ if not igws:
+ return {'changed': False}
+
+ if check_mode:
+ return {'changed': True}
+
+ for igw in igws:
+ try:
+ vpc_conn.detach_internet_gateway(igw.id, vpc_id)
+ vpc_conn.delete_internet_gateway(igw.id)
+ except EC2ResponseError as e:
+ raise AnsibleIGWException(
+ 'Unable to delete Internet Gateway, error: {0}'.format(e))
+
+ return {'changed': True}
+
+
+def ensure_igw_present(vpc_conn, vpc_id, check_mode):
+ igws = vpc_conn.get_all_internet_gateways(
+ filters={'attachment.vpc-id': vpc_id})
+
+ if len(igws) > 1:
+ raise AnsibleIGWException(
+ 'EC2 returned more than one Internet Gateway for VPC {0}, aborting'
+ .format(vpc_id))
+
+ if igws:
+ return {'changed': False, 'gateway_id': igws[0].id}
+ else:
+ if check_mode:
+ return {'changed': True, 'gateway_id': None}
+
+ try:
+ igw = vpc_conn.create_internet_gateway()
+ vpc_conn.attach_internet_gateway(igw.id, vpc_id)
+ return {'changed': True, 'gateway_id': igw.id}
+ except EC2ResponseError as e:
+ raise AnsibleIGWException(
+ 'Unable to create Internet Gateway, error: {0}'.format(e))
+
+
+def main():
+ argument_spec = ec2_argument_spec()
+ argument_spec.update(
+ dict(
+ vpc_id = dict(required=True),
+ state = dict(default='present', choices=['present', 'absent'])
+ )
+ )
+
+ module = AnsibleModule(
+ argument_spec=argument_spec,
+ supports_check_mode=True,
+ )
+
+ if not HAS_BOTO:
+ module.fail_json(msg='boto is required for this module')
+
+ region, ec2_url, aws_connect_params = get_aws_connection_info(module)
+
+ if region:
+ try:
+ connection = connect_to_aws(boto.vpc, region, **aws_connect_params)
+        except (boto.exception.NoAuthHandlerFound, AnsibleAWSError) as e:
+ module.fail_json(msg=str(e))
+ else:
+ module.fail_json(msg="region must be specified")
+
+ vpc_id = module.params.get('vpc_id')
+ state = module.params.get('state', 'present')
+
+ try:
+ if state == 'present':
+ result = ensure_igw_present(connection, vpc_id, check_mode=module.check_mode)
+ elif state == 'absent':
+ result = ensure_igw_absent(connection, vpc_id, check_mode=module.check_mode)
+ except AnsibleIGWException as e:
+ module.fail_json(msg=str(e))
+
+ module.exit_json(**result)
+
+from ansible.module_utils.basic import * # noqa
+from ansible.module_utils.ec2 import * # noqa
+
+if __name__ == '__main__':
+ main()
diff --git a/lib/ansible/modules/extras/cloud/amazon/ec2_vpc_nacl.py b/lib/ansible/modules/extras/cloud/amazon/ec2_vpc_nacl.py
new file mode 100644
index 0000000000..73eafbc848
--- /dev/null
+++ b/lib/ansible/modules/extras/cloud/amazon/ec2_vpc_nacl.py
@@ -0,0 +1,546 @@
+#!/usr/bin/python
+# This file is part of Ansible
+#
+# Ansible is free software: you can redistribute it and/or modify
+# it under the terms of the GNU General Public License as published by
+# the Free Software Foundation, either version 3 of the License, or
+# (at your option) any later version.
+#
+# Ansible is distributed in the hope that it will be useful,
+# but WITHOUT ANY WARRANTY; without even the implied warranty of
+# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+# GNU General Public License for more details.
+#
+# You should have received a copy of the GNU General Public License
+# along with Ansible. If not, see <http://www.gnu.org/licenses/>.
+DOCUMENTATION = '''
+module: ec2_vpc_nacl
+short_description: Create and delete Network ACLs.
+description:
+  - Read the AWS documentation for Network ACLs
+    U(http://docs.aws.amazon.com/AmazonVPC/latest/UserGuide/VPC_ACLs.html)
+version_added: "2.2"
+options:
+ name:
+ description:
+ - Tagged name identifying a network ACL.
+ required: true
+ vpc_id:
+ description:
+      - The ID of the VPC the network ACL belongs to.
+ required: true
+ subnets:
+ description:
+ - The list of subnets that should be associated with the network ACL.
+ - Must be specified as a list
+ - Each subnet can be specified as subnet ID, or its tagged name.
+ required: false
+ egress:
+ description:
+ - A list of rules for outgoing traffic.
+ - Each rule must be specified as a list.
+ required: false
+ ingress:
+ description:
+ - List of rules for incoming traffic.
+ - Each rule must be specified as a list.
+ required: false
+ tags:
+ description:
+ - Dictionary of tags to look for and apply when creating a network ACL.
+ required: false
+ state:
+ description:
+      - If state=present, creates or modifies the NACL.
+      - If state=absent, deletes the NACL and reassociates its subnets with the VPC's default NACL.
+ required: false
+ choices: ['present', 'absent']
+ default: present
+author: Mike Mochan (@mmochan)
+extends_documentation_fragment: aws
+requirements: [ botocore, boto3, json ]
+'''
+
+EXAMPLES = '''
+
+# Complete example to create and delete a network ACL
+# that allows SSH, HTTP and ICMP in, and all traffic out.
+- name: "Create and associate production DMZ network ACL with DMZ subnets"
+ ec2_vpc_nacl:
+ vpc_id: vpc-12345678
+ name: prod-dmz-nacl
+ region: ap-southeast-2
+ subnets: ['prod-dmz-1', 'prod-dmz-2']
+ tags:
+ CostCode: CC1234
+ Project: phoenix
+ Description: production DMZ
+ ingress: [
+    # rule no, protocol, allow/deny, cidr, icmp_type, icmp_code,
+    # port from, port to
+    [100, 'tcp', 'allow', '0.0.0.0/0', null, null, 22, 22],
+    [200, 'tcp', 'allow', '0.0.0.0/0', null, null, 80, 80],
+    [300, 'icmp', 'allow', '0.0.0.0/0', 8, 0],
+ ]
+ egress: [
+ [100, 'all', 'allow', '0.0.0.0/0', null, null, null, null]
+ ]
+ state: 'present'
+
+- name: "Remove the ingress and egress rules - defaults to deny all"
+ ec2_vpc_nacl:
+ vpc_id: vpc-12345678
+ name: prod-dmz-nacl
+ region: ap-southeast-2
+ subnets:
+ - prod-dmz-1
+ - prod-dmz-2
+ tags:
+ CostCode: CC1234
+ Project: phoenix
+ Description: production DMZ
+ state: present
+
+- name: "Remove the NACL subnet associations and tags"
+ ec2_vpc_nacl:
+ vpc_id: 'vpc-12345678'
+ name: prod-dmz-nacl
+ region: ap-southeast-2
+ state: present
+
+- name: "Delete nacl and subnet associations"
+ ec2_vpc_nacl:
+ vpc_id: vpc-12345678
+ name: prod-dmz-nacl
+ state: absent
+'''
+RETURN = '''
+task:
+ description: The result of the create, or delete action.
+ returned: success
+  type: dict
+'''
+
+try:
+ import json
+ import botocore
+ import boto3
+ HAS_BOTO3 = True
+except ImportError:
+ HAS_BOTO3 = False
+
+
+# Common fields for the default rule that is contained within every VPC NACL.
+DEFAULT_RULE_FIELDS = {
+ 'RuleNumber': 32767,
+ 'RuleAction': 'deny',
+ 'CidrBlock': '0.0.0.0/0',
+ 'Protocol': '-1'
+}
+
+DEFAULT_INGRESS = dict(DEFAULT_RULE_FIELDS.items() + [('Egress', False)])
+DEFAULT_EGRESS = dict(DEFAULT_RULE_FIELDS.items() + [('Egress', True)])
+
+# VPC-supported IANA protocol numbers
+# http://www.iana.org/assignments/protocol-numbers/protocol-numbers.xhtml
+PROTOCOL_NUMBERS = {'all': -1, 'icmp': 1, 'tcp': 6, 'udp': 17, }
+
+
+#Utility methods
+def icmp_present(entry):
+    # A 6-element rule omits the port range and carries ICMP type/code instead.
+    return len(entry) == 6 and (entry[1] == 'icmp' or entry[1] == 1)
+
+
+def load_tags(module):
+    tags = []
+    if module.params.get('tags'):
+        for name, value in module.params.get('tags').iteritems():
+            tags.append({'Key': name, 'Value': str(value)})
+    tags.append({'Key': "Name", 'Value': module.params.get('name')})
+    return tags
+
+
+def subnets_removed(nacl_id, subnets, client, module):
+ results = find_acl_by_id(nacl_id, client, module)
+ associations = results['NetworkAcls'][0]['Associations']
+ subnet_ids = [assoc['SubnetId'] for assoc in associations]
+ return [subnet for subnet in subnet_ids if subnet not in subnets]
+
+
+def subnets_added(nacl_id, subnets, client, module):
+ results = find_acl_by_id(nacl_id, client, module)
+ associations = results['NetworkAcls'][0]['Associations']
+ subnet_ids = [assoc['SubnetId'] for assoc in associations]
+ return [subnet for subnet in subnets if subnet not in subnet_ids]
+
+
+def subnets_changed(nacl, client, module):
+ changed = False
+ response = {}
+ vpc_id = module.params.get('vpc_id')
+ nacl_id = nacl['NetworkAcls'][0]['NetworkAclId']
+ subnets = subnets_to_associate(nacl, client, module)
+ if not subnets:
+ default_nacl_id = find_default_vpc_nacl(vpc_id, client, module)[0]
+ subnets = find_subnet_ids_by_nacl_id(nacl_id, client, module)
+        if subnets:
+            replace_network_acl_association(default_nacl_id, subnets, client, module)
+            changed = True
+        return changed
+ subs_added = subnets_added(nacl_id, subnets, client, module)
+ if subs_added:
+ replace_network_acl_association(nacl_id, subs_added, client, module)
+ changed = True
+ subs_removed = subnets_removed(nacl_id, subnets, client, module)
+ if subs_removed:
+ default_nacl_id = find_default_vpc_nacl(vpc_id, client, module)[0]
+ replace_network_acl_association(default_nacl_id, subs_removed, client, module)
+ changed = True
+ return changed
+
+
+def nacls_changed(nacl, client, module):
+ changed = False
+ params = dict()
+ params['egress'] = module.params.get('egress')
+ params['ingress'] = module.params.get('ingress')
+
+ nacl_id = nacl['NetworkAcls'][0]['NetworkAclId']
+ nacl = describe_network_acl(client, module)
+ entries = nacl['NetworkAcls'][0]['Entries']
+    tmp_egress = [entry for entry in entries if entry['Egress'] is True]
+    tmp_ingress = [entry for entry in entries if entry['Egress'] is False]
+    egress = [rule for rule in tmp_egress if DEFAULT_EGRESS != rule]
+    ingress = [rule for rule in tmp_ingress if DEFAULT_INGRESS != rule]
+ if rules_changed(egress, params['egress'], True, nacl_id, client, module):
+ changed = True
+ if rules_changed(ingress, params['ingress'], False, nacl_id, client, module):
+ changed = True
+ return changed
+
+
+def tags_changed(nacl_id, client, module):
+ changed = False
+ tags = dict()
+ if module.params.get('tags'):
+ tags = module.params.get('tags')
+ tags['Name'] = module.params.get('name')
+ nacl = find_acl_by_id(nacl_id, client, module)
+ if nacl['NetworkAcls']:
+ nacl_values = [t.values() for t in nacl['NetworkAcls'][0]['Tags']]
+ nacl_tags = [item for sublist in nacl_values for item in sublist]
+ tag_values = [[key, str(value)] for key, value in tags.iteritems()]
+ tags = [item for sublist in tag_values for item in sublist]
+ if sorted(nacl_tags) == sorted(tags):
+ changed = False
+ return changed
+ else:
+ delete_tags(nacl_id, client, module)
+ create_tags(nacl_id, client, module)
+ changed = True
+ return changed
+ return changed
+
+
+def rules_changed(aws_rules, param_rules, Egress, nacl_id, client, module):
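+    # Compare the playbook's rules (converted to AWS parameter form) with the
+    # rules AWS reports: delete entries that are no longer requested, then
+    # create any newly added ones.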
+ changed = False
+ rules = list()
+ for entry in param_rules:
+ rules.append(process_rule_entry(entry, Egress))
+ if rules == aws_rules:
+ return changed
+ else:
+ removed_rules = [x for x in aws_rules if x not in rules]
+ if removed_rules:
+ params = dict()
+ for rule in removed_rules:
+ params['NetworkAclId'] = nacl_id
+ params['RuleNumber'] = rule['RuleNumber']
+ params['Egress'] = Egress
+ delete_network_acl_entry(params, client, module)
+ changed = True
+ added_rules = [x for x in rules if x not in aws_rules]
+ if added_rules:
+ for rule in added_rules:
+ rule['NetworkAclId'] = nacl_id
+ create_network_acl_entry(rule, client, module)
+ changed = True
+ return changed
+
+
+def process_rule_entry(entry, Egress):
+ params = dict()
+ params['RuleNumber'] = entry[0]
+ params['Protocol'] = str(PROTOCOL_NUMBERS[entry[1]])
+ params['RuleAction'] = entry[2]
+ params['Egress'] = Egress
+ params['CidrBlock'] = entry[3]
+ if icmp_present(entry):
+ params['IcmpTypeCode'] = {"Type": int(entry[4]), "Code": int(entry[5])}
+ else:
+ if entry[6] or entry[7]:
+ params['PortRange'] = {"From": entry[6], 'To': entry[7]}
+ return params
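+
+# For reference: process_rule_entry([100, 'tcp', 'allow', '0.0.0.0/0', None, None, 22, 22], Egress=False)
+# returns {'RuleNumber': 100, 'Protocol': '6', 'RuleAction': 'allow', 'Egress': False,
+#          'CidrBlock': '0.0.0.0/0', 'PortRange': {'From': 22, 'To': 22}}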
+
+
+def restore_default_associations(assoc_ids, default_nacl_id, client, module):
+ if assoc_ids:
+ params = dict()
+ params['NetworkAclId'] = default_nacl_id[0]
+ for assoc_id in assoc_ids:
+ params['AssociationId'] = assoc_id
+ restore_default_acl_association(params, client, module)
+ return True
+
+
+def construct_acl_entries(nacl, client, module):
+ for entry in module.params.get('ingress'):
+ params = process_rule_entry(entry, Egress=False)
+ params['NetworkAclId'] = nacl['NetworkAcl']['NetworkAclId']
+ create_network_acl_entry(params, client, module)
+ for rule in module.params.get('egress'):
+ params = process_rule_entry(rule, Egress=True)
+ params['NetworkAclId'] = nacl['NetworkAcl']['NetworkAclId']
+ create_network_acl_entry(params, client, module)
+
+
+## Module invocations
+def setup_network_acl(client, module):
+ changed = False
+ nacl = describe_network_acl(client, module)
+ if not nacl['NetworkAcls']:
+ nacl = create_network_acl(module.params.get('vpc_id'), client, module)
+ nacl_id = nacl['NetworkAcl']['NetworkAclId']
+ create_tags(nacl_id, client, module)
+ subnets = subnets_to_associate(nacl, client, module)
+ replace_network_acl_association(nacl_id, subnets, client, module)
+ construct_acl_entries(nacl, client, module)
+ changed = True
+        return (changed, nacl['NetworkAcl']['NetworkAclId'])
+ else:
+ changed = False
+ nacl_id = nacl['NetworkAcls'][0]['NetworkAclId']
+ subnet_result = subnets_changed(nacl, client, module)
+ nacl_result = nacls_changed(nacl, client, module)
+ tag_result = tags_changed(nacl_id, client, module)
+        if subnet_result or nacl_result or tag_result:
+            changed = True
+        return (changed, nacl_id)
+
+
+def remove_network_acl(client, module):
+ changed = False
+ result = dict()
+ vpc_id = module.params.get('vpc_id')
+ nacl = describe_network_acl(client, module)
+ if nacl['NetworkAcls']:
+ nacl_id = nacl['NetworkAcls'][0]['NetworkAclId']
+ associations = nacl['NetworkAcls'][0]['Associations']
+ assoc_ids = [a['NetworkAclAssociationId'] for a in associations]
+ default_nacl_id = find_default_vpc_nacl(vpc_id, client, module)
+ if not default_nacl_id:
+ result = {vpc_id: "Default NACL ID not found - Check the VPC ID"}
+ return changed, result
+ if restore_default_associations(assoc_ids, default_nacl_id, client, module):
+ delete_network_acl(nacl_id, client, module)
+ changed = True
+ result[nacl_id] = "Successfully deleted"
+ return changed, result
+ if not assoc_ids:
+ delete_network_acl(nacl_id, client, module)
+ changed = True
+ result[nacl_id] = "Successfully deleted"
+ return changed, result
+ return changed, result
+
+
+#Boto3 client methods
+def create_network_acl(vpc_id, client, module):
+ try:
+ nacl = client.create_network_acl(VpcId=vpc_id)
+ except botocore.exceptions.ClientError as e:
+ module.fail_json(msg=str(e))
+ return nacl
+
+
+def create_network_acl_entry(params, client, module):
+ try:
+ result = client.create_network_acl_entry(**params)
+ except botocore.exceptions.ClientError as e:
+ module.fail_json(msg=str(e))
+ return result
+
+
+def create_tags(nacl_id, client, module):
+ try:
+ delete_tags(nacl_id, client, module)
+ client.create_tags(Resources=[nacl_id], Tags=load_tags(module))
+ except botocore.exceptions.ClientError as e:
+ module.fail_json(msg=str(e))
+
+
+def delete_network_acl(nacl_id, client, module):
+ try:
+ client.delete_network_acl(NetworkAclId=nacl_id)
+ except botocore.exceptions.ClientError as e:
+ module.fail_json(msg=str(e))
+
+
+def delete_network_acl_entry(params, client, module):
+ try:
+ client.delete_network_acl_entry(**params)
+ except botocore.exceptions.ClientError as e:
+ module.fail_json(msg=str(e))
+
+
+def delete_tags(nacl_id, client, module):
+ try:
+ client.delete_tags(Resources=[nacl_id])
+ except botocore.exceptions.ClientError as e:
+ module.fail_json(msg=str(e))
+
+
+def describe_acl_associations(subnets, client, module):
+ if not subnets:
+ return []
+ try:
+ results = client.describe_network_acls(Filters=[
+ {'Name': 'association.subnet-id', 'Values': subnets}
+ ])
+ except botocore.exceptions.ClientError as e:
+ module.fail_json(msg=str(e))
+    if not results['NetworkAcls']:
+        return []
+    associations = results['NetworkAcls'][0]['Associations']
+    return [a['NetworkAclAssociationId'] for a in associations if a['SubnetId'] in subnets]
+
+
+def describe_network_acl(client, module):
+ try:
+ nacl = client.describe_network_acls(Filters=[
+ {'Name': 'tag:Name', 'Values': [module.params.get('name')]}
+ ])
+ except botocore.exceptions.ClientError as e:
+ module.fail_json(msg=str(e))
+ return nacl
+
+
+def find_acl_by_id(nacl_id, client, module):
+ try:
+ return client.describe_network_acls(NetworkAclIds=[nacl_id])
+ except botocore.exceptions.ClientError as e:
+ module.fail_json(msg=str(e))
+
+
+def find_default_vpc_nacl(vpc_id, client, module):
+ try:
+ response = client.describe_network_acls(Filters=[
+ {'Name': 'vpc-id', 'Values': [vpc_id]}])
+ except botocore.exceptions.ClientError as e:
+ module.fail_json(msg=str(e))
+ nacls = response['NetworkAcls']
+    return [n['NetworkAclId'] for n in nacls if n['IsDefault']]
+
+
+def find_subnet_ids_by_nacl_id(nacl_id, client, module):
+ try:
+ results = client.describe_network_acls(Filters=[
+ {'Name': 'association.network-acl-id', 'Values': [nacl_id]}
+ ])
+ except botocore.exceptions.ClientError as e:
+ module.fail_json(msg=str(e))
+ if results['NetworkAcls']:
+ associations = results['NetworkAcls'][0]['Associations']
+ return [s['SubnetId'] for s in associations if s['SubnetId']]
+ else:
+ return []
+
+
+def replace_network_acl_association(nacl_id, subnets, client, module):
+ params = dict()
+ params['NetworkAclId'] = nacl_id
+ for association in describe_acl_associations(subnets, client, module):
+ params['AssociationId'] = association
+ try:
+ client.replace_network_acl_association(**params)
+ except botocore.exceptions.ClientError as e:
+ module.fail_json(msg=str(e))
+
+
+def replace_network_acl_entry(entries, Egress, nacl_id, client, module):
+ params = dict()
+ for entry in entries:
+ params = entry
+ params['NetworkAclId'] = nacl_id
+ try:
+ client.replace_network_acl_entry(**params)
+ except botocore.exceptions.ClientError as e:
+ module.fail_json(msg=str(e))
+
+
+def restore_default_acl_association(params, client, module):
+ try:
+ client.replace_network_acl_association(**params)
+ except botocore.exceptions.ClientError as e:
+ module.fail_json(msg=str(e))
+
+
+def subnets_to_associate(nacl, client, module):
+ params = list(module.params.get('subnets'))
+ if not params:
+ return []
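+    # The first element decides how the whole list is interpreted; mixing
+    # subnet IDs and tagged names in one list is not supported.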
+ if params[0].startswith("subnet-"):
+ try:
+ subnets = client.describe_subnets(Filters=[
+ {'Name': 'subnet-id', 'Values': params}])
+ except botocore.exceptions.ClientError as e:
+ module.fail_json(msg=str(e))
+ else:
+ try:
+ subnets = client.describe_subnets(Filters=[
+ {'Name': 'tag:Name', 'Values': params}])
+ except botocore.exceptions.ClientError as e:
+ module.fail_json(msg=str(e))
+ return [s['SubnetId'] for s in subnets['Subnets'] if s['SubnetId']]
+
+
+def main():
+ argument_spec = ec2_argument_spec()
+ argument_spec.update(dict(
+ vpc_id=dict(required=True),
+ name=dict(required=True),
+ subnets=dict(required=False, type='list', default=list()),
+ tags=dict(required=False, type='dict'),
+ ingress=dict(required=False, type='list', default=list()),
+ egress=dict(required=False, type='list', default=list(),),
+ state=dict(default='present', choices=['present', 'absent']),
+ ),
+ )
+ module = AnsibleModule(argument_spec=argument_spec)
+
+ if not HAS_BOTO3:
+ module.fail_json(msg='json, botocore and boto3 are required.')
+ state = module.params.get('state').lower()
+ try:
+ region, ec2_url, aws_connect_kwargs = get_aws_connection_info(module, boto3=True)
+ client = boto3_conn(module, conn_type='client', resource='ec2', region=region, endpoint=ec2_url, **aws_connect_kwargs)
+    except botocore.exceptions.NoCredentialsError as e:
+ module.fail_json(msg="Can't authorize connection - "+str(e))
+
+ invocations = {
+ "present": setup_network_acl,
+ "absent": remove_network_acl
+ }
+ (changed, results) = invocations[state](client, module)
+ module.exit_json(changed=changed, nacl_id=results)
+
+# import module snippets
+from ansible.module_utils.basic import *
+from ansible.module_utils.ec2 import *
+
+if __name__ == '__main__':
+ main()
diff --git a/lib/ansible/modules/extras/cloud/amazon/ec2_vpc_nacl_facts.py b/lib/ansible/modules/extras/cloud/amazon/ec2_vpc_nacl_facts.py
new file mode 100644
index 0000000000..b809642c71
--- /dev/null
+++ b/lib/ansible/modules/extras/cloud/amazon/ec2_vpc_nacl_facts.py
@@ -0,0 +1,201 @@
+#!/usr/bin/python
+# This file is part of Ansible
+#
+# Ansible is free software: you can redistribute it and/or modify
+# it under the terms of the GNU General Public License as published by
+# the Free Software Foundation, either version 3 of the License, or
+# (at your option) any later version.
+#
+# Ansible is distributed in the hope that it will be useful,
+# but WITHOUT ANY WARRANTY; without even the implied warranty of
+# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+# GNU General Public License for more details.
+#
+# You should have received a copy of the GNU General Public License
+# along with Ansible. If not, see <http://www.gnu.org/licenses/>.
+
+DOCUMENTATION = '''
+---
+module: ec2_vpc_nacl_facts
+short_description: Gather facts about Network ACLs in an AWS VPC
+description:
+ - Gather facts about Network ACLs in an AWS VPC
+version_added: "2.2"
+author: "Brad Davidson (@brandond)"
+requirements: [ boto3 ]
+options:
+ nacl_ids:
+ description:
+ - A list of Network ACL IDs to retrieve facts about.
+ required: false
+ default: []
+ filters:
+ description:
+ - A dict of filters to apply. Each dict item consists of a filter key and a filter value. See \
+ U(http://docs.aws.amazon.com/AWSEC2/latest/APIReference/API_DescribeNetworkAcls.html) for possible filters. Filter \
+ names and values are case sensitive.
+ required: false
+ default: {}
+notes:
+ - By default, the module will return all Network ACLs.
+
+extends_documentation_fragment:
+ - aws
+ - ec2
+'''
+
+EXAMPLES = '''
+# Note: These examples do not set authentication details, see the AWS Guide for details.
+
+# Gather facts about all Network ACLs:
+- name: Get All NACLs
+ register: all_nacls
+ ec2_vpc_nacl_facts:
+ region: us-west-2
+
+# Retrieve default Network ACLs:
+- name: Get Default NACLs
+ register: default_nacls
+ ec2_vpc_nacl_facts:
+ region: us-west-2
+ filters:
+ 'default': 'true'
+'''
+
+RETURN = '''
+nacl:
+ description: Returns an array of complex objects as described below.
+ returned: success
+ type: list of complex
+ contains:
+ nacl_id:
+ description: The ID of the Network Access Control List.
+ returned: always
+ type: string
+ vpc_id:
+ description: The ID of the VPC that the NACL is attached to.
+ returned: always
+ type: string
+ is_default:
+ description: True if the NACL is the default for its VPC.
+ returned: always
+ type: boolean
+ tags:
+ description: A dict of tags associated with the NACL.
+ returned: always
+ type: dict
+ subnets:
+ description: A list of subnet IDs that are associated with the NACL.
+ returned: always
+ type: list of string
+ ingress:
+ description: A list of NACL ingress rules.
+ returned: always
+ type: list of list
+ egress:
+ description: A list of NACL egress rules.
+ returned: always
+ type: list of list
+'''
+
+try:
+ import boto3
+ from botocore.exceptions import ClientError, NoCredentialsError
+ HAS_BOTO3 = True
+except ImportError:
+ HAS_BOTO3 = False
+
+# VPC-supported IANA protocol numbers
+# http://www.iana.org/assignments/protocol-numbers/protocol-numbers.xhtml
+PROTOCOL_NAMES = {'-1': 'all', '1': 'icmp', '6': 'tcp', '17': 'udp'}
+
+def list_ec2_vpc_nacls(connection, module):
+
+ nacl_ids = module.params.get("nacl_ids")
+ filters = ansible_dict_to_boto3_filter_list(module.params.get("filters"))
+
+ try:
+ nacls = connection.describe_network_acls(NetworkAclIds=nacl_ids, Filters=filters)
+ except (ClientError, NoCredentialsError) as e:
+ module.fail_json(msg=e.message, **camel_dict_to_snake_dict(e.response))
+
+ # Turn the boto3 result in to ansible_friendly_snaked_names
+ snaked_nacls = []
+ for nacl in nacls['NetworkAcls']:
+ snaked_nacls.append(camel_dict_to_snake_dict(nacl))
+
+ # Turn the boto3 result in to ansible friendly tag dictionary
+ for nacl in snaked_nacls:
+ if 'tags' in nacl:
+ nacl['tags'] = boto3_tag_list_to_ansible_dict(nacl['tags'])
+ if 'entries' in nacl:
+ nacl['egress'] = [nacl_entry_to_list(e) for e in nacl['entries']
+ if e['rule_number'] != 32767 and e['egress']]
+ nacl['ingress'] = [nacl_entry_to_list(e) for e in nacl['entries']
+ if e['rule_number'] != 32767 and not e['egress']]
+ del nacl['entries']
+ if 'associations' in nacl:
+ nacl['subnets'] = [a['subnet_id'] for a in nacl['associations']]
+ del nacl['associations']
+ if 'network_acl_id' in nacl:
+ nacl['nacl_id'] = nacl['network_acl_id']
+ del nacl['network_acl_id']
+
+ module.exit_json(nacls=snaked_nacls)
+
+def nacl_entry_to_list(entry):
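+    # Flatten a boto3 NACL entry dict into the list layout used by the
+    # ec2_vpc_nacl module: [rule_no, protocol, action, cidr,
+    # icmp_type, icmp_code, port_from, port_to]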
+
+ elist = [entry['rule_number'],
+ PROTOCOL_NAMES[entry['protocol']],
+ entry['rule_action'],
+ entry['cidr_block']
+ ]
+ if entry['protocol'] == '1':
+ elist = elist + [-1, -1]
+ else:
+ elist = elist + [None, None, None, None]
+
+ if 'icmp_type_code' in entry:
+ elist[4] = entry['icmp_type_code']['type']
+ elist[5] = entry['icmp_type_code']['code']
+
+ if 'port_range' in entry:
+ elist[6] = entry['port_range']['from']
+ elist[7] = entry['port_range']['to']
+
+ return elist
+
+def main():
+
+ argument_spec = ec2_argument_spec()
+ argument_spec.update(
+ dict(
+ nacl_ids=dict(default=[], type='list'),
+ filters=dict(default={}, type='dict')
+ )
+ )
+
+ module = AnsibleModule(argument_spec=argument_spec,
+ mutually_exclusive=[
+ ['nacl_ids', 'filters']
+ ]
+ )
+
+ if not HAS_BOTO3:
+ module.fail_json(msg='boto3 required for this module')
+
+ region, ec2_url, aws_connect_params = get_aws_connection_info(module, boto3=True)
+
+ if region:
+ connection = boto3_conn(module, conn_type='client', resource='ec2',
+ region=region, endpoint=ec2_url, **aws_connect_params)
+ else:
+ module.fail_json(msg="region must be specified")
+
+ list_ec2_vpc_nacls(connection, module)
+
+from ansible.module_utils.basic import *
+from ansible.module_utils.ec2 import *
+
+if __name__ == '__main__':
+ main()
diff --git a/lib/ansible/modules/extras/cloud/amazon/ec2_vpc_nat_gateway.py b/lib/ansible/modules/extras/cloud/amazon/ec2_vpc_nat_gateway.py
new file mode 100644
index 0000000000..ee53d7bb13
--- /dev/null
+++ b/lib/ansible/modules/extras/cloud/amazon/ec2_vpc_nat_gateway.py
@@ -0,0 +1,1085 @@
+#!/usr/bin/python
+# This file is part of Ansible
+#
+# Ansible is free software: you can redistribute it and/or modify
+# it under the terms of the GNU General Public License as published by
+# the Free Software Foundation, either version 3 of the License, or
+# (at your option) any later version.
+#
+# Ansible is distributed in the hope that it will be useful,
+# but WITHOUT ANY WARRANTY; without even the implied warranty of
+# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+# GNU General Public License for more details.
+#
+# You should have received a copy of the GNU General Public License
+# along with Ansible. If not, see <http://www.gnu.org/licenses/>.
+
+DOCUMENTATION = '''
+---
+module: ec2_vpc_nat_gateway
+short_description: Manage AWS VPC NAT Gateways.
+description:
+ - Ensure the state of AWS VPC NAT Gateways based on their id, allocation and subnet ids.
+version_added: "2.2"
+requirements: [boto3, botocore]
+options:
+ state:
+ description:
+ - Ensure NAT Gateway is present or absent.
+ required: false
+ default: "present"
+ choices: ["present", "absent"]
+ nat_gateway_id:
+ description:
+ - The id AWS dynamically allocates to the NAT Gateway on creation.
+ This is required when the absent option is present.
+ required: false
+ default: None
+ subnet_id:
+ description:
+ - The id of the subnet to create the NAT Gateway in. This is required
+ with the present option.
+ required: false
+ default: None
+ allocation_id:
+ description:
+      - The id of the elastic IP allocation. If this is not passed and the
+        eip_address is not passed, an EIP is allocated for this NAT Gateway.
+ required: false
+ default: None
+ eip_address:
+ description:
+ - The elastic IP address of the EIP you want attached to this NAT Gateway.
+ If this is not passed and the allocation_id is not passed,
+ an EIP is generated for this NAT Gateway.
+ required: false
+ if_exist_do_not_create:
+ description:
+      - If a NAT Gateway already exists in the subnet_id, do not create a new one.
+ required: false
+ default: false
+ release_eip:
+ description:
+ - Deallocate the EIP from the VPC.
+ - Option is only valid with the absent state.
+      - You should use this with the wait option, since you cannot release an address while a delete operation is in progress.
+ required: false
+ default: true
+ wait:
+ description:
+ - Wait for operation to complete before returning.
+ required: false
+ default: false
+ wait_timeout:
+ description:
+ - How many seconds to wait for an operation to complete before timing out.
+ required: false
+ default: 300
+ client_token:
+ description:
+      - Optional unique token to be used during create to ensure idempotency.
+        When specifying this option, ensure you specify the eip_address parameter
+        as well; otherwise any subsequent runs will fail.
+ required: false
+
+author:
+ - "Allen Sanabria (@linuxdynasty)"
+ - "Jon Hadfield (@jonhadfield)"
+ - "Karen Cheng(@Etherdaemon)"
+extends_documentation_fragment:
+ - aws
+ - ec2
+'''
+
+EXAMPLES = '''
+# Note: These examples do not set authentication details, see the AWS Guide for details.
+
+- name: Create new nat gateway with client token.
+ ec2_vpc_nat_gateway:
+ state: present
+ subnet_id: subnet-12345678
+ eip_address: 52.1.1.1
+ region: ap-southeast-2
+ client_token: abcd-12345678
+ register: new_nat_gateway
+
+- name: Create new nat gateway using an allocation-id.
+ ec2_vpc_nat_gateway:
+ state: present
+ subnet_id: subnet-12345678
+ allocation_id: eipalloc-12345678
+ region: ap-southeast-2
+ register: new_nat_gateway
+
+- name: Create new nat gateway, using an EIP address and wait for available status.
+ ec2_vpc_nat_gateway:
+ state: present
+ subnet_id: subnet-12345678
+ eip_address: 52.1.1.1
+ wait: yes
+ region: ap-southeast-2
+ register: new_nat_gateway
+
+- name: Create new nat gateway and allocate new EIP.
+ ec2_vpc_nat_gateway:
+ state: present
+ subnet_id: subnet-12345678
+ wait: yes
+ region: ap-southeast-2
+ register: new_nat_gateway
+
+- name: Create new nat gateway and allocate new EIP if a nat gateway does not yet exist in the subnet.
+ ec2_vpc_nat_gateway:
+ state: present
+ subnet_id: subnet-12345678
+ wait: yes
+ region: ap-southeast-2
+ if_exist_do_not_create: true
+ register: new_nat_gateway
+
+- name: Delete nat gateway using discovered nat gateways from facts module.
+ ec2_vpc_nat_gateway:
+ state: absent
+ region: ap-southeast-2
+ wait: yes
+ nat_gateway_id: "{{ item.NatGatewayId }}"
+ release_eip: yes
+ register: delete_nat_gateway_result
+ with_items: "{{ gateways_to_remove.result }}"
+
+- name: Delete nat gateway and wait for deleted status.
+ ec2_vpc_nat_gateway:
+ state: absent
+ nat_gateway_id: nat-12345678
+ wait: yes
+ wait_timeout: 500
+ region: ap-southeast-2
+
+- name: Delete nat gateway and release EIP.
+ ec2_vpc_nat_gateway:
+ state: absent
+ nat_gateway_id: nat-12345678
+ release_eip: yes
+ wait: yes
+ wait_timeout: 300
+ region: ap-southeast-2
+'''
+
+RETURN = '''
+create_time:
+  description: The creation time, as an ISO 8601 date-time in UTC.
+ returned: In all cases.
+ type: string
+ sample: "2016-03-05T05:19:20.282000+00:00'"
+nat_gateway_id:
+ description: id of the VPC NAT Gateway
+ returned: In all cases.
+ type: string
+ sample: "nat-0d1e3a878585988f8"
+subnet_id:
+ description: id of the Subnet
+ returned: In all cases.
+ type: string
+ sample: "subnet-12345"
+state:
+ description: The current state of the NAT Gateway.
+ returned: In all cases.
+ type: string
+ sample: "available"
+vpc_id:
+ description: id of the VPC.
+ returned: In all cases.
+ type: string
+ sample: "vpc-12345"
+nat_gateway_addresses:
+  description: List of dictionaries containing the public_ip, network_interface_id, private_ip, and allocation_id.
+  returned: In all cases.
+  type: list
+ sample: [
+ {
+ 'public_ip': '52.52.52.52',
+ 'network_interface_id': 'eni-12345',
+ 'private_ip': '10.0.0.100',
+ 'allocation_id': 'eipalloc-12345'
+ }
+ ]
+'''
+
+try:
+ import botocore
+ import boto3
+ HAS_BOTO3 = True
+except ImportError:
+ HAS_BOTO3 = False
+
+import datetime
+import random
+import re
+import time
+
+from dateutil.tz import tzutc
+
+DRY_RUN_GATEWAYS = [
+ {
+ "nat_gateway_id": "nat-123456789",
+ "subnet_id": "subnet-123456789",
+ "nat_gateway_addresses": [
+ {
+ "public_ip": "55.55.55.55",
+ "network_interface_id": "eni-1234567",
+ "private_ip": "10.0.0.102",
+ "allocation_id": "eipalloc-1234567"
+ }
+ ],
+ "state": "available",
+ "create_time": "2016-03-05T05:19:20.282000+00:00",
+ "vpc_id": "vpc-12345678"
+ }
+]
+DRY_RUN_GATEWAY_UNCONVERTED = [
+ {
+ 'VpcId': 'vpc-12345678',
+ 'State': 'available',
+ 'NatGatewayId': 'nat-123456789',
+ 'SubnetId': 'subnet-123456789',
+ 'NatGatewayAddresses': [
+ {
+ 'PublicIp': '55.55.55.55',
+ 'NetworkInterfaceId': 'eni-1234567',
+ 'AllocationId': 'eipalloc-1234567',
+ 'PrivateIp': '10.0.0.102'
+ }
+ ],
+ 'CreateTime': datetime.datetime(2016, 3, 5, 5, 19, 20, 282000, tzinfo=tzutc())
+ }
+]
+
+DRY_RUN_ALLOCATION_UNCONVERTED = {
+ 'Addresses': [
+ {
+ 'PublicIp': '55.55.55.55',
+ 'Domain': 'vpc',
+ 'AllocationId': 'eipalloc-1234567'
+ }
+ ]
+}
+
+DRY_RUN_MSGS = 'DryRun Mode:'
+
+
+def convert_to_lower(data):
+ """Convert all uppercase keys in dict with lowercase_
+
+ Args:
+ data (dict): Dictionary with keys that have upper cases in them
+ Example.. FooBar == foo_bar
+ if a val is of type datetime.datetime, it will be converted to
+ the ISO 8601
+
+ Basic Usage:
+ >>> test = {'FooBar': []}
+ >>> test = convert_to_lower(test)
+ {
+ 'foo_bar': []
+ }
+
+ Returns:
+ Dictionary
+ """
+ results = dict()
+ if isinstance(data, dict):
+ for key, val in data.items():
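+            # e.g. 'NatGatewayId' -> '_nat_gateway_id' -> 'nat_gateway_id'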
+            key = re.sub(r'([A-Z]{1,3})', r'_\1', key).lower()
+ if key[0] == '_':
+ key = key[1:]
+ if isinstance(val, datetime.datetime):
+ results[key] = val.isoformat()
+ elif isinstance(val, dict):
+ results[key] = convert_to_lower(val)
+ elif isinstance(val, list):
+ converted = list()
+ for item in val:
+ converted.append(convert_to_lower(item))
+ results[key] = converted
+ else:
+ results[key] = val
+ return results
+
+
+def get_nat_gateways(client, subnet_id=None, nat_gateway_id=None,
+ states=None, check_mode=False):
+ """Retrieve a list of NAT Gateways
+ Args:
+ client (botocore.client.EC2): Boto3 client
+
+ Kwargs:
+ subnet_id (str): The subnet_id the nat resides in.
+ nat_gateway_id (str): The Amazon nat id.
+ states (list): States available (pending, failed, available, deleting, and deleted)
+ default=None
+
+ Basic Usage:
+ >>> client = boto3.client('ec2')
+ >>> subnet_id = 'subnet-12345678'
+ >>> get_nat_gateways(client, subnet_id)
+ [
+ true,
+ "",
+ {
+ "nat_gateway_id": "nat-123456789",
+ "subnet_id": "subnet-123456789",
+ "nat_gateway_addresses": [
+ {
+ "public_ip": "55.55.55.55",
+ "network_interface_id": "eni-1234567",
+ "private_ip": "10.0.0.102",
+ "allocation_id": "eipalloc-1234567"
+ }
+ ],
+ "state": "deleted",
+ "create_time": "2016-03-05T00:33:21.209000+00:00",
+ "delete_time": "2016-03-05T00:36:37.329000+00:00",
+ "vpc_id": "vpc-12345678"
+ }
+
+ Returns:
+ Tuple (bool, str, list)
+ """
+ params = dict()
+ err_msg = ""
+ gateways_retrieved = False
+ existing_gateways = list()
+ if not states:
+ states = ['available', 'pending']
+ if nat_gateway_id:
+ params['NatGatewayIds'] = [nat_gateway_id]
+ else:
+ params['Filter'] = [
+ {
+ 'Name': 'subnet-id',
+ 'Values': [subnet_id]
+ },
+ {
+ 'Name': 'state',
+ 'Values': states
+ }
+ ]
+
+ try:
+ if not check_mode:
+ gateways = client.describe_nat_gateways(**params)['NatGateways']
+ if gateways:
+ for gw in gateways:
+ existing_gateways.append(convert_to_lower(gw))
+ gateways_retrieved = True
+ else:
+ gateways_retrieved = True
+ if nat_gateway_id:
+ if DRY_RUN_GATEWAYS[0]['nat_gateway_id'] == nat_gateway_id:
+ existing_gateways = DRY_RUN_GATEWAYS
+ elif subnet_id:
+ if DRY_RUN_GATEWAYS[0]['subnet_id'] == subnet_id:
+ existing_gateways = DRY_RUN_GATEWAYS
+ err_msg = '{0} Retrieving gateways'.format(DRY_RUN_MSGS)
+
+ except botocore.exceptions.ClientError as e:
+ err_msg = str(e)
+
+ return gateways_retrieved, err_msg, existing_gateways
+
+
+def wait_for_status(client, wait_timeout, nat_gateway_id, status,
+ check_mode=False):
+ """Wait for the NAT Gateway to reach a status
+ Args:
+ client (botocore.client.EC2): Boto3 client
+ wait_timeout (int): Number of seconds to wait, until this timeout is reached.
+ nat_gateway_id (str): The Amazon nat id.
+ status (str): The status to wait for.
+ examples. status=available, status=deleted
+
+ Basic Usage:
+ >>> client = boto3.client('ec2')
+        >>> nat_gateway_id = 'nat-123456789'
+        >>> wait_for_status(client, 300, nat_gateway_id, 'available')
+ [
+ true,
+ "",
+ {
+ "nat_gateway_id": "nat-123456789",
+ "subnet_id": "subnet-1234567",
+ "nat_gateway_addresses": [
+ {
+ "public_ip": "55.55.55.55",
+ "network_interface_id": "eni-1234567",
+ "private_ip": "10.0.0.102",
+ "allocation_id": "eipalloc-12345678"
+ }
+ ],
+ "state": "deleted",
+ "create_time": "2016-03-05T00:33:21.209000+00:00",
+ "delete_time": "2016-03-05T00:36:37.329000+00:00",
+ "vpc_id": "vpc-12345677"
+ }
+ ]
+
+ Returns:
+ Tuple (bool, str, dict)
+ """
+ polling_increment_secs = 5
+ wait_timeout = time.time() + wait_timeout
+ status_achieved = False
+ nat_gateway = dict()
+ states = ['pending', 'failed', 'available', 'deleting', 'deleted']
+ err_msg = ""
+
+ while wait_timeout > time.time():
+ try:
+ gws_retrieved, err_msg, nat_gateways = (
+ get_nat_gateways(
+ client, nat_gateway_id=nat_gateway_id,
+ states=states, check_mode=check_mode
+ )
+ )
+ if gws_retrieved and nat_gateways:
+ nat_gateway = nat_gateways[0]
+ if check_mode:
+ nat_gateway['state'] = status
+
+ if nat_gateway.get('state') == status:
+ status_achieved = True
+ break
+
+ elif nat_gateway.get('state') == 'failed':
+ err_msg = nat_gateway.get('failure_message')
+ break
+
+ elif nat_gateway.get('state') == 'pending':
+ if 'failure_message' in nat_gateway:
+ err_msg = nat_gateway.get('failure_message')
+ status_achieved = False
+ break
+
+ else:
+ time.sleep(polling_increment_secs)
+
+ except botocore.exceptions.ClientError as e:
+ err_msg = str(e)
+
+ if not status_achieved and not err_msg:
+ err_msg = "Wait timeout reached while waiting for results"
+
+ return status_achieved, err_msg, nat_gateway
+
+
+def gateway_in_subnet_exists(client, subnet_id, allocation_id=None,
+ check_mode=False):
+ """Retrieve all NAT Gateways for a subnet.
+ Args:
+ client (botocore.client.EC2): Boto3 client
+ subnet_id (str): The ID of the subnet the NAT gateway resides in.
+
+ Kwargs:
+ allocation_id (str): The EIP Amazon identifier.
+ default = None
+
+ Basic Usage:
+ >>> client = boto3.client('ec2')
+ >>> subnet_id = 'subnet-1234567'
+ >>> allocation_id = 'eipalloc-1234567'
+ >>> gateway_in_subnet_exists(client, subnet_id, allocation_id)
+ (
+ [
+ {
+ "nat_gateway_id": "nat-123456789",
+ "subnet_id": "subnet-123456789",
+ "nat_gateway_addresses": [
+ {
+ "public_ip": "55.55.55.55",
+ "network_interface_id": "eni-1234567",
+ "private_ip": "10.0.0.102",
+ "allocation_id": "eipalloc-1234567"
+ }
+ ],
+ "state": "deleted",
+ "create_time": "2016-03-05T00:33:21.209000+00:00",
+ "delete_time": "2016-03-05T00:36:37.329000+00:00",
+ "vpc_id": "vpc-1234567"
+ }
+ ],
+ False
+ )
+
+ Returns:
+ Tuple (list, bool)
+ """
+ allocation_id_exists = False
+ gateways = []
+ states = ['available', 'pending']
+ gws_retrieved, _, gws = (
+ get_nat_gateways(
+ client, subnet_id, states=states, check_mode=check_mode
+ )
+ )
+ if not gws_retrieved:
+ return gateways, allocation_id_exists
+ for gw in gws:
+ for address in gw['nat_gateway_addresses']:
+ if allocation_id:
+ if address.get('allocation_id') == allocation_id:
+ allocation_id_exists = True
+ gateways.append(gw)
+ else:
+ gateways.append(gw)
+
+ return gateways, allocation_id_exists
+
+
+def get_eip_allocation_id_by_address(client, eip_address, check_mode=False):
+ """Get the EIP allocation ID for a given Elastic IP address
+ Args:
+ client (botocore.client.EC2): Boto3 client
+ eip_address (str): The Elastic IP Address of the EIP.
+
+ Kwargs:
+ check_mode (bool): if set to True, do not make any calls and
+ return simulated results.
+
+ Basic Usage:
+ >>> client = boto3.client('ec2')
+ >>> eip_address = '52.87.29.36'
+ >>> get_eip_allocation_id_by_address(client, eip_address)
+ ('eipalloc-36014da3', '')
+
+ Returns:
+ Tuple (str, str)
+ """
+ params = {
+ 'PublicIps': [eip_address],
+ }
+ allocation_id = None
+ err_msg = ""
+ try:
+ if not check_mode:
+ allocations = client.describe_addresses(**params)['Addresses']
+ if len(allocations) == 1:
+ allocation = allocations[0]
+ else:
+ allocation = None
+ else:
+ dry_run_eip = (
+ DRY_RUN_ALLOCATION_UNCONVERTED['Addresses'][0]['PublicIp']
+ )
+ if dry_run_eip == eip_address:
+ allocation = DRY_RUN_ALLOCATION_UNCONVERTED['Addresses'][0]
+ else:
+ allocation = None
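+ # NAT gateways can only use VPC-scoped EIPs, so an EC2-Classic
+ # address (Domain != 'vpc') is rejected here.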
+ if allocation:
+ if allocation.get('Domain') != 'vpc':
+ err_msg = (
+ "EIP {0} is a non-VPC EIP, please allocate a VPC scoped EIP"
+ .format(eip_address)
+ )
+ else:
+ allocation_id = allocation.get('AllocationId')
+ else:
+ err_msg = (
+ "EIP {0} does not exist".format(eip_address)
+ )
+
+ except botocore.exceptions.ClientError as e:
+ err_msg = str(e)
+
+ return allocation_id, err_msg
+
+
+def allocate_eip_address(client, check_mode=False):
+ """Allocate a new VPC-scoped EIP from your EIP pool
+ Args:
+ client (botocore.client.EC2): Boto3 client
+
+ Kwargs:
+ check_mode (bool): if set to True, do not make any calls and
+ return simulated results.
+
+ Basic Usage:
+ >>> client = boto3.client('ec2')
+ >>> allocate_eip_address(client)
+ (True, 'eipalloc id eipalloc-1234567 created', 'eipalloc-1234567')
+
+ Returns:
+ Tuple (bool, str, str)
+ """
+ ip_allocated = False
+ new_eip = None
+ err_msg = ''
+ params = {
+ 'Domain': 'vpc',
+ }
+ try:
+ if check_mode:
+ ip_allocated = True
+ random_numbers = (
+ ''.join(str(x) for x in random.sample(range(0, 9), 7))
+ )
+ new_eip = 'eipalloc-{0}'.format(random_numbers)
+ else:
+ new_eip = client.allocate_address(**params)['AllocationId']
+ ip_allocated = True
+ err_msg = 'eipalloc id {0} created'.format(new_eip)
+
+ except botocore.exceptions.ClientError as e:
+ err_msg = str(e)
+
+ return ip_allocated, err_msg, new_eip
+
+
+def release_address(client, allocation_id, check_mode=False):
+ """Release an EIP from your EIP Pool
+ Args:
+ client (botocore.client.EC2): Boto3 client
+ allocation_id (str): The eip Amazon identifier.
+
+ Kwargs:
+ check_mode (bool): if set to True, do not make any calls and
+ return simulated results.
+
+ Basic Usage:
+ >>> client = boto3.client('ec2')
+ >>> allocation_id = "eipalloc-123456"
+ >>> release_address(client, allocation_id)
+ (True, '')
+
+ Returns:
+ Tuple (bool, str)
+ """
+ err_msg = ''
+ if check_mode:
+ return True, ''
+
+ ip_released = False
+ params = {
+ 'AllocationId': allocation_id,
+ }
+ try:
+ client.release_address(**params)
+ ip_released = True
+ except botocore.exceptions.ClientError as e:
+ err_msg = str(e)
+
+ return ip_released, err_msg
+
+
+def create(client, subnet_id, allocation_id, client_token=None,
+ wait=False, wait_timeout=0, if_exist_do_not_create=False,
+ check_mode=False):
+ """Create an Amazon NAT Gateway.
+ Args:
+ client (botocore.client.EC2): Boto3 client
+ subnet_id (str): The subnet_id the nat resides in.
+ allocation_id (str): The eip Amazon identifier.
+
+ Kwargs:
+ if_exist_do_not_create (bool): if a nat gateway already exists in this
+ subnet, then do not create another one.
+ default = False
+ wait (bool): Wait for the nat to be in the available state before returning.
+ default = False
+ wait_timeout (int): Number of seconds to wait, until this timeout is reached.
+ default = 0
+ client_token (str): Optional unique token to make the create request idempotent.
+ default = None
+
+ Basic Usage:
+ >>> client = boto3.client('ec2')
+ >>> subnet_id = 'subnet-1234567'
+ >>> allocation_id = 'eipalloc-1234567'
+ >>> create(client, subnet_id, allocation_id, if_exist_do_not_create=True, wait=True, wait_timeout=500)
+ [
+ true,
+ "",
+ {
+ "nat_gateway_id": "nat-123456789",
+ "subnet_id": "subnet-1234567",
+ "nat_gateway_addresses": [
+ {
+ "public_ip": "55.55.55.55",
+ "network_interface_id": "eni-1234567",
+ "private_ip": "10.0.0.102",
+ "allocation_id": "eipalloc-1234567"
+ }
+ ],
+ "state": "deleted",
+ "create_time": "2016-03-05T00:33:21.209000+00:00",
+ "delete_time": "2016-03-05T00:36:37.329000+00:00",
+ "vpc_id": "vpc-1234567"
+ }
+ ]
+
+ Returns:
+ Tuple (bool, bool, str, dict)
+ """
+ params = {
+ 'SubnetId': subnet_id,
+ 'AllocationId': allocation_id
+ }
+ request_time = datetime.datetime.utcnow()
+ changed = False
+ success = False
+ token_provided = False
+ err_msg = ""
+
+ if client_token:
+ token_provided = True
+ params['ClientToken'] = client_token
+
+ try:
+ if not check_mode:
+ result = client.create_nat_gateway(**params)["NatGateway"]
+ else:
+ result = DRY_RUN_GATEWAY_UNCONVERTED[0]
+ result['CreateTime'] = datetime.datetime.utcnow()
+ result['NatGatewayAddresses'][0]['AllocationId'] = allocation_id
+ result['SubnetId'] = subnet_id
+
+ success = True
+ changed = True
+ create_time = result['CreateTime'].replace(tzinfo=None)
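+ # If a client token was supplied and the gateway's create time predates
+ # this request, AWS returned an existing gateway for that token, so
+ # nothing actually changed.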
+ if token_provided and (request_time > create_time):
+ changed = False
+ elif wait:
+ success, err_msg, result = (
+ wait_for_status(
+ client, wait_timeout, result['NatGatewayId'], 'available',
+ check_mode=check_mode
+ )
+ )
+ if success:
+ err_msg = (
+ 'NAT gateway {0} created'.format(result['nat_gateway_id'])
+ )
+
+ except botocore.exceptions.ClientError as e:
+ if "IdempotentParameterMismatch" in e.message:
+ err_msg = (
+ 'NAT Gateway does not support update and token has already been provided'
+ )
+ else:
+ err_msg = str(e)
+ success = False
+ changed = False
+ result = None
+
+ return success, changed, err_msg, result
+
+
+def pre_create(client, subnet_id, allocation_id=None, eip_address=None,
+ if_exist_do_not_create=False, wait=False, wait_timeout=0,
+ client_token=None, check_mode=False):
+ """Create an Amazon NAT Gateway.
+ Args:
+ client (botocore.client.EC2): Boto3 client
+ subnet_id (str): The subnet_id the nat resides in.
+
+ Kwargs:
+ allocation_id (str): The EIP Amazon identifier.
+ default = None
+ eip_address (str): The Elastic IP Address of the EIP.
+ default = None
+ if_exist_do_not_create (bool): if a nat gateway already exists in this
+ subnet, then do not create another one.
+ default = False
+ wait (bool): Wait for the nat to be in the available state before returning.
+ default = False
+ wait_timeout (int): Number of seconds to wait, until this timeout is reached.
+ default = 0
+ client_token (str): Optional unique token to make the create request idempotent.
+ default = None
+
+ Basic Usage:
+ >>> client = boto3.client('ec2')
+ >>> subnet_id = 'subnet-w4t12897'
+ >>> allocation_id = 'eipalloc-36014da3'
+ >>> pre_create(client, subnet_id, allocation_id, if_exist_do_not_create=True, wait=True, wait_timeout=500)
+ [
+ true,
+ "",
+ {
+ "nat_gateway_id": "nat-03835afb6e31df79b",
+ "subnet_id": "subnet-w4t12897",
+ "nat_gateway_addresses": [
+ {
+ "public_ip": "52.87.29.36",
+ "network_interface_id": "eni-5579742d",
+ "private_ip": "10.0.0.102",
+ "allocation_id": "eipalloc-36014da3"
+ }
+ ],
+ "state": "deleted",
+ "create_time": "2016-03-05T00:33:21.209000+00:00",
+ "delete_time": "2016-03-05T00:36:37.329000+00:00",
+ "vpc_id": "vpc-w68571b5"
+ }
+ ]
+
+ Returns:
+ Tuple (bool, bool, str, dict)
+ """
+ success = False
+ changed = False
+ err_msg = ""
+ results = list()
+
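+ # With no allocation_id or eip_address given, either reuse an existing
+ # gateway in the subnet (when if_exist_do_not_create is set) or
+ # allocate a fresh EIP for a new gateway.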
+ if not allocation_id and not eip_address:
+ existing_gateways, allocation_id_exists = (
+ gateway_in_subnet_exists(client, subnet_id, check_mode=check_mode)
+ )
+
+ if len(existing_gateways) > 0 and if_exist_do_not_create:
+ success = True
+ changed = False
+ results = existing_gateways[0]
+ err_msg = (
+ 'NAT Gateway {0} already exists in subnet_id {1}'
+ .format(
+ existing_gateways[0]['nat_gateway_id'], subnet_id
+ )
+ )
+ return success, changed, err_msg, results
+ else:
+ success, err_msg, allocation_id = (
+ allocate_eip_address(client, check_mode=check_mode)
+ )
+ if not success:
+ return success, False, err_msg, dict()
+
+ elif eip_address or allocation_id:
+ if eip_address and not allocation_id:
+ allocation_id, err_msg = (
+ get_eip_allocation_id_by_address(
+ client, eip_address, check_mode=check_mode
+ )
+ )
+ if not allocation_id:
+ success = False
+ changed = False
+ return success, changed, err_msg, dict()
+
+ existing_gateways, allocation_id_exists = (
+ gateway_in_subnet_exists(
+ client, subnet_id, allocation_id, check_mode=check_mode
+ )
+ )
+ if len(existing_gateways) > 0 and (allocation_id_exists or if_exist_do_not_create):
+ success = True
+ changed = False
+ results = existing_gateways[0]
+ err_msg = (
+ 'NAT Gateway {0} already exists in subnet_id {1}'
+ .format(
+ existing_gateways[0]['nat_gateway_id'], subnet_id
+ )
+ )
+ return success, changed, err_msg, results
+
+ success, changed, err_msg, results = create(
+ client, subnet_id, allocation_id, client_token,
+ wait, wait_timeout, if_exist_do_not_create, check_mode=check_mode
+ )
+
+ return success, changed, err_msg, results
+
+
+def remove(client, nat_gateway_id, wait=False, wait_timeout=0,
+ release_eip=False, check_mode=False):
+ """Delete an Amazon NAT Gateway.
+ Args:
+ client (botocore.client.EC2): Boto3 client
+ nat_gateway_id (str): The Amazon nat id.
+
+ Kwargs:
+ wait (bool): Wait for the nat to be in the deleted state before returning.
+ wait_timeout (int): Number of seconds to wait, until this timeout is reached.
+ release_eip (bool): Once the nat has been deleted, you can deallocate the eip from the vpc.
+
+ Basic Usage:
+ >>> client = boto3.client('ec2')
+ >>> nat_gw_id = 'nat-03835afb6e31df79b'
+ >>> remove(client, nat_gw_id, wait=True, wait_timeout=500, release_eip=True)
+ [
+ true,
+ "",
+ {
+ "nat_gateway_id": "nat-03835afb6e31df79b",
+ "subnet_id": "subnet-w4t12897",
+ "nat_gateway_addresses": [
+ {
+ "public_ip": "52.87.29.36",
+ "network_interface_id": "eni-5579742d",
+ "private_ip": "10.0.0.102",
+ "allocation_id": "eipalloc-36014da3"
+ }
+ ],
+ "state": "deleted",
+ "create_time": "2016-03-05T00:33:21.209000+00:00",
+ "delete_time": "2016-03-05T00:36:37.329000+00:00",
+ "vpc_id": "vpc-w68571b5"
+ }
+ ]
+
+ Returns:
+ Tuple (bool, bool, str, dict)
+ """
+ params = {
+ 'NatGatewayId': nat_gateway_id
+ }
+ success = False
+ changed = False
+ err_msg = ""
+ results = list()
+ allocation_id = None
+ states = ['pending', 'available']
+ try:
+ exist, _, gw = (
+ get_nat_gateways(
+ client, nat_gateway_id=nat_gateway_id,
+ states=states, check_mode=check_mode
+ )
+ )
+ if exist and len(gw) == 1:
+ results = gw[0]
+ if not check_mode:
+ client.delete_nat_gateway(**params)
+
+ allocation_id = (
+ results['nat_gateway_addresses'][0]['allocation_id']
+ )
+ changed = True
+ success = True
+ err_msg = (
+ 'NAT gateway {0} is in a deleting state. Delete was successful'
+ .format(nat_gateway_id)
+ )
+
+ if wait:
+ status_achieved, err_msg, results = (
+ wait_for_status(
+ client, wait_timeout, nat_gateway_id, 'deleted',
+ check_mode=check_mode
+ )
+ )
+ if status_achieved:
+ err_msg = (
+ 'NAT gateway {0} was deleted successfully'
+ .format(nat_gateway_id)
+ )
+
+ except botocore.exceptions.ClientError as e:
+ err_msg = str(e)
+
+ if release_eip and allocation_id:
+ eip_released, eip_err = (
+ release_address(client, allocation_id, check_mode)
+ )
+ if not eip_released:
+ err_msg = (
+ "{0}: Failed to release EIP {1}: {2}"
+ .format(err_msg, allocation_id, eip_err)
+ )
+ success = False
+
+ return success, changed, err_msg, results
+
+
+def main():
+ argument_spec = ec2_argument_spec()
+ argument_spec.update(dict(
+ subnet_id=dict(type='str'),
+ eip_address=dict(type='str'),
+ allocation_id=dict(type='str'),
+ if_exist_do_not_create=dict(type='bool', default=False),
+ state=dict(default='present', choices=['present', 'absent']),
+ wait=dict(type='bool', default=False),
+ wait_timeout=dict(type='int', default=320, required=False),
+ release_eip=dict(type='bool', default=False),
+ nat_gateway_id=dict(type='str'),
+ client_token=dict(type='str'),
+ )
+ )
+ module = AnsibleModule(
+ argument_spec=argument_spec,
+ supports_check_mode=True,
+ mutually_exclusive=[
+ ['allocation_id', 'eip_address']
+ ]
+ )
+
+ # Validate Requirements
+ if not HAS_BOTO3:
+ module.fail_json(msg='botocore/boto3 is required.')
+
+ state = module.params.get('state').lower()
+ check_mode = module.check_mode
+ subnet_id = module.params.get('subnet_id')
+ allocation_id = module.params.get('allocation_id')
+ eip_address = module.params.get('eip_address')
+ nat_gateway_id = module.params.get('nat_gateway_id')
+ wait = module.params.get('wait')
+ wait_timeout = module.params.get('wait_timeout')
+ release_eip = module.params.get('release_eip')
+ client_token = module.params.get('client_token')
+ if_exist_do_not_create = module.params.get('if_exist_do_not_create')
+
+ try:
+ region, ec2_url, aws_connect_kwargs = (
+ get_aws_connection_info(module, boto3=True)
+ )
+ client = (
+ boto3_conn(
+ module, conn_type='client', resource='ec2',
+ region=region, endpoint=ec2_url, **aws_connect_kwargs
+ )
+ )
+ except botocore.exceptions.ClientError as e:
+ module.fail_json(msg="Boto3 Client Error - " + str(e.msg))
+
+ changed = False
+ err_msg = ''
+
+ if state == 'present':
+ if not subnet_id:
+ module.fail_json(msg='subnet_id is required for creation')
+
+ success, changed, err_msg, results = (
+ pre_create(
+ client, subnet_id, allocation_id, eip_address,
+ if_exist_do_not_create, wait, wait_timeout,
+ client_token, check_mode=check_mode
+ )
+ )
+ else:
+ if not nat_gateway_id:
+ module.fail_json(msg='nat_gateway_id is required for removal')
+
+ else:
+ success, changed, err_msg, results = (
+ remove(
+ client, nat_gateway_id, wait, wait_timeout, release_eip,
+ check_mode=check_mode
+ )
+ )
+
+ if not success:
+ module.fail_json(
+ msg=err_msg, success=success, changed=changed
+ )
+ else:
+ module.exit_json(
+ msg=err_msg, success=success, changed=changed, **results
+ )
+
+# import module snippets
+from ansible.module_utils.basic import *
+from ansible.module_utils.ec2 import *
+
+if __name__ == '__main__':
+ main()
diff --git a/lib/ansible/modules/extras/cloud/amazon/ec2_vpc_net_facts.py b/lib/ansible/modules/extras/cloud/amazon/ec2_vpc_net_facts.py
new file mode 100644
index 0000000000..8de47ed975
--- /dev/null
+++ b/lib/ansible/modules/extras/cloud/amazon/ec2_vpc_net_facts.py
@@ -0,0 +1,125 @@
+#!/usr/bin/python
+#
+# This is a free software: you can redistribute it and/or modify
+# it under the terms of the GNU General Public License as published by
+# the Free Software Foundation, either version 3 of the License, or
+# (at your option) any later version.
+#
+# This Ansible library is distributed in the hope that it will be useful,
+# but WITHOUT ANY WARRANTY; without even the implied warranty of
+# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+# GNU General Public License for more details.
+#
+# You should have received a copy of the GNU General Public License
+# along with this library. If not, see <http://www.gnu.org/licenses/>.
+
+DOCUMENTATION = '''
+---
+module: ec2_vpc_net_facts
+short_description: Gather facts about ec2 VPCs in AWS
+description:
+ - Gather facts about ec2 VPCs in AWS
+version_added: "2.1"
+author: "Rob White (@wimnat)"
+options:
+ filters:
+ description:
+ - A dict of filters to apply. Each dict item consists of a filter key and a filter value. See U(http://docs.aws.amazon.com/AWSEC2/latest/APIReference/API_DescribeVpcs.html) for possible filters.
+ required: false
+ default: null
+
+extends_documentation_fragment:
+ - aws
+ - ec2
+'''
+
+EXAMPLES = '''
+# Note: These examples do not set authentication details, see the AWS Guide for details.
+
+# Gather facts about all VPCs
+- ec2_vpc_net_facts:
+
+# Gather facts about a particular VPC using VPC ID
+- ec2_vpc_net_facts:
+ filters:
+ vpc-id: vpc-00112233
+
+# Gather facts about any VPC with a tag key Name and value Example
+- ec2_vpc_net_facts:
+ filters:
+ "tag:Name": Example
+
+'''
+
+try:
+ import boto.vpc
+ from boto.exception import BotoServerError
+ HAS_BOTO = True
+except ImportError:
+ HAS_BOTO = False
+
+def get_vpc_info(vpc):
+
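+ # classic_link_enabled is only present in accounts/regions that
+ # support EC2-Classic, so fall back to False when it is missing.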
+ try:
+ classic_link = vpc.classic_link_enabled
+ except AttributeError:
+ classic_link = False
+
+ vpc_info = { 'id': vpc.id,
+ 'instance_tenancy': vpc.instance_tenancy,
+ 'classic_link_enabled': classic_link,
+ 'dhcp_options_id': vpc.dhcp_options_id,
+ 'state': vpc.state,
+ 'is_default': vpc.is_default,
+ 'cidr_block': vpc.cidr_block,
+ 'tags': vpc.tags
+ }
+
+ return vpc_info
+
+def list_ec2_vpcs(connection, module):
+
+ filters = module.params.get("filters")
+ vpc_dict_array = []
+
+ try:
+ all_vpcs = connection.get_all_vpcs(filters=filters)
+ except BotoServerError as e:
+ module.fail_json(msg=e.message)
+
+ for vpc in all_vpcs:
+ vpc_dict_array.append(get_vpc_info(vpc))
+
+ module.exit_json(vpcs=vpc_dict_array)
+
+
+def main():
+ argument_spec = ec2_argument_spec()
+ argument_spec.update(
+ dict(
+ filters = dict(default=None, type='dict')
+ )
+ )
+
+ module = AnsibleModule(argument_spec=argument_spec)
+
+ if not HAS_BOTO:
+ module.fail_json(msg='boto required for this module')
+
+ region, ec2_url, aws_connect_params = get_aws_connection_info(module)
+
+ if region:
+ try:
+ connection = connect_to_aws(boto.vpc, region, **aws_connect_params)
+ except (boto.exception.NoAuthHandlerFound, StandardError) as e:
+ module.fail_json(msg=str(e))
+ else:
+ module.fail_json(msg="region must be specified")
+
+ list_ec2_vpcs(connection, module)
+
+from ansible.module_utils.basic import *
+from ansible.module_utils.ec2 import *
+
+if __name__ == '__main__':
+ main()
diff --git a/lib/ansible/modules/extras/cloud/amazon/ec2_vpc_peer.py b/lib/ansible/modules/extras/cloud/amazon/ec2_vpc_peer.py
new file mode 100644
index 0000000000..3eb6582d0f
--- /dev/null
+++ b/lib/ansible/modules/extras/cloud/amazon/ec2_vpc_peer.py
@@ -0,0 +1,363 @@
+#!/usr/bin/python
+# This file is part of Ansible
+#
+# Ansible is free software: you can redistribute it and/or modify
+# it under the terms of the GNU General Public License as published by
+# the Free Software Foundation, either version 3 of the License, or
+# (at your option) any later version.
+#
+# Ansible is distributed in the hope that it will be useful,
+# but WITHOUT ANY WARRANTY; without even the implied warranty of
+# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+# GNU General Public License for more details.
+#
+# You should have received a copy of the GNU General Public License
+# along with Ansible. If not, see <http://www.gnu.org/licenses/>.
+DOCUMENTATION = '''
+module: ec2_vpc_peer
+short_description: create, delete, accept, and reject VPC peering connections between two VPCs.
+description:
+ - Read the AWS documentation for VPC Peering Connections
+ U(http://docs.aws.amazon.com/AmazonVPC/latest/UserGuide/vpc-peering.html)
+version_added: "2.2"
+options:
+ vpc_id:
+ description:
+ - VPC id of the requesting VPC.
+ required: false
+ peer_vpc_id:
+ description:
+ - VPC id of the accepting VPC.
+ required: false
+ peer_owner_id:
+ description:
+ - The AWS account number for cross account peering.
+ required: false
+ tags:
+ description:
+ - Dictionary of tags to look for and apply when creating a Peering Connection.
+ required: false
+ state:
+ description:
+ - Create, delete, accept, reject a peering connection.
+ required: false
+ default: present
+ choices: ['present', 'absent', 'accept', 'reject']
+author: Mike Mochan (@mmochan)
+extends_documentation_fragment: aws
+requirements: [ botocore, boto3, json ]
+'''
+
+EXAMPLES = '''
+# Complete example to create and accept a local peering connection.
+- name: Create local account VPC peering Connection
+ ec2_vpc_peer:
+ region: ap-southeast-2
+ vpc_id: vpc-12345678
+ peer_vpc_id: vpc-87654321
+ state: present
+ tags:
+ Name: Peering connection for VPC 21 to VPC 22
+ CostCode: CC1234
+ Project: phoenix
+ register: vpc_peer
+
+- name: Accept local VPC peering request
+ ec2_vpc_peer:
+ region: ap-southeast-2
+ peering_id: "{{ vpc_peer.peering_id }}"
+ state: accept
+ register: action_peer
+
+# Complete example to delete a local peering connection.
+- name: Create local account VPC peering Connection
+ ec2_vpc_peer:
+ region: ap-southeast-2
+ vpc_id: vpc-12345678
+ peer_vpc_id: vpc-87654321
+ state: present
+ tags:
+ Name: Peering connection for VPC 21 to VPC 22
+ CostCode: CC1234
+ Project: phoenix
+ register: vpc_peer
+
+- name: delete a local VPC peering Connection
+ ec2_vpc_peer:
+ region: ap-southeast-2
+ peering_id: "{{ vpc_peer.peering_id }}"
+ state: absent
+ register: vpc_peer
+
+ # Complete example to create and accept a cross account peering connection.
+- name: Create cross account VPC peering Connection
+ ec2_vpc_peer:
+ region: ap-southeast-2
+ vpc_id: vpc-12345678
+ peer_vpc_id: vpc-12345678
+ peer_owner_id: 123456789102
+ state: present
+ tags:
+ Name: Peering connection for VPC 21 to VPC 22
+ CostCode: CC1234
+ Project: phoenix
+ register: vpc_peer
+
+- name: Accept peering connection from remote account
+ ec2_vpc_peer:
+ region: ap-southeast-2
+ peering_id: "{{ vpc_peer.peering_id }}"
+ profile: bot03_profile_for_cross_account
+ state: accept
+ register: vpc_peer
+
+# Complete example to create and reject a local peering connection.
+- name: Create local account VPC peering Connection
+ ec2_vpc_peer:
+ region: ap-southeast-2
+ vpc_id: vpc-12345678
+ peer_vpc_id: vpc-87654321
+ state: present
+ tags:
+ Name: Peering connection for VPC 21 to VPC 22
+ CostCode: CC1234
+ Project: phoenix
+ register: vpc_peer
+
+- name: Reject a local VPC peering Connection
+ ec2_vpc_peer:
+ region: ap-southeast-2
+ peering_id: "{{ vpc_peer.peering_id }}"
+ state: reject
+
+# Complete example to create and accept a cross account peering connection.
+- name: Create cross account VPC peering Connection
+ ec2_vpc_peer:
+ region: ap-southeast-2
+ vpc_id: vpc-12345678
+ peer_vpc_id: vpc-12345678
+ peer_owner_id: 123456789102
+ state: present
+ tags:
+ Name: Peering connection for VPC 21 to VPC 22
+ CostCode: CC1234
+ Project: phoenix
+ register: vpc_peer
+
+- name: Accept a cross account VPC peering connection request
+ ec2_vpc_peer:
+ region: ap-southeast-2
+ peering_id: "{{ vpc_peer.peering_id }}"
+ profile: bot03_profile_for_cross_account
+ state: accept
+ tags:
+ Name: Peering connection for VPC 21 to VPC 22
+ CostCode: CC1234
+ Project: phoenix
+
+# Complete example to create and reject a cross account peering connection.
+- name: Create cross account VPC peering Connection
+ ec2_vpc_peer:
+ region: ap-southeast-2
+ vpc_id: vpc-12345678
+ peer_vpc_id: vpc-12345678
+ peer_owner_id: 123456789102
+ state: present
+ tags:
+ Name: Peering connection for VPC 21 to VPC 22
+ CostCode: CC1234
+ Project: phoenix
+ register: vpc_peer
+
+- name: Reject a cross account VPC peering Connection
+ ec2_vpc_peer:
+ region: ap-southeast-2
+ peering_id: "{{ vpc_peer.peering_id }}"
+ profile: bot03_profile_for_cross_account
+ state: reject
+
+'''
+RETURN = '''
+task:
+ description: The result of the create, accept, reject or delete action.
+ returned: success
+ type: dictionary
+'''
+
+try:
+ import json
+ import botocore
+ import boto3
+ HAS_BOTO3 = True
+except ImportError:
+ HAS_BOTO3 = False
+
+
+def tags_changed(pcx_id, client, module):
+ changed = False
+ tags = dict()
+ if module.params.get('tags'):
+ tags = module.params.get('tags')
+ pcx = find_pcx_by_id(pcx_id, client, module)
+ if pcx['VpcPeeringConnections']:
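+ # Flatten the existing and requested tags into comparable lists of
+ # keys and values, then compare the sorted lists directly.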
+ pcx_values = [t.values() for t in pcx['VpcPeeringConnections'][0]['Tags']]
+ pcx_tags = [item for sublist in pcx_values for item in sublist]
+ tag_values = [[key, str(value)] for key, value in tags.iteritems()]
+ tags = [item for sublist in tag_values for item in sublist]
+ if sorted(pcx_tags) == sorted(tags):
+ changed = False
+ return changed
+ else:
+ delete_tags(pcx_id, client, module)
+ create_tags(pcx_id, client, module)
+ changed = True
+ return changed
+ return changed
+
+
+def describe_peering_connections(params, client):
+ result = client.describe_vpc_peering_connections(Filters=[
+ {'Name': 'requester-vpc-info.vpc-id', 'Values': [params['VpcId']]},
+ {'Name': 'accepter-vpc-info.vpc-id', 'Values': [params['PeerVpcId']]}
+ ])
+ if result['VpcPeeringConnections'] == []:
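+ # The peering may have been requested from the other side, so
+ # retry the lookup with requester and accepter swapped.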
+ result = client.describe_vpc_peering_connections(Filters=[
+ {'Name': 'requester-vpc-info.vpc-id', 'Values': [params['PeerVpcId']]},
+ {'Name': 'accepter-vpc-info.vpc-id', 'Values': [params['VpcId']]}
+ ])
+ return result
+
+
+def is_active(peering_conn):
+ return peering_conn['Status']['Code'] == 'active'
+
+
+def is_pending(peering_conn):
+ return peering_conn['Status']['Code'] == 'pending-acceptance'
+
+
+def create_peer_connection(client, module):
+ changed = False
+ params = dict()
+ params['VpcId'] = module.params.get('vpc_id')
+ params['PeerVpcId'] = module.params.get('peer_vpc_id')
+ if module.params.get('peer_owner_id'):
+ params['PeerOwnerId'] = str(module.params.get('peer_owner_id'))
+ params['DryRun'] = module.check_mode
+ peering_conns = describe_peering_connections(params, client)
+ for peering_conn in peering_conns['VpcPeeringConnections']:
+ pcx_id = peering_conn['VpcPeeringConnectionId']
+ if tags_changed(pcx_id, client, module):
+ changed = True
+ if is_active(peering_conn):
+ return (changed, peering_conn['VpcPeeringConnectionId'])
+ if is_pending(peering_conn):
+ return (changed, peering_conn['VpcPeeringConnectionId'])
+ try:
+ peering_conn = client.create_vpc_peering_connection(**params)
+ pcx_id = peering_conn['VpcPeeringConnection']['VpcPeeringConnectionId']
+ if module.params.get('tags'):
+ create_tags(pcx_id, client, module)
+ changed = True
+ return (changed, peering_conn['VpcPeeringConnection']['VpcPeeringConnectionId'])
+ except botocore.exceptions.ClientError as e:
+ module.fail_json(msg=str(e))
+
+
+def peer_status(client, module):
+ params = dict()
+ params['VpcPeeringConnectionIds'] = [module.params.get('peering_id')]
+ vpc_peering_connection = client.describe_vpc_peering_connections(**params)
+ return vpc_peering_connection['VpcPeeringConnections'][0]['Status']['Code']
+
+
+def accept_reject_delete(state, client, module):
+ changed = False
+ params = dict()
+ params['VpcPeeringConnectionId'] = module.params.get('peering_id')
+ params['DryRun'] = module.check_mode
+ invocations = {
+ 'accept': client.accept_vpc_peering_connection,
+ 'reject': client.reject_vpc_peering_connection,
+ 'absent': client.delete_vpc_peering_connection
+ }
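+ # Dispatch to the boto3 call that matches the requested state;
+ # deletes always run, accept/reject only while not yet active.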
+ if state == 'absent' or peer_status(client, module) != 'active':
+ try:
+ invocations[state](**params)
+ if module.params.get('tags'):
+ create_tags(params['VpcPeeringConnectionId'], client, module)
+ changed = True
+ except botocore.exceptions.ClientError as e:
+ module.fail_json(msg=str(e))
+ if tags_changed(params['VpcPeeringConnectionId'], client, module):
+ changed = True
+ return changed, params['VpcPeeringConnectionId']
+
+
+def load_tags(module):
+ tags = []
+ if module.params.get('tags'):
+ for name, value in module.params.get('tags').iteritems():
+ tags.append({'Key': name, 'Value': str(value)})
+ return tags
+
+
+def create_tags(pcx_id, client, module):
+ try:
+ delete_tags(pcx_id, client, module)
+ client.create_tags(Resources=[pcx_id], Tags=load_tags(module))
+ except botocore.exceptions.ClientError as e:
+ module.fail_json(msg=str(e))
+
+
+def delete_tags(pcx_id, client, module):
+ try:
+ client.delete_tags(Resources=[pcx_id])
+ except botocore.exceptions.ClientError as e:
+ module.fail_json(msg=str(e))
+
+
+def find_pcx_by_id(pcx_id, client, module):
+ try:
+ return client.describe_vpc_peering_connections(VpcPeeringConnectionIds=[pcx_id])
+ except botocore.exceptions.ClientError as e:
+ module.fail_json(msg=str(e))
+
+
+def main():
+ argument_spec = ec2_argument_spec()
+ argument_spec.update(dict(
+ vpc_id=dict(),
+ peer_vpc_id=dict(),
+ peering_id=dict(),
+ peer_owner_id=dict(),
+ tags=dict(required=False, type='dict'),
+ profile=dict(),
+ state=dict(default='present', choices=['present', 'absent', 'accept', 'reject'])
+ )
+ )
+ module = AnsibleModule(argument_spec=argument_spec)
+
+ if not HAS_BOTO3:
+ module.fail_json(msg='json, botocore and boto3 are required.')
+ state = module.params.get('state').lower()
+ try:
+ region, ec2_url, aws_connect_kwargs = get_aws_connection_info(module, boto3=True)
+ client = boto3_conn(module, conn_type='client', resource='ec2', region=region, endpoint=ec2_url, **aws_connect_kwargs)
+ except botocore.exceptions.NoCredentialsError as e:
+ module.fail_json(msg="Can't authorize connection - "+str(e))
+
+ if state == 'present':
+ (changed, results) = create_peer_connection(client, module)
+ module.exit_json(changed=changed, peering_id=results)
+ else:
+ (changed, results) = accept_reject_delete(state, client, module)
+ module.exit_json(changed=changed, peering_id=results)
+
+
+# import module snippets
+from ansible.module_utils.basic import *
+from ansible.module_utils.ec2 import *
+
+if __name__ == '__main__':
+ main()
diff --git a/lib/ansible/modules/extras/cloud/amazon/ec2_vpc_route_table.py b/lib/ansible/modules/extras/cloud/amazon/ec2_vpc_route_table.py
new file mode 100644
index 0000000000..416e0b4304
--- /dev/null
+++ b/lib/ansible/modules/extras/cloud/amazon/ec2_vpc_route_table.py
@@ -0,0 +1,634 @@
+#!/usr/bin/python
+#
+# This is a free software: you can redistribute it and/or modify
+# it under the terms of the GNU General Public License as published by
+# the Free Software Foundation, either version 3 of the License, or
+# (at your option) any later version.
+#
+# This Ansible library is distributed in the hope that it will be useful,
+# but WITHOUT ANY WARRANTY; without even the implied warranty of
+# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+# GNU General Public License for more details.
+#
+# You should have received a copy of the GNU General Public License
+# along with this library. If not, see <http://www.gnu.org/licenses/>.
+
+DOCUMENTATION = '''
+---
+module: ec2_vpc_route_table
+short_description: Manage route tables for AWS virtual private clouds
+description:
+ - Manage route tables for AWS virtual private clouds
+version_added: "2.0"
+author: Robert Estelle (@erydo), Rob White (@wimnat)
+options:
+ lookup:
+ description:
+ - "Look up route table by either tags or by route table ID. Non-unique tag lookup will fail. If no tags are specified then no lookup for an existing route table is performed and a new route table will be created. To change tags of a route table, you must look up by id."
+ required: false
+ default: tag
+ choices: [ 'tag', 'id' ]
+ propagating_vgw_ids:
+ description:
+ - "Enable route propagation from virtual gateways specified by ID."
+ default: null
+ required: false
+ route_table_id:
+ description:
+ - "The ID of the route table to update or delete."
+ required: false
+ default: null
+ routes:
+ description:
+ - "List of routes in the route table.
+ Routes are specified as dicts containing the keys 'dest' and one of 'gateway_id',
+ 'instance_id', 'interface_id', or 'vpc_peering_connection_id'.
+ If 'gateway_id' is specified, you can refer to the VPC's IGW by using the value 'igw'. Routes are required when state=present."
+ required: false
+ default: null
+ state:
+ description:
+ - "Create or destroy the VPC route table"
+ required: false
+ default: present
+ choices: [ 'present', 'absent' ]
+ subnets:
+ description:
+ - "An array of subnets to add to this route table. Subnets may be specified by either subnet ID, Name tag, or by a CIDR such as '10.0.0.0/24'."
+ required: true
+ tags:
+ description:
+ - "A dictionary of resource tags of the form: { tag1: value1, tag2: value2 }. Tags are used to uniquely identify route tables within a VPC when the route_table_id is not supplied."
+ required: false
+ default: null
+ aliases: [ "resource_tags" ]
+ vpc_id:
+ description:
+ - "VPC ID of the VPC in which to create the route table."
+ required: true
+extends_documentation_fragment:
+ - aws
+ - ec2
+'''
+
+EXAMPLES = '''
+# Note: These examples do not set authentication details, see the AWS Guide for details.
+
+# Basic creation example:
+- name: Set up public subnet route table
+ ec2_vpc_route_table:
+ vpc_id: vpc-1245678
+ region: us-west-1
+ tags:
+ Name: Public
+ subnets:
+ - "{{ jumpbox_subnet.subnet.id }}"
+ - "{{ frontend_subnet.subnet.id }}"
+ - "{{ vpn_subnet.subnet_id }}"
+ routes:
+ - dest: 0.0.0.0/0
+ gateway_id: "{{ igw.gateway_id }}"
+ register: public_route_table
+
+- name: Set up NAT-protected route table
+ ec2_vpc_route_table:
+ vpc_id: vpc-1245678
+ region: us-west-1
+ tags:
+ Name: Internal
+ subnets:
+ - "{{ application_subnet.subnet.id }}"
+ - 'Database Subnet'
+ - '10.0.0.0/8'
+ routes:
+ - dest: 0.0.0.0/0
+ instance_id: "{{ nat.instance_id }}"
+ register: nat_route_table
+
+'''
+
+
+import sys # noqa
+import re
+
+try:
+ import boto.ec2
+ import boto.vpc
+ from boto.exception import EC2ResponseError
+ HAS_BOTO = True
+except ImportError:
+ HAS_BOTO = False
+ if __name__ != '__main__':
+ raise
+
+
+class AnsibleRouteTableException(Exception):
+ pass
+
+
+class AnsibleIgwSearchException(AnsibleRouteTableException):
+ pass
+
+
+class AnsibleTagCreationException(AnsibleRouteTableException):
+ pass
+
+
+class AnsibleSubnetSearchException(AnsibleRouteTableException):
+ pass
+
+CIDR_RE = re.compile('^(\d{1,3}\.){3}\d{1,3}\/\d{1,2}$')
+SUBNET_RE = re.compile('^subnet-[A-Za-z0-9]+$')
+ROUTE_TABLE_RE = re.compile('^rtb-[A-Za-z0-9]+$')
+
+
+def find_subnets(vpc_conn, vpc_id, identified_subnets):
+ """
+ Finds a list of subnets, each identified either by a raw ID, a unique
+ 'Name' tag, or a CIDR such as 10.0.0.0/8.
+
+ Note that this function is duplicated in other ec2 modules, and should
+ potentially be moved into a shared module_utils
+ """
+ subnet_ids = []
+ subnet_names = []
+ subnet_cidrs = []
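+ # Classify each identifier by its format: a raw subnet ID, a CIDR
+ # block, or (as the fallback) a Name tag value.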
+ for subnet in (identified_subnets or []):
+ if re.match(SUBNET_RE, subnet):
+ subnet_ids.append(subnet)
+ elif re.match(CIDR_RE, subnet):
+ subnet_cidrs.append(subnet)
+ else:
+ subnet_names.append(subnet)
+
+ subnets_by_id = []
+ if subnet_ids:
+ subnets_by_id = vpc_conn.get_all_subnets(
+ subnet_ids, filters={'vpc_id': vpc_id})
+
+ for subnet_id in subnet_ids:
+ if not any(s.id == subnet_id for s in subnets_by_id):
+ raise AnsibleSubnetSearchException(
+ 'Subnet ID "{0}" does not exist'.format(subnet_id))
+
+ subnets_by_cidr = []
+ if subnet_cidrs:
+ subnets_by_cidr = vpc_conn.get_all_subnets(
+ filters={'vpc_id': vpc_id, 'cidr': subnet_cidrs})
+
+ for cidr in subnet_cidrs:
+ if not any(s.cidr_block == cidr for s in subnets_by_cidr):
+ raise AnsibleSubnetSearchException(
+ 'Subnet CIDR "{0}" does not exist'.format(cidr))
+
+ subnets_by_name = []
+ if subnet_names:
+ subnets_by_name = vpc_conn.get_all_subnets(
+ filters={'vpc_id': vpc_id, 'tag:Name': subnet_names})
+
+ for name in subnet_names:
+ matching_count = len([1 for s in subnets_by_name if s.tags.get('Name') == name])
+ if matching_count == 0:
+ raise AnsibleSubnetSearchException(
+ 'Subnet named "{0}" does not exist'.format(name))
+ elif matching_count > 1:
+ raise AnsibleSubnetSearchException(
+ 'Multiple subnets named "{0}"'.format(name))
+
+ return subnets_by_id + subnets_by_cidr + subnets_by_name
+
+
+def find_igw(vpc_conn, vpc_id):
+ """
+ Finds the Internet gateway for the given VPC ID.
+
+ Raises an AnsibleIgwSearchException if either no IGW can be found, or more
+ than one found for the given VPC.
+
+ Note that this function is duplicated in other ec2 modules, and should
+ potentially be moved into a shared module_utils
+ """
+ igw = vpc_conn.get_all_internet_gateways(
+ filters={'attachment.vpc-id': vpc_id})
+
+ if not igw:
+ raise AnsibleIgwSearchException('No IGW found for VPC {0}'.
+ format(vpc_id))
+ elif len(igw) == 1:
+ return igw[0].id
+ else:
+ raise AnsibleIgwSearchException('Multiple IGWs found for VPC {0}'.
+ format(vpc_id))
+
+
+def get_resource_tags(vpc_conn, resource_id):
+ return dict((t.name, t.value) for t in
+ vpc_conn.get_all_tags(filters={'resource-id': resource_id}))
+
+
+def tags_match(match_tags, candidate_tags):
+ return all((k in candidate_tags and candidate_tags[k] == v
+ for k, v in match_tags.iteritems()))
+
+
+def ensure_tags(vpc_conn, resource_id, tags, add_only, check_mode):
+ try:
+ cur_tags = get_resource_tags(vpc_conn, resource_id)
+ if tags == cur_tags:
+ return {'changed': False, 'tags': cur_tags}
+
+ to_delete = dict((k, cur_tags[k]) for k in cur_tags if k not in tags)
+ if to_delete and not add_only:
+ vpc_conn.delete_tags(resource_id, to_delete, dry_run=check_mode)
+
+ to_add = dict((k, tags[k]) for k in tags if k not in cur_tags)
+ if to_add:
+ vpc_conn.create_tags(resource_id, to_add, dry_run=check_mode)
+
+ latest_tags = get_resource_tags(vpc_conn, resource_id)
+ return {'changed': True, 'tags': latest_tags}
+ except EC2ResponseError as e:
+ raise AnsibleTagCreationException(
+ 'Unable to update tags for {0}, error: {1}'.format(resource_id, e))
+
+
+def get_route_table_by_id(vpc_conn, vpc_id, route_table_id):
+
+ route_table = None
+ route_tables = vpc_conn.get_all_route_tables(route_table_ids=[route_table_id], filters={'vpc_id': vpc_id})
+ if route_tables:
+ route_table = route_tables[0]
+
+ return route_table
+
+def get_route_table_by_tags(vpc_conn, vpc_id, tags):
+
+ count = 0
+ route_table = None
+ route_tables = vpc_conn.get_all_route_tables(filters={'vpc_id': vpc_id})
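+ # Scan every route table in the VPC and require exactly one match,
+ # since a tag lookup must identify a unique table.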
+ for table in route_tables:
+ this_tags = get_resource_tags(vpc_conn, table.id)
+ if tags_match(tags, this_tags):
+ route_table = table
+ count += 1
+
+ if count > 1:
+ raise RuntimeError("Tags provided do not identify a unique route table")
+ else:
+ return route_table
+
+
+def route_spec_matches_route(route_spec, route):
+ key_attr_map = {
+ 'destination_cidr_block': 'destination_cidr_block',
+ 'gateway_id': 'gateway_id',
+ 'instance_id': 'instance_id',
+ 'interface_id': 'interface_id',
+ 'vpc_peering_connection_id': 'vpc_peering_connection_id',
+ }
+
+ # This is a workaround to catch managed NAT gateways as they do not show
+ # up in any of the returned values when describing route tables.
+ # The caveat of doing it this way is that if there was an existing
+ # route for another nat gateway in this route table there is not a way to
+ # change to another nat gateway id. Long term solution would be to utilise
+ # boto3 which is a very big task for this module or to update boto.
+ if route_spec.get('gateway_id') and 'nat-' in route_spec['gateway_id']:
+ if route.destination_cidr_block == route_spec['destination_cidr_block']:
+ if all((not route.gateway_id, not route.instance_id, not route.interface_id, not route.vpc_peering_connection_id)):
+ return True
+
+ for k in key_attr_map.iterkeys():
+ if k in route_spec:
+ if route_spec[k] != getattr(route, k):
+ return False
+ return True
+
+
+def rename_key(d, old_key, new_key):
+ d[new_key] = d[old_key]
+ del d[old_key]
+
+
+def index_of_matching_route(route_spec, routes_to_match):
+ for i, route in enumerate(routes_to_match):
+ if route_spec_matches_route(route_spec, route):
+ return i
+
+
+def ensure_routes(vpc_conn, route_table, route_specs, propagating_vgw_ids,
+ check_mode):
+ routes_to_match = list(route_table.routes)
+ route_specs_to_create = []
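+ # Match each desired route spec against the existing routes; specs
+ # without a match are created, and unmatched existing routes become
+ # candidates for deletion below.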
+ for route_spec in route_specs:
+ i = index_of_matching_route(route_spec, routes_to_match)
+ if i is None:
+ route_specs_to_create.append(route_spec)
+ else:
+ del routes_to_match[i]
+
+ # NOTE: As of boto==2.38.0, the origin of a route is not available
+ # (for example, whether it came from a gateway with route propagation
+ # enabled). Testing for origin == 'EnableVgwRoutePropagation' is more
+ # correct than checking whether the route uses a propagating VGW.
+ # The current logic will leave non-propagated routes using propagating
+ # VGWs in place.
+ routes_to_delete = []
+ for r in routes_to_match:
+ if r.gateway_id:
+ if r.gateway_id != 'local' and not r.gateway_id.startswith('vpce-'):
+ if not propagating_vgw_ids or r.gateway_id not in propagating_vgw_ids:
+ routes_to_delete.append(r)
+ else:
+ routes_to_delete.append(r)
+
+ changed = bool(routes_to_delete or route_specs_to_create)
+ if changed:
+ for route in routes_to_delete:
+ try:
+ vpc_conn.delete_route(route_table.id,
+ route.destination_cidr_block,
+ dry_run=check_mode)
+ except EC2ResponseError as e:
+ if e.error_code == 'DryRunOperation':
+ pass
+
+ for route_spec in route_specs_to_create:
+ try:
+ vpc_conn.create_route(route_table.id,
+ dry_run=check_mode,
+ **route_spec)
+ except EC2ResponseError as e:
+ if e.error_code == 'DryRunOperation':
+ pass
+
+ return {'changed': bool(changed)}
+
+
+def ensure_subnet_association(vpc_conn, vpc_id, route_table_id, subnet_id,
+ check_mode):
+ route_tables = vpc_conn.get_all_route_tables(
+ filters={'association.subnet_id': subnet_id, 'vpc_id': vpc_id}
+ )
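+ # If the subnet is already associated with a different route table,
+ # drop that association before creating the new one.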
+ for route_table in route_tables:
+ if route_table.id is None:
+ continue
+ for a in route_table.associations:
+ if a.subnet_id == subnet_id:
+ if route_table.id == route_table_id:
+ return {'changed': False, 'association_id': a.id}
+ else:
+ if check_mode:
+ return {'changed': True}
+ vpc_conn.disassociate_route_table(a.id)
+
+ association_id = vpc_conn.associate_route_table(route_table_id, subnet_id)
+ return {'changed': True, 'association_id': association_id}
+
+
+def ensure_subnet_associations(vpc_conn, vpc_id, route_table, subnets,
+ check_mode):
+ current_association_ids = [a.id for a in route_table.associations]
+ new_association_ids = []
+ changed = False
+ for subnet in subnets:
+ result = ensure_subnet_association(
+ vpc_conn, vpc_id, route_table.id, subnet.id, check_mode)
+ changed = changed or result['changed']
+ if changed and check_mode:
+ return {'changed': True}
+ new_association_ids.append(result['association_id'])
+
+ to_delete = [a_id for a_id in current_association_ids
+ if a_id not in new_association_ids]
+
+ for a_id in to_delete:
+ changed = True
+ vpc_conn.disassociate_route_table(a_id, dry_run=check_mode)
+
+ return {'changed': changed}
+
+
+def ensure_propagation(vpc_conn, route_table, propagating_vgw_ids,
+ check_mode):
+
+ # NOTE: As of boto==2.38.0, it is not yet possible to query the existing
+ # propagating gateways. However, EC2 does support this as shown in its API
+ # documentation. For now, a reasonable proxy for this is the presence of
+ # propagated routes using the gateway in the route table. If such a route
+ # is found, propagation is almost certainly enabled.
+ changed = False
+ for vgw_id in propagating_vgw_ids:
+ for r in list(route_table.routes):
+ if r.gateway_id == vgw_id:
+ return {'changed': False}
+
+ changed = True
+ vpc_conn.enable_vgw_route_propagation(route_table.id,
+ vgw_id,
+ dry_run=check_mode)
+
+ return {'changed': changed}
+
+
+def ensure_route_table_absent(connection, module):
+
+ lookup = module.params.get('lookup')
+ route_table_id = module.params.get('route_table_id')
+ tags = module.params.get('tags')
+ vpc_id = module.params.get('vpc_id')
+
+ if lookup == 'tag':
+ if tags is not None:
+ try:
+ route_table = get_route_table_by_tags(connection, vpc_id, tags)
+ except EC2ResponseError as e:
+ module.fail_json(msg=e.message)
+ except RuntimeError as e:
+ module.fail_json(msg=e.args[0])
+ else:
+ route_table = None
+ elif lookup == 'id':
+ try:
+ route_table = get_route_table_by_id(connection, vpc_id, route_table_id)
+ except EC2ResponseError as e:
+ module.fail_json(msg=e.message)
+
+ if route_table is None:
+ return {'changed': False}
+
+ try:
+ connection.delete_route_table(route_table.id, dry_run=module.check_mode)
+ except EC2ResponseError as e:
+ if e.error_code == 'DryRunOperation':
+ pass
+ else:
+ module.fail_json(msg=e.message)
+
+ return {'changed': True}
+
+
+def get_route_table_info(route_table):
+
+ # Add any routes to array
+ routes = []
+ for route in route_table.routes:
+ routes.append(route.__dict__)
+
+ route_table_info = { 'id': route_table.id,
+ 'routes': routes,
+ 'tags': route_table.tags,
+ 'vpc_id': route_table.vpc_id
+ }
+
+ return route_table_info
+
+
+def create_route_spec(connection, module, vpc_id):
+ routes = module.params.get('routes')
+
+ for route_spec in routes:
+ rename_key(route_spec, 'dest', 'destination_cidr_block')
+
+ if route_spec.get('gateway_id') and route_spec['gateway_id'].lower() == 'igw':
+ igw = find_igw(connection, vpc_id)
+ route_spec['gateway_id'] = igw
+
+ return routes
+
+
+def ensure_route_table_present(connection, module):
+
+ lookup = module.params.get('lookup')
+ propagating_vgw_ids = module.params.get('propagating_vgw_ids')
+ route_table_id = module.params.get('route_table_id')
+ subnets = module.params.get('subnets')
+ tags = module.params.get('tags')
+ vpc_id = module.params.get('vpc_id')
+ try:
+ routes = create_route_spec(connection, module, vpc_id)
+ except AnsibleIgwSearchException as e:
+ module.fail_json(msg=e[0])
+
+ changed = False
+ tags_valid = False
+
+ if lookup == 'tag':
+ if tags is not None:
+ try:
+ route_table = get_route_table_by_tags(connection, vpc_id, tags)
+ except EC2ResponseError as e:
+ module.fail_json(msg=e.message)
+ except RuntimeError as e:
+ module.fail_json(msg=e.args[0])
+ else:
+ route_table = None
+ elif lookup == 'id':
+ try:
+ route_table = get_route_table_by_id(connection, vpc_id, route_table_id)
+ except EC2ResponseError as e:
+ module.fail_json(msg=e.message)
+
+ # If no route table returned then create new route table
+ if route_table is None:
+ try:
+ route_table = connection.create_route_table(vpc_id, dry_run=module.check_mode)
+ changed = True
+ except EC2ResponseError as e:
+ if e.error_code == 'DryRunOperation':
+ module.exit_json(changed=True)
+
+ module.fail_json(msg=e.message)
+
+ if routes is not None:
+ try:
+ result = ensure_routes(connection, route_table, routes, propagating_vgw_ids, module.check_mode)
+ changed = changed or result['changed']
+ except EC2ResponseError as e:
+ module.fail_json(msg=e.message)
+
+ if propagating_vgw_ids is not None:
+ result = ensure_propagation(connection, route_table,
+ propagating_vgw_ids,
+ check_mode=module.check_mode)
+ changed = changed or result['changed']
+
+ if not tags_valid and tags is not None:
+ result = ensure_tags(connection, route_table.id, tags,
+ add_only=True, check_mode=module.check_mode)
+ changed = changed or result['changed']
+
+ if subnets:
+ associated_subnets = []
+ try:
+ associated_subnets = find_subnets(connection, vpc_id, subnets)
+ except EC2ResponseError as e:
+ raise AnsibleRouteTableException(
+ 'Unable to find subnets for route table {0}, error: {1}'
+ .format(route_table, e)
+ )
+
+ try:
+ result = ensure_subnet_associations(connection, vpc_id, route_table, associated_subnets, module.check_mode)
+ changed = changed or result['changed']
+ except EC2ResponseError as e:
+ raise AnsibleRouteTableException(
+ 'Unable to associate subnets for route table {0}, error: {1}'
+ .format(route_table, e)
+ )
+
+ module.exit_json(changed=changed, route_table=get_route_table_info(route_table))
+
+
+def main():
+ argument_spec = ec2_argument_spec()
+ argument_spec.update(
+ dict(
+ lookup = dict(default='tag', required=False, choices=['tag', 'id']),
+ propagating_vgw_ids = dict(default=None, required=False, type='list'),
+ route_table_id = dict(default=None, required=False),
+ routes = dict(default=[], required=False, type='list'),
+ state = dict(default='present', choices=['present', 'absent']),
+ subnets = dict(default=None, required=False, type='list'),
+ tags = dict(default=None, required=False, type='dict', aliases=['resource_tags']),
+ vpc_id = dict(required=True)
+ )
+ )
+
+ module = AnsibleModule(argument_spec=argument_spec, supports_check_mode=True)
+
+ if not HAS_BOTO:
+ module.fail_json(msg='boto is required for this module')
+
+ region, ec2_url, aws_connect_params = get_aws_connection_info(module)
+
+ if region:
+ try:
+ connection = connect_to_aws(boto.vpc, region, **aws_connect_params)
+ except (boto.exception.NoAuthHandlerFound, AnsibleAWSError) as e:
+ module.fail_json(msg=str(e))
+ else:
+ module.fail_json(msg="region must be specified")
+
+ lookup = module.params.get('lookup')
+ route_table_id = module.params.get('route_table_id')
+ state = module.params.get('state', 'present')
+
+ if lookup == 'id' and route_table_id is None:
+ module.fail_json("You must specify route_table_id if lookup is set to id")
+
+ try:
+ if state == 'present':
+ result = ensure_route_table_present(connection, module)
+ elif state == 'absent':
+ result = ensure_route_table_absent(connection, module)
+ except AnsibleRouteTableException as e:
+ module.fail_json(msg=str(e))
+
+ module.exit_json(**result)
+
+from ansible.module_utils.basic import * # noqa
+from ansible.module_utils.ec2 import * # noqa
+
+if __name__ == '__main__':
+ main()
diff --git a/lib/ansible/modules/extras/cloud/amazon/ec2_vpc_route_table_facts.py b/lib/ansible/modules/extras/cloud/amazon/ec2_vpc_route_table_facts.py
new file mode 100644
index 0000000000..8b5e60ab2c
--- /dev/null
+++ b/lib/ansible/modules/extras/cloud/amazon/ec2_vpc_route_table_facts.py
@@ -0,0 +1,125 @@
+#!/usr/bin/python
+#
+# This is a free software: you can redistribute it and/or modify
+# it under the terms of the GNU General Public License as published by
+# the Free Software Foundation, either version 3 of the License, or
+# (at your option) any later version.
+#
+# This Ansible library is distributed in the hope that it will be useful,
+# but WITHOUT ANY WARRANTY; without even the implied warranty of
+# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+# GNU General Public License for more details.
+#
+# You should have received a copy of the GNU General Public License
+# along with this library. If not, see <http://www.gnu.org/licenses/>.
+
+DOCUMENTATION = '''
+---
+module: ec2_vpc_route_table_facts
+short_description: Gather facts about ec2 VPC route tables in AWS
+description:
+ - Gather facts about ec2 VPC route tables in AWS
+version_added: "2.0"
+author: "Rob White (@wimnat)"
+options:
+ filters:
+ description:
+ - A dict of filters to apply. Each dict item consists of a filter key and a filter value. See U(http://docs.aws.amazon.com/AWSEC2/latest/APIReference/API_DescribeRouteTables.html) for possible filters.
+ required: false
+ default: null
+extends_documentation_fragment:
+ - aws
+ - ec2
+'''
+
+EXAMPLES = '''
+# Note: These examples do not set authentication details, see the AWS Guide for details.
+
+# Gather facts about all VPC route tables
+- ec2_vpc_route_table_facts:
+
+# Gather facts about a particular VPC route table using route table ID
+- ec2_vpc_route_table_facts:
+ filters:
+ route-table-id: rtb-00112233
+
+# Gather facts about any VPC route table with a tag key Name and value Example
+- ec2_vpc_route_table_facts:
+ filters:
+ "tag:Name": Example
+
+# Gather facts about any VPC route table within VPC with ID vpc-abcdef00
+- ec2_vpc_route_table_facts:
+ filters:
+ vpc-id: vpc-abcdef00
+
+'''
+
+try:
+ import boto.vpc
+ from boto.exception import BotoServerError
+ HAS_BOTO = True
+except ImportError:
+ HAS_BOTO = False
+
+def get_route_table_info(route_table):
+
+ # Add any routes to array
+ routes = []
+ for route in route_table.routes:
+ routes.append(route.__dict__)
+
+ route_table_info = { 'id': route_table.id,
+ 'routes': routes,
+ 'tags': route_table.tags,
+ 'vpc_id': route_table.vpc_id
+ }
+
+ return route_table_info
+
+def list_ec2_vpc_route_tables(connection, module):
+
+ filters = module.params.get("filters")
+ route_table_dict_array = []
+
+ try:
+ all_route_tables = connection.get_all_route_tables(filters=filters)
+ except BotoServerError as e:
+ module.fail_json(msg=e.message)
+
+ for route_table in all_route_tables:
+ route_table_dict_array.append(get_route_table_info(route_table))
+
+ module.exit_json(route_tables=route_table_dict_array)
+
+
+def main():
+ argument_spec = ec2_argument_spec()
+ argument_spec.update(
+ dict(
+ filters = dict(default=None, type='dict')
+ )
+ )
+
+ module = AnsibleModule(argument_spec=argument_spec)
+
+ if not HAS_BOTO:
+ module.fail_json(msg='boto required for this module')
+
+ region, ec2_url, aws_connect_params = get_aws_connection_info(module)
+
+ if region:
+ try:
+ connection = connect_to_aws(boto.vpc, region, **aws_connect_params)
+ except (boto.exception.NoAuthHandlerFound, AnsibleAWSError) as e:
+ module.fail_json(msg=str(e))
+ else:
+ module.fail_json(msg="region must be specified")
+
+ list_ec2_vpc_route_tables(connection, module)
+
+from ansible.module_utils.basic import *
+from ansible.module_utils.ec2 import *
+
+if __name__ == '__main__':
+ main()
diff --git a/lib/ansible/modules/extras/cloud/amazon/ec2_vpc_subnet.py b/lib/ansible/modules/extras/cloud/amazon/ec2_vpc_subnet.py
new file mode 100644
index 0000000000..6e7c3e7d43
--- /dev/null
+++ b/lib/ansible/modules/extras/cloud/amazon/ec2_vpc_subnet.py
@@ -0,0 +1,272 @@
+#!/usr/bin/python
+#
+# This is a free software: you can redistribute it and/or modify
+# it under the terms of the GNU General Public License as published by
+# the Free Software Foundation, either version 3 of the License, or
+# (at your option) any later version.
+#
+# This Ansible library is distributed in the hope that it will be useful,
+# but WITHOUT ANY WARRANTY; without even the implied warranty of
+# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+# GNU General Public License for more details.
+#
+# You should have received a copy of the GNU General Public License
+# along with this library. If not, see <http://www.gnu.org/licenses/>.
+
+DOCUMENTATION = '''
+---
+module: ec2_vpc_subnet
+short_description: Manage subnets in AWS virtual private clouds
+description:
+ - Manage subnets in AWS virtual private clouds
+version_added: "2.0"
+author: Robert Estelle (@erydo)
+options:
+ az:
+ description:
+ - "The availability zone for the subnet. Only required when state=present."
+ required: false
+ default: null
+ cidr:
+ description:
+ - "The CIDR block for the subnet. E.g. 192.0.2.0/24. Only required when state=present."
+ required: false
+ default: null
+ tags:
+ description:
+ - "A dict of tags to apply to the subnet. Any tags currently applied to the subnet and not present here will be removed."
+ required: false
+ default: null
+ aliases: [ 'resource_tags' ]
+ state:
+ description:
+ - "Create or remove the subnet"
+ required: false
+ default: present
+ choices: [ 'present', 'absent' ]
+ vpc_id:
+ description:
+ - "VPC ID of the VPC in which to create the subnet."
+ required: false
+ default: null
+extends_documentation_fragment:
+ - aws
+ - ec2
+'''
+
+EXAMPLES = '''
+# Note: These examples do not set authentication details, see the AWS Guide for details.
+
+- name: Create subnet for database servers
+ ec2_vpc_subnet:
+ state: present
+ vpc_id: vpc-123456
+ cidr: 10.0.1.16/28
+ resource_tags:
+ Name: Database Subnet
+ register: database_subnet
+
+- name: Remove subnet for database servers
+ ec2_vpc_subnet:
+ state: absent
+ vpc_id: vpc-123456
+ cidr: 10.0.1.16/28
+
+'''
+
+import sys # noqa
+import time
+
+try:
+ import boto.ec2
+ import boto.vpc
+ from boto.exception import EC2ResponseError
+ HAS_BOTO = True
+except ImportError:
+ HAS_BOTO = False
+ if __name__ != '__main__':
+ raise
+
+
+class AnsibleVPCSubnetException(Exception):
+ pass
+
+
+class AnsibleVPCSubnetCreationException(AnsibleVPCSubnetException):
+ pass
+
+
+class AnsibleVPCSubnetDeletionException(AnsibleVPCSubnetException):
+ pass
+
+
+class AnsibleTagCreationException(AnsibleVPCSubnetException):
+ pass
+
+
+def get_subnet_info(subnet):
+
+ subnet_info = { 'id': subnet.id,
+ 'availability_zone': subnet.availability_zone,
+ 'available_ip_address_count': subnet.available_ip_address_count,
+ 'cidr_block': subnet.cidr_block,
+ 'default_for_az': subnet.defaultForAz,
+ 'map_public_ip_on_launch': subnet.mapPublicIpOnLaunch,
+ 'state': subnet.state,
+ 'tags': subnet.tags,
+ 'vpc_id': subnet.vpc_id
+ }
+
+ return subnet_info
+
+def subnet_exists(vpc_conn, subnet_id):
+ filters = {'subnet-id': subnet_id}
+ subnet = vpc_conn.get_all_subnets(filters=filters)
+ if subnet and subnet[0].state == "available":
+ return subnet[0]
+ else:
+ return False
+
+
+def create_subnet(vpc_conn, vpc_id, cidr, az, check_mode):
+ try:
+ new_subnet = vpc_conn.create_subnet(vpc_id, cidr, az, dry_run=check_mode)
+ # Sometimes AWS takes its time to create a subnet and so using
+ # the new subnet's id to do things like create tags results in an
+ # exception. boto doesn't seem to refresh the 'state' of the newly
+ # created subnet, i.e. it's always 'pending'.
+ subnet = False
+ while subnet is False:
+ subnet = subnet_exists(vpc_conn, new_subnet.id)
+ time.sleep(0.1)
+ except EC2ResponseError as e:
+ if e.error_code == "DryRunOperation":
+ subnet = None
+ else:
+ raise AnsibleVPCSubnetCreationException(
+ 'Unable to create subnet {0}, error: {1}'.format(cidr, e))
+
+ return subnet
+
+
+def get_resource_tags(vpc_conn, resource_id):
+ return dict((t.name, t.value) for t in
+ vpc_conn.get_all_tags(filters={'resource-id': resource_id}))
+
+
+def ensure_tags(vpc_conn, resource_id, tags, add_only, check_mode):
+ try:
+ cur_tags = get_resource_tags(vpc_conn, resource_id)
+ if cur_tags == tags:
+ return {'changed': False, 'tags': cur_tags}
+
+ to_delete = dict((k, cur_tags[k]) for k in cur_tags if k not in tags)
+ if to_delete and not add_only:
+ vpc_conn.delete_tags(resource_id, to_delete, dry_run=check_mode)
+
+ to_add = dict((k, tags[k]) for k in tags if k not in cur_tags or cur_tags[k] != tags[k])
+ if to_add:
+ vpc_conn.create_tags(resource_id, to_add, dry_run=check_mode)
+
+ latest_tags = get_resource_tags(vpc_conn, resource_id)
+ return {'changed': True, 'tags': latest_tags}
+ except EC2ResponseError as e:
+ raise AnsibleTagCreationException(
+ 'Unable to update tags for {0}, error: {1}'.format(resource_id, e))
+
+
+def get_matching_subnet(vpc_conn, vpc_id, cidr):
+ subnets = vpc_conn.get_all_subnets(filters={'vpc-id': vpc_id})
+ return next((s for s in subnets if s.cidr_block == cidr), None)
+
+
+def ensure_subnet_present(vpc_conn, vpc_id, cidr, az, tags, check_mode):
+ subnet = get_matching_subnet(vpc_conn, vpc_id, cidr)
+ changed = False
+ if subnet is None:
+ subnet = create_subnet(vpc_conn, vpc_id, cidr, az, check_mode)
+ changed = True
+ # Subnet will be None when check_mode is true
+ if subnet is None:
+ return {
+ 'changed': changed,
+ 'subnet': {}
+ }
+
+ if tags is not None and tags != subnet.tags:
+ ensure_tags(vpc_conn, subnet.id, tags, False, check_mode)
+ subnet.tags = tags
+ changed = True
+
+ subnet_info = get_subnet_info(subnet)
+
+ return {
+ 'changed': changed,
+ 'subnet': subnet_info
+ }
+
+
+def ensure_subnet_absent(vpc_conn, vpc_id, cidr, check_mode):
+ subnet = get_matching_subnet(vpc_conn, vpc_id, cidr)
+ if subnet is None:
+ return {'changed': False}
+
+ try:
+ vpc_conn.delete_subnet(subnet.id, dry_run=check_mode)
+ return {'changed': True}
+ except EC2ResponseError as e:
+ raise AnsibleVPCSubnetDeletionException(
+ 'Unable to delete subnet {0}, error: {1}'
+ .format(subnet.cidr_block, e))
+
+
+def main():
+ argument_spec = ec2_argument_spec()
+ argument_spec.update(
+ dict(
+ az=dict(default=None, required=False),
+ cidr=dict(required=True),
+ state=dict(default='present', choices=['present', 'absent']),
+ tags=dict(default=None, required=False, type='dict', aliases=['resource_tags']),
+ vpc_id=dict(required=True)
+ )
+ )
+
+ module = AnsibleModule(argument_spec=argument_spec, supports_check_mode=True)
+
+ if not HAS_BOTO:
+ module.fail_json(msg='boto is required for this module')
+
+ region, ec2_url, aws_connect_params = get_aws_connection_info(module)
+
+ if region:
+ try:
+ connection = connect_to_aws(boto.vpc, region, **aws_connect_params)
+ except (boto.exception.NoAuthHandlerFound, AnsibleAWSError) as e:
+ module.fail_json(msg=str(e))
+ else:
+ module.fail_json(msg="region must be specified")
+
+ vpc_id = module.params.get('vpc_id')
+ tags = module.params.get('tags')
+ cidr = module.params.get('cidr')
+ az = module.params.get('az')
+ state = module.params.get('state')
+
+ try:
+ if state == 'present':
+ result = ensure_subnet_present(connection, vpc_id, cidr, az, tags,
+ check_mode=module.check_mode)
+ elif state == 'absent':
+ result = ensure_subnet_absent(connection, vpc_id, cidr,
+ check_mode=module.check_mode)
+ except AnsibleVPCSubnetException as e:
+ module.fail_json(msg=str(e))
+
+ module.exit_json(**result)
+
+from ansible.module_utils.basic import * # noqa
+from ansible.module_utils.ec2 import * # noqa
+
+if __name__ == '__main__':
+ main()
diff --git a/lib/ansible/modules/extras/cloud/amazon/ec2_vpc_subnet_facts.py b/lib/ansible/modules/extras/cloud/amazon/ec2_vpc_subnet_facts.py
new file mode 100644
index 0000000000..c363729224
--- /dev/null
+++ b/lib/ansible/modules/extras/cloud/amazon/ec2_vpc_subnet_facts.py
@@ -0,0 +1,140 @@
+#!/usr/bin/python
+#
+# This is a free software: you can redistribute it and/or modify
+# it under the terms of the GNU General Public License as published by
+# the Free Software Foundation, either version 3 of the License, or
+# (at your option) any later version.
+#
+# This Ansible library is distributed in the hope that it will be useful,
+# but WITHOUT ANY WARRANTY; without even the implied warranty of
+# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+# GNU General Public License for more details.
+#
+# You should have received a copy of the GNU General Public License
+# along with this library. If not, see <http://www.gnu.org/licenses/>.
+
+DOCUMENTATION = '''
+---
+module: ec2_vpc_subnet_facts
+short_description: Gather facts about ec2 VPC subnets in AWS
+description:
+ - Gather facts about ec2 VPC subnets in AWS
+version_added: "2.1"
+author: "Rob White (@wimnat)"
+options:
+ filters:
+ description:
+ - A dict of filters to apply. Each dict item consists of a filter key and a filter value. See U(http://docs.aws.amazon.com/AWSEC2/latest/APIReference/API_DescribeSubnets.html) for possible filters.
+ required: false
+ default: null
+extends_documentation_fragment:
+ - aws
+ - ec2
+'''
+
+EXAMPLES = '''
+# Note: These examples do not set authentication details, see the AWS Guide for details.
+
+# Gather facts about all VPC subnets
+- ec2_vpc_subnet_facts:
+
+# Gather facts about a particular VPC subnet using ID
+- ec2_vpc_subnet_facts:
+ filters:
+ subnet-id: subnet-00112233
+
+# Gather facts about any VPC subnet with a tag key Name and value Example
+- ec2_vpc_subnet_facts:
+ filters:
+ "tag:Name": Example
+
+# Gather facts about any VPC subnet within VPC with ID vpc-abcdef00
+- ec2_vpc_subnet_facts:
+ filters:
+ vpc-id: vpc-abcdef00
+
+# Gather facts about a set of VPC subnets, publicA, publicB and publicC within a
+# VPC with ID vpc-abcdef00, register the results, and then use the Jinja2 map
+# filter to return the subnet_ids as a list.
+
+- ec2_vpc_subnet_facts:
+ filters:
+ vpc-id: vpc-abcdef00
+ "tag:Name": "{{ item }}"
+ with_items:
+ - publicA
+ - publicB
+ - publicC
+ register: subnet_facts
+
+- set_fact:
+ subnet_ids: "{{ subnet_facts.results|map(attribute='subnets.0.id')|list }}"
+'''
+
+try:
+ import boto.vpc
+ from boto.exception import BotoServerError
+ HAS_BOTO = True
+except ImportError:
+ HAS_BOTO = False
+
+def get_subnet_info(subnet):
+
+ subnet_info = { 'id': subnet.id,
+ 'availability_zone': subnet.availability_zone,
+ 'available_ip_address_count': subnet.available_ip_address_count,
+ 'cidr_block': subnet.cidr_block,
+ 'default_for_az': subnet.defaultForAz,
+ 'map_public_ip_on_launch': subnet.mapPublicIpOnLaunch,
+ 'state': subnet.state,
+ 'tags': subnet.tags,
+ 'vpc_id': subnet.vpc_id
+ }
+
+ return subnet_info
+
+def list_ec2_vpc_subnets(connection, module):
+
+ filters = module.params.get("filters")
+ subnet_dict_array = []
+
+ try:
+ all_subnets = connection.get_all_subnets(filters=filters)
+ except BotoServerError as e:
+ module.fail_json(msg=e.message)
+
+ for subnet in all_subnets:
+ subnet_dict_array.append(get_subnet_info(subnet))
+
+ module.exit_json(subnets=subnet_dict_array)
+
+
+def main():
+ argument_spec = ec2_argument_spec()
+ argument_spec.update(
+ dict(
+ filters=dict(default=None, type='dict')
+ )
+ )
+
+ module = AnsibleModule(argument_spec=argument_spec)
+
+ if not HAS_BOTO:
+ module.fail_json(msg='boto required for this module')
+
+ region, ec2_url, aws_connect_params = get_aws_connection_info(module)
+
+ if region:
+ try:
+ connection = connect_to_aws(boto.vpc, region, **aws_connect_params)
+ except (boto.exception.NoAuthHandlerFound, AnsibleAWSError) as e:
+ module.fail_json(msg=str(e))
+ else:
+ module.fail_json(msg="region must be specified")
+
+ list_ec2_vpc_subnets(connection, module)
+
+from ansible.module_utils.basic import *
+from ansible.module_utils.ec2 import *
+
+if __name__ == '__main__':
+ main()
diff --git a/lib/ansible/modules/extras/cloud/amazon/ec2_vpc_vgw.py b/lib/ansible/modules/extras/cloud/amazon/ec2_vpc_vgw.py
new file mode 100644
index 0000000000..c3e4d1f1ce
--- /dev/null
+++ b/lib/ansible/modules/extras/cloud/amazon/ec2_vpc_vgw.py
@@ -0,0 +1,598 @@
+#!/usr/bin/python
+# This file is part of Ansible
+#
+# Ansible is free software: you can redistribute it and/or modify
+# it under the terms of the GNU General Public License as published by
+# the Free Software Foundation, either version 3 of the License, or
+# (at your option) any later version.
+#
+# Ansible is distributed in the hope that it will be useful,
+# but WITHOUT ANY WARRANTY; without even the implied warranty of
+# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+# GNU General Public License for more details.
+#
+# You should have received a copy of the GNU General Public License
+# along with Ansible. If not, see <http://www.gnu.org/licenses/>.
+
+DOCUMENTATION = '''
+module: ec2_vpc_vgw
+short_description: Create and delete AWS VPN Virtual Gateways.
+description:
+ - Creates AWS VPN Virtual Gateways
+ - Deletes AWS VPN Virtual Gateways
+ - Attaches Virtual Gateways to VPCs
+ - Detaches Virtual Gateways from VPCs
+version_added: "2.2"
+requirements: [ boto3 ]
+options:
+ state:
+ description:
+ - present to ensure resource is created.
+ - absent to remove resource
+ required: false
+ default: present
+ choices: [ "present", "absent"]
+ name:
+ description:
+ - name of the vgw to be created or deleted
+ required: false
+ type:
+ description:
+ - type of the virtual gateway to be created
+ required: false
+ choices: [ "ipsec.1" ]
+ vpn_gateway_id:
+ description:
+ - vpn gateway id of an existing virtual gateway
+ required: false
+ vpc_id:
+ description:
+ - the vpc-id of a vpc to attach or detach
+ required: false
+ wait_timeout:
+ description:
+ - number of seconds to wait for status during vpc attach and detach
+ required: false
+ default: 320
+ tags:
+ description:
+ - dictionary of resource tags
+ required: false
+ default: null
+ aliases: [ "resource_tags" ]
+author: Nick Aslanidis (@naslanidis)
+extends_documentation_fragment:
+ - aws
+ - ec2
+'''
+
+EXAMPLES = '''
+- name: Create a new vgw attached to a specific VPC
+ ec2_vpc_vgw:
+ state: present
+ region: ap-southeast-2
+ profile: personal
+ vpc_id: vpc-12345678
+ name: personal-testing
+ type: ipsec.1
+ register: created_vgw
+
+- name: Create a new unattached vgw
+ ec2_vpc_vgw:
+ state: present
+ region: ap-southeast-2
+ profile: personal
+ name: personal-testing
+ type: ipsec.1
+ tags:
+ environment: production
+ owner: ABC
+ register: created_vgw
+
+- name: Remove a new vgw using the name
+ ec2_vpc_vgw:
+ state: absent
+ region: ap-southeast-2
+ profile: personal
+ name: personal-testing
+ type: ipsec.1
+ register: deleted_vgw
+
+- name: Remove a new vgw using the vpn_gateway_id
+ ec2_vpc_vgw:
+ state: absent
+ region: ap-southeast-2
+ profile: personal
+ vpn_gateway_id: vgw-3a9aa123
+ register: deleted_vgw
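+
+# A sketch (untested), based on the module's documented behavior: running
+# state=present with no vpc_id detaches an attached vgw. The name, region
+# and wait_timeout here are placeholders.
+- name: Detach a vgw from its VPC by omitting vpc_id
+ ec2_vpc_vgw:
+ state: present
+ region: ap-southeast-2
+ name: personal-testing
+ type: ipsec.1
+ wait_timeout: 120
+ register: detached_vgw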
+'''
+
+RETURN = '''
+result:
+ description: The result of the create or delete action. On creation this is a dict of the new gateway's id, type, state, vpc_id and tags; on deletion it is the deleted VpnGatewayId.
+ returned: success
+ type: dict
+'''
+
+try:
+ import json
+ import time
+ import botocore
+ import boto3
+ HAS_BOTO3 = True
+except ImportError:
+ HAS_BOTO3 = False
+
+def get_vgw_info(vgws):
+ if not isinstance(vgws, list):
+ return
+
+ for vgw in vgws:
+ vgw_info = {
+ 'id': vgw['VpnGatewayId'],
+ 'type': vgw['Type'],
+ 'state': vgw['State'],
+ 'vpc_id': None,
+ 'tags': dict()
+ }
+
+ for tag in vgw['Tags']:
+ vgw_info['tags'][tag['Key']] = tag['Value']
+
+ if len(vgw['VpcAttachments']) != 0 and vgw['VpcAttachments'][0]['State'] == 'attached':
+ vgw_info['vpc_id'] = vgw['VpcAttachments'][0]['VpcId']
+
+ return vgw_info
+
+def wait_for_status(client, module, vpn_gateway_id, status):
+ polling_increment_secs = 15
+ max_retries = module.params.get('wait_timeout') // polling_increment_secs
+ status_achieved = False
+
+ for x in range(0, max_retries):
+ try:
+ response = find_vgw(client, module, vpn_gateway_id)
+ if response[0]['VpcAttachments'][0]['State'] == status:
+ status_achieved = True
+ break
+ else:
+ time.sleep(polling_increment_secs)
+ except botocore.exceptions.ClientError:
+ e = get_exception()
+ module.fail_json(msg=str(e))
+
+ result = response
+ return status_achieved, result
+
+
+def attach_vgw(client, module, vpn_gateway_id):
+ params = dict()
+ params['VpcId'] = module.params.get('vpc_id')
+
+ try:
+ response = client.attach_vpn_gateway(VpnGatewayId=vpn_gateway_id, VpcId=params['VpcId'])
+ except botocore.exceptions.ClientError:
+ e = get_exception()
+ module.fail_json(msg=str(e))
+
+ status_achieved, vgw = wait_for_status(client, module, [vpn_gateway_id], 'attached')
+ if not status_achieved:
+ module.fail_json(msg='Error waiting for vpc to attach to vgw - please check the AWS console')
+
+ result = response
+ return result
+
+
+def detach_vgw(client, module, vpn_gateway_id, vpc_id=None):
+ params = dict()
+ params['VpcId'] = module.params.get('vpc_id')
+
+ if vpc_id:
+ try:
+ response = client.detach_vpn_gateway(VpnGatewayId=vpn_gateway_id, VpcId=vpc_id)
+ except botocore.exceptions.ClientError:
+ e = get_exception()
+ module.fail_json(msg=str(e))
+ else:
+ try:
+ response = client.detach_vpn_gateway(VpnGatewayId=vpn_gateway_id, VpcId=params['VpcId'])
+ except botocore.exceptions.ClientError:
+ e = get_exception()
+ module.fail_json(msg=str(e))
+
+ status_achieved, vgw = wait_for_status(client, module, [vpn_gateway_id], 'detached')
+ if not status_achieved:
+ module.fail_json(msg='Error waiting for vpc to detach from vgw - please check the AWS console')
+
+ result = response
+ return result
+
+
+def create_vgw(client, module):
+ params = dict()
+ params['Type'] = module.params.get('type')
+
+ try:
+ response = client.create_vpn_gateway(Type=params['Type'])
+ except botocore.exceptions.ClientError:
+ e = get_exception()
+ module.fail_json(msg=str(e))
+
+ result = response
+ return result
+
+
+def delete_vgw(client, module, vpn_gateway_id):
+
+ try:
+ response = client.delete_vpn_gateway(VpnGatewayId=vpn_gateway_id)
+ except botocore.exceptions.ClientError:
+ e = get_exception()
+ module.fail_json(msg=str(e))
+
+ #return the deleted VpnGatewayId as this is not included in the above response
+ result = vpn_gateway_id
+ return result
+
+
+def create_tags(client, module, vpn_gateway_id):
+ params = dict()
+
+ try:
+ response = client.create_tags(Resources=[vpn_gateway_id], Tags=load_tags(module))
+ except botocore.exceptions.ClientError:
+ e = get_exception()
+ module.fail_json(msg=str(e))
+
+ result = response
+ return result
+
+
+def delete_tags(client, module, vpn_gateway_id, tags_to_delete=None):
+ params = dict()
+
+ if tags_to_delete:
+ try:
+ response = client.delete_tags(Resources=[vpn_gateway_id], Tags=tags_to_delete)
+ except botocore.exceptions.ClientError:
+ e = get_exception()
+ module.fail_json(msg=str(e))
+ else:
+ try:
+ response = client.delete_tags(Resources=[vpn_gateway_id])
+ except botocore.exceptions.ClientError:
+ e = get_exception()
+ module.fail_json(msg=str(e))
+
+ result = response
+ return result
+
+
+def load_tags(module):
+ tags = []
+
+ if module.params.get('tags'):
+ for name, value in module.params.get('tags').items():
+ tags.append({'Key': name, 'Value': str(value)})
+ # the Name tag is always applied, whether or not other tags were given
+ tags.append({'Key': "Name", 'Value': module.params.get('name')})
+ return tags
+
+
+def find_tags(client, module, resource_id=None):
+
+ if resource_id:
+ try:
+ response = client.describe_tags(Filters=[
+ {'Name': 'resource-id', 'Values': [resource_id]}
+ ])
+ except botocore.exceptions.ClientError:
+ e = get_exception()
+ module.fail_json(msg=str(e))
+
+ result = response
+ return result
+
+
+def check_tags(client, module, existing_vgw, vpn_gateway_id):
+ params = dict()
+ params['Tags'] = module.params.get('tags')
+ vgw = existing_vgw
+ changed = False
+ tags_list = {}
+
+ #format tags for comparison
+ for tags in existing_vgw[0]['Tags']:
+ if tags['Key'] != 'Name':
+ tags_list[tags['Key']] = tags['Value']
+
+ # if existing tags don't match the tags arg, delete existing and recreate with new list
+ if params['Tags'] is not None and tags_list != params['Tags']:
+ delete_tags(client, module, vpn_gateway_id)
+ create_tags(client, module, vpn_gateway_id)
+ vgw = find_vgw(client, module)
+ changed = True
+
+ # if no tag args are supplied, delete any existing tags with the exception of the name tag
+ if params['Tags'] is None and tags_list != {}:
+ tags_to_delete = []
+ for tags in existing_vgw[0]['Tags']:
+ if tags['Key'] != 'Name':
+ tags_to_delete.append(tags)
+
+ delete_tags(client, module, vpn_gateway_id, tags_to_delete)
+ vgw = find_vgw(client, module)
+ changed = True
+
+ return vgw, changed
+
+
+def find_vpc(client, module):
+ params = dict()
+ params['vpc_id'] = module.params.get('vpc_id')
+
+ if params['vpc_id']:
+ try:
+ response = client.describe_vpcs(VpcIds=[params['vpc_id']])
+ except botocore.exceptions.ClientError:
+ e = get_exception()
+ module.fail_json(msg=str(e))
+
+ result = response
+ return result
+
+
+def find_vgw(client, module, vpn_gateway_id=None):
+ params = dict()
+ params['Name'] = module.params.get('name')
+ params['Type'] = module.params.get('type')
+ params['State'] = module.params.get('state')
+
+ if params['State'] == 'present':
+ try:
+ response = client.describe_vpn_gateways(Filters=[
+ {'Name': 'type', 'Values': [params['Type']]},
+ {'Name': 'tag:Name', 'Values': [params['Name']]}
+ ])
+ except botocore.exceptions.ClientError:
+ e = get_exception()
+ module.fail_json(msg=str(e))
+
+ else:
+ if vpn_gateway_id:
+ try:
+ response = client.describe_vpn_gateways(VpnGatewayIds=vpn_gateway_id)
+ except botocore.exceptions.ClientError:
+ e = get_exception()
+ module.fail_json(msg=str(e))
+
+ else:
+ try:
+ response = client.describe_vpn_gateways(Filters=[
+ {'Name': 'type', 'Values': [params['Type']]},
+ {'Name': 'tag:Name', 'Values': [params['Name']]}
+ ])
+ except botocore.exceptions.ClientError:
+ e = get_exception()
+ module.fail_json(msg=str(e))
+
+ result = response['VpnGateways']
+ return result
+
+
+def ensure_vgw_present(client, module):
+
+ # If an existing vgw's name and type match our args, then a match is
+ # considered to have been found and we will not create another vgw.
+
+ changed = False
+ params = dict()
+ result = dict()
+ params['Name'] = module.params.get('name')
+ params['VpcId'] = module.params.get('vpc_id')
+ params['Type'] = module.params.get('type')
+ params['Tags'] = module.params.get('tags')
+ params['VpnGatewayIds'] = module.params.get('vpn_gateway_id')
+
+ # Check that a name argument has been supplied.
+ if not module.params.get('name'):
+ module.fail_json(msg='A name is required when a state of \'present\' is supplied')
+
+ # check if a gateway matching our module args already exists
+ existing_vgw = find_vgw(client, module)
+
+ if existing_vgw != [] and existing_vgw[0]['State'] != 'deleted':
+ vpn_gateway_id = existing_vgw[0]['VpnGatewayId']
+ vgw, changed = check_tags(client, module, existing_vgw, vpn_gateway_id)
+
+ # if a vpc_id was provided, check if it exists and if it's attached
+ if params['VpcId']:
+
+ # check that the vpc_id exists. If not, an exception is thrown
+ vpc = find_vpc(client, module)
+ current_vpc_attachments = existing_vgw[0]['VpcAttachments']
+
+ if current_vpc_attachments != [] and current_vpc_attachments[0]['State'] == 'attached':
+ if current_vpc_attachments[0]['VpcId'] == params['VpcId'] and current_vpc_attachments[0]['State'] == 'attached':
+ changed = False
+ else:
+
+ # detach the existing vpc from the virtual gateway
+ vpc_to_detach = current_vpc_attachments[0]['VpcId']
+ detach_vgw(client, module, vpn_gateway_id, vpc_to_detach)
+ time.sleep(5)
+ attached_vgw = attach_vgw(client, module, vpn_gateway_id)
+ vgw = find_vgw(client, module, [vpn_gateway_id])
+ changed = True
+ else:
+ # attach the vgw to the supplied vpc
+ attached_vgw = attach_vgw(client, module, vpn_gateway_id)
+ vgw = find_vgw(client, module, [vpn_gateway_id])
+ changed = True
+
+ # if params['VpcId'] is not provided, check the vgw is attached to a vpc. if so, detach it.
+ else:
+ existing_vgw = find_vgw(client, module, [vpn_gateway_id])
+
+ if existing_vgw[0]['VpcAttachments'] != []:
+ if existing_vgw[0]['VpcAttachments'][0]['State'] == 'attached':
+ # detach the vpc from the vgw
+ vpc_to_detach = existing_vgw[0]['VpcAttachments'][0]['VpcId']
+ detach_vgw(client, module, vpn_gateway_id, vpc_to_detach)
+ changed = True
+
+ vgw = find_vgw(client, module, [vpn_gateway_id])
+
+ else:
+ # create a new vgw
+ new_vgw = create_vgw(client, module)
+ changed = True
+ vpn_gateway_id = new_vgw['VpnGateway']['VpnGatewayId']
+
+ # tag the new virtual gateway
+ create_tags(client, module, vpn_gateway_id)
+
+ # return current state of the vgw
+ vgw = find_vgw(client, module, [vpn_gateway_id])
+
+ # if a vpc-id was supplied, attempt to attach it to the vgw
+ if params['VpcId']:
+ attached_vgw = attach_vgw(client, module, vpn_gateway_id)
+ changed = True
+ vgw = find_vgw(client, module, [vpn_gateway_id])
+
+ result = get_vgw_info(vgw)
+ return changed, result
+
+
+def ensure_vgw_absent(client, module):
+
+ # If an existing vgw's name and type match our args, then a match is
+ # considered to have been found and we will take steps to delete it.
+
+ changed = False
+ params = dict()
+ result = dict()
+ params['Name'] = module.params.get('name')
+ params['VpcId'] = module.params.get('vpc_id')
+ params['Type'] = module.params.get('type')
+ params['Tags'] = module.params.get('tags')
+ params['VpnGatewayIds'] = module.params.get('vpn_gateway_id')
+
+ # check if a gateway matching our module args already exists
+ if params['VpnGatewayIds']:
+ existing_vgw_with_id = find_vgw(client, module, [params['VpnGatewayIds']])
+ if existing_vgw_with_id != [] and existing_vgw_with_id[0]['State'] != 'deleted':
+ existing_vgw = existing_vgw_with_id
+ if existing_vgw[0]['VpcAttachments'] != [] and existing_vgw[0]['VpcAttachments'][0]['State'] == 'attached':
+ if params['VpcId']:
+ if params['VpcId'] != existing_vgw[0]['VpcAttachments'][0]['VpcId']:
+ module.fail_json(msg='The vpc-id provided does not match the vpc-id currently attached - please check the AWS console')
+
+ else:
+ # detach the vpc from the vgw
+ detach_vgw(client, module, params['VpnGatewayIds'], params['VpcId'])
+ deleted_vgw = delete_vgw(client, module, params['VpnGatewayIds'])
+ changed = True
+
+ else:
+ # attempt to detach any attached vpcs
+ vpc_to_detach = existing_vgw[0]['VpcAttachments'][0]['VpcId']
+ detach_vgw(client, module, params['VpnGatewayIds'], vpc_to_detach)
+ deleted_vgw = delete_vgw(client, module, params['VpnGatewayIds'])
+ changed = True
+
+ else:
+ # no vpc's are attached so attempt to delete the vgw
+ deleted_vgw = delete_vgw(client, module, params['VpnGatewayIds'])
+ changed = True
+
+ else:
+ changed = False
+ deleted_vgw = "Nothing to do"
+
+ else:
+ # Check that a name and type argument has been supplied if no vgw-id
+ if not module.params.get('name') or not module.params.get('type'):
+ module.fail_json(msg='A name and type are required when no vpn_gateway_id is supplied and state is \'absent\'')
+
+ existing_vgw = find_vgw(client, module)
+ if existing_vgw != [] and existing_vgw[0]['State'] != 'deleted':
+ vpn_gateway_id = existing_vgw[0]['VpnGatewayId']
+ if existing_vgw[0]['VpcAttachments'] != [] and existing_vgw[0]['VpcAttachments'][0]['State'] == 'attached':
+ if params['VpcId']:
+ if params['VpcId'] != existing_vgw[0]['VpcAttachments'][0]['VpcId']:
+ module.fail_json(msg='The vpc-id provided does not match the vpc-id currently attached - please check the AWS console')
+
+ else:
+ # detach the vpc from the vgw
+ detach_vgw(client, module, vpn_gateway_id, params['VpcId'])
+
+ #now that the vpc has been detached, delete the vgw
+ deleted_vgw = delete_vgw(client, module, vpn_gateway_id)
+ changed = True
+
+ else:
+ # attempt to detach any attached vpcs
+ vpc_to_detach = existing_vgw[0]['VpcAttachments'][0]['VpcId']
+ detach_vgw(client, module, vpn_gateway_id, vpc_to_detach)
+ changed = True
+
+ #now that the vpc has been detached, delete the vgw
+ deleted_vgw = delete_vgw(client, module, vpn_gateway_id)
+
+ else:
+ # no vpc's are attached so attempt to delete the vgw
+ deleted_vgw = delete_vgw(client, module, vpn_gateway_id)
+ changed = True
+
+ else:
+ changed = False
+ deleted_vgw = None
+
+ result = deleted_vgw
+ return changed, result
+
+
+def main():
+ argument_spec = ec2_argument_spec()
+ argument_spec.update(dict(
+ state=dict(default='present', choices=['present', 'absent']),
+ region=dict(required=True),
+ name=dict(),
+ vpn_gateway_id=dict(),
+ vpc_id=dict(),
+ wait_timeout=dict(type='int', default=320),
+ type=dict(default='ipsec.1', choices=['ipsec.1']),
+ tags=dict(default=None, required=False, type='dict', aliases=['resource_tags']),
+ )
+ )
+ module = AnsibleModule(argument_spec=argument_spec)
+
+ if not HAS_BOTO3:
+ module.fail_json(msg='json and boto3 are required.')
+
+ state = module.params.get('state').lower()
+
+ try:
+ region, ec2_url, aws_connect_kwargs = get_aws_connection_info(module, boto3=True)
+ client = boto3_conn(module, conn_type='client', resource='ec2', region=region, endpoint=ec2_url, **aws_connect_kwargs)
+ except botocore.exceptions.NoCredentialsError:
+ e = get_exception()
+ module.fail_json(msg="Can't authorize connection - "+str(e))
+
+ if state == 'present':
+ (changed, results) = ensure_vgw_present(client, module)
+ else:
+ (changed, results) = ensure_vgw_absent(client, module)
+ module.exit_json(changed=changed, vgw=results)
+
+
+# import module snippets
+from ansible.module_utils.basic import *
+from ansible.module_utils.ec2 import *
+
+if __name__ == '__main__':
+ main()
+
diff --git a/lib/ansible/modules/extras/cloud/amazon/ec2_win_password.py b/lib/ansible/modules/extras/cloud/amazon/ec2_win_password.py
new file mode 100644
index 0000000000..e0f6205f3b
--- /dev/null
+++ b/lib/ansible/modules/extras/cloud/amazon/ec2_win_password.py
@@ -0,0 +1,175 @@
+#!/usr/bin/python
+#
+# This is a free software: you can redistribute it and/or modify
+# it under the terms of the GNU General Public License as published by
+# the Free Software Foundation, either version 3 of the License, or
+# (at your option) any later version.
+#
+# This Ansible library is distributed in the hope that it will be useful,
+# but WITHOUT ANY WARRANTY; without even the implied warranty of
+# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+# GNU General Public License for more details.
+#
+# You should have received a copy of the GNU General Public License
+# along with this library. If not, see <http://www.gnu.org/licenses/>.
+
+DOCUMENTATION = '''
+---
+module: ec2_win_password
+short_description: gets the default administrator password for ec2 windows instances
+description:
+ - Gets the default administrator password from any EC2 Windows instance. The instance is referenced by its id (e.g. i-XXXXXXX). This module has a dependency on python-boto.
+version_added: "2.0"
+author: "Rick Mendes (@rickmendes)"
+options:
+ instance_id:
+ description:
+ - The instance id to get the password data from.
+ required: true
+ key_file:
+ description:
+ - Path to the file containing the key pair used on the instance.
+ required: true
+ key_passphrase:
+ version_added: "2.0"
+ description:
+ - The passphrase for the instance key pair. The key must use DES or 3DES encryption for this module to decrypt it. You can use openssl to convert your password-protected keys if they do not use DES or 3DES, e.g. openssl rsa -in current_key -out new_key -des3.
+ required: false
+ default: null
+ wait:
+ version_added: "2.0"
+ description:
+ - Whether or not to wait for the password to be available before returning.
+ required: false
+ default: "no"
+ choices: [ "yes", "no" ]
+ wait_timeout:
+ version_added: "2.0"
+ description:
+ - Number of seconds to wait before giving up.
+ required: false
+ default: 120
+
+extends_documentation_fragment:
+ - aws
+ - ec2
+'''
+
+EXAMPLES = '''
+# Example of getting a password
+tasks:
+- name: get the Administrator password
+ ec2_win_password:
+ profile: my-boto-profile
+ instance_id: i-XXXXXX
+ region: us-east-1
+ key_file: "~/aws-creds/my_test_key.pem"
+
+# Example of getting a password with a password protected key
+tasks:
+- name: get the Administrator password
+ ec2_win_password:
+ profile: my-boto-profile
+ instance_id: i-XXXXXX
+ region: us-east-1
+ key_file: "~/aws-creds/my_protected_test_key.pem"
+ key_passphrase: "secret"
+
+# Example of waiting for a password
+tasks:
+- name: get the Administrator password
+ ec2_win_password:
+ profile: my-boto-profile
+ instance_id: i-XXXXXX
+ region: us-east-1
+ key_file: "~/aws-creds/my_test_key.pem"
+ wait: yes
+ wait_timeout: 45
+'''
+
+from base64 import b64decode
+from os.path import expanduser
+from Crypto.Cipher import PKCS1_v1_5
+from Crypto.PublicKey import RSA
+import datetime
+import time
+
+try:
+ import boto.ec2
+ HAS_BOTO = True
+except ImportError:
+ HAS_BOTO = False
+
+def main():
+ argument_spec = ec2_argument_spec()
+ argument_spec.update(dict(
+ instance_id=dict(required=True),
+ key_file=dict(required=True),
+ key_passphrase=dict(no_log=True, default=None, required=False),
+ wait=dict(type='bool', default=False, required=False),
+ wait_timeout=dict(default=120, required=False),
+ )
+ )
+ module = AnsibleModule(argument_spec=argument_spec)
+
+ if not HAS_BOTO:
+ module.fail_json(msg='Boto required for this module.')
+
+ instance_id = module.params.get('instance_id')
+ key_file = expanduser(module.params.get('key_file'))
+ key_passphrase = module.params.get('key_passphrase')
+ wait = module.params.get('wait')
+ wait_timeout = int(module.params.get('wait_timeout'))
+
+ ec2 = ec2_connect(module)
+
+ if wait:
+ start = datetime.datetime.now()
+ end = start + datetime.timedelta(seconds=wait_timeout)
+
+ while datetime.datetime.now() < end:
+ data = ec2.get_password_data(instance_id)
+ decoded = b64decode(data)
+ if not decoded:
+ time.sleep(5)
+ else:
+ break
+ else:
+ data = ec2.get_password_data(instance_id)
+ decoded = b64decode(data)
+
+ if wait and datetime.datetime.now() >= end:
+ module.fail_json(msg = "wait for password timeout after %d seconds" % wait_timeout)
+
+ try:
+ f = open(key_file, 'r')
+ except IOError as e:
+ module.fail_json(msg = "I/O error (%d) opening key file: %s" % (e.errno, e.strerror))
+ else:
+ try:
+ with f:
+ key = RSA.importKey(f.read(), key_passphrase)
+ except (ValueError, IndexError, TypeError) as e:
+ module.fail_json(msg = "unable to parse key file")
+
+ cipher = PKCS1_v1_5.new(key)
+ sentinel = 'password decryption failed!!!'
+
+ try:
+ decrypted = cipher.decrypt(decoded, sentinel)
+ except ValueError as e:
+ decrypted = None
+
+ if decrypted is None:
+ module.exit_json(win_password='', changed=False)
+ else:
+ if wait:
+ elapsed = datetime.datetime.now() - start
+ module.exit_json(win_password=decrypted, changed=True, elapsed=elapsed.seconds)
+ else:
+ module.exit_json(win_password=decrypted, changed=True)
+
+# import module snippets
+from ansible.module_utils.basic import *
+from ansible.module_utils.ec2 import *
+
+if __name__ == '__main__':
+ main()
diff --git a/lib/ansible/modules/extras/cloud/amazon/ecs_cluster.py b/lib/ansible/modules/extras/cloud/amazon/ecs_cluster.py
new file mode 100644
index 0000000000..22049a9f3c
--- /dev/null
+++ b/lib/ansible/modules/extras/cloud/amazon/ecs_cluster.py
@@ -0,0 +1,238 @@
+#!/usr/bin/python
+# This file is part of Ansible
+#
+# Ansible is free software: you can redistribute it and/or modify
+# it under the terms of the GNU General Public License as published by
+# the Free Software Foundation, either version 3 of the License, or
+# (at your option) any later version.
+#
+# Ansible is distributed in the hope that it will be useful,
+# but WITHOUT ANY WARRANTY; without even the implied warranty of
+# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+# GNU General Public License for more details.
+#
+# You should have received a copy of the GNU General Public License
+# along with Ansible. If not, see <http://www.gnu.org/licenses/>.
+
+DOCUMENTATION = '''
+---
+module: ecs_cluster
+short_description: create or terminate ecs clusters
+notes:
+ - When deleting a cluster, the information returned is the state of the cluster prior to deletion.
+ - It will also wait for a cluster to have instances registered to it.
+description:
+ - Creates or terminates ecs clusters.
+version_added: "2.0"
+author: Mark Chance (@Java1Guy)
+requirements: [ boto, boto3 ]
+options:
+ state:
+ description:
+ - The desired state of the cluster
+ required: true
+ choices: ['present', 'absent', 'has_instances']
+ name:
+ description:
+ - The cluster name
+ required: true
+ delay:
+ description:
+ - Number of seconds to wait
+ required: false
+ repeat:
+ description:
+ - The number of times to wait for the cluster to have an instance
+ required: false
+extends_documentation_fragment:
+ - aws
+ - ec2
+'''
+
+EXAMPLES = '''
+# Note: These examples do not set authentication details, see the AWS Guide for details.
+
+# Cluster creation
+- ecs_cluster:
+ name: default
+ state: present
+
+# Cluster deletion
+- ecs_cluster:
+ name: default
+ state: absent
+
+- name: Wait for register
+ ecs_cluster:
+ name: "{{ new_cluster }}"
+ state: has_instances
+ delay: 10
+ repeat: 10
+ register: task_output
+
+'''
+RETURN = '''
+activeServicesCount:
+ description: how many services are active in this cluster
+ returned: 0 if a new cluster
+ type: int
+clusterArn:
+ description: the ARN of the cluster just created
+ type: string
+ sample: arn:aws:ecs:us-west-2:172139249013:cluster/test-cluster-mfshcdok
+clusterName:
+ description: name of the cluster just created (should match the input argument)
+ type: string
+ sample: test-cluster-mfshcdok
+pendingTasksCount:
+ description: how many tasks are waiting to run in this cluster
+ returned: 0 if a new cluster
+ type: int
+registeredContainerInstancesCount:
+ description: how many container instances are available in this cluster
+ returned: 0 if a new cluster
+ type: int
+runningTasksCount:
+ description: how many tasks are running in this cluster
+ returned: 0 if a new cluster
+ type: int
+status:
+ description: the status of the new cluster
+ returned: ACTIVE
+ type: string
+'''
+import time
+
+try:
+ import boto
+ HAS_BOTO = True
+except ImportError:
+ HAS_BOTO = False
+
+try:
+ import boto3
+ HAS_BOTO3 = True
+except ImportError:
+ HAS_BOTO3 = False
+
+class EcsClusterManager:
+ """Handles ECS Clusters"""
+
+ def __init__(self, module):
+ self.module = module
+
+ try:
+ # self.ecs = boto3.client('ecs')
+ region, ec2_url, aws_connect_kwargs = get_aws_connection_info(module, boto3=True)
+ if not region:
+ module.fail_json(msg="Region must be specified as a parameter, in EC2_REGION or AWS_REGION environment variables or in boto configuration file")
+ self.ecs = boto3_conn(module, conn_type='client', resource='ecs', region=region, endpoint=ec2_url, **aws_connect_kwargs)
+ except boto.exception.NoAuthHandlerFound as e:
+ self.module.fail_json(msg="Can't authorize connection - "+str(e))
+
+ def find_in_array(self, array_of_clusters, cluster_name, field_name='clusterArn'):
+ for c in array_of_clusters:
+ if c[field_name].endswith(cluster_name):
+ return c
+ return None
+
+ def describe_cluster(self, cluster_name):
+ response = self.ecs.describe_clusters(clusters=[
+ cluster_name
+ ])
+ if len(response['failures'])>0:
+ c = self.find_in_array(response['failures'], cluster_name, 'arn')
+ if c and c['reason']=='MISSING':
+ return None
+ # fall thru and look through found ones
+ if len(response['clusters'])>0:
+ c = self.find_in_array(response['clusters'], cluster_name)
+ if c:
+ return c
+ raise Exception("Unknown problem describing cluster %s." % cluster_name)
+
+ def create_cluster(self, clusterName = 'default'):
+ response = self.ecs.create_cluster(clusterName=clusterName)
+ return response['cluster']
+
+ def delete_cluster(self, clusterName):
+ return self.ecs.delete_cluster(cluster=clusterName)
+
+def main():
+
+ argument_spec = ec2_argument_spec()
+ argument_spec.update(dict(
+ state=dict(required=True, choices=['present', 'absent', 'has_instances'] ),
+ name=dict(required=True, type='str' ),
+ delay=dict(required=False, type='int', default=10),
+ repeat=dict(required=False, type='int', default=10)
+ ))
+ required_together = [['state', 'name']]
+
+ module = AnsibleModule(argument_spec=argument_spec, supports_check_mode=True, required_together=required_together)
+
+ if not HAS_BOTO:
+ module.fail_json(msg='boto is required.')
+
+ if not HAS_BOTO3:
+ module.fail_json(msg='boto3 is required.')
+
+ cluster_mgr = EcsClusterManager(module)
+ try:
+ existing = cluster_mgr.describe_cluster(module.params['name'])
+ except Exception as e:
+ module.fail_json(msg="Exception describing cluster '"+module.params['name']+"': "+str(e))
+
+ results = dict(changed=False)
+ if module.params['state'] == 'present':
+ if existing and 'status' in existing and existing['status']=="ACTIVE":
+ results['cluster']=existing
+ else:
+ if not module.check_mode:
+ # doesn't exist. create it.
+ results['cluster'] = cluster_mgr.create_cluster(module.params['name'])
+ results['changed'] = True
+
+ # delete the cluster
+ elif module.params['state'] == 'absent':
+ if not existing:
+ pass
+ else:
+ # it exists, so we should delete it and mark changed.
+ # return info about the cluster deleted
+ results['cluster'] = existing
+ if 'status' in existing and existing['status']=="INACTIVE":
+ results['changed'] = False
+ else:
+ if not module.check_mode:
+ cluster_mgr.delete_cluster(module.params['name'])
+ results['changed'] = True
+ elif module.params['state'] == 'has_instances':
+ if not existing:
+ module.fail_json(msg="Cluster '"+module.params['name']+" not found.")
+ return
+ # it exists, so we should delete it and mark changed.
+ # return info about the cluster deleted
+ delay = module.params['delay']
+ repeat = module.params['repeat']
+ time.sleep(delay)
+ count = 0
+ for i in range(repeat):
+ existing = cluster_mgr.describe_cluster(module.params['name'])
+ count = existing['registeredContainerInstancesCount']
+ if count > 0:
+ results['changed'] = True
+ break
+ time.sleep(delay)
+ if count == 0 and i == repeat - 1:
+ module.fail_json(msg="Cluster instance count still zero after "+str(repeat)+" tries of "+str(delay)+" seconds each.")
+ return
+
+ module.exit_json(**results)
+
+# import module snippets
+from ansible.module_utils.basic import *
+from ansible.module_utils.ec2 import *
+
+if __name__ == '__main__':
+ main()
diff --git a/lib/ansible/modules/extras/cloud/amazon/ecs_service.py b/lib/ansible/modules/extras/cloud/amazon/ecs_service.py
new file mode 100644
index 0000000000..94d7078c82
--- /dev/null
+++ b/lib/ansible/modules/extras/cloud/amazon/ecs_service.py
@@ -0,0 +1,426 @@
+#!/usr/bin/python
+# This file is part of Ansible
+#
+# Ansible is free software: you can redistribute it and/or modify
+# it under the terms of the GNU General Public License as published by
+# the Free Software Foundation, either version 3 of the License, or
+# (at your option) any later version.
+#
+# Ansible is distributed in the hope that it will be useful,
+# but WITHOUT ANY WARRANTY; without even the implied warranty of
+# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+# GNU General Public License for more details.
+#
+# You should have received a copy of the GNU General Public License
+# along with Ansible. If not, see <http://www.gnu.org/licenses/>.
+
+DOCUMENTATION = '''
+---
+module: ecs_service
+short_description: create, terminate, start or stop a service in ecs
+description:
+ - Creates or terminates ecs services.
+notes:
+ - the service role specified must be assumable (i.e. have a trust relationship for the ecs service, ecs.amazonaws.com)
+ - for details of the parameters and returns see U(http://boto3.readthedocs.org/en/latest/reference/services/ecs.html)
+dependencies:
+ - An IAM role must have been created
+version_added: "2.1"
+author:
+ - "Mark Chance (@java1guy)"
+ - "Darek Kaczynski (@kaczynskid)"
+requirements: [ json, boto, botocore, boto3 ]
+options:
+ state:
+ description:
+ - The desired state of the service
+ required: true
+ choices: ["present", "absent", "deleting"]
+ name:
+ description:
+ - The name of the service
+ required: true
+ cluster:
+ description:
+ - The name of the cluster in which the service exists
+ required: false
+ task_definition:
+ description:
+ - The task definition the service will run
+ required: false
+ load_balancers:
+ description:
+ - The list of ELBs defined for this service
+ required: false
+
+ desired_count:
+ description:
+ - The count of how many instances of the service
+ required: false
+ client_token:
+ description:
+ - Unique, case-sensitive identifier you provide to ensure the idempotency of the request. Up to 32 ASCII characters are allowed.
+ required: false
+ role:
+ description:
+ - The name or full Amazon Resource Name (ARN) of the IAM role that allows your Amazon ECS container agent to make calls to your load balancer on your behalf. This parameter is only required if you are using a load balancer with your service.
+ required: false
+ delay:
+ description:
+ - The time to wait before checking that the service is available
+ required: false
+ default: 10
+ repeat:
+ description:
+ - The number of times to check that the service is available
+ required: false
+ default: 10
+extends_documentation_fragment:
+ - aws
+ - ec2
+'''
+
+EXAMPLES = '''
+# Note: These examples do not set authentication details, see the AWS Guide for details.
+- ecs_service:
+ state: present
+ name: console-test-service
+ cluster: new_cluster
+ task_definition: "new_cluster-task:1"
+ desired_count: 0
+
+# Basic provisioning example
+- ecs_service:
+ name: default
+ state: present
+ cluster: new_cluster
+
+# Simple example to delete
+- ecs_service:
+ name: default
+ state: absent
+ cluster: new_cluster
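+
+# A sketch (untested) of a service behind an ELB; the load balancer,
+# container and role names are placeholders. A role is only needed when
+# load_balancers are given, and it must be assumable by ecs.amazonaws.com.
+- ecs_service:
+ state: present
+ name: console-test-service
+ cluster: new_cluster
+ task_definition: "new_cluster-task:1"
+ desired_count: 2
+ role: ecsServiceRole
+ load_balancers:
+ - loadBalancerName: test-elb
+ containerName: console-test
+ containerPort: 8080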
+'''
+
+RETURN = '''
+service:
+ description: Details of created service.
+ returned: when creating a service
+ type: complex
+ contains:
+ clusterArn:
+ description: The Amazon Resource Name (ARN) of the of the cluster that hosts the service.
+ returned: always
+ type: string
+ desiredCount:
+ description: The desired number of instantiations of the task definition to keep running on the service.
+ returned: always
+ type: int
+ loadBalancers:
+ description: A list of load balancer objects
+ returned: always
+ type: complex
+ contains:
+ loadBalancerName:
+ description: the name
+ returned: always
+ type: string
+ containerName:
+ description: The name of the container to associate with the load balancer.
+ returned: always
+ type: string
+ containerPort:
+ description: The port on the container to associate with the load balancer.
+ returned: always
+ type: int
+ pendingCount:
+ description: The number of tasks in the cluster that are in the PENDING state.
+ returned: always
+ type: int
+ runningCount:
+ description: The number of tasks in the cluster that are in the RUNNING state.
+ returned: always
+ type: int
+ serviceArn:
+ description: The Amazon Resource Name (ARN) that identifies the service. The ARN contains the arn:aws:ecs namespace, followed by the region of the service, the AWS account ID of the service owner, the service namespace, and then the service name. For example, arn:aws:ecs:region:012345678910:service/my-service.
+ returned: always
+ type: string
+ serviceName:
+ description: A user-generated string used to identify the service
+ returned: always
+ type: string
+ status:
+ description: The valid values are ACTIVE, DRAINING, or INACTIVE.
+ returned: always
+ type: string
+ taskDefinition:
+ description: The ARN of a task definition to use for tasks in the service.
+ returned: always
+ type: string
+ deployments:
+ description: list of service deployments
+ returned: always
+ type: list of complex
+ events:
+ description: list of service events
+ returned: always
+ type: list of complex
+ansible_facts:
+ description: Facts about deleted service.
+ returned: when deleting a service
+ type: complex
+ contains:
+ service:
+ description: Details of deleted service in the same structure described above for service creation.
+ returned: when service existed and was deleted
+ type: complex
+'''
+import time
+
+try:
+ import boto
+ import botocore
+ HAS_BOTO = True
+except ImportError:
+ HAS_BOTO = False
+
+try:
+ import boto3
+ HAS_BOTO3 = True
+except ImportError:
+ HAS_BOTO3 = False
+
+class EcsServiceManager:
+ """Handles ECS Services"""
+
+ def __init__(self, module):
+ self.module = module
+
+ try:
+ # self.ecs = boto3.client('ecs')
+ region, ec2_url, aws_connect_kwargs = get_aws_connection_info(module, boto3=True)
+ if not region:
+ module.fail_json(msg="Region must be specified as a parameter, in EC2_REGION or AWS_REGION environment variables or in boto configuration file")
+ self.ecs = boto3_conn(module, conn_type='client', resource='ecs', region=region, endpoint=ec2_url, **aws_connect_kwargs)
+ except boto.exception.NoAuthHandlerFound as e:
+ self.module.fail_json(msg="Can't authorize connection - "+str(e))
+
+ # def list_clusters(self):
+ # return self.client.list_clusters()
+ # {'failures': [],
+ # 'ResponseMetadata': {'HTTPStatusCode': 200, 'RequestId': 'ce7b5880-1c41-11e5-8a31-47a93a8a98eb'},
+ # 'clusters': [{'activeServicesCount': 0, 'clusterArn': 'arn:aws:ecs:us-west-2:777110527155:cluster/default', 'status': 'ACTIVE', 'pendingTasksCount': 0, 'runningTasksCount': 0, 'registeredContainerInstancesCount': 0, 'clusterName': 'default'}]}
+ # {'failures': [{'arn': 'arn:aws:ecs:us-west-2:777110527155:cluster/bogus', 'reason': 'MISSING'}],
+ # 'ResponseMetadata': {'HTTPStatusCode': 200, 'RequestId': '0f66c219-1c42-11e5-8a31-47a93a8a98eb'},
+ # 'clusters': []}
+
+ def find_in_array(self, array_of_services, service_name, field_name='serviceArn'):
+ for c in array_of_services:
+ if c[field_name].endswith(service_name):
+ return c
+ return None
+
+ def describe_service(self, cluster_name, service_name):
+ response = self.ecs.describe_services(
+ cluster=cluster_name,
+ services=[
+ service_name
+ ])
+ msg = ''
+ if len(response['failures'])>0:
+ c = self.find_in_array(response['failures'], service_name, 'arn')
+ msg += ", failure reason is "+c['reason']
+ if c and c['reason']=='MISSING':
+ return None
+ # fall thru and look through found ones
+ if len(response['services'])>0:
+ c = self.find_in_array(response['services'], service_name)
+ if c:
+ return c
+ raise Exception("Unknown problem describing service %s." % service_name)
+
+ def is_matching_service(self, expected, existing):
+ if expected['task_definition'] != existing['taskDefinition']:
+ return False
+
+ if (expected['load_balancers'] or []) != existing['loadBalancers']:
+ return False
+
+ if (expected['desired_count'] or 0) != existing['desiredCount']:
+ return False
+
+ return True
+
+ def create_service(self, service_name, cluster_name, task_definition,
+ load_balancers, desired_count, client_token, role):
+ response = self.ecs.create_service(
+ cluster=cluster_name,
+ serviceName=service_name,
+ taskDefinition=task_definition,
+ loadBalancers=load_balancers,
+ desiredCount=desired_count,
+ clientToken=client_token,
+ role=role)
+ return self.jsonize(response['service'])
+
+ def update_service(self, service_name, cluster_name, task_definition,
+ load_balancers, desired_count, client_token, role):
+ response = self.ecs.update_service(
+ cluster=cluster_name,
+ service=service_name,
+ taskDefinition=task_definition,
+ desiredCount=desired_count)
+ return self.jsonize(response['service'])
+
+ def jsonize(self, service):
+ # some fields are datetime which is not JSON serializable
+ # make them strings
+ if 'deployments' in service:
+ for d in service['deployments']:
+ if 'createdAt' in d:
+ d['createdAt'] = str(d['createdAt'])
+ if 'updatedAt' in d:
+ d['updatedAt'] = str(d['updatedAt'])
+ if 'events' in service:
+ for e in service['events']:
+ if 'createdAt' in e:
+ e['createdAt'] = str(e['createdAt'])
+ return service
+
+ def delete_service(self, service, cluster=None):
+ return self.ecs.delete_service(cluster=cluster, service=service)
+
+def main():
+
+ argument_spec = ec2_argument_spec()
+ argument_spec.update(dict(
+ state=dict(required=True, choices=['present', 'absent', 'deleting'] ),
+ name=dict(required=True, type='str' ),
+ cluster=dict(required=False, type='str' ),
+ task_definition=dict(required=False, type='str' ),
+ load_balancers=dict(required=False, type='list' ),
+ desired_count=dict(required=False, type='int' ),
+ client_token=dict(required=False, type='str' ),
+ role=dict(required=False, type='str' ),
+ delay=dict(required=False, type='int', default=10),
+ repeat=dict(required=False, type='int', default=10)
+ ))
+
+ module = AnsibleModule(argument_spec=argument_spec, supports_check_mode=True)
+
+ if not HAS_BOTO:
+ module.fail_json(msg='boto is required.')
+
+ if not HAS_BOTO3:
+ module.fail_json(msg='boto3 is required.')
+
+ if module.params['state'] == 'present':
+ if module.params['task_definition'] is None:
+ module.fail_json(msg="To create a service, a task_definition must be specified")
+ if module.params['desired_count'] is None:
+ module.fail_json(msg="To create a service, a desired_count must be specified")
+
+ service_mgr = EcsServiceManager(module)
+ try:
+ existing = service_mgr.describe_service(module.params['cluster'], module.params['name'])
+ except Exception as e:
+ module.fail_json(msg="Exception describing service '"+module.params['name']+"' in cluster '"+module.params['cluster']+"': "+str(e))
+
+ results = dict(changed=False)
+ if module.params['state'] == 'present':
+
+ matching = False
+ update = False
+ if existing and 'status' in existing and existing['status']=="ACTIVE":
+ if service_mgr.is_matching_service(module.params, existing):
+ matching = True
+ results['service'] = service_mgr.jsonize(existing)
+ else:
+ update = True
+
+ if not matching:
+ if not module.check_mode:
+ if module.params['load_balancers'] is None:
+ loadBalancers = []
+ else:
+ loadBalancers = module.params['load_balancers']
+ if module.params['role'] is None:
+ role = ''
+ else:
+ role = module.params['role']
+ if module.params['client_token'] is None:
+ clientToken = ''
+ else:
+ clientToken = module.params['client_token']
+
+ if update:
+ # update required
+ response = service_mgr.update_service(module.params['name'],
+ module.params['cluster'],
+ module.params['task_definition'],
+ loadBalancers,
+ module.params['desired_count'],
+ clientToken,
+ role)
+ else:
+ # doesn't exist. create it.
+ response = service_mgr.create_service(module.params['name'],
+ module.params['cluster'],
+ module.params['task_definition'],
+ loadBalancers,
+ module.params['desired_count'],
+ clientToken,
+ role)
+
+ results['service'] = response
+
+ results['changed'] = True
+
+ elif module.params['state'] == 'absent':
+ if not existing:
+ pass
+ else:
+ # it exists, so we should delete it and mark changed.
+ # return info about the cluster deleted
+ del existing['deployments']
+ del existing['events']
+ results['ansible_facts'] = existing
+ if 'status' in existing and existing['status']=="INACTIVE":
+ results['changed'] = False
+ else:
+ if not module.check_mode:
+ try:
+ service_mgr.delete_service(
+ module.params['name'],
+ module.params['cluster']
+ )
+ except botocore.exceptions.ClientError as e:
+ module.fail_json(msg=e.message)
+ results['changed'] = True
+
+ elif module.params['state'] == 'deleting':
+ if not existing:
+ module.fail_json(msg="Service '"+module.params['name']+" not found.")
+ return
+ # it exists, so we should delete it and mark changed.
+ # return info about the cluster deleted
+ delay = module.params['delay']
+ repeat = module.params['repeat']
+ time.sleep(delay)
+ for i in range(repeat):
+ existing = service_mgr.describe_service(module.params['cluster'], module.params['name'])
+ status = existing['status']
+ if status == "INACTIVE":
+ results['changed'] = True
+ break
+ time.sleep(delay)
+ if i == repeat - 1:
+ module.fail_json(msg="Service still not deleted after "+str(repeat)+" tries of "+str(delay)+" seconds each.")
+ return
+
+ module.exit_json(**results)
+
+# import module snippets
+from ansible.module_utils.basic import *
+from ansible.module_utils.ec2 import *
+
+if __name__ == '__main__':
+ main()
diff --git a/lib/ansible/modules/extras/cloud/amazon/ecs_service_facts.py b/lib/ansible/modules/extras/cloud/amazon/ecs_service_facts.py
new file mode 100644
index 0000000000..f363c56a87
--- /dev/null
+++ b/lib/ansible/modules/extras/cloud/amazon/ecs_service_facts.py
@@ -0,0 +1,236 @@
+#!/usr/bin/python
+# This file is part of Ansible
+#
+# Ansible is free software: you can redistribute it and/or modify
+# it under the terms of the GNU General Public License as published by
+# the Free Software Foundation, either version 3 of the License, or
+# (at your option) any later version.
+#
+# Ansible is distributed in the hope that it will be useful,
+# but WITHOUT ANY WARRANTY; without even the implied warranty of
+# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+# GNU General Public License for more details.
+#
+# You should have received a copy of the GNU General Public License
+# along with Ansible. If not, see <http://www.gnu.org/licenses/>.
+
+DOCUMENTATION = '''
+---
+module: ecs_service_facts
+short_description: list or describe services in ecs
+notes:
+ - for details of the parameters and returns see U(http://boto3.readthedocs.org/en/latest/reference/services/ecs.html)
+description:
+ - Lists or describes services in ecs.
+version_added: "2.1"
+author:
+ - "Mark Chance (@java1guy)"
+ - "Darek Kaczynski (@kaczynskid)"
+requirements: [ json, boto, botocore, boto3 ]
+options:
+ details:
+ description:
+ - Set this to true if you want detailed information about the services.
+ required: false
+ default: 'false'
+ choices: ['true', 'false']
+ cluster:
+ description:
+ - The cluster ARNS in which to list the services.
+ required: false
+ default: 'default'
+ service:
+ description:
+ - The service to get details for (required if details is true)
+ required: false
+extends_documentation_fragment:
+ - aws
+ - ec2
+'''
+
+EXAMPLES = '''
+# Note: These examples do not set authentication details, see the AWS Guide for details.
+
+# Basic listing example
+- ecs_service_facts:
+ cluster: test-cluster
+ service: console-test-service
+ details: true
+
+# Basic listing example
+- ecs_service_facts:
+ cluster: test-cluster
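+
+# A sketch (untested): register the listing so the returned service ARNs
+# can be reused later; the variable name is illustrative.
+- ecs_service_facts:
+ cluster: test-cluster
+ register: service_facts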
+'''
+
+RETURN = '''
+services:
+ description: When details is false, returns an array of service ARNs, otherwise an array of complex objects as described below.
+ returned: success
+ type: list of complex
+ contains:
+ clusterArn:
+ description: The Amazon Resource Name (ARN) of the of the cluster that hosts the service.
+ returned: always
+ type: string
+ desiredCount:
+ description: The desired number of instantiations of the task definition to keep running on the service.
+ returned: always
+ type: int
+ loadBalancers:
+ description: A list of load balancer objects
+ returned: always
+ type: complex
+ contains:
+ loadBalancerName:
+ description: the name
+ returned: always
+ type: string
+ containerName:
+ description: The name of the container to associate with the load balancer.
+ returned: always
+ type: string
+ containerPort:
+ description: The port on the container to associate with the load balancer.
+ returned: always
+ type: int
+ pendingCount:
+ description: The number of tasks in the cluster that are in the PENDING state.
+ returned: always
+ type: int
+ runningCount:
+ description: The number of tasks in the cluster that are in the RUNNING state.
+ returned: always
+ type: int
+ serviceArn:
+ description: The Amazon Resource Name (ARN) that identifies the service. The ARN contains the arn:aws:ecs namespace, followed by the region of the service, the AWS account ID of the service owner, the service namespace, and then the service name. For example, arn:aws:ecs:region:012345678910:service/my-service.
+ returned: always
+ type: string
+ serviceName:
+ description: A user-generated string used to identify the service
+ returned: always
+ type: string
+ status:
+ description: The valid values are ACTIVE, DRAINING, or INACTIVE.
+ returned: always
+ type: string
+ taskDefinition:
+ description: The ARN of a task definition to use for tasks in the service.
+ returned: always
+ type: string
+ deployments:
+ description: list of service deployments
+ returned: always
+ type: list of complex
+ events:
+ description: list of service events
+ returned: always
+ type: list of complex
+'''
+try:
+ import boto
+ import botocore
+ HAS_BOTO = True
+except ImportError:
+ HAS_BOTO = False
+
+try:
+ import boto3
+ HAS_BOTO3 = True
+except ImportError:
+ HAS_BOTO3 = False
+
+class EcsServiceManager:
+ """Handles ECS Services"""
+
+ def __init__(self, module):
+ self.module = module
+
+ try:
+ # self.ecs = boto3.client('ecs')
+ region, ec2_url, aws_connect_kwargs = get_aws_connection_info(module, boto3=True)
+ if not region:
+ module.fail_json(msg="Region must be specified as a parameter, in EC2_REGION or AWS_REGION environment variables or in boto configuration file")
+ self.ecs = boto3_conn(module, conn_type='client', resource='ecs', region=region, endpoint=ec2_url, **aws_connect_kwargs)
+ except boto.exception.NoAuthHandlerFound as e:
+ self.module.fail_json(msg="Can't authorize connection - "+str(e))
+
+ # def list_clusters(self):
+ # return self.client.list_clusters()
+ # {'failures': [],
+ # 'ResponseMetadata': {'HTTPStatusCode': 200, 'RequestId': 'ce7b5880-1c41-11e5-8a31-47a93a8a98eb'},
+ # 'clusters': [{'activeServicesCount': 0, 'clusterArn': 'arn:aws:ecs:us-west-2:777110527155:cluster/default', 'status': 'ACTIVE', 'pendingTasksCount': 0, 'runningTasksCount': 0, 'registeredContainerInstancesCount': 0, 'clusterName': 'default'}]}
+ # {'failures': [{'arn': 'arn:aws:ecs:us-west-2:777110527155:cluster/bogus', 'reason': 'MISSING'}],
+ # 'ResponseMetadata': {'HTTPStatusCode': 200, 'RequestId': '0f66c219-1c42-11e5-8a31-47a93a8a98eb'},
+ # 'clusters': []}
+
+ def list_services(self, cluster):
+ fn_args = dict()
+ if cluster:
+ fn_args['cluster'] = cluster
+ response = self.ecs.list_services(**fn_args)
+ relevant_response = dict(services = response['serviceArns'])
+ return relevant_response
+
+ def describe_services(self, cluster, services):
+ fn_args = dict()
+ if cluster:
+ fn_args['cluster'] = cluster
+ fn_args['services']=services.split(",")
+ response = self.ecs.describe_services(**fn_args)
+ relevant_response = dict(services = map(self.extract_service_from, response['services']))
+ if 'failures' in response and len(response['failures'])>0:
+ relevant_response['services_not_running'] = response['failures']
+ return relevant_response
+
+ def extract_service_from(self, service):
+ # some fields are datetime which is not JSON serializable
+ # make them strings
+ if 'deployments' in service:
+ for d in service['deployments']:
+ if 'createdAt' in d:
+ d['createdAt'] = str(d['createdAt'])
+ if 'updatedAt' in d:
+ d['updatedAt'] = str(d['updatedAt'])
+ if 'events' in service:
+ for e in service['events']:
+ if 'createdAt' in e:
+ e['createdAt'] = str(e['createdAt'])
+ return service
+
+def main():
+
+ argument_spec = ec2_argument_spec()
+ argument_spec.update(dict(
+ details=dict(required=False, type='bool', default=False ),
+ cluster=dict(required=False, type='str' ),
+ service=dict(required=False, type='str' )
+ ))
+
+ module = AnsibleModule(argument_spec=argument_spec, supports_check_mode=True)
+
+ if not HAS_BOTO:
+ module.fail_json(msg='boto is required.')
+
+ if not HAS_BOTO3:
+ module.fail_json(msg='boto3 is required.')
+
+ show_details = module.params.get('details', False)
+
+ task_mgr = EcsServiceManager(module)
+ if show_details:
+ if 'service' not in module.params or not module.params['service']:
+ module.fail_json(msg="service must be specified for ecs_service_facts")
+ ecs_facts = task_mgr.describe_services(module.params['cluster'], module.params['service'])
+ else:
+ ecs_facts = task_mgr.list_services(module.params['cluster'])
+
+ ecs_facts_result = dict(changed=False, ansible_facts=ecs_facts)
+ module.exit_json(**ecs_facts_result)
+
+# import module snippets
+from ansible.module_utils.basic import *
+from ansible.module_utils.urls import *
+from ansible.module_utils.ec2 import *
+
+if __name__ == '__main__':
+ main()
diff --git a/lib/ansible/modules/extras/cloud/amazon/ecs_task.py b/lib/ansible/modules/extras/cloud/amazon/ecs_task.py
new file mode 100644
index 0000000000..f263ef0da9
--- /dev/null
+++ b/lib/ansible/modules/extras/cloud/amazon/ecs_task.py
@@ -0,0 +1,324 @@
+#!/usr/bin/python
+# This file is part of Ansible
+#
+# Ansible is free software: you can redistribute it and/or modify
+# it under the terms of the GNU General Public License as published by
+# the Free Software Foundation, either version 3 of the License, or
+# (at your option) any later version.
+#
+# Ansible is distributed in the hope that it will be useful,
+# but WITHOUT ANY WARRANTY; without even the implied warranty of
+# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+# GNU General Public License for more details.
+#
+# You should have received a copy of the GNU General Public License
+# along with Ansible. If not, see <http://www.gnu.org/licenses/>.
+
+DOCUMENTATION = '''
+---
+module: ecs_task
+short_description: run, start or stop a task in ecs
+description:
+ - Creates or deletes instances of task definitions.
+version_added: "2.0"
+author: Mark Chance(@Java1Guy)
+requirements: [ json, boto, botocore, boto3 ]
+options:
+ operation:
+ description:
+ - Which task operation to execute
+ required: True
+ choices: ['run', 'start', 'stop']
+ cluster:
+ description:
+ - The name of the cluster to run the task on
+ required: False
+ task_definition:
+ description:
+ - The task definition to start or run
+ required: False
+ overrides:
+ description:
+ - A dictionary of values to pass to the new instances
+ required: False
+ count:
+ description:
+ - How many new instances to start
+ required: False
+ task:
+ description:
+ - The task to stop
+ required: False
+ container_instances:
+ description:
+ - The list of container instances on which to deploy the task
+ required: False
+ started_by:
+ description:
+ - A value showing who or what started the task (for informational purposes)
+ required: False
+extends_documentation_fragment:
+ - aws
+ - ec2
+'''
+
+EXAMPLES = '''
+# Simple example of run task
+- name: Run task
+ ecs_task:
+ operation: run
+ cluster: console-sample-app-static-cluster
+ task_definition: console-sample-app-static-taskdef
+ count: 1
+ started_by: ansible_user
+ register: task_output
+
+# Simple example of start task
+
+- name: Start a task
+ ecs_task:
+ operation: start
+ cluster: console-sample-app-static-cluster
+ task_definition: console-sample-app-static-taskdef
+ task: "arn:aws:ecs:us-west-2:172139249013:task/3f8353d1-29a8-4689-bbf6-ad79937ffe8a"
+ container_instances:
+ - arn:aws:ecs:us-west-2:172139249013:container-instance/79c23f22-876c-438a-bddf-55c98a3538a8
+ started_by: ansible_user
+ register: task_output
+
+- name: Stop a task
+ ecs_task:
+ operation: stop
+ cluster: console-sample-app-static-cluster
+ task_definition: console-sample-app-static-taskdef
+ task: "arn:aws:ecs:us-west-2:172139249013:task/3f8353d1-29a8-4689-bbf6-ad79937ffe8a"
+'''
+RETURN = '''
+task:
+ description: details about the task that was started
+ returned: success
+ type: complex
+ contains:
+ taskArn:
+ description: The Amazon Resource Name (ARN) that identifies the task.
+ returned: always
+ type: string
+ clusterArn:
+ description: The Amazon Resource Name (ARN) of the cluster that hosts the task.
+ returned: only when details is true
+ type: string
+ taskDefinitionArn:
+ description: The Amazon Resource Name (ARN) of the task definition.
+ returned: only when details is true
+ type: string
+ containerInstanceArn:
+ description: The Amazon Resource Name (ARN) of the container instance running the task.
+ returned: only when details is true
+ type: string
+ overrides:
+ description: The container overrides set for this task.
+ returned: only when details is true
+ type: list of complex
+ lastStatus:
+ description: The last recorded status of the task.
+ returned: only when details is true
+ type: string
+ desiredStatus:
+ description: The desired status of the task.
+ returned: only when details is true
+ type: string
+ containers:
+ description: The container details.
+ returned: only when details is true
+ type: list of complex
+ startedBy:
+ description: The user who started the task.
+ returned: only when details is true
+ type: string
+ stoppedReason:
+ description: The reason why the task was stopped.
+ returned: only when details is true
+ type: string
+ createdAt:
+ description: The timestamp of when the task was created.
+ returned: only when details is true
+ type: string
+ startedAt:
+ description: The timestamp of when the task was started.
+ returned: only when details is true
+ type: string
+ stoppedAt:
+ description: The timestamp of when the task was stopped.
+ returned: only when details is true
+ type: string
+'''
+try:
+ import boto
+ import botocore
+ HAS_BOTO = True
+except ImportError:
+ HAS_BOTO = False
+
+try:
+ import boto3
+ HAS_BOTO3 = True
+except ImportError:
+ HAS_BOTO3 = False
+
+class EcsExecManager:
+ """Handles ECS Tasks"""
+
+ def __init__(self, module):
+ self.module = module
+
+ try:
+ region, ec2_url, aws_connect_kwargs = get_aws_connection_info(module, boto3=True)
+ if not region:
+ module.fail_json(msg="Region must be specified as a parameter, in EC2_REGION or AWS_REGION environment variables or in boto configuration file")
+ self.ecs = boto3_conn(module, conn_type='client', resource='ecs', region=region, endpoint=ec2_url, **aws_connect_kwargs)
+ except boto.exception.NoAuthHandlerFound as e:
+ module.fail_json(msg="Can't authorize connection - "+str(e))
+
+ def list_tasks(self, cluster_name, service_name, status):
+ response = self.ecs.list_tasks(
+ cluster=cluster_name,
+ family=service_name,
+ desiredStatus=status
+ )
+ if len(response['taskArns'])>0:
+ for c in response['taskArns']:
+ if c.endswith(service_name):
+ return c
+ return None
+
+ def run_task(self, cluster, task_definition, overrides, count, startedBy):
+ if overrides is None:
+ overrides = dict()
+ response = self.ecs.run_task(
+ cluster=cluster,
+ taskDefinition=task_definition,
+ overrides=overrides,
+ count=count,
+ startedBy=startedBy)
+ # include tasks and failures
+ return response['tasks']
+
+ def start_task(self, cluster, task_definition, overrides, container_instances, startedBy):
+ args = dict()
+ if cluster:
+ args['cluster'] = cluster
+ if task_definition:
+ args['taskDefinition']=task_definition
+ if overrides:
+ args['overrides']=overrides
+ if container_instances:
+ args['containerInstances']=container_instances
+ if startedBy:
+ args['startedBy']=startedBy
+ response = self.ecs.start_task(**args)
+ # include tasks and failures
+ return response['tasks']
+
+ def stop_task(self, cluster, task):
+ response = self.ecs.stop_task(cluster=cluster, task=task)
+ return response['task']
+
+def main():
+ argument_spec = ec2_argument_spec()
+ argument_spec.update(dict(
+ operation=dict(required=True, choices=['run', 'start', 'stop'] ),
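+ # Letter codes below mark which operations use each parameter
+ # (legend inferred from usage: R = run, S = start, P = stop; * = required there).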
+ cluster=dict(required=False, type='str' ), # R S P
+ task_definition=dict(required=False, type='str' ), # R* S*
+ overrides=dict(required=False, type='dict'), # R S
+ count=dict(required=False, type='int' ), # R
+ task=dict(required=False, type='str' ), # P*
+ container_instances=dict(required=False, type='list'), # S*
+ started_by=dict(required=False, type='str' ) # R S
+ ))
+
+ module = AnsibleModule(argument_spec=argument_spec, supports_check_mode=True)
+
+ # Validate Requirements
+ if not HAS_BOTO:
+ module.fail_json(msg='boto is required.')
+
+ if not HAS_BOTO3:
+ module.fail_json(msg='boto3 is required.')
+
+ # Validate Inputs
+ if module.params['operation'] == 'run':
+ if module.params['task_definition'] is None:
+ module.fail_json(msg="To run a task, a task_definition must be specified")
+ task_to_list = module.params['task_definition']
+ status_type = "RUNNING"
+
+ if module.params['operation'] == 'start':
+ if module.params['task_definition'] is None:
+ module.fail_json(msg="To start a task, a task_definition must be specified")
+ if module.params['container_instances'] is None:
+ module.fail_json(msg="To start a task, container instances must be specified")
+ task_to_list = module.params['task']
+ status_type = "RUNNING"
+
+ if module.params['operation'] == 'stop':
+ if module.params['task'] is None:
+ module.fail_json(msg="To stop a task, a task must be specified")
+ if module.params['task_definition'] is None:
+ module.fail_json(msg="To stop a task, a task definition must be specified")
+ task_to_list = module.params['task_definition']
+ status_type = "STOPPED"
+
+ service_mgr = EcsExecManager(module)
+ existing = service_mgr.list_tasks(module.params['cluster'], task_to_list, status_type)
+
+ results = dict(changed=False)
+ if module.params['operation'] == 'run':
+ if existing:
+ # TBD - validate the rest of the details
+ results['task']=existing
+ else:
+ if not module.check_mode:
+ results['task'] = service_mgr.run_task(
+ module.params['cluster'],
+ module.params['task_definition'],
+ module.params['overrides'],
+ module.params['count'],
+ module.params['started_by'])
+ results['changed'] = True
+
+ elif module.params['operation'] == 'start':
+ if existing:
+ # TBD - validate the rest of the details
+ results['task']=existing
+ else:
+ if not module.check_mode:
+ results['task'] = service_mgr.start_task(
+ module.params['cluster'],
+ module.params['task_definition'],
+ module.params['overrides'],
+ module.params['container_instances'],
+ module.params['started_by']
+ )
+ results['changed'] = True
+
+ elif module.params['operation'] == 'stop':
+ if existing:
+ results['task']=existing
+ else:
+ if not module.check_mode:
+ # no stopped copy of the task was found, so stop the running
+ # task, mark changed and return info about the stopped task
+ results['task'] = service_mgr.stop_task(
+ module.params['cluster'],
+ module.params['task']
+ )
+ results['changed'] = True
+
+ module.exit_json(**results)
+
+# import module snippets
+from ansible.module_utils.basic import *
+from ansible.module_utils.ec2 import *
+
+if __name__ == '__main__':
+ main()
diff --git a/lib/ansible/modules/extras/cloud/amazon/ecs_taskdefinition.py b/lib/ansible/modules/extras/cloud/amazon/ecs_taskdefinition.py
new file mode 100644
index 0000000000..e924417f9d
--- /dev/null
+++ b/lib/ansible/modules/extras/cloud/amazon/ecs_taskdefinition.py
@@ -0,0 +1,221 @@
+#!/usr/bin/python
+# This file is part of Ansible
+#
+# Ansible is free software: you can redistribute it and/or modify
+# it under the terms of the GNU General Public License as published by
+# the Free Software Foundation, either version 3 of the License, or
+# (at your option) any later version.
+#
+# Ansible is distributed in the hope that it will be useful,
+# but WITHOUT ANY WARRANTY; without even the implied warranty of
+# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+# GNU General Public License for more details.
+#
+# You should have received a copy of the GNU General Public License
+# along with Ansible. If not, see <http://www.gnu.org/licenses/>.
+
+DOCUMENTATION = '''
+---
+module: ecs_taskdefinition
+short_description: register a task definition in ecs
+description:
+ - Creates or terminates task definitions
+version_added: "2.0"
+author: Mark Chance(@Java1Guy)
+requirements: [ json, boto, botocore, boto3 ]
+options:
+ state:
+ description:
+ - Whether the task definition should exist or be deleted
+ required: true
+ choices: ['present', 'absent']
+ arn:
+ description:
+ - The ARN of the task definition to delete
+ required: false
+ family:
+ description:
+ - The name (family) to give the task definition
+ required: false
+ revision:
+ description:
+ - A revision number for the task definition
+ required: False
+ type: int
+ containers:
+ description:
+ - A list of containers definitions
+ required: False
+ type: list of dicts with container definitions
+ volumes:
+ description:
+ - A list of names of volumes to be attached
+ required: False
+ type: list of name
+extends_documentation_fragment:
+ - aws
+ - ec2
+'''
+
+EXAMPLES = '''
+- name: "Create task definition"
+ ecs_taskdefinition:
+ containers:
+ - name: simple-app
+ cpu: 10
+ essential: true
+ image: "httpd:2.4"
+ memory: 300
+ mountPoints:
+ - containerPath: /usr/local/apache2/htdocs
+ sourceVolume: my-vol
+ portMappings:
+ - containerPort: 80
+ hostPort: 80
+ - name: busybox
+ command:
+ - "/bin/sh -c \"while true; do echo '<html> <head> <title>Amazon ECS Sample App</title> <style>body {margin-top: 40px; background-color: #333;} </style> </head><body> <div style=color:white;text-align:center> <h1>Amazon ECS Sample App</h1> <h2>Congratulations!</h2> <p>Your application is now running on a container in Amazon ECS.</p>' > top; /bin/date > date ; echo '</div></body></html>' > bottom; cat top date bottom > /usr/local/apache2/htdocs/index.html ; sleep 1; done\""
+ cpu: 10
+ entryPoint:
+ - sh
+ - "-c"
+ essential: false
+ image: busybox
+ memory: 200
+ volumesFrom:
+ - sourceContainer: simple-app
+ volumes:
+ - name: my-vol
+ family: test-cluster-taskdef
+ state: present
+ register: task_output
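+
+# Illustrative sketch of the reverse operation: deregister the same
+# definition by family and revision (the revision number is hypothetical).
+- name: "Remove task definition"
+ ecs_taskdefinition:
+ family: test-cluster-taskdef
+ revision: 1
+ state: absent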
+'''
+RETURN = '''
+taskdefinition:
+ description: a reflection of the input parameters, plus the revision, status and taskDefinitionArn assigned by AWS
+ type: dict
+'''
+try:
+ import boto
+ import botocore
+ HAS_BOTO = True
+except ImportError:
+ HAS_BOTO = False
+
+try:
+ import boto3
+ HAS_BOTO3 = True
+except ImportError:
+ HAS_BOTO3 = False
+
+class EcsTaskManager:
+ """Handles ECS Tasks"""
+
+ def __init__(self, module):
+ self.module = module
+
+ try:
+ region, ec2_url, aws_connect_kwargs = get_aws_connection_info(module, boto3=True)
+ if not region:
+ module.fail_json(msg="Region must be specified as a parameter, in EC2_REGION or AWS_REGION environment variables or in boto configuration file")
+ self.ecs = boto3_conn(module, conn_type='client', resource='ecs', region=region, endpoint=ec2_url, **aws_connect_kwargs)
+ except boto.exception.NoAuthHandlerFound as e:
+ module.fail_json(msg="Can't authorize connection - "+str(e))
+
+ def describe_task(self, task_name):
+ try:
+ response = self.ecs.describe_task_definition(taskDefinition=task_name)
+ return response['taskDefinition']
+ except botocore.exceptions.ClientError:
+ return None
+
+ def register_task(self, family, container_definitions, volumes):
+ response = self.ecs.register_task_definition(family=family,
+ containerDefinitions=container_definitions, volumes=volumes)
+ return response['taskDefinition']
+
+ def deregister_task(self, taskArn):
+ response = self.ecs.deregister_task_definition(taskDefinition=taskArn)
+ return response['taskDefinition']
+
+def main():
+
+ argument_spec = ec2_argument_spec()
+ argument_spec.update(dict(
+ state=dict(required=True, choices=['present', 'absent'] ),
+ arn=dict(required=False, type='str' ),
+ family=dict(required=False, type='str' ),
+ revision=dict(required=False, type='int' ),
+ containers=dict(required=False, type='list' ),
+ volumes=dict(required=False, type='list' )
+ ))
+
+ module = AnsibleModule(argument_spec=argument_spec, supports_check_mode=True)
+
+ if not HAS_BOTO:
+ module.fail_json(msg='boto is required.')
+
+ if not HAS_BOTO3:
+ module.fail_json(msg='boto3 is required.')
+
+ task_to_describe = None
+ # When deregistering a task, we can specify the ARN OR
+ # the family and revision.
+ if module.params['state'] == 'absent':
+ if module.params['arn'] is not None:
+ task_to_describe = module.params['arn']
+ elif module.params['family'] is not None and module.params['revision'] is not None:
+ task_to_describe = module.params['family']+":"+str(module.params['revision'])
+ else:
+ module.fail_json(msg="To use task definitions, an arn or family and revision must be specified")
+ # When registering a task, we can specify the ARN OR
+ # the family and revision.
+ if module.params['state'] == 'present':
+ if module.params['family'] is None:
+ module.fail_json(msg="To use task definitions, a family must be specified")
+ if module.params['containers'] is None:
+ module.fail_json(msg="To use task definitions, a list of containers must be specified")
+ task_to_describe = module.params['family']
+
+ task_mgr = EcsTaskManager(module)
+ existing = task_mgr.describe_task(task_to_describe)
+
+ results = dict(changed=False)
+ if module.params['state'] == 'present':
+ if existing and 'status' in existing and existing['status']=="ACTIVE":
+ results['taskdefinition']=existing
+ else:
+ if not module.check_mode:
+ # doesn't exist. create it.
+ volumes = module.params['volumes'] or []
+ results['taskdefinition'] = task_mgr.register_task(module.params['family'],
+ module.params['containers'], volumes)
+ results['changed'] = True
+
+ # delete the task definition
+ elif module.params['state'] == 'absent':
+ if not existing:
+ pass
+ else:
+ # it exists, so we should delete it and mark changed.
+ # return info about the cluster deleted
+ results['taskdefinition'] = existing
+ if 'status' in existing and existing['status']=="INACTIVE":
+ results['changed'] = False
+ else:
+ if not module.check_mode:
+ task_mgr.deregister_task(task_to_describe)
+ results['changed'] = True
+
+ module.exit_json(**results)
+
+# import module snippets
+from ansible.module_utils.basic import *
+from ansible.module_utils.ec2 import *
+
+if __name__ == '__main__':
+ main()
diff --git a/lib/ansible/modules/extras/cloud/amazon/efs.py b/lib/ansible/modules/extras/cloud/amazon/efs.py
new file mode 100644
index 0000000000..388a3e8dd8
--- /dev/null
+++ b/lib/ansible/modules/extras/cloud/amazon/efs.py
@@ -0,0 +1,629 @@
+#!/usr/bin/python
+# This file is part of Ansible
+#
+# Ansible is free software: you can redistribute it and/or modify
+# it under the terms of the GNU General Public License as published by
+# the Free Software Foundation, either version 3 of the License, or
+# (at your option) any later version.
+#
+# Ansible is distributed in the hope that it will be useful,
+# but WITHOUT ANY WARRANTY; without even the implied warranty of
+# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+# GNU General Public License for more details.
+#
+# You should have received a copy of the GNU General Public License
+# along with Ansible. If not, see <http://www.gnu.org/licenses/>.
+
+DOCUMENTATION = '''
+---
+module: efs
+short_description: create and maintain EFS file systems
+description:
+ - Module allows creating, searching and destroying Amazon EFS file systems
+version_added: "2.2"
+requirements: [ boto3 ]
+author:
+ - "Ryan Sydnor (@ryansydnor)"
+ - "Artem Kazakov (@akazakov)"
+options:
+ state:
+ description:
+ - Allows creating, searching and destroying an Amazon EFS file system
+ required: false
+ default: 'present'
+ choices: ['present', 'absent']
+ name:
+ description:
+ - Creation Token of Amazon EFS file system. Required for create. Either name or ID required for delete.
+ required: false
+ default: None
+ id:
+ description:
+ - ID of Amazon EFS. Either name or ID required for delete.
+ required: false
+ default: None
+ performance_mode:
+ description:
+ - File system's performance mode to use. Only takes effect during creation.
+ required: false
+ default: 'general_purpose'
+ choices: ['general_purpose', 'max_io']
+ tags:
+ description:
+ - |
+ Tags of the Amazon EFS file system, defined as a dictionary.
+ In 'present' state, when tags are given and an EFS matched by 'name' exists, the tags of that EFS are replaced with the provided data.
+ required: false
+ default: None
+ targets:
+ description:
+ - |
+ List of mount targets. It should be a list of dictionaries, and each dictionary should include the following attributes:
+ - subnet_id - Mandatory. The ID of the subnet to add the mount target in.
+ - ip_address - Optional. A valid IPv4 address within the address range of the specified subnet.
+ - security_groups - Optional. List of security group IDs, of the form "sg-xxxxxxxx". These must be for the same VPC as the subnet specified.
+ This data may be modified for an existing EFS using state 'present' and a new list of mount targets.
+ required: false
+ default: None
+ wait:
+ description:
+ - |
+ In 'present' state, wait for the EFS 'available' life cycle state (unless the current state is already 'deleting' or 'deleted').
+ In 'absent' state, wait for the EFS 'deleted' life cycle state.
+ required: false
+ default: "no"
+ choices: ["yes", "no"]
+ wait_timeout:
+ description:
+ - How long the module should wait (in seconds) for desired state before returning. Zero means wait as long as necessary.
+ required: false
+ default: 0
+extends_documentation_fragment:
+ - aws
+'''
+
+EXAMPLES = '''
+# EFS provisioning
+- efs:
+ state: present
+ name: myTestEFS
+ tags:
+ name: myTestNameTag
+ purpose: file-storage
+ targets:
+ - subnet_id: subnet-748c5d03
+ security_groups: [ "sg-1a2b3c4d" ]
+
+# Modifying EFS data
+- efs:
+ state: present
+ name: myTestEFS
+ tags:
+ name: myAnotherTestTag
+ targets:
+ - subnet_id: subnet-7654fdca
+ security_groups: [ "sg-4c5d6f7a" ]
+
+# Deleting EFS
+- efs:
+ state: absent
+ name: myTestEFS
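+
+# Blocking delete (illustrative): wait up to ten minutes for the
+# file system to reach the 'deleted' life cycle state
+- efs:
+ state: absent
+ name: myTestEFS
+ wait: yes
+ wait_timeout: 600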
+'''
+
+RETURN = '''
+creation_time:
+ description: timestamp of creation date
+ returned:
+ type: datetime
+ sample: 2015-11-16 07:30:57-05:00
+creation_token:
+ description: EFS creation token
+ returned:
+ type: UUID
+ sample: console-88609e04-9a0e-4a2e-912c-feaa99509961
+file_system_id:
+ description: ID of the file system
+ returned:
+ type: unique ID
+ sample: fs-xxxxxxxx
+life_cycle_state:
+ description: state of the EFS file system
+ returned:
+ type: str
+ sample: creating, available, deleting, deleted
+mount_point:
+ description: url of file system
+ returned:
+ type: str
+ sample: .fs-xxxxxxxx.efs.us-west-2.amazonaws.com:/
+mount_targets:
+ description: list of mount targets
+ returned:
+ type: list of dicts
+ sample:
+ [
+ {
+ "file_system_id": "fs-a7ad440e",
+ "ip_address": "172.31.17.173",
+ "life_cycle_state": "available",
+ "mount_target_id": "fsmt-d8907871",
+ "network_interface_id": "eni-6e387e26",
+ "owner_id": "740748460359",
+ "security_groups": [
+ "sg-a30b22c6"
+ ],
+ "subnet_id": "subnet-e265c895"
+ },
+ ...
+ ]
+name:
+ description: name of the file system
+ returned:
+ type: str
+ sample: my-efs
+number_of_mount_targets:
+ description: the number of targets mounted
+ returned:
+ type: int
+ sample: 3
+owner_id:
+ description: AWS account ID of EFS owner
+ returned:
+ type: str
+ sample: XXXXXXXXXXXX
+size_in_bytes:
+ description: size of the file system in bytes as of a timestamp
+ returned:
+ type: dict
+ sample:
+ {
+ "timestamp": "2015-12-21 13:59:59-05:00",
+ "value": 12288
+ }
+performance_mode:
+ description: performance mode of the file system
+ returned:
+ type: str
+ sample: "generalPurpose"
+tags:
+ description: tags on the efs instance
+ returned:
+ type: dict
+ sample:
+ {
+ "name": "my-efs",
+ "key": "Value"
+ }
+
+'''
+
+from time import sleep
+from time import time as timestamp
+from collections import defaultdict
+
+try:
+ from botocore.exceptions import ClientError
+ import boto3
+ HAS_BOTO3 = True
+except ImportError as e:
+ HAS_BOTO3 = False
+
+
+class EFSConnection(object):
+
+ DEFAULT_WAIT_TIMEOUT_SECONDS = 0
+
+ STATE_CREATING = 'creating'
+ STATE_AVAILABLE = 'available'
+ STATE_DELETING = 'deleting'
+ STATE_DELETED = 'deleted'
+
+ def __init__(self, module, region, **aws_connect_params):
+ try:
+ self.connection = boto3_conn(module, conn_type='client',
+ resource='efs', region=region,
+ **aws_connect_params)
+ except Exception as e:
+ module.fail_json(msg="Failed to connect to AWS: %s" % str(e))
+
+ self.region = region
+ self.wait = module.params.get('wait')
+ self.wait_timeout = module.params.get('wait_timeout')
+
+ def get_file_systems(self, **kwargs):
+ """
+ Returns generator of file systems including all attributes of FS
+ """
+ items = iterate_all(
+ 'FileSystems',
+ self.connection.describe_file_systems,
+ **kwargs
+ )
+ for item in items:
+ item['CreationTime'] = str(item['CreationTime'])
+ """
+ Suffix of network path to be used as NFS device for mount. More detail here:
+ http://docs.aws.amazon.com/efs/latest/ug/gs-step-three-connect-to-ec2-instance.html
+ """
+ item['MountPoint'] = '.%s.efs.%s.amazonaws.com:/' % (item['FileSystemId'], self.region)
+ if 'Timestamp' in item['SizeInBytes']:
+ item['SizeInBytes']['Timestamp'] = str(item['SizeInBytes']['Timestamp'])
+ if item['LifeCycleState'] == self.STATE_AVAILABLE:
+ item['Tags'] = self.get_tags(FileSystemId=item['FileSystemId'])
+ item['MountTargets'] = list(self.get_mount_targets(FileSystemId=item['FileSystemId']))
+ else:
+ item['Tags'] = {}
+ item['MountTargets'] = []
+ yield item
+
+ def get_tags(self, **kwargs):
+ """
+ Returns tag list for selected instance of EFS
+ """
+ tags = iterate_all(
+ 'Tags',
+ self.connection.describe_tags,
+ **kwargs
+ )
+ return dict((tag['Key'], tag['Value']) for tag in tags)
+
+ def get_mount_targets(self, **kwargs):
+ """
+ Returns mount targets for selected instance of EFS
+ """
+ targets = iterate_all(
+ 'MountTargets',
+ self.connection.describe_mount_targets,
+ **kwargs
+ )
+ for target in targets:
+ if target['LifeCycleState'] == self.STATE_AVAILABLE:
+ target['SecurityGroups'] = list(self.get_security_groups(
+ MountTargetId=target['MountTargetId']
+ ))
+ else:
+ target['SecurityGroups'] = []
+ yield target
+
+ def get_security_groups(self, **kwargs):
+ """
+ Returns security groups for selected instance of EFS
+ """
+ return iterate_all(
+ 'SecurityGroups',
+ self.connection.describe_mount_target_security_groups,
+ **kwargs
+ )
+
+ def get_file_system_id(self, name):
+ """
+ Returns ID of instance by instance name
+ """
+ info = first_or_default(iterate_all(
+ 'FileSystems',
+ self.connection.describe_file_systems,
+ CreationToken=name
+ ))
+ return info and info['FileSystemId'] or None
+
+ def get_file_system_state(self, name, file_system_id=None):
+ """
+ Returns state of filesystem by EFS id/name
+ """
+ info = first_or_default(iterate_all(
+ 'FileSystems',
+ self.connection.describe_file_systems,
+ CreationToken=name,
+ FileSystemId=file_system_id
+ ))
+ return info and info['LifeCycleState'] or self.STATE_DELETED
+
+ def get_mount_targets_in_state(self, file_system_id, states=None):
+ """
+ Returns states of mount targets of selected EFS with selected state(s) (optional)
+ """
+ targets = iterate_all(
+ 'MountTargets',
+ self.connection.describe_mount_targets,
+ FileSystemId=file_system_id
+ )
+
+ if states:
+ if not isinstance(states, list):
+ states = [states]
+ targets = filter(lambda target: target['LifeCycleState'] in states, targets)
+
+ return list(targets)
+
+ def create_file_system(self, name, performance_mode):
+ """
+ Creates new filesystem with selected name
+ """
+ changed = False
+ state = self.get_file_system_state(name)
+ if state in [self.STATE_DELETING, self.STATE_DELETED]:
+ wait_for(
+ lambda: self.get_file_system_state(name),
+ self.STATE_DELETED
+ )
+ self.connection.create_file_system(CreationToken=name, PerformanceMode=performance_mode)
+ changed = True
+
+ # we always wait for the state to be available when creating.
+ # if we try to take any actions on the file system before it's available
+ # we'll throw errors
+ wait_for(
+ lambda: self.get_file_system_state(name),
+ self.STATE_AVAILABLE,
+ self.wait_timeout
+ )
+
+ return changed
+
+ def converge_file_system(self, name, tags, targets):
+ """
+ Change attributes (mount targets and tags) of filesystem by name
+ """
+ result = False
+ fs_id = self.get_file_system_id(name)
+
+ if tags is not None:
+ tags_to_create, _, tags_to_delete = dict_diff(self.get_tags(FileSystemId=fs_id), tags)
+
+ if tags_to_delete:
+ self.connection.delete_tags(
+ FileSystemId=fs_id,
+ TagKeys=[item[0] for item in tags_to_delete]
+ )
+ result = True
+
+ if tags_to_create:
+ self.connection.create_tags(
+ FileSystemId=fs_id,
+ Tags=[{'Key': item[0], 'Value': item[1]} for item in tags_to_create]
+ )
+ result = True
+
+ if targets is not None:
+ incomplete_states = [self.STATE_CREATING, self.STATE_DELETING]
+ wait_for(
+ lambda: len(self.get_mount_targets_in_state(fs_id, incomplete_states)),
+ 0
+ )
+
+ index_by_subnet_id = lambda items: dict((item['SubnetId'], item) for item in items)
+
+ current_targets = index_by_subnet_id(self.get_mount_targets(FileSystemId=fs_id))
+ targets = index_by_subnet_id(targets)
+
+ targets_to_create, intersection, targets_to_delete = dict_diff(current_targets,
+ targets, True)
+
+ """ To modify mount target it should be deleted and created again """
+ changed = filter(
+ lambda sid: not targets_equal(['SubnetId', 'IpAddress', 'NetworkInterfaceId'],
+ current_targets[sid], targets[sid]), intersection)
+ targets_to_delete = list(targets_to_delete) + changed
+ targets_to_create = list(targets_to_create) + changed
+
+ if targets_to_delete:
+ for sid in targets_to_delete:
+ self.connection.delete_mount_target(
+ MountTargetId=current_targets[sid]['MountTargetId']
+ )
+ wait_for(
+ lambda: len(self.get_mount_targets_in_state(fs_id, incomplete_states)),
+ 0
+ )
+ result = True
+
+ if targets_to_create:
+ for sid in targets_to_create:
+ self.connection.create_mount_target(
+ FileSystemId=fs_id,
+ **targets[sid]
+ )
+ wait_for(
+ lambda: len(self.get_mount_targets_in_state(fs_id, incomplete_states)),
+ 0,
+ self.wait_timeout
+ )
+ result = True
+
+ security_groups_to_update = filter(
+ lambda sid: 'SecurityGroups' in targets[sid] and
+ current_targets[sid]['SecurityGroups'] != targets[sid]['SecurityGroups'],
+ intersection
+ )
+
+ if security_groups_to_update:
+ for sid in security_groups_to_update:
+ self.connection.modify_mount_target_security_groups(
+ MountTargetId=current_targets[sid]['MountTargetId'],
+ SecurityGroups=targets[sid]['SecurityGroups']
+ )
+ result = True
+
+ return result
+
+ def delete_file_system(self, name, file_system_id=None):
+ """
+ Removes EFS instance by id/name
+ """
+ result = False
+ state = self.get_file_system_state(name, file_system_id)
+ if state in [self.STATE_CREATING, self.STATE_AVAILABLE]:
+ wait_for(
+ lambda: self.get_file_system_state(name),
+ self.STATE_AVAILABLE
+ )
+ if not file_system_id:
+ file_system_id = self.get_file_system_id(name)
+ self.delete_mount_targets(file_system_id)
+ self.connection.delete_file_system(FileSystemId=file_system_id)
+ result = True
+
+ if self.wait:
+ wait_for(
+ lambda: self.get_file_system_state(name),
+ self.STATE_DELETED,
+ self.wait_timeout
+ )
+
+ return result
+
+ def delete_mount_targets(self, file_system_id):
+ """
+ Removes mount targets by EFS id
+ """
+ wait_for(
+ lambda: len(self.get_mount_targets_in_state(file_system_id, self.STATE_CREATING)),
+ 0
+ )
+
+ targets = self.get_mount_targets_in_state(file_system_id, self.STATE_AVAILABLE)
+ for target in targets:
+ self.connection.delete_mount_target(MountTargetId=target['MountTargetId'])
+
+ wait_for(
+ lambda: len(self.get_mount_targets_in_state(file_system_id, self.STATE_DELETING)),
+ 0
+ )
+
+ return len(targets) > 0
+
+
+def iterate_all(attr, map_method, **kwargs):
+ """
+ Creates an iterator over a paginated boto3 result set, retrying on throttling
+ """
+ args = dict((key, value) for (key, value) in kwargs.items() if value is not None)
+ wait = 1
+ while True:
+ try:
+ data = map_method(**args)
+ for elm in data[attr]:
+ yield elm
+ if 'NextMarker' in data:
+ args['Marker'] = data['NextMarker']
+ continue
+ break
+ except ClientError as e:
+ # back off and retry on throttling; re-raise anything else
+ if e.response['Error']['Code'] == "ThrottlingException" and wait < 600:
+ sleep(wait)
+ wait = wait * 2
+ continue
+ raise
+
+def targets_equal(keys, a, b):
+ """
+ Compares two mount targets on the specified attributes
+ """
+ for key in keys:
+ if key in b and a[key] != b[key]:
+ return False
+
+ return True
+
+
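+# Illustrative behaviour of dict_diff in items mode (by_key=False):
+# dict_diff({'a': 1}, {'a': 1, 'b': 2})
+# -> ({('b', 2)}, {('a', 1)}, set()) i.e. (to_create, intersection, to_delete)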
+def dict_diff(dict1, dict2, by_key=False):
+ """
+ Helper method to calculate difference of two dictionaries
+ """
+ keys1 = set(dict1.keys() if by_key else dict1.items())
+ keys2 = set(dict2.keys() if by_key else dict2.items())
+
+ intersection = keys1 & keys2
+
+ return keys2 ^ intersection, intersection, keys1 ^ intersection
+
+
+def first_or_default(items, default=None):
+ """
+ Helper method to fetch first element of list (if exists)
+ """
+ for item in items:
+ return item
+ return default
+
+
+def wait_for(callback, value, timeout=EFSConnection.DEFAULT_WAIT_TIMEOUT_SECONDS):
+ """
+ Helper method to wait for desired value returned by callback method
+ """
+ wait_start = timestamp()
+ while True:
+ if callback() != value:
+ if timeout != 0 and (timestamp() - wait_start > timeout):
+ raise RuntimeError('Wait timeout exceeded (' + str(timeout) + ' sec)')
+ else:
+ sleep(5)
+ continue
+ break
+
+
+def main():
+ """
+ Module action handler
+ """
+ argument_spec = ec2_argument_spec()
+ argument_spec.update(dict(
+ state=dict(required=False, type='str', choices=["present", "absent"], default="present"),
+ id=dict(required=False, type='str', default=None),
+ name=dict(required=False, type='str', default=None),
+ tags=dict(required=False, type="dict", default={}),
+ targets=dict(required=False, type="list", default=[]),
+ performance_mode=dict(required=False, type='str', choices=["general_purpose", "max_io"], default="general_purpose"),
+ wait=dict(required=False, type="bool", default=False),
+ wait_timeout=dict(required=False, type="int", default=0)
+ ))
+
+ module = AnsibleModule(argument_spec=argument_spec)
+
+ if not HAS_BOTO3:
+ module.fail_json(msg='boto3 required for this module')
+
+ region, _, aws_connect_params = get_aws_connection_info(module, boto3=True)
+ connection = EFSConnection(module, region, **aws_connect_params)
+
+ name = module.params.get('name')
+ fs_id = module.params.get('id')
+ tags = module.params.get('tags')
+ target_translations = {
+ 'ip_address': 'IpAddress',
+ 'security_groups': 'SecurityGroups',
+ 'subnet_id': 'SubnetId'
+ }
+ targets = [dict((target_translations[key], value) for (key, value) in x.items()) for x in module.params.get('targets')]
+ performance_mode_translations = {
+ 'general_purpose': 'generalPurpose',
+ 'max_io': 'maxIO'
+ }
+ performance_mode = performance_mode_translations[module.params.get('performance_mode')]
+ changed = False
+
+ state = str(module.params.get('state')).lower()
+
+ if state == 'present':
+ if not name:
+ module.fail_json(msg='Name parameter is required for create')
+
+ changed = connection.create_file_system(name, performance_mode)
+ changed = connection.converge_file_system(name=name, tags=tags, targets=targets) or changed
+ result = first_or_default(connection.get_file_systems(CreationToken=name))
+
+ elif state == 'absent':
+ if not name and not fs_id:
+ module.fail_json(msg='Either name or id parameter is required for delete')
+
+ changed = connection.delete_file_system(name, fs_id)
+ result = None
+ if result:
+ result = camel_dict_to_snake_dict(result)
+ module.exit_json(changed=changed, efs=result)
+
+from ansible.module_utils.basic import *
+from ansible.module_utils.ec2 import *
+
+if __name__ == '__main__':
+ main()
diff --git a/lib/ansible/modules/extras/cloud/amazon/efs_facts.py b/lib/ansible/modules/extras/cloud/amazon/efs_facts.py
new file mode 100644
index 0000000000..1720ec5d80
--- /dev/null
+++ b/lib/ansible/modules/extras/cloud/amazon/efs_facts.py
@@ -0,0 +1,377 @@
+#!/usr/bin/python
+# This file is part of Ansible
+#
+# Ansible is free software: you can redistribute it and/or modify
+# it under the terms of the GNU General Public License as published by
+# the Free Software Foundation, either version 3 of the License, or
+# (at your option) any later version.
+#
+# Ansible is distributed in the hope that it will be useful,
+# but WITHOUT ANY WARRANTY; without even the implied warranty of
+# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+# GNU General Public License for more details.
+#
+# You should have received a copy of the GNU General Public License
+# along with Ansible. If not, see <http://www.gnu.org/licenses/>.
+
+DOCUMENTATION = '''
+---
+module: efs_facts
+short_description: Get information about Amazon EFS file systems
+description:
+ - Module searches Amazon EFS file systems
+version_added: "2.2"
+requirements: [ boto3 ]
+author:
+ - "Ryan Sydnor (@ryansydnor)"
+options:
+ name:
+ description:
+ - Creation Token of Amazon EFS file system.
+ required: false
+ default: None
+ id:
+ description:
+ - ID of Amazon EFS.
+ required: false
+ default: None
+ tags:
+ description:
+ - |
+ Tags of the Amazon EFS file system, defined as a dictionary.
+ required: false
+ default: None
+ targets:
+ description:
+ - |
+ List of mount targets. It should be a list of dictionaries, and each dictionary should include the following attributes:
+ - SubnetId - Mandatory. The ID of the subnet to add the mount target in.
+ - IpAddress - Optional. A valid IPv4 address within the address range of the specified subnet.
+ - SecurityGroups - Optional. List of security group IDs, of the form "sg-xxxxxxxx". These must be for the same VPC as the subnet specified.
+ required: false
+ default: None
+extends_documentation_fragment:
+ - aws
+'''
+
+EXAMPLES = '''
+# Find all existing EFS file systems
+- efs_facts:
+ register: result
+
+- efs_facts:
+ name: myTestNameTag
+
+- efs_facts:
+ id: fs-1234abcd
+
+# Searching all EFS instances with tag Name = 'myTestNameTag', in subnet 'subnet-1a2b3c4d' and with security group 'sg-4d3c2b1a'
+- efs_facts:
+ tags:
+ name: myTestNameTag
+ targets:
+ - subnet-1a2b3c4d
+ - sg-4d3c2b1a
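+
+# Illustrative only: iterate over the facts gathered by the first
+# example above; key names follow the RETURN block below.
+- debug:
+ msg: "{{ item.mount_point }}"
+ with_items: "{{ result.ansible_facts.efs }}"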
+'''
+
+RETURN = '''
+creation_time:
+ description: timestamp of creation date
+ returned:
+ type: datetime
+ sample: 2015-11-16 07:30:57-05:00
+creation_token:
+ description: EFS creation token
+ returned:
+ type: UUID
+ sample: console-88609e04-9a0e-4a2e-912c-feaa99509961
+file_system_id:
+ description: ID of the file system
+ returned:
+ type: unique ID
+ sample: fs-xxxxxxxx
+life_cycle_state:
+ description: state of the EFS file system
+ returned:
+ type: str
+ sample: creating, available, deleting, deleted
+mount_point:
+ description: url of file system
+ returned:
+ type: str
+ sample: .fs-xxxxxxxx.efs.us-west-2.amazonaws.com:/
+mount_targets:
+ description: list of mount targets
+ returned:
+ type: list of dicts
+ sample:
+ [
+ {
+ "file_system_id": "fs-a7ad440e",
+ "ip_address": "172.31.17.173",
+ "life_cycle_state": "available",
+ "mount_target_id": "fsmt-d8907871",
+ "network_interface_id": "eni-6e387e26",
+ "owner_id": "740748460359",
+ "security_groups": [
+ "sg-a30b22c6"
+ ],
+ "subnet_id": "subnet-e265c895"
+ },
+ ...
+ ]
+name:
+ description: name of the file system
+ returned:
+ type: str
+ sample: my-efs
+number_of_mount_targets:
+ description: the number of targets mounted
+ returned:
+ type: int
+ sample: 3
+owner_id:
+ description: AWS account ID of EFS owner
+ returned:
+ type: str
+ sample: XXXXXXXXXXXX
+size_in_bytes:
+ description: size of the file system in bytes as of a timestamp
+ returned:
+ type: dict
+ sample:
+ {
+ "timestamp": "2015-12-21 13:59:59-05:00",
+ "value": 12288
+ }
+performance_mode:
+ description: performance mode of the file system
+ returned:
+ type: str
+ sample: "generalPurpose"
+tags:
+ description: tags on the efs instance
+ returned:
+ type: dict
+ sample:
+ {
+ "name": "my-efs",
+ "key": "Value"
+ }
+
+'''
+
+
+from time import sleep
+from collections import defaultdict
+
+try:
+ from botocore.exceptions import ClientError
+ import boto3
+ HAS_BOTO3 = True
+except ImportError as e:
+ HAS_BOTO3 = False
+
+class EFSConnection(object):
+ STATE_CREATING = 'creating'
+ STATE_AVAILABLE = 'available'
+ STATE_DELETING = 'deleting'
+ STATE_DELETED = 'deleted'
+
+ def __init__(self, module, region, **aws_connect_params):
+ try:
+ self.connection = boto3_conn(module, conn_type='client',
+ resource='efs', region=region,
+ **aws_connect_params)
+ except Exception as e:
+ module.fail_json(msg="Failed to connect to AWS: %s" % str(e))
+
+ self.region = region
+
+ def get_file_systems(self, **kwargs):
+ """
+ Returns generator of file systems including all attributes of FS
+ """
+ items = iterate_all(
+ 'FileSystems',
+ self.connection.describe_file_systems,
+ **kwargs
+ )
+ for item in items:
+ item['CreationTime'] = str(item['CreationTime'])
+ """
+ Suffix of network path to be used as NFS device for mount. More detail here:
+ http://docs.aws.amazon.com/efs/latest/ug/gs-step-three-connect-to-ec2-instance.html
+ """
+ item['MountPoint'] = '.%s.efs.%s.amazonaws.com:/' % (item['FileSystemId'], self.region)
+ if 'Timestamp' in item['SizeInBytes']:
+ item['SizeInBytes']['Timestamp'] = str(item['SizeInBytes']['Timestamp'])
+ if item['LifeCycleState'] == self.STATE_AVAILABLE:
+ item['Tags'] = self.get_tags(FileSystemId=item['FileSystemId'])
+ item['MountTargets'] = list(self.get_mount_targets(FileSystemId=item['FileSystemId']))
+ else:
+ item['Tags'] = {}
+ item['MountTargets'] = []
+ yield item
+
+ def get_tags(self, **kwargs):
+ """
+ Returns tag list for selected instance of EFS
+ """
+ tags = iterate_all(
+ 'Tags',
+ self.connection.describe_tags,
+ **kwargs
+ )
+ return dict((tag['Key'], tag['Value']) for tag in tags)
+
+ def get_mount_targets(self, **kwargs):
+ """
+ Returns mount targets for selected instance of EFS
+ """
+ targets = iterate_all(
+ 'MountTargets',
+ self.connection.describe_mount_targets,
+ **kwargs
+ )
+ for target in targets:
+ if target['LifeCycleState'] == self.STATE_AVAILABLE:
+ target['SecurityGroups'] = list(self.get_security_groups(
+ MountTargetId=target['MountTargetId']
+ ))
+ else:
+ target['SecurityGroups'] = []
+ yield target
+
+ def get_security_groups(self, **kwargs):
+ """
+ Returns security groups for selected instance of EFS
+ """
+ return iterate_all(
+ 'SecurityGroups',
+ self.connection.describe_mount_target_security_groups,
+ **kwargs
+ )
+
+
+def iterate_all(attr, map_method, **kwargs):
+ """
+ Creates an iterator over a paginated boto3 result set, retrying on throttling
+ """
+ args = dict((key, value) for (key, value) in kwargs.items() if value is not None)
+ wait = 1
+ while True:
+ try:
+ data = map_method(**args)
+ for elm in data[attr]:
+ yield elm
+ if 'NextMarker' in data:
+ args['Marker'] = data['NextMarker']
+ continue
+ break
+ except ClientError as e:
+ # back off and retry on throttling; re-raise anything else
+ if e.response['Error']['Code'] == "ThrottlingException" and wait < 600:
+ sleep(wait)
+ wait = wait * 2
+ continue
+ raise
+
+
+def prefix_to_attr(attr_id):
+ """
+ Helper method to convert ID prefix to mount target attribute
+ """
+ attr_by_prefix = {
+ 'fsmt-': 'MountTargetId',
+ 'subnet-': 'SubnetId',
+ 'eni-': 'NetworkInterfaceId',
+ 'sg-': 'SecurityGroups'
+ }
+ prefix = first_or_default(filter(
+ lambda pref: str(attr_id).startswith(pref),
+ attr_by_prefix.keys()
+ ))
+ if prefix:
+ return attr_by_prefix[prefix]
+ return 'IpAddress'
+
+def first_or_default(items, default=None):
+ """
+ Helper method to fetch first element of list (if exists)
+ """
+ for item in items:
+ return item
+ return default
+
+def has_tags(available, required):
+ """
+ Helper method to determine if tag requested already exists
+ """
+ for key, value in required.items():
+ if key not in available or value != available[key]:
+ return False
+ return True
+
+def has_targets(available, required):
+ """
+ Helper method to determine if the requested mount target already exists
+ """
+ grouped = group_list_of_dict(available)
+ for (value, field) in required:
+ if field not in grouped or value not in grouped[field]:
+ return False
+ return True
+
+def group_list_of_dict(array):
+ """
+ Helper method to group a list of dicts into a dict mapping each key to all of its values
+ """
+ result = defaultdict(list)
+ for item in array:
+ for key, value in item.items():
+ result[key] += value if isinstance(value, list) else [value]
+ return result
+
+
+def main():
+ """
+ Module action handler
+ """
+ argument_spec = ec2_argument_spec()
+ argument_spec.update(dict(
+ id=dict(required=False, type='str', default=None),
+ name=dict(required=False, type='str', default=None),
+ tags=dict(required=False, type="dict", default={}),
+ targets=dict(required=False, type="list", default=[])
+ ))
+
+ module = AnsibleModule(argument_spec=argument_spec)
+
+ if not HAS_BOTO3:
+ module.fail_json(msg='boto3 required for this module')
+
+ region, _, aws_connect_params = get_aws_connection_info(module, boto3=True)
+ connection = EFSConnection(module, region, **aws_connect_params)
+
+ name = module.params.get('name')
+ fs_id = module.params.get('id')
+ tags = module.params.get('tags')
+ targets = module.params.get('targets')
+
+ file_systems_info = connection.get_file_systems(FileSystemId=fs_id, CreationToken=name)
+
+ if tags:
+ file_systems_info = filter(lambda item: has_tags(item['Tags'], tags), file_systems_info)
+
+ if targets:
+ targets = [(item, prefix_to_attr(item)) for item in targets]
+ file_systems_info = filter(lambda item:
+ has_targets(item['MountTargets'], targets), file_systems_info)
+
+ file_systems_info = [camel_dict_to_snake_dict(x) for x in file_systems_info]
+ module.exit_json(changed=False, ansible_facts={'efs': file_systems_info})
+
+from ansible.module_utils.basic import *
+from ansible.module_utils.ec2 import *
+
+if __name__ == '__main__':
+ main()
diff --git a/lib/ansible/modules/extras/cloud/amazon/execute_lambda.py b/lib/ansible/modules/extras/cloud/amazon/execute_lambda.py
new file mode 100644
index 0000000000..bd1b9288e2
--- /dev/null
+++ b/lib/ansible/modules/extras/cloud/amazon/execute_lambda.py
@@ -0,0 +1,281 @@
+#!/usr/bin/python
+# This file is part of Ansible
+#
+# Ansible is free software: you can redistribute it and/or modify
+# it under the terms of the GNU General Public License as published by
+# the Free Software Foundation, either version 3 of the License, or
+# (at your option) any later version.
+#
+# Ansible is distributed in the hope that it will be useful,
+# but WITHOUT ANY WARRANTY; without even the implied warranty of
+# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+# GNU General Public License for more details.
+#
+# You should have received a copy of the GNU General Public License
+# along with Ansible. If not, see <http://www.gnu.org/licenses/>.
+
+DOCUMENTATION = '''
+---
+module: execute_lambda
+short_description: Execute an AWS Lambda function
+description:
+ - This module executes AWS Lambda functions, allowing synchronous and asynchronous
+ invocation.
+version_added: "2.2"
+extends_documentation_fragment:
+ - aws
+author: "Ryan Scott Brown (@ryansb) <ryansb@redhat.com>"
+requirements:
+ - python >= 2.6
+ - boto3
+notes:
+ - Async invocation will always return an empty C(output) key.
+ - Synchronous invocation may result in a function timeout, resulting in an
+ empty C(output) key.
+options:
+ name:
+ description:
+ - The name of the function to be invoked. This can only be used for
+ invocations within the calling account. To invoke a function in another
+ account, use I(function_arn) to specify the full ARN.
+ required: false
+ default: None
+ function_arn:
+ description:
+ - The full ARN of the function to be invoked
+ required: false
+ default: None
+ tail_log:
+ description:
+ - If C(tail_log=true), the result of the task will include the last 4 KB
+ of the CloudWatch log for the function execution. Log tailing only
+ works if you use synchronous invocation C(wait=true). This is usually
+ used for development or testing Lambdas.
+ required: false
+ default: false
+ wait:
+ description:
+ - Whether to wait for the function results or not. If I(wait) is false,
+ the task will not return any results. To wait for the Lambda function
+ to complete, set C(wait=true) and the result will be available in the
+ I(output) key.
+ required: false
+ default: true
+ dry_run:
+ description:
+ - Do not *actually* invoke the function. A C(DryRun) call will check that
+ the caller has permissions to call the function, especially for
+ checking cross-account permissions.
+ required: false
+ default: False
+ version_qualifier:
+ description:
+ - Which version/alias of the function to run. This defaults to the
+ C(LATEST) revision, but can be set to any existing version or alias.
+ See U(https://docs.aws.amazon.com/lambda/latest/dg/versioning-aliases.html)
+ for details.
+ required: false
+ default: LATEST
+ payload:
+ description:
+ - A dictionary in any form to be provided as input to the Lambda function.
+ required: false
+ default: {}
+'''
+
+EXAMPLES = '''
+- execute_lambda:
+ name: test-function
+ # the payload is automatically serialized and sent to the function
+ payload:
+ foo: bar
+ value: 8
+ register: response
+
+# Test that you have sufficient permissions to execute a Lambda function in
+# another account
+- execute_lambda:
+ function_arn: arn:aws:lambda:us-east-1:123456789012:function/some-function
+ dry_run: true
+
+- execute_lambda:
+ name: test-function
+ payload:
+ foo: bar
+ value: 8
+ wait: true
+ tail_log: true
+ register: response
+ # the response will have a `logs` key that will contain a log (up to 4KB) of the function execution in Lambda.
+
+- execute_lambda: name=test-function version_qualifier=PRODUCTION
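+
+# Illustrative only: a synchronous invocation exposes the function's
+# JSON result under the registered variable's output key (see RETURN).
+- debug:
+ msg: "{{ response.output }}"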
+'''
+
+RETURN = '''
+output:
+ description: Function output if wait=true and the function returns a value
+ returned: success
+ type: dict
+ sample: "{ 'output': 'something' }"
+logs:
+ description: The last 4KB of the function logs. Only provided if I(tail_log) is true
+ type: string
+status:
+ description: C(StatusCode) of API call exit (200 for synchronous invokes, 202 for async)
+ type: int
+ sample: 200
+'''
+
+import base64
+import json
+import traceback
+
+try:
+ # botocore is referenced in the exception handling below, so import it too
+ import botocore
+ import boto3
+ HAS_BOTO3 = True
+except ImportError:
+ HAS_BOTO3 = False
+
+
+def main():
+ argument_spec = ec2_argument_spec()
+ argument_spec.update(dict(
+ name = dict(),
+ function_arn = dict(),
+ wait = dict(choices=BOOLEANS, default=True, type='bool'),
+ tail_log = dict(choices=BOOLEANS, default=False, type='bool'),
+ dry_run = dict(choices=BOOLEANS, default=False, type='bool'),
+ version_qualifier = dict(),
+ payload = dict(default={}, type='dict'),
+ ))
+ module = AnsibleModule(
+ argument_spec=argument_spec,
+ supports_check_mode=True,
+ mutually_exclusive=[
+ ['name', 'function_arn'],
+ ]
+ )
+
+ if not HAS_BOTO3:
+ module.fail_json(msg='boto3 required for this module')
+
+ name = module.params.get('name')
+ function_arn = module.params.get('function_arn')
+ await_return = module.params.get('wait')
+ dry_run = module.params.get('dry_run')
+ tail_log = module.params.get('tail_log')
+ version_qualifier = module.params.get('version_qualifier')
+ payload = module.params.get('payload')
+
+ if not (name or function_arn):
+ module.fail_json(msg="Must provide either a function_arn or a name to invoke.")
+
+ region, ec2_url, aws_connect_kwargs = get_aws_connection_info(module, boto3=HAS_BOTO3)
+ if not region:
+ module.fail_json(msg="The AWS region must be specified as an "
+ "environment variable or in the AWS credentials "
+ "profile.")
+
+ try:
+ client = boto3_conn(module, conn_type='client', resource='lambda',
+ region=region, endpoint=ec2_url, **aws_connect_kwargs)
+ except (botocore.exceptions.ClientError, botocore.exceptions.ValidationError) as e:
+ module.fail_json(msg="Failure connecting boto3 to AWS", exception=traceback.format_exc(e))
+
+ invoke_params = {}
+
+ if await_return:
+ # await response
+ invoke_params['InvocationType'] = 'RequestResponse'
+ else:
+ # fire and forget
+ invoke_params['InvocationType'] = 'Event'
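+ # A dry run (or Ansible check mode) only verifies that the caller may invoke
+ # the function; the function itself is never executed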
+ if dry_run or module.check_mode:
+ # dry_run overrides invocation type
+ invoke_params['InvocationType'] = 'DryRun'
+
+ if tail_log and await_return:
+ invoke_params['LogType'] = 'Tail'
+ elif tail_log and not await_return:
+ module.fail_json(msg="The `tail_log` parameter is only available if "
+ "the invocation waits for the function to complete. "
+ "Set `wait` to true or turn off `tail_log`.")
+ else:
+ invoke_params['LogType'] = 'None'
+
+ if version_qualifier:
+ invoke_params['Qualifier'] = version_qualifier
+
+ if payload:
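+ # client.invoke() expects Payload as a JSON string, so serialize the dict here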
+ invoke_params['Payload'] = json.dumps(payload)
+
+ if function_arn:
+ invoke_params['FunctionName'] = function_arn
+ elif name:
+ invoke_params['FunctionName'] = name
+
+ try:
+ response = client.invoke(**invoke_params)
+ except botocore.exceptions.ClientError as ce:
+ if ce.response['Error']['Code'] == 'ResourceNotFoundException':
+ module.fail_json(msg="Could not find Lambda to execute. Make sure "
+ "the ARN is correct and your profile has "
+ "permissions to execute this function.",
+ exception=traceback.format_exc(ce))
+ module.fail_json("Client-side error when invoking Lambda, check inputs and specific error",
+ exception=traceback.format_exc(ce))
+ except botocore.exceptions.ParamValidationError as ve:
+ module.fail_json(msg="Parameters to `invoke` failed to validate",
+ exception=traceback.format_exc())
+ except Exception as e:
+ module.fail_json(msg="Unexpected failure while invoking Lambda function",
+ exception=traceback.format_exc())
+
+ results = {
+ 'logs': '',
+ 'status': response['StatusCode'],
+ 'output': '',
+ }
+
+ if response.get('LogResult'):
+ try:
+ # logs are base64 encoded in the API response
+ results['logs'] = base64.b64decode(response.get('LogResult', ''))
+ except Exception as e:
+ module.fail_json(msg="Failed while decoding logs", exception=traceback.format_exc(e))
+
+ if invoke_params['InvocationType'] == 'RequestResponse':
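+ # the response Payload is a file-like object; read it and parse the JSON the function returned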
+ try:
+ results['output'] = json.loads(response['Payload'].read())
+ except Exception as e:
+ module.fail_json(msg="Failed while decoding function return value", exception=traceback.format_exc(e))
+
+ if isinstance(results.get('output'), dict) and any(
+ [results['output'].get('stackTrace'), results['output'].get('errorMessage')]):
+ # AWS sends back stack traces and error messages when a function failed
+ # in a RequestResponse (synchronous) context.
+ template = ("Function executed, but there was an error in the Lambda function. "
+ "Message: {errmsg}, Type: {type}, Stack Trace: {trace}")
+ error_data = {
+ # format the stacktrace sent back as an array into a multiline string
+ 'trace': '\n'.join(
+ [' '.join([
+ str(x) for x in line # cast line numbers to strings
+ ]) for line in results.get('output', {}).get('stackTrace', [])]
+ ),
+ 'errmsg': results['output'].get('errorMessage'),
+ 'type': results['output'].get('errorType')
+ }
+ module.fail_json(msg=template.format(**error_data), result=results)
+
+ module.exit_json(changed=True, result=results)
+
+# import module snippets
+from ansible.module_utils.basic import *
+from ansible.module_utils.ec2 import *
+
+if __name__ == '__main__':
+ main()
diff --git a/lib/ansible/modules/extras/cloud/amazon/iam_mfa_device_facts.py b/lib/ansible/modules/extras/cloud/amazon/iam_mfa_device_facts.py
new file mode 100644
index 0000000000..2b97d0bee4
--- /dev/null
+++ b/lib/ansible/modules/extras/cloud/amazon/iam_mfa_device_facts.py
@@ -0,0 +1,118 @@
+#!/usr/bin/python
+# This file is part of Ansible
+#
+# Ansible is free software: you can redistribute it and/or modify
+# it under the terms of the GNU General Public License as published by
+# the Free Software Foundation, either version 3 of the License, or
+# (at your option) any later version.
+#
+# Ansible is distributed in the hope that it will be useful,
+# but WITHOUT ANY WARRANTY; without even the implied warranty of
+# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+# GNU General Public License for more details.
+#
+# You should have received a copy of the GNU General Public License
+# along with Ansible. If not, see <http://www.gnu.org/licenses/>.
+
+DOCUMENTATION = '''
+---
+module: iam_mfa_device_facts
+short_description: List the MFA (Multi-Factor Authentication) devices registered for a user
+description:
+ - List the MFA (Multi-Factor Authentication) devices registered for a user
+version_added: "2.2"
+author: Victor Costan (@pwnall)
+options:
+ user_name:
+ description:
+ - The name of the user whose MFA devices will be listed
+ required: false
+ default: null
+extends_documentation_fragment:
+ - aws
+ - ec2
+requirements:
+ - boto3
+ - botocore
+'''
+
+RETURN = """
+mfa_devices:
+ description: The MFA devices registered for the given user
+ returned: always
+ type: list
+ sample:
+ - enable_date: "2016-03-11T23:25:36+00:00"
+ serial_number: arn:aws:iam::085120003701:mfa/pwnall
+ user_name: pwnall
+ - enable_date: "2016-03-11T23:25:37+00:00"
+ serial_number: arn:aws:iam::085120003702:mfa/pwnall
+ user_name: pwnall
+"""
+
+EXAMPLES = '''
+# Note: These examples do not set authentication details, see the AWS Guide for details.
+
+# List MFA devices (more details: http://docs.aws.amazon.com/IAM/latest/APIReference/API_ListMFADevices.html)
+iam_mfa_device_facts:
+register: mfa_devices
+
+# Assume an existing role (more details: http://docs.aws.amazon.com/STS/latest/APIReference/API_AssumeRole.html)
+sts_assume_role:
+ mfa_serial_number: "{{ mfa_devices.mfa_devices[0].serial_number }}"
+ role_arn: "arn:aws:iam::123456789012:role/someRole"
+ role_session_name: "someRoleSession"
+register: assumed_role
+'''
+
+try:
+ import boto3
+ from botocore.exceptions import ClientError
+ HAS_BOTO3 = True
+except ImportError:
+ HAS_BOTO3 = False
+
+
+def list_mfa_devices(connection, module):
+ user_name = module.params.get('user_name')
+ changed = False
+
+ args = {}
+ if user_name is not None:
+ args['UserName'] = user_name
+ try:
+ response = connection.list_mfa_devices(**args)
+ except ClientError as e:
+ module.fail_json(msg=e.message, **camel_dict_to_snake_dict(e.response))
+
+ module.exit_json(changed=changed, **camel_dict_to_snake_dict(response))
+
+
+def main():
+ argument_spec = ec2_argument_spec()
+ argument_spec.update(
+ dict(
+ user_name=dict(required=False, default=None)
+ )
+ )
+
+ module = AnsibleModule(argument_spec=argument_spec)
+
+ if not HAS_BOTO3:
+ module.fail_json(msg='boto3 required for this module')
+
+ region, ec2_url, aws_connect_kwargs = get_aws_connection_info(module, boto3=True)
+ if region:
+ connection = boto3_conn(module, conn_type='client', resource='iam', region=region, endpoint=ec2_url, **aws_connect_kwargs)
+ else:
+ module.fail_json(msg="region must be specified")
+
+ list_mfa_devices(connection, module)
+
+
+# import module snippets
+from ansible.module_utils.basic import *
+from ansible.module_utils.ec2 import *
+
+if __name__ == '__main__':
+ main()
diff --git a/lib/ansible/modules/extras/cloud/amazon/iam_server_certificate_facts.py b/lib/ansible/modules/extras/cloud/amazon/iam_server_certificate_facts.py
new file mode 100644
index 0000000000..259b515320
--- /dev/null
+++ b/lib/ansible/modules/extras/cloud/amazon/iam_server_certificate_facts.py
@@ -0,0 +1,172 @@
+#!/usr/bin/python
+# This file is part of Ansible
+#
+# Ansible is free software: you can redistribute it and/or modify
+# it under the terms of the GNU General Public License as published by
+# the Free Software Foundation, either version 3 of the License, or
+# (at your option) any later version.
+#
+# Ansible is distributed in the hope that it will be useful,
+# but WITHOUT ANY WARRANTY; without even the implied warranty of
+# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+# GNU General Public License for more details.
+#
+# You should have received a copy of the GNU General Public License
+# along with Ansible. If not, see <http://www.gnu.org/licenses/>.
+
+DOCUMENTATION = '''
+---
+module: iam_server_certificate_facts
+short_description: Retrieve the facts of a server certificate
+description:
+ - Retrieve the attributes of a server certificate
+version_added: "2.2"
+author: "Allen Sanabria (@linuxdynasty)"
+requirements: [boto3, botocore]
+options:
+ name:
+ description:
+ - The name of the server certificate you are retrieving attributes for.
+ required: true
+extends_documentation_fragment:
+ - aws
+ - ec2
+'''
+
+EXAMPLES = '''
+# Retrieve server certificate
+- iam_server_certificate_facts:
+ name: production-cert
+ register: server_cert
+
+# Fail if the server certificate name was not found
+- iam_server_certificate_facts:
+ name: production-cert
+ register: server_cert
+ failed_when: "{{ server_cert.results | length == 0 }}"
+'''
+
+RETURN = '''
+server_certificate_id:
+ description: The 21 character certificate id
+ returned: success
+ type: str
+ sample: "ADWAJXWTZAXIPIMQHMJPO"
+certificate_body:
+ description: The PEM encoded certificate body
+ returned: success
+ type: str
+ sample: "-----BEGIN CERTIFICATE-----\nbunch of random data\n-----END CERTIFICATE-----"
+server_certificate_name:
+ description: The name of the server certificate
+ returned: success
+ type: str
+ sample: "server-cert-name"
+arn:
+ description: The Amazon resource name of the server certificate
+ returned: success
+ type: str
+ sample: "arn:aws:iam::911277865346:server-certificate/server-cert-name"
+path:
+ description: The path of the server certificate
+ returned: success
+ type: str
+ sample: "/"
+expiration:
+ description: The date and time this server certificate will expire, in ISO 8601 format.
+ returned: success
+ type: str
+ sample: "2017-06-15T12:00:00+00:00"
+upload_date:
+ description: The date and time this server certificate was uploaded, in ISO 8601 format.
+ returned: success
+ type: str
+ sample: "2015-04-25T00:36:40+00:00"
+'''
+
+
+try:
+ import boto3
+ import botocore.exceptions
+ HAS_BOTO3 = True
+except ImportError:
+ HAS_BOTO3 = False
+
+
+def get_server_certs(iam, name=None):
+ """Retrieve the attributes of a server certificate if it exists or all certs.
+ Args:
+ iam (botocore.client.IAM): The boto3 iam instance.
+
+ Kwargs:
+ name (str): The name of the server certificate.
+
+ Basic Usage:
+ >>> import boto3
+ >>> iam = boto3.client('iam')
+ >>> name = "server-cert-name"
+ >>> results = get_server_certs(iam, name)
+ {
+ "upload_date": "2015-04-25T00:36:40+00:00",
+ "server_certificate_id": "ADWAJXWTZAXIPIMQHMJPO",
+ "certificate_body": "-----BEGIN CERTIFICATE-----\nbunch of random data\n-----END CERTIFICATE-----",
+ "server_certificate_name": "server-cert-name",
+ "expiration": "2017-06-15T12:00:00+00:00",
+ "path": "/",
+ "arn": "arn:aws:iam::911277865346:server-certificate/server-cert-name"
+ }
+ """
+ results = dict()
+ try:
+ if name:
+ server_certs = [iam.get_server_certificate(ServerCertificateName=name)['ServerCertificate']]
+ else:
+ server_certs = iam.list_server_certificates()['ServerCertificateMetadataList']
+
+ for server_cert in server_certs:
+ if not name:
+ server_cert = iam.get_server_certificate(ServerCertificateName=server_cert['ServerCertificateName'])['ServerCertificate']
+ cert_md = server_cert['ServerCertificateMetadata']
+ results[cert_md['ServerCertificateName']] = {
+ 'certificate_body': server_cert['CertificateBody'],
+ 'server_certificate_id': cert_md['ServerCertificateId'],
+ 'server_certificate_name': cert_md['ServerCertificateName'],
+ 'arn': cert_md['Arn'],
+ 'path': cert_md['Path'],
+ 'expiration': cert_md['Expiration'].isoformat(),
+ 'upload_date': cert_md['UploadDate'].isoformat(),
+ }
+
+ except botocore.exceptions.ClientError:
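+ # a missing certificate (or an access error) simply results in an empty dict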
+ pass
+
+ return results
+
+
+def main():
+ argument_spec = ec2_argument_spec()
+ argument_spec.update(dict(
+ name=dict(type='str'),
+ ))
+
+ module = AnsibleModule(argument_spec=argument_spec)
+
+ if not HAS_BOTO3:
+ module.fail_json(msg='boto3 required for this module')
+
+ try:
+ region, ec2_url, aws_connect_kwargs = get_aws_connection_info(module, boto3=True)
+ iam = boto3_conn(module, conn_type='client', resource='iam', region=region, endpoint=ec2_url, **aws_connect_kwargs)
+ except botocore.exceptions.ClientError as e:
+ module.fail_json(msg="Boto3 Client Error - " + str(e.msg))
+
+ cert_name = module.params.get('name')
+ results = get_server_certs(iam, cert_name)
+ module.exit_json(results=results)
+
+
+# import module snippets
+from ansible.module_utils.basic import *
+from ansible.module_utils.ec2 import *
+
+if __name__ == '__main__':
+ main()
diff --git a/lib/ansible/modules/extras/cloud/amazon/kinesis_stream.py b/lib/ansible/modules/extras/cloud/amazon/kinesis_stream.py
new file mode 100644
index 0000000000..37f20f8c11
--- /dev/null
+++ b/lib/ansible/modules/extras/cloud/amazon/kinesis_stream.py
@@ -0,0 +1,1098 @@
+#!/usr/bin/python
+#
+# This is a free software: you can redistribute it and/or modify
+# it under the terms of the GNU General Public License as published by
+# the Free Software Foundation, either version 3 of the License, or
+# (at your option) any later version.
+#
+# This Ansible library is distributed in the hope that it will be useful,
+# but WITHOUT ANY WARRANTY; without even the implied warranty of
+# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+# GNU General Public License for more details.
+#
+# You should have received a copy of the GNU General Public License
+# along with this library. If not, see <http://www.gnu.org/licenses/>.
+
+DOCUMENTATION = '''
+---
+module: kinesis_stream
+short_description: Manage a Kinesis Stream.
+description:
+ - Create or Delete a Kinesis Stream.
+ - Update the retention period of a Kinesis Stream.
+ - Update Tags on a Kinesis Stream.
+version_added: "2.2"
+author: Allen Sanabria (@linuxdynasty)
+options:
+ name:
+ description:
+ - "The name of the Kinesis Stream you are managing."
+ default: None
+ required: true
+ shards:
+ description:
+ - "The number of shards you want to have with this stream. This can not
+ be modified after being created."
+ - "This is required when state == present"
+ required: false
+ default: None
+ retention_period:
+ description:
+ - "The default retention period is 24 hours and can not be less than 24
+ hours."
+ - "The retention period can be modified during any point in time."
+ required: false
+ default: None
+ state:
+ description:
+ - "Create or Delete the Kinesis Stream."
+ required: false
+ default: present
+ choices: [ 'present', 'absent' ]
+ wait:
+ description:
+ - Wait for operation to complete before returning.
+ required: false
+ default: true
+ wait_timeout:
+ description:
+ - How many seconds to wait for an operation to complete before timing out.
+ required: false
+ default: 300
+ tags:
+ description:
+ - "A dictionary of resource tags of the form: { tag1: value1, tag2: value2 }."
+ required: false
+ default: null
+ aliases: [ "resource_tags" ]
+extends_documentation_fragment:
+ - aws
+ - ec2
+'''
+
+EXAMPLES = '''
+# Note: These examples do not set authentication details, see the AWS Guide for details.
+
+# Basic creation example:
+- name: Set up Kinesis Stream with 10 shards and wait for the stream to become ACTIVE
+ kinesis_stream:
+ name: test-stream
+ shards: 10
+ wait: yes
+ wait_timeout: 600
+ register: test_stream
+
+# Basic creation example with tags:
+- name: Set up Kinesis Stream with 10 shards, tag the environment, and wait for the stream to become ACTIVE
+ kinesis_stream:
+ name: test-stream
+ shards: 10
+ tags:
+ Env: development
+ wait: yes
+ wait_timeout: 600
+ register: test_stream
+
+# Basic creation example with tags and increase the retention period from the default 24 hours to 48 hours:
+- name: Set up Kinesis Stream with 10 shards, tag the environment, increase the retention period and wait for the stream to become ACTIVE
+ kinesis_stream:
+ name: test-stream
+ retention_period: 48
+ shards: 10
+ tags:
+ Env: development
+ wait: yes
+ wait_timeout: 600
+ register: test_stream
+
+# Basic delete example:
+- name: Delete Kinesis Stream test-stream and wait for it to finish deleting.
+ kinesis_stream:
+ name: test-stream
+ state: absent
+ wait: yes
+ wait_timeout: 600
+ register: test_stream
+'''
+
+RETURN = '''
+stream_name:
+ description: The name of the Kinesis Stream.
+ returned: when state == present.
+ type: string
+ sample: "test-stream"
+stream_arn:
+ description: The amazon resource identifier
+ returned: when state == present.
+ type: string
+ sample: "arn:aws:kinesis:east-side:123456789:stream/test-stream"
+stream_status:
+ description: The current state of the Kinesis Stream.
+ returned: when state == present.
+ type: string
+ sample: "ACTIVE"
+retention_period_hours:
+ description: Number of hours messages will be kept for a Kinesis Stream.
+ returned: when state == present.
+ type: int
+ sample: 24
+tags:
+ description: Dictionary containing all the tags associated with the Kinesis stream.
+ returned: when state == present.
+ type: dict
+ sample: {
+ "Name": "Splunk",
+ "Env": "development"
+ }
+'''
+
+try:
+ import botocore
+ import boto3
+ HAS_BOTO3 = True
+except ImportError:
+ HAS_BOTO3 = False
+
+import re
+import datetime
+import time
+from functools import reduce
+
+
+def convert_to_lower(data):
+ """Convert all uppercase keys in dict with lowercase_
+ Args:
+ data (dict): Dictionary with keys that have upper cases in them
+ Example.. FooBar == foo_bar
+ if a val is of type datetime.datetime, it will be converted to
+ the ISO 8601
+
+ Basic Usage:
+ >>> test = {'FooBar': []}
+ >>> test = convert_to_lower(test)
+ {
+ 'foo_bar': []
+ }
+
+ Returns:
+ Dictionary
+ """
+ results = dict()
+ if isinstance(data, dict):
+ for key, val in data.items():
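+ # prefix each run of capitals with an underscore, then lowercase the result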
+ key = re.sub(r'(([A-Z]{1,3}){1})', r'_\1', key).lower()
+ if key[0] == '_':
+ key = key[1:]
+ if isinstance(val, datetime.datetime):
+ results[key] = val.isoformat()
+ elif isinstance(val, dict):
+ results[key] = convert_to_lower(val)
+ elif isinstance(val, list):
+ converted = list()
+ for item in val:
+ converted.append(convert_to_lower(item))
+ results[key] = converted
+ else:
+ results[key] = val
+ return results
+
+
+def make_tags_in_proper_format(tags):
+ """Take a dictionary of tags and convert them into the AWS Tags format.
+ Args:
+ tags (list): The tags you want applied.
+
+ Basic Usage:
+ >>> tags = [{'Key': 'env', 'Value': 'development'}]
+ >>> make_tags_in_proper_format(tags)
+ {
+ "env": "development",
+ }
+
+ Returns:
+ Dict
+ """
+ formatted_tags = dict()
+ for tag in tags:
+ formatted_tags[tag.get('Key')] = tag.get('Value')
+
+ return formatted_tags
+
+
+def make_tags_in_aws_format(tags):
+ """Take a dictionary of tags and convert them into the AWS Tags format.
+ Args:
+ tags (dict): The tags you want applied.
+
+ Basic Usage:
+ >>> tags = {'env': 'development', 'service': 'web'}
+ >>> make_tags_in_aws_format(tags)
+ [
+ {
+ "Value": "web",
+ "Key": "service"
+ },
+ {
+ "Value": "development",
+ "key": "env"
+ }
+ ]
+
+ Returns:
+ List
+ """
+ formatted_tags = list()
+ for key, val in tags.items():
+ formatted_tags.append({
+ 'Key': key,
+ 'Value': val
+ })
+
+ return formatted_tags
+
+
+def get_tags(client, stream_name, check_mode=False):
+ """Retrieve the tags for a Kinesis Stream.
+ Args:
+ client (botocore.client.Kinesis): Boto3 client.
+ stream_name (str): Name of the Kinesis stream.
+
+ Kwargs:
+ check_mode (bool): This will pass DryRun as one of the parameters to the aws api.
+ default=False
+
+ Basic Usage:
+ >>> client = boto3.client('kinesis')
+ >>> stream_name = 'test-stream'
+ >>> get_tags(client, stream_name)
+
+ Returns:
+ Tuple (bool, str, dict)
+ """
+ err_msg = ''
+ success = False
+ params = {
+ 'StreamName': stream_name,
+ }
+ results = dict()
+ try:
+ if not check_mode:
+ results = (
+ client.list_tags_for_stream(**params)['Tags']
+ )
+ else:
+ results = [
+ {
+ 'Key': 'DryRunMode',
+ 'Value': 'true'
+ },
+ ]
+ success = True
+ except botocore.exceptions.ClientError as e:
+ err_msg = str(e)
+
+ return success, err_msg, results
+
+
+def find_stream(client, stream_name, check_mode=False):
+ """Retrieve a Kinesis Stream.
+ Args:
+ client (botocore.client.Kinesis): Boto3 client.
+ stream_name (str): Name of the Kinesis stream.
+
+ Kwargs:
+ check_mode (bool): This will pass DryRun as one of the parameters to the aws api.
+ default=False
+
+ Basic Usage:
+ >>> client = boto3.client('kinesis')
+ >>> stream_name = 'test-stream'
+
+ Returns:
+ Tuple (bool, str, dict)
+ """
+ err_msg = ''
+ success = False
+ params = {
+ 'StreamName': stream_name,
+ }
+ results = dict()
+ has_more_shards = True
+ shards = list()
+ try:
+ if not check_mode:
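+ # describe_stream returns shards a page at a time; loop until HasMoreShards is False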
+ while has_more_shards:
+ results = (
+ client.describe_stream(**params)['StreamDescription']
+ )
+ shards.extend(results.pop('Shards'))
+ has_more_shards = results['HasMoreShards']
+ if has_more_shards:
+ # request the next page of shards
+ params['ExclusiveStartShardId'] = shards[-1]['ShardId']
+ results['Shards'] = shards
+ results['ShardsCount'] = len(shards)
+ else:
+ results = {
+ 'HasMoreShards': True,
+ 'RetentionPeriodHours': 24,
+ 'StreamName': stream_name,
+ 'StreamARN': 'arn:aws:kinesis:east-side:123456789:stream/{0}'.format(stream_name),
+ 'StreamStatus': 'ACTIVE'
+ }
+ success = True
+ except botocore.exceptions.ClientError as e:
+ err_msg = str(e)
+
+ return success, err_msg, results
+
+
+def wait_for_status(client, stream_name, status, wait_timeout=300,
+ check_mode=False):
+ """Wait for the the status to change for a Kinesis Stream.
+ Args:
+ client (botocore.client.EC2): Boto3 client
+ stream_name (str): The name of the kinesis stream.
+ status (str): The status to wait for.
+ examples. status=available, status=deleted
+
+ Kwargs:
+ wait_timeout (int): Number of seconds to wait, until this timeout is reached.
+ check_mode (bool): This will pass DryRun as one of the parameters to the aws api.
+ default=False
+
+ Basic Usage:
+ >>> client = boto3.client('kinesis')
+ >>> stream_name = 'test-stream'
+ >>> wait_for_status(client, stream_name, 'ACTIVE', 300)
+
+ Returns:
+ Tuple (bool, str, dict)
+ """
+ polling_increment_secs = 5
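+ # turn the relative timeout into an absolute deadline for the polling loop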
+ wait_timeout = time.time() + wait_timeout
+ status_achieved = False
+ stream = dict()
+ err_msg = ""
+
+ while wait_timeout > time.time():
+ try:
+ find_success, find_msg, stream = (
+ find_stream(client, stream_name, check_mode=check_mode)
+ )
+ if check_mode:
+ status_achieved = True
+ break
+
+ elif status != 'DELETING':
+ if find_success and stream:
+ if stream.get('StreamStatus') == status:
+ status_achieved = True
+ break
+
+ elif status == 'DELETING' and not check_mode:
+ if not find_success:
+ status_achieved = True
+ break
+
+ else:
+ time.sleep(polling_increment_secs)
+ except botocore.exceptions.ClientError as e:
+ err_msg = str(e)
+
+ if not status_achieved:
+ err_msg = "Wait time out reached, while waiting for results"
+ else:
+ err_msg = "Status {0} achieved successfully".format(status)
+
+ return status_achieved, err_msg, stream
+
+
+def tags_action(client, stream_name, tags, action='create', check_mode=False):
+ """Create or delete multiple tags from a Kinesis Stream.
+ Args:
+ client (botocore.client.Kinesis): Boto3 client.
+ stream_name (str): The name of the kinesis stream.
+ tags (dict): Dictionary of tags to create or delete,
+ e.g. {'env': 'development'}
+
+ Kwargs:
+ action (str): The action to perform.
+ valid actions == create and delete
+ default=create
+ check_mode (bool): This will pass DryRun as one of the parameters to the aws api.
+ default=False
+
+ Basic Usage:
+ >>> client = boto3.client('kinesis')
+ >>> stream_name = 'test-stream'
+ >>> tags = {'env': 'development'}
+ >>> tags_action(client, stream_name, tags, action='create')
+ [True, '']
+
+ Returns:
+ List (bool, str)
+ """
+ success = False
+ err_msg = ""
+ params = {'StreamName': stream_name}
+ try:
+ if not check_mode:
+ if action == 'create':
+ params['Tags'] = tags
+ client.add_tags_to_stream(**params)
+ success = True
+ elif action == 'delete':
+ params['TagKeys'] = tags.keys()
+ client.remove_tags_from_stream(**params)
+ success = True
+ else:
+ err_msg = 'Invalid action {0}'.format(action)
+ else:
+ if action == 'create':
+ success = True
+ elif action == 'delete':
+ success = True
+ else:
+ err_msg = 'Invalid action {0}'.format(action)
+
+ except botocore.exceptions.ClientError as e:
+ err_msg = str(e)
+
+ return success, err_msg
+
+
+def recreate_tags_from_list(list_of_tags):
+ """Recreate tags from a list of tuples into the Amazon Tag format.
+ Args:
+ list_of_tags (list): List of tuples.
+
+ Basic Usage:
+ >>> list_of_tags = [('Env', 'Development')]
+ >>> recreate_tags_from_list(list_of_tags)
+ [
+ {
+ "Value": "Development",
+ "Key": "Env"
+ }
+ ]
+
+ Returns:
+ List
+ """
+ tags = list()
+ for key_name, key_val in list_of_tags:
+ tags.append(
+ {
+ 'Key': key_name,
+ 'Value': key_val
+ }
+ )
+ return tags
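+
+# Illustrative round trip of the three tag helpers above (not part of the module API):
+# make_tags_in_aws_format({'env': 'dev'}) -> [{'Key': 'env', 'Value': 'dev'}]
+# make_tags_in_proper_format([{'Key': 'env', 'Value': 'dev'}]) -> {'env': 'dev'}
+# recreate_tags_from_list([('env', 'dev')]) -> [{'Key': 'env', 'Value': 'dev'}]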
+
+
+def update_tags(client, stream_name, tags, check_mode=False):
+ """Update tags for an amazon resource.
+ Args:
+ resource_id (str): The Amazon resource id.
+ tags (dict): Dictionary of tags you want applied to the Kinesis stream.
+
+ Kwargs:
+ check_mode (bool): This will pass DryRun as one of the parameters to the aws api.
+ default=False
+
+ Basic Usage:
+ >>> client = boto3.client('kinesis')
+ >>> stream_name = 'test-stream'
+ >>> tags = {'env': 'development'}
+ >>> update_tags(client, stream_name, tags)
+ [True, '']
+
+ Returns:
+ Tuple (bool, bool, str)
+ """
+ success = False
+ changed = False
+ err_msg = ''
+ tag_success, tag_msg, current_tags = (
+ get_tags(client, stream_name, check_mode=check_mode)
+ )
+ if current_tags:
+ tags = make_tags_in_aws_format(tags)
+ current_tags_set = (
+ set(
+ reduce(
+ lambda x, y: x + y,
+ [make_tags_in_proper_format(current_tags).items()]
+ )
+ )
+ )
+
+ new_tags_set = (
+ set(
+ reduce(
+ lambda x, y: x + y,
+ [make_tags_in_proper_format(tags).items()]
+ )
+ )
+ )
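+ # tags present on the stream but not requested get removed; newly requested ones get added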
+ tags_to_delete = list(current_tags_set.difference(new_tags_set))
+ tags_to_update = list(new_tags_set.difference(current_tags_set))
+ if tags_to_delete:
+ tags_to_delete = make_tags_in_proper_format(
+ recreate_tags_from_list(tags_to_delete)
+ )
+ delete_success, delete_msg = (
+ tags_action(
+ client, stream_name, tags_to_delete, action='delete',
+ check_mode=check_mode
+ )
+ )
+ if not delete_success:
+ return delete_success, changed, delete_msg
+ if tags_to_update:
+ tags = make_tags_in_proper_format(
+ recreate_tags_from_list(tags_to_update)
+ )
+ else:
+ return True, changed, 'Tags do not need to be updated'
+
+ if tags:
+ create_success, create_msg = (
+ tags_action(
+ client, stream_name, tags, action='create',
+ check_mode=check_mode
+ )
+ )
+ if create_success:
+ changed = True
+ return create_success, changed, create_msg
+
+ return success, changed, err_msg
+
+
+def stream_action(client, stream_name, shard_count=1, action='create',
+ timeout=300, check_mode=False):
+ """Create or Delete an Amazon Kinesis Stream.
+ Args:
+ client (botocore.client.Kinesis): Boto3 client.
+ stream_name (str): The name of the kinesis stream.
+
+ Kwargs:
+ shard_count (int): Number of shards this stream will use.
+ action (str): The action to perform.
+ valid actions == create and delete
+ default=create
+ check_mode (bool): This will pass DryRun as one of the parameters to the aws api.
+ default=False
+
+ Basic Usage:
+ >>> client = boto3.client('kinesis')
+ >>> stream_name = 'test-stream'
+ >>> shard_count = 20
+ >>> stream_action(client, stream_name, shard_count, action='create')
+
+ Returns:
+ List (bool, str)
+ """
+ success = False
+ err_msg = ''
+ params = {
+ 'StreamName': stream_name
+ }
+ try:
+ if not check_mode:
+ if action == 'create':
+ params['ShardCount'] = shard_count
+ client.create_stream(**params)
+ success = True
+ elif action == 'delete':
+ client.delete_stream(**params)
+ success = True
+ else:
+ err_msg = 'Invalid action {0}'.format(action)
+ else:
+ if action == 'create':
+ success = True
+ elif action == 'delete':
+ success = True
+ else:
+ err_msg = 'Invalid action {0}'.format(action)
+
+ except botocore.exceptions.ClientError as e:
+ err_msg = str(e)
+
+ return success, err_msg
+
+
+def retention_action(client, stream_name, retention_period=24,
+ action='increase', check_mode=False):
+ """Increase or Decreaste the retention of messages in the Kinesis stream.
+ Args:
+ client (botocore.client.EC2): Boto3 client.
+ stream_name (str): The
+
+ Kwargs:
+ retention_period (int): This is how long messages will be kept before
+ they are discarded. This can not be less than 24 hours.
+ action (str): The action to perform.
+ valid actions == create and delete
+ default=create
+ check_mode (bool): This will pass DryRun as one of the parameters to the aws api.
+ default=False
+
+ Basic Usage:
+ >>> client = boto3.client('kinesis')
+ >>> stream_name = 'test-stream'
+ >>> retention_period = 48
+ >>> retention_action(client, stream_name, retention_period, action='increase')
+
+ Returns:
+ Tuple (bool, str)
+ """
+ success = False
+ err_msg = ''
+ params = {
+ 'StreamName': stream_name
+ }
+ try:
+ if not check_mode:
+ if action == 'increase':
+ params['RetentionPeriodHours'] = retention_period
+ client.increase_stream_retention_period(**params)
+ success = True
+ err_msg = (
+ 'Retention Period increased successfully to {0}'
+ .format(retention_period)
+ )
+ elif action == 'decrease':
+ params['RetentionPeriodHours'] = retention_period
+ client.decrease_stream_retention_period(**params)
+ success = True
+ err_msg = (
+ 'Retention Period decreased successfully to {0}'
+ .format(retention_period)
+ )
+ else:
+ err_msg = 'Invalid action {0}'.format(action)
+ else:
+ if action == 'increase':
+ success = True
+ elif action == 'decrease':
+ success = True
+ else:
+ err_msg = 'Invalid action {0}'.format(action)
+
+ except botocore.exceptions.ClientError as e:
+ err_msg = str(e)
+
+ return success, err_msg
+
+
+def update(client, current_stream, stream_name, retention_period=None,
+ tags=None, wait=False, wait_timeout=300, check_mode=False):
+ """Update an Amazon Kinesis Stream.
+ Args:
+ client (botocore.client.Kinesis): Boto3 client.
+ stream_name (str): The name of the kinesis stream.
+
+ Kwargs:
+ retention_period (int): This is how long messages will be kept before
+ they are discarded. This can not be less than 24 hours.
+ tags (dict): The tags you want applied.
+ wait (bool): Wait until Stream is ACTIVE.
+ default=False
+ wait_timeout (int): How long to wait until this operation is considered failed.
+ default=300
+ check_mode (bool): This will pass DryRun as one of the parameters to the aws api.
+ default=False
+
+ Basic Usage:
+ >>> client = boto3.client('kinesis')
+ >>> current_stream = {
+ 'HasMoreShards': True,
+ 'RetentionPeriodHours': 24,
+ 'StreamName': 'test-stream',
+ 'StreamARN': 'arn:aws:kinesis:us-west-2:123456789:stream/test-stream',
+ 'StreamStatus': "ACTIVE'
+ }
+ >>> stream_name = 'test-stream'
+ >>> retention_period = 48
+ >>> update(client, current_stream, stream_name, retention_period)
+
+ Returns:
+ Tuple (bool, bool, str)
+ """
+ success = True
+ changed = False
+ err_msg = ''
+ if retention_period:
+ if wait:
+ wait_success, wait_msg, current_stream = (
+ wait_for_status(
+ client, stream_name, 'ACTIVE', wait_timeout,
+ check_mode=check_mode
+ )
+ )
+ if not wait_success:
+ return wait_success, False, wait_msg
+
+ if current_stream['StreamStatus'] == 'ACTIVE':
+ retention_changed = False
+ if retention_period > current_stream['RetentionPeriodHours']:
+ retention_changed, retention_msg = (
+ retention_action(
+ client, stream_name, retention_period, action='increase',
+ check_mode=check_mode
+ )
+ )
+
+ elif retention_period < current_stream['RetentionPeriodHours']:
+ retention_changed, retention_msg = (
+ retention_action(
+ client, stream_name, retention_period, action='decrease',
+ check_mode=check_mode
+ )
+ )
+
+ elif retention_period == current_stream['RetentionPeriodHours']:
+ retention_msg = (
+ 'Retention {0} is the same as {1}'
+ .format(
+ retention_period,
+ current_stream['RetentionPeriodHours']
+ )
+ )
+ success = True
+
+ if retention_changed:
+ success = True
+ changed = True
+
+ err_msg = retention_msg
+ if changed and wait:
+ wait_success, wait_msg, current_stream = (
+ wait_for_status(
+ client, stream_name, 'ACTIVE', wait_timeout,
+ check_mode=check_mode
+ )
+ )
+ if not wait_success:
+ return wait_success, False, wait_msg
+ elif changed and not wait:
+ stream_found, stream_msg, current_stream = (
+ find_stream(client, stream_name, check_mode=check_mode)
+ )
+ if stream_found:
+ if current_stream['StreamStatus'] != 'ACTIVE':
+ err_msg = (
+ 'Retention Period for {0} is in the process of updating'
+ .format(stream_name)
+ )
+ return success, changed, err_msg
+ else:
+ err_msg = (
+ 'StreamStatus has to be ACTIVE in order to modify the retention period. Current status is {0}'
+ .format(current_stream['StreamStatus'])
+ )
+ return success, changed, err_msg
+
+ if tags:
+ _, _, err_msg = (
+ update_tags(client, stream_name, tags, check_mode=check_mode)
+ )
+ if wait:
+ success, err_msg, _ = (
+ wait_for_status(
+ client, stream_name, 'ACTIVE', wait_timeout,
+ check_mode=check_mode
+ )
+ )
+ if success and changed:
+ err_msg = 'Kinesis Stream {0} updated successfully.'.format(stream_name)
+ elif success and not changed:
+ err_msg = 'Kinesis Stream {0} did not change.'.format(stream_name)
+
+ return success, changed, err_msg
+
+
+def create_stream(client, stream_name, number_of_shards=1, retention_period=None,
+ tags=None, wait=False, wait_timeout=300, check_mode=False):
+ """Create an Amazon Kinesis Stream.
+ Args:
+ client (botocore.client.Kinesis): Boto3 client.
+ stream_name (str): The name of the kinesis stream.
+
+ Kwargs:
+ number_of_shards (int): Number of shards this stream will use.
+ default=1
+ retention_period (int): Can not be less than 24 hours
+ default=None
+ tags (dict): The tags you want applied.
+ default=None
+ wait (bool): Wait until Stream is ACTIVE.
+ default=False
+ wait_timeout (int): How long to wait until this operation is considered failed.
+ default=300
+ check_mode (bool): This will pass DryRun as one of the parameters to the aws api.
+ default=False
+
+ Basic Usage:
+ >>> client = boto3.client('kinesis')
+ >>> stream_name = 'test-stream'
+ >>> number_of_shards = 10
+ >>> tags = {'env': 'test'}
+ >>> create_stream(client, stream_name, number_of_shards, tags=tags)
+
+ Returns:
+ Tuple (bool, bool, str, dict)
+ """
+ success = False
+ changed = False
+ err_msg = ''
+ results = dict()
+
+ stream_found, stream_msg, current_stream = (
+ find_stream(client, stream_name, check_mode=check_mode)
+ )
+ if stream_found:
+ if current_stream['ShardsCount'] != number_of_shards:
+ err_msg = 'Can not change the number of shards in a Kinesis Stream'
+ return success, changed, err_msg, results
+
+ if stream_found and current_stream['StreamStatus'] == 'DELETING' and wait:
+ wait_success, wait_msg, current_stream = (
+ wait_for_status(
+ client, stream_name, 'ACTIVE', wait_timeout,
+ check_mode=check_mode
+ )
+ )
+ if stream_found and current_stream['StreamStatus'] != 'DELETING':
+ success, changed, err_msg = update(
+ client, current_stream, stream_name, retention_period, tags,
+ wait, wait_timeout, check_mode=check_mode
+ )
+ else:
+ create_success, create_msg = (
+ stream_action(
+ client, stream_name, number_of_shards, action='create',
+ check_mode=check_mode
+ )
+ )
+ if create_success:
+ changed = True
+ if wait:
+ wait_success, wait_msg, results = (
+ wait_for_status(
+ client, stream_name, 'ACTIVE', wait_timeout,
+ check_mode=check_mode
+ )
+ )
+ err_msg = (
+ 'Kinesis Stream {0} is in the process of being created'
+ .format(stream_name)
+ )
+ if not wait_success:
+ return wait_success, True, wait_msg, results
+ else:
+ err_msg = (
+ 'Kinesis Stream {0} created successfully'
+ .format(stream_name)
+ )
+
+ if tags:
+ changed, err_msg = (
+ tags_action(
+ client, stream_name, tags, action='create',
+ check_mode=check_mode
+ )
+ )
+ if changed:
+ success = True
+ if not success:
+ return success, changed, err_msg, results
+
+ stream_found, stream_msg, current_stream = (
+ find_stream(client, stream_name, check_mode=check_mode)
+ )
+ if retention_period and current_stream['StreamStatus'] == 'ACTIVE':
+ changed, err_msg = (
+ retention_action(
+ client, stream_name, retention_period, action='increase',
+ check_mode=check_mode
+ )
+ )
+ if changed:
+ success = True
+ if not success:
+ return success, changed, err_msg, results
+ else:
+ err_msg = (
+ 'StreamStatus has to be ACTIVE in order to modify the retention period. Current status is {0}'
+ .format(current_stream['StreamStatus'])
+ )
+ success = create_success
+ changed = True
+
+ if success:
+ _, _, results = (
+ find_stream(client, stream_name, check_mode=check_mode)
+ )
+ _, _, current_tags = (
+ get_tags(client, stream_name, check_mode=check_mode)
+ )
+ if current_tags and not check_mode:
+ current_tags = make_tags_in_proper_format(current_tags)
+ results['Tags'] = current_tags
+ elif check_mode and tags:
+ results['Tags'] = tags
+ else:
+ results['Tags'] = dict()
+ results = convert_to_lower(results)
+
+ return success, changed, err_msg, results
+
+
+def delete_stream(client, stream_name, wait=False, wait_timeout=300,
+ check_mode=False):
+ """Delete an Amazon Kinesis Stream.
+ Args:
+ client (botocore.client.Kinesis): Boto3 client.
+ stream_name (str): The name of the kinesis stream.
+
+ Kwargs:
+ wait (bool): Wait until Stream is ACTIVE.
+ default=False
+ wait_timeout (int): How long to wait until this operation is considered failed.
+ default=300
+ check_mode (bool): This will pass DryRun as one of the parameters to the aws api.
+ default=False
+
+ Basic Usage:
+ >>> client = boto3.client('kinesis')
+ >>> stream_name = 'test-stream'
+ >>> delete_stream(client, stream_name)
+
+ Returns:
+ Tuple (bool, bool, str, dict)
+ """
+ success = False
+ changed = False
+ err_msg = ''
+ results = dict()
+ stream_found, stream_msg, current_stream = (
+ find_stream(client, stream_name, check_mode=check_mode)
+ )
+ if stream_found:
+ success, err_msg = (
+ stream_action(
+ client, stream_name, action='delete', check_mode=check_mode
+ )
+ )
+ if success:
+ changed = True
+ if wait:
+ success, err_msg, results = (
+ wait_for_status(
+ client, stream_name, 'DELETING', wait_timeout,
+ check_mode=check_mode
+ )
+ )
+ err_msg = 'Stream {0} deleted successfully'.format(stream_name)
+ if not success:
+ return success, True, err_msg, results
+ else:
+ err_msg = (
+ 'Stream {0} is in the process of being deleted'
+ .format(stream_name)
+ )
+ else:
+ success = True
+ changed = False
+ err_msg = 'Stream {0} does not exist'.format(stream_name)
+
+ return success, changed, err_msg, results
+
+
+def main():
+ argument_spec = ec2_argument_spec()
+ argument_spec.update(
+ dict(
+ name=dict(default=None, required=True),
+ shards=dict(default=None, required=False, type='int'),
+ retention_period=dict(default=None, required=False, type='int'),
+ tags=dict(default=None, required=False, type='dict', aliases=['resource_tags']),
+ wait=dict(default=True, required=False, type='bool'),
+ wait_timeout=dict(default=300, required=False, type='int'),
+ state=dict(default='present', choices=['present', 'absent']),
+ )
+ )
+ module = AnsibleModule(
+ argument_spec=argument_spec,
+ supports_check_mode=True,
+ )
+
+ retention_period = module.params.get('retention_period')
+ stream_name = module.params.get('name')
+ shards = module.params.get('shards')
+ state = module.params.get('state')
+ tags = module.params.get('tags')
+ wait = module.params.get('wait')
+ wait_timeout = module.params.get('wait_timeout')
+
+ if state == 'present' and not shards:
+ module.fail_json(msg='Shards is required when state == present.')
+
+ if retention_period:
+ if retention_period < 24:
+ module.fail_json(msg='Retention period can not be less than 24 hours.')
+
+ if not HAS_BOTO3:
+ module.fail_json(msg='boto3 is required.')
+
+ check_mode = module.check_mode
+ try:
+ region, ec2_url, aws_connect_kwargs = (
+ get_aws_connection_info(module, boto3=True)
+ )
+ client = (
+ boto3_conn(
+ module, conn_type='client', resource='kinesis',
+ region=region, endpoint=ec2_url, **aws_connect_kwargs
+ )
+ )
+ except botocore.exceptions.ClientError as e:
+ err_msg = 'Boto3 Client Error - {0}'.format(str(e.msg))
+ module.fail_json(
+ success=False, changed=False, result={}, msg=err_msg
+ )
+
+ if state == 'present':
+ success, changed, err_msg, results = (
+ create_stream(
+ client, stream_name, shards, retention_period, tags,
+ wait, wait_timeout, check_mode
+ )
+ )
+ elif state == 'absent':
+ success, changed, err_msg, results = (
+ delete_stream(client, stream_name, wait, wait_timeout, check_mode)
+ )
+
+ if success:
+ module.exit_json(
+ success=success, changed=changed, msg=err_msg, **results
+ )
+ else:
+ module.fail_json(
+ success=success, changed=changed, msg=err_msg, result=results
+ )
+
+# import module snippets
+from ansible.module_utils.basic import *
+from ansible.module_utils.ec2 import *
+
+if __name__ == '__main__':
+ main()
diff --git a/lib/ansible/modules/extras/cloud/amazon/lambda.py b/lib/ansible/modules/extras/cloud/amazon/lambda.py
new file mode 100644
index 0000000000..7fb5ea8371
--- /dev/null
+++ b/lib/ansible/modules/extras/cloud/amazon/lambda.py
@@ -0,0 +1,437 @@
+#!/usr/bin/python
+# This file is part of Ansible
+#
+# Ansible is free software: you can redistribute it and/or modify
+# it under the terms of the GNU General Public License as published by
+# the Free Software Foundation, either version 3 of the License, or
+# (at your option) any later version.
+#
+# Ansible is distributed in the hope that it will be useful,
+# but WITHOUT ANY WARRANTY; without even the implied warranty of
+# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+# GNU General Public License for more details.
+#
+# You should have received a copy of the GNU General Public License
+# along with Ansible. If not, see <http://www.gnu.org/licenses/>.
+
+
+DOCUMENTATION = '''
+---
+module: lambda
+short_description: Manage AWS Lambda functions
+description:
+ - Allows for the management of Lambda functions.
+version_added: '2.2'
+requirements: [ boto3 ]
+options:
+ name:
+ description:
+ - The name you want to assign to the function you are uploading. Cannot be changed.
+ required: true
+ state:
+ description:
+ - Create or delete Lambda function
+ required: false
+ default: present
+ choices: [ 'present', 'absent' ]
+ runtime:
+ description:
+ - The runtime environment for the Lambda function you are uploading. Required when creating a function. Use parameters as described in boto3 docs. Current example runtime environments are nodejs, nodejs4.3, java8 or python2.7
+ required: true
+ role_arn:
+ description:
+ - The Amazon Resource Name (ARN) of the IAM role that Lambda assumes when it executes your function to access any other Amazon Web Services (AWS) resources
+ default: null
+ handler:
+ description:
+ - The function within your code that Lambda calls to begin execution
+ default: null
+ zip_file:
+ description:
+ - A .zip file containing your deployment package
+ required: false
+ default: null
+ aliases: [ 'src' ]
+ s3_bucket:
+ description:
+ - Amazon S3 bucket name where the .zip file containing your deployment package is stored
+ required: false
+ default: null
+ s3_key:
+ description:
+ - The Amazon S3 object (the deployment package) key name you want to upload
+ required: false
+ default: null
+ s3_object_version:
+ description:
+ - The Amazon S3 object (the deployment package) version you want to upload.
+ required: false
+ default: null
+ description:
+ description:
+ - A short, user-defined function description. Lambda does not use this value. Assign a meaningful description as you see fit.
+ required: false
+ default: null
+ timeout:
+ description:
+ - The function execution time at which Lambda should terminate the function.
+ required: false
+ default: 3
+ memory_size:
+ description:
+ - The amount of memory, in MB, your Lambda function is given
+ required: false
+ default: 128
+ vpc_subnet_ids:
+ description:
+ - List of subnet IDs to run Lambda function in. Use this option if you need to access resources in your VPC. Leave empty if you don't want to run the function in a VPC.
+ required: false
+ default: None
+ vpc_security_group_ids:
+ description:
+ - List of VPC security group IDs to associate with the Lambda function. Required when vpc_subnet_ids is used.
+ required: false
+ default: None
+notes:
+ - 'Currently this module only supports uploaded code via S3'
+author:
+ - 'Steyn Huizinga (@steynovich)'
+extends_documentation_fragment:
+ - aws
+ - ec2
+'''
+
+EXAMPLES = '''
+# Create Lambda functions
+tasks:
+- name: looped creation
+ lambda:
+ name: '{{ item.name }}'
+ state: present
+ zip_file: '{{ item.zip_file }}'
+ runtime: 'python2.7'
+ role_arn: 'arn:aws:iam::987654321012:role/lambda_basic_execution'
+ handler: 'hello_python.my_handler'
+ vpc_subnet_ids:
+ - subnet-123abcde
+ - subnet-edcba321
+ vpc_security_group_ids:
+ - sg-123abcde
+ - sg-edcba321
+ with_items:
+ - { name: HelloWorld, zip_file: 'hello-code.zip' }
+ - { name: ByeBye, zip_file: 'bye-code.zip' }
+
+# Basic Lambda function deletion
+tasks:
+- name: Delete Lambda functions HelloWorld and ByeBye
+ lambda:
+ name: '{{ item }}'
+ state: absent
+ with_items:
+ - HelloWorld
+ - ByeBye
+'''
+
+RETURN = '''
+output:
+ description: the data returned by create_function in boto3
+ returned: success
+ type: dict
+ sample:
+ {
+ 'FunctionName': 'string',
+ 'FunctionArn': 'string',
+ 'Runtime': 'nodejs',
+ 'Role': 'string',
+ 'Handler': 'string',
+ 'CodeSize': 123,
+ 'Description': 'string',
+ 'Timeout': 123,
+ 'MemorySize': 123,
+ 'LastModified': 'string',
+ 'CodeSha256': 'string',
+ 'Version': 'string',
+ }
+'''
+
+# Import from Python standard library
+import base64
+import hashlib
+
+try:
+ import botocore
+ HAS_BOTOCORE = True
+except ImportError:
+ HAS_BOTOCORE = False
+
+try:
+ import boto3
+ HAS_BOTO3 = True
+except ImportError:
+ HAS_BOTO3 = False
+
+
+def get_current_function(connection, function_name):
+ try:
+ return connection.get_function(FunctionName=function_name)
+ except botocore.exceptions.ClientError as e:
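+ # any ClientError (typically ResourceNotFoundException) is treated as "function does not exist"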
+ return False
+
+
+def sha256sum(filename):
+ # Lambda reports CodeSha256 as the base64-encoded SHA-256 digest of the
+ # deployment package, so compute the local checksum in the same format.
+ hasher = hashlib.sha256()
+ with open(filename, 'rb') as f:
+ hasher.update(f.read())
+
+ code_hash = hasher.digest()
+ code_b64 = base64.b64encode(code_hash)
+
+ return code_b64.decode('utf-8')
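+# For example, sha256sum('lambda.zip') returns a base64 digest such as
+# 'sKc6iJ0fTkMUZUBu3PDTAjJw1LCbUsMBQYyCrxXV+dM=' (hypothetical value), which can be
+# compared directly against the CodeSha256 field Lambda reports.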
+
+
+def main():
+ argument_spec = ec2_argument_spec()
+ argument_spec.update(dict(
+ name=dict(type='str', required=True),
+ state=dict(type='str', default='present', choices=['present', 'absent']),
+ runtime=dict(type='str', required=True),
+ role_arn=dict(type='str', default=None),
+ handler=dict(type='str', default=None),
+ zip_file=dict(type='str', default=None, aliases=['src']),
+ s3_bucket=dict(type='str'),
+ s3_key=dict(type='str'),
+ s3_object_version=dict(type='str', default=None),
+ description=dict(type='str', default=''),
+ timeout=dict(type='int', default=3),
+ memory_size=dict(type='int', default=128),
+ vpc_subnet_ids=dict(type='list', default=None),
+ vpc_security_group_ids=dict(type='list', default=None),
+ )
+ )
+
+ mutually_exclusive = [['zip_file', 's3_key'],
+ ['zip_file', 's3_bucket'],
+ ['zip_file', 's3_object_version']]
+
+ required_together = [['s3_key', 's3_bucket', 's3_object_version'],
+ ['vpc_subnet_ids', 'vpc_security_group_ids']]
+
+ module = AnsibleModule(argument_spec=argument_spec,
+ supports_check_mode=True,
+ mutually_exclusive=mutually_exclusive,
+ required_together=required_together)
+
+ name = module.params.get('name')
+ state = module.params.get('state').lower()
+ runtime = module.params.get('runtime')
+ role_arn = module.params.get('role_arn')
+ handler = module.params.get('handler')
+ s3_bucket = module.params.get('s3_bucket')
+ s3_key = module.params.get('s3_key')
+ s3_object_version = module.params.get('s3_object_version')
+ zip_file = module.params.get('zip_file')
+ description = module.params.get('description')
+ timeout = module.params.get('timeout')
+ memory_size = module.params.get('memory_size')
+ vpc_subnet_ids = module.params.get('vpc_subnet_ids')
+ vpc_security_group_ids = module.params.get('vpc_security_group_ids')
+
+ check_mode = module.check_mode
+ changed = False
+
+ if not HAS_BOTOCORE:
+ module.fail_json(msg='Python module "botocore" is missing, please install it')
+
+ if not HAS_BOTO3:
+ module.fail_json(msg='Python module "boto3" is missing, please install it')
+
+ region, ec2_url, aws_connect_kwargs = get_aws_connection_info(module, boto3=True)
+ if not region:
+ module.fail_json(msg='region must be specified')
+
+ try:
+ client = boto3_conn(module, conn_type='client', resource='lambda',
+ region=region, endpoint=ec2_url, **aws_connect_kwargs)
+ except (botocore.exceptions.ClientError, botocore.exceptions.ValidationError) as e:
+ module.fail_json(msg=str(e))
+
+ # Get function configuration if present, False otherwise
+ current_function = get_current_function(client, name)
+
+ # Update existing Lambda function
+ if state == 'present' and current_function:
+
+ # Get current state
+ current_config = current_function['Configuration']
+
+ # Update function configuration
+ func_kwargs = {'FunctionName': name}
+
+ # Update configuration if needed
+ if role_arn and current_config['Role'] != role_arn:
+ func_kwargs.update({'Role': role_arn})
+ if handler and current_config['Handler'] != handler:
+ func_kwargs.update({'Handler': handler})
+ if description and current_config['Description'] != description:
+ func_kwargs.update({'Description': description})
+ if timeout and current_config['Timeout'] != timeout:
+ func_kwargs.update({'Timeout': timeout})
+ if memory_size and current_config['MemorySize'] != memory_size:
+ func_kwargs.update({'MemorySize': memory_size})
+
+ # Check for unsupported mutation
+ if current_config['Runtime'] != runtime:
+ module.fail_json(msg='Cannot change runtime. Please recreate the function')
+
+ # If VPC configuration is desired
+ if vpc_subnet_ids or vpc_security_group_ids:
+ if len(vpc_subnet_ids) < 1:
+ module.fail_json(msg='At least 1 subnet is required')
+
+ if len(vpc_security_group_ids) < 1:
+ module.fail_json(msg='At least 1 security group is required')
+
+ if 'VpcConfig' in current_config:
+ # Compare VPC config with current config
+ current_vpc_subnet_ids = current_config['VpcConfig']['SubnetIds']
+ current_vpc_security_group_ids = current_config['VpcConfig']['SecurityGroupIds']
+
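+ # order is not significant, so compare the sorted ID lists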
+ subnet_net_id_changed = sorted(vpc_subnet_ids) != sorted(current_vpc_subnet_ids)
+ vpc_security_group_ids_changed = sorted(vpc_security_group_ids) != sorted(current_vpc_security_group_ids)
+
+ if any((subnet_net_id_changed, vpc_security_group_ids_changed)):
+ func_kwargs.update({'VpcConfig':
+ {'SubnetIds': vpc_subnet_ids, 'SecurityGroupIds': vpc_security_group_ids}})
+ else:
+ # No VPC configuration is desired, assure VPC config is empty when present in current config
+ if ('VpcConfig' in current_config and
+ 'VpcId' in current_config['VpcConfig'] and
+ current_config['VpcConfig']['VpcId'] != ''):
+ func_kwargs.update({'VpcConfig': {'SubnetIds': [], 'SecurityGroupIds': []}})
+
+ # Upload new configuration if configuration has changed
+ if len(func_kwargs) > 1:
+ try:
+ if not check_mode:
+ client.update_function_configuration(**func_kwargs)
+ changed = True
+ except (botocore.exceptions.ParamValidationError, botocore.exceptions.ClientError) as e:
+ module.fail_json(msg=str(e))
+
+ # Update code configuration
+ code_kwargs = {'FunctionName': name}
+
+ # Update S3 location
+ if s3_bucket and s3_key:
+ # If function is stored on S3 always update
+ code_kwargs.update({'S3Bucket': s3_bucket, 'S3Key': s3_key})
+
+ # If S3 Object Version is given
+ if s3_object_version:
+ code_kwargs.update({'S3ObjectVersion': s3_object_version})
+
+ # Compare local checksum, update remote code when different
+ elif zip_file:
+ local_checksum = sha256sum(zip_file)
+ remote_checksum = current_config['CodeSha256']
+
+ # Only upload new code when local code is different compared to the remote code
+ if local_checksum != remote_checksum:
+ try:
+ with open(zip_file, 'rb') as f:
+ encoded_zip = f.read()
+ code_kwargs.update({'ZipFile': encoded_zip})
+ except IOError as e:
+ module.fail_json(msg=str(e))
+
+ # Upload new code if needed (e.g. code checksum has changed)
+ if len(code_kwargs) > 1:
+ try:
+ if not check_mode:
+ client.update_function_code(**code_kwargs)
+ changed = True
+ except (botocore.exceptions.ParamValidationError, botocore.exceptions.ClientError) as e:
+ module.fail_json(msg=str(e))
+
+ # Describe function code and configuration
+ response = get_current_function(client, name)
+ if not response:
+ module.fail_json(msg='Unable to get function information after updating')
+
+ # We're done
+ module.exit_json(changed=changed, result=camel_dict_to_snake_dict(response))
+
+ # Function doesn't exist, create new Lambda function
+ elif state == 'present':
+ if s3_bucket and s3_key:
+ # If function is stored on S3
+ code = {'S3Bucket': s3_bucket,
+ 'S3Key': s3_key}
+ if s3_object_version:
+ code.update({'S3ObjectVersion': s3_object_version})
+ elif zip_file:
+ # If function is stored in local zipfile
+ try:
+ with open(zip_file, 'rb') as f:
+ zip_content = f.read()
+
+ code = {'ZipFile': zip_content}
+ except IOError as e:
+ module.fail_json(msg=str(e))
+
+ else:
+ module.fail_json(msg='Either S3 object or path to zipfile required')
+
+ func_kwargs = {'FunctionName': name,
+ 'Description': description,
+ 'Runtime': runtime,
+ 'Role': role_arn,
+ 'Handler': handler,
+ 'Code': code,
+ 'Timeout': timeout,
+ 'MemorySize': memory_size,
+ }
+
+ # If VPC configuration is given
+ if vpc_subnet_ids or vpc_security_group_ids:
+ if len(vpc_subnet_ids) < 1:
+ module.fail_json(msg='At least 1 subnet is required')
+
+ if len(vpc_security_group_ids) < 1:
+ module.fail_json(msg='At least 1 security group is required')
+
+ func_kwargs.update({'VpcConfig': {'SubnetIds': vpc_subnet_ids,
+ 'SecurityGroupIds': vpc_security_group_ids}})
+
+ # Finally try to create function
+ response = dict() # in check mode no API call is made, so return an empty result
+ try:
+ if not check_mode:
+ response = client.create_function(**func_kwargs)
+ changed = True
+ except (botocore.exceptions.ParamValidationError, botocore.exceptions.ClientError) as e:
+ module.fail_json(msg=str(e))
+
+ module.exit_json(changed=changed, result=camel_dict_to_snake_dict(response))
+
+ # Delete existing Lambda function
+ if state == 'absent' and current_function:
+ try:
+ if not check_mode:
+ client.delete_function(FunctionName=name)
+ changed = True
+ except (botocore.exceptions.ParamValidationError, botocore.exceptions.ClientError) as e:
+ module.fail_json(msg=str(e))
+
+ module.exit_json(changed=changed)
+
+ # Function already absent, do nothing
+ elif state == 'absent':
+ module.exit_json(changed=changed)
+
+
+from ansible.module_utils.basic import *
+from ansible.module_utils.ec2 import *
+
+if __name__ == '__main__':
+ main()
diff --git a/lib/ansible/modules/extras/cloud/amazon/lambda_alias.py b/lib/ansible/modules/extras/cloud/amazon/lambda_alias.py
new file mode 100644
index 0000000000..d744ca7346
--- /dev/null
+++ b/lib/ansible/modules/extras/cloud/amazon/lambda_alias.py
@@ -0,0 +1,384 @@
+#!/usr/bin/python
+# This file is part of Ansible
+#
+# Ansible is free software: you can redistribute it and/or modify
+# it under the terms of the GNU General Public License as published by
+# the Free Software Foundation, either version 3 of the License, or
+# (at your option) any later version.
+#
+# Ansible is distributed in the hope that it will be useful,
+# but WITHOUT ANY WARRANTY; without even the implied warranty of
+# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+# GNU General Public License for more details.
+#
+# You should have received a copy of the GNU General Public License
+# along with Ansible. If not, see <http://www.gnu.org/licenses/>.
+
+try:
+ import boto3
+ from botocore.exceptions import ClientError, ParamValidationError, MissingParametersError
+ HAS_BOTO3 = True
+except ImportError:
+ HAS_BOTO3 = False
+
+
+DOCUMENTATION = '''
+---
+module: lambda_alias
+short_description: Creates, updates or deletes AWS Lambda function aliases.
+description:
+ - This module allows the management of AWS Lambda functions aliases via the Ansible
+ framework. It is idempotent and supports "Check" mode. Use module M(lambda) to manage the lambda function
+ itself and M(lambda_event) to manage event source mappings.
+
+version_added: "2.2"
+
+author: Pierre Jodouin (@pjodouin), Ryan Scott Brown (@ryansb)
+options:
+ function_name:
+ description:
+ - The name of the Lambda function the alias is attached to.
+ required: true
+ state:
+ description:
+ - Describes the desired state.
+ required: true
+ default: "present"
+ choices: ["present", "absent"]
+ name:
+ description:
+ - Name of the function alias.
+ required: true
+ aliases: ['alias_name']
+ description:
+ description:
+ - A short, user-defined function alias description.
+ required: false
+ version:
+ description:
+ - Version associated with the Lambda function alias.
+ A value of 0 (or omitted parameter) sets the alias to the $LATEST version.
+ required: false
+ aliases: ['function_version']
+requirements:
+ - boto3
+extends_documentation_fragment:
+ - aws
+
+'''
+
+EXAMPLES = '''
+---
+# Simple example to create a lambda function and publish a version
+- hosts: localhost
+ gather_facts: no
+ vars:
+ state: present
+ project_folder: /path/to/deployment/package
+ deployment_package: lambda.zip
+ account: 123456789012
+ production_version: 5
+ tasks:
+ - name: AWS Lambda Function
+ lambda:
+ state: "{{ state | default('present') }}"
+ name: myLambdaFunction
+ publish: True
+ description: lambda function description
+ code_s3_bucket: package-bucket
+ code_s3_key: "lambda/{{ deployment_package }}"
+ local_path: "{{ project_folder }}/{{ deployment_package }}"
+ runtime: python2.7
+ timeout: 5
+ handler: lambda.handler
+ memory_size: 128
+ role: "arn:aws:iam::{{ account }}:role/API2LambdaExecRole"
+
+ - name: show results
+ debug: var=lambda_facts
+
+# The following will set the Dev alias to the latest version ($LATEST) since version is omitted (or = 0)
+ - name: "alias 'Dev' for function {{ lambda_facts.FunctionName }} "
+ lambda_alias:
+ state: "{{ state | default('present') }}"
+ function_name: "{{ lambda_facts.FunctionName }}"
+ name: Dev
+ description: Development is $LATEST version
+
+# The QA alias will only be created when a new version is published (i.e. not = '$LATEST')
+ - name: "alias 'QA' for function {{ lambda_facts.FunctionName }} "
+ lambda_alias:
+ state: "{{ state | default('present') }}"
+ function_name: "{{ lambda_facts.FunctionName }}"
+ name: QA
+ version: "{{ lambda_facts.Version }}"
+ description: "QA is version {{ lambda_facts.Version }}"
+ when: lambda_facts.Version != "$LATEST"
+
+# The Prod alias will have a fixed version based on a variable
+ - name: "alias 'Prod' for function {{ lambda_facts.FunctionName }} "
+ lambda_alias:
+ state: "{{ state | default('present') }}"
+ function_name: "{{ lambda_facts.FunctionName }}"
+ name: Prod
+ version: "{{ production_version }}"
+ description: "Production is version {{ production_version }}"
+'''
+
+RETURN = '''
+---
+alias_arn:
+ description: Full ARN of the function, including the alias
+ returned: success
+ type: string
+ sample: arn:aws:lambda:us-west-2:123456789012:function:myFunction:dev
+description:
+ description: A short description of the alias
+ returned: success
+ type: string
+ sample: The development stage for my hot new app
+function_version:
+ description: The qualifier that the alias refers to
+ returned: success
+ type: string
+ sample: $LATEST
+name:
+ description: The name of the alias assigned
+ returned: success
+ type: string
+ sample: dev
+'''
+
+
+class AWSConnection:
+ """
+ Create the connection object and client objects as required.
+ """
+
+    def __init__(self, ansible_obj, resources, use_boto3=True):
+
+ try:
+            self.region, self.endpoint, aws_connect_kwargs = get_aws_connection_info(ansible_obj, boto3=use_boto3)
+
+ self.resource_client = dict()
+ if not resources:
+ resources = ['lambda']
+
+ resources.append('iam')
+
+ for resource in resources:
+ aws_connect_kwargs.update(dict(region=self.region,
+ endpoint=self.endpoint,
+ conn_type='client',
+ resource=resource
+ ))
+ self.resource_client[resource] = boto3_conn(ansible_obj, **aws_connect_kwargs)
+
+ # if region is not provided, then get default profile/session region
+ if not self.region:
+ self.region = self.resource_client['lambda'].meta.region_name
+
+ except (ClientError, ParamValidationError, MissingParametersError) as e:
+ ansible_obj.fail_json(msg="Unable to connect, authorize or access resource: {0}".format(e))
+
+ try:
+ self.account_id = self.resource_client['iam'].get_user()['User']['Arn'].split(':')[4]
+ except (ClientError, ValueError, KeyError, IndexError):
+ self.account_id = ''
+
+ def client(self, resource='lambda'):
+ return self.resource_client[resource]
+
+
+def pc(key):
+ """
+    Changes a python key into its Pascal case equivalent. For example, 'this_function_name' becomes 'ThisFunctionName'.
+
+ :param key:
+ :return:
+ """
+
+ return "".join([token.capitalize() for token in key.split('_')])
+
+
+def set_api_params(module, module_params):
+ """
+ Sets module parameters to those expected by the boto3 API.
+
+ :param module:
+ :param module_params:
+ :return:
+ """
+
+ api_params = dict()
+
+ for param in module_params:
+ module_param = module.params.get(param, None)
+ if module_param:
+ api_params[pc(param)] = module_param
+
+ return api_params
+
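+# Illustration of set_api_params (hypothetical values): with module.params containing
+# {'function_name': 'myFn', 'description': None}, set_api_params(module, ('function_name',
+# 'description')) returns {'FunctionName': 'myFn'}; falsy values are skipped, so
+# 'Description' is omitted here.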
+
+def validate_params(module, aws):
+ """
+ Performs basic parameter validation.
+
+ :param module: Ansible module reference
+ :param aws: AWS client connection
+ :return:
+ """
+
+ function_name = module.params['function_name']
+
+ # validate function name
+    if not re.search(r'^[\w\-:]+$', function_name):
+        module.fail_json(
+            msg='Function name {0} is invalid. Names must contain only alphanumeric characters, hyphens, underscores and colons.'.format(function_name)
+        )
+ if len(function_name) > 64:
+ module.fail_json(msg='Function name "{0}" exceeds 64 character limit'.format(function_name))
+
+ # if parameter 'function_version' is zero, set it to $LATEST, else convert it to a string
+ if module.params['function_version'] == 0:
+ module.params['function_version'] = '$LATEST'
+ else:
+ module.params['function_version'] = str(module.params['function_version'])
+
+ return
+
+
+def get_lambda_alias(module, aws):
+ """
+ Returns the lambda function alias if it exists.
+
+ :param module: Ansible module reference
+ :param aws: AWS client connection
+ :return:
+ """
+
+ client = aws.client('lambda')
+
+ # set API parameters
+ api_params = set_api_params(module, ('function_name', 'name'))
+
+    # check if alias exists and get facts
+    try:
+        results = client.get_alias(**api_params)
+    except ClientError as e:
+        # only ClientError carries a parsed error response
+        if e.response['Error']['Code'] == 'ResourceNotFoundException':
+            results = None
+        else:
+            module.fail_json(msg='Error retrieving function alias: {0}'.format(e))
+    except (ParamValidationError, MissingParametersError) as e:
+        module.fail_json(msg='Error retrieving function alias: {0}'.format(e))
+
+ return results
+
+
+def lambda_alias(module, aws):
+ """
+ Adds, updates or deletes lambda function aliases.
+
+ :param module: Ansible module reference
+ :param aws: AWS client connection
+ :return dict:
+ """
+ client = aws.client('lambda')
+ results = dict()
+ changed = False
+ current_state = 'absent'
+ state = module.params['state']
+
+ facts = get_lambda_alias(module, aws)
+ if facts:
+ current_state = 'present'
+
+ if state == 'present':
+ if current_state == 'present':
+
+ # check if alias has changed -- only version and description can change
+ alias_params = ('function_version', 'description')
+ for param in alias_params:
+ if module.params.get(param) != facts.get(pc(param)):
+ changed = True
+ break
+
+ if changed:
+ api_params = set_api_params(module, ('function_name', 'name'))
+ api_params.update(set_api_params(module, alias_params))
+
+ if not module.check_mode:
+ try:
+ results = client.update_alias(**api_params)
+ except (ClientError, ParamValidationError, MissingParametersError) as e:
+ module.fail_json(msg='Error updating function alias: {0}'.format(e))
+
+ else:
+ # create new function alias
+ api_params = set_api_params(module, ('function_name', 'name', 'function_version', 'description'))
+
+ try:
+ if not module.check_mode:
+ results = client.create_alias(**api_params)
+ changed = True
+ except (ClientError, ParamValidationError, MissingParametersError) as e:
+ module.fail_json(msg='Error creating function alias: {0}'.format(e))
+
+ else: # state = 'absent'
+ if current_state == 'present':
+            # delete the alias
+ api_params = set_api_params(module, ('function_name', 'name'))
+
+ try:
+ if not module.check_mode:
+ results = client.delete_alias(**api_params)
+ changed = True
+ except (ClientError, ParamValidationError, MissingParametersError) as e:
+ module.fail_json(msg='Error deleting function alias: {0}'.format(e))
+
+    return dict(changed=changed, **dict(results or facts or {}))
+
+
+def main():
+ """
+ Main entry point.
+
+ :return dict: ansible facts
+ """
+ argument_spec = ec2_argument_spec()
+ argument_spec.update(
+ dict(
+ state=dict(required=False, default='present', choices=['present', 'absent']),
+            function_name=dict(required=True),
+            name=dict(required=True, aliases=['alias_name']),
+ function_version=dict(type='int', required=False, default=0, aliases=['version']),
+ description=dict(required=False, default=None),
+ )
+ )
+
+ module = AnsibleModule(
+ argument_spec=argument_spec,
+ supports_check_mode=True,
+ mutually_exclusive=[],
+ required_together=[]
+ )
+
+ # validate dependencies
+ if not HAS_BOTO3:
+ module.fail_json(msg='boto3 is required for this module.')
+
+ aws = AWSConnection(module, ['lambda'])
+
+ validate_params(module, aws)
+
+ results = lambda_alias(module, aws)
+
+ module.exit_json(**camel_dict_to_snake_dict(results))
+
+
+# ansible import module(s) kept at ~eof as recommended
+from ansible.module_utils.basic import *
+from ansible.module_utils.ec2 import *
+
+if __name__ == '__main__':
+ main()
diff --git a/lib/ansible/modules/extras/cloud/amazon/lambda_event.py b/lib/ansible/modules/extras/cloud/amazon/lambda_event.py
new file mode 100644
index 0000000000..0d642734f0
--- /dev/null
+++ b/lib/ansible/modules/extras/cloud/amazon/lambda_event.py
@@ -0,0 +1,422 @@
+#!/usr/bin/python
+# (c) 2016, Pierre Jodouin <pjodouin@virtualcomputing.solutions>
+#
+# Ansible is free software: you can redistribute it and/or modify
+# it under the terms of the GNU General Public License as published by
+# the Free Software Foundation, either version 3 of the License, or
+# (at your option) any later version.
+#
+# Ansible is distributed in the hope that it will be useful,
+# but WITHOUT ANY WARRANTY; without even the implied warranty of
+# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+# GNU General Public License for more details.
+#
+# You should have received a copy of the GNU General Public License
+# along with Ansible. If not, see <http://www.gnu.org/licenses/>.
+
+import sys
+
+try:
+ import boto3
+ from botocore.exceptions import ClientError, ParamValidationError, MissingParametersError
+ HAS_BOTO3 = True
+except ImportError:
+ HAS_BOTO3 = False
+
+
+DOCUMENTATION = '''
+---
+module: lambda_event
+short_description: Creates, updates or deletes AWS Lambda function event mappings.
+description:
+ - This module allows the management of AWS Lambda function event source mappings such as DynamoDB and Kinesis stream
+ events via the Ansible framework. These event source mappings are relevant only in the AWS Lambda pull model, where
+ AWS Lambda invokes the function.
+ It is idempotent and supports "Check" mode. Use module M(lambda) to manage the lambda
+ function itself and M(lambda_alias) to manage function aliases.
+
+version_added: "2.2"
+
+author: Pierre Jodouin (@pjodouin), Ryan Brown (@ryansb)
+options:
+ lambda_function_arn:
+ description:
+ - The name or ARN of the lambda function.
+ required: true
+ aliases: ['function_name', 'function_arn']
+ state:
+ description:
+ - Describes the desired state.
+    required: false
+ default: "present"
+ choices: ["present", "absent"]
+ alias:
+ description:
+ - Name of the function alias. Mutually exclusive with C(version).
+    required: false
+ version:
+ description:
+ - Version of the Lambda function. Mutually exclusive with C(alias).
+ required: false
+ event_source:
+ description:
+ - Source of the event that triggers the lambda function.
+ required: false
+ default: stream
+ choices: ['stream']
+ source_params:
+ description:
+ - Sub-parameters required for event source.
+ - I(== stream event source ==)
+ - C(source_arn) The Amazon Resource Name (ARN) of the Kinesis or DynamoDB stream that is the event source.
+ - C(enabled) Indicates whether AWS Lambda should begin polling the event source. Default is True.
+ - C(batch_size) The largest number of records that AWS Lambda will retrieve from your event source at the
+ time of invoking your function. Default is 100.
+ - C(starting_position) The position in the stream where AWS Lambda should start reading.
+ Choices are TRIM_HORIZON or LATEST.
+ required: true
+requirements:
+ - boto3
+extends_documentation_fragment:
+ - aws
+
+'''
+
+EXAMPLES = '''
+---
+# Example that creates a lambda event notification for a DynamoDB stream
+- hosts: localhost
+ gather_facts: no
+ vars:
+ state: present
+ tasks:
+ - name: DynamoDB stream event mapping
+ lambda_event:
+ state: "{{ state | default('present') }}"
+ event_source: stream
+ function_name: "{{ function_name }}"
+ alias: Dev
+ source_params:
+ source_arn: arn:aws:dynamodb:us-east-1:123456789012:table/tableName/stream/2016-03-19T19:51:37.457
+ enabled: True
+ batch_size: 100
+ starting_position: TRIM_HORIZON
+
+ - name: show source event
+ debug: var=lambda_stream_events
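+
+  # Hedged follow-up example: remove the same mapping (all values assumed from the task above)
+  - name: Remove DynamoDB stream event mapping
+    lambda_event:
+      state: absent
+      event_source: stream
+      function_name: "{{ function_name }}"
+      alias: Dev
+      source_params:
+        source_arn: arn:aws:dynamodb:us-east-1:123456789012:table/tableName/stream/2016-03-19T19:51:37.457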
+'''
+
+RETURN = '''
+---
+lambda_stream_events:
+ description: list of dictionaries returned by the API describing stream event mappings
+ returned: success
+ type: list
+'''
+
+# ---------------------------------------------------------------------------------------------------
+#
+# Helper Functions & classes
+#
+# ---------------------------------------------------------------------------------------------------
+
+
+class AWSConnection:
+ """
+ Create the connection object and client objects as required.
+ """
+
+ def __init__(self, ansible_obj, resources, use_boto3=True):
+
+ try:
+ self.region, self.endpoint, aws_connect_kwargs = get_aws_connection_info(ansible_obj, boto3=use_boto3)
+
+ self.resource_client = dict()
+ if not resources:
+ resources = ['lambda']
+
+ resources.append('iam')
+
+ for resource in resources:
+ aws_connect_kwargs.update(dict(region=self.region,
+ endpoint=self.endpoint,
+ conn_type='client',
+ resource=resource
+ ))
+ self.resource_client[resource] = boto3_conn(ansible_obj, **aws_connect_kwargs)
+
+ # if region is not provided, then get default profile/session region
+ if not self.region:
+ self.region = self.resource_client['lambda'].meta.region_name
+
+ except (ClientError, ParamValidationError, MissingParametersError) as e:
+ ansible_obj.fail_json(msg="Unable to connect, authorize or access resource: {0}".format(e))
+
+ # set account ID
+ try:
+ self.account_id = self.resource_client['iam'].get_user()['User']['Arn'].split(':')[4]
+ except (ClientError, ValueError, KeyError, IndexError):
+ self.account_id = ''
+
+ def client(self, resource='lambda'):
+ return self.resource_client[resource]
+
+
+def pc(key):
+ """
+    Changes a python key into its Pascal case equivalent. For example, 'this_function_name' becomes 'ThisFunctionName'.
+
+ :param key:
+ :return:
+ """
+
+ return "".join([token.capitalize() for token in key.split('_')])
+
+
+def ordered_obj(obj):
+ """
+ Order object for comparison purposes
+
+ :param obj:
+ :return:
+ """
+
+ if isinstance(obj, dict):
+ return sorted((k, ordered_obj(v)) for k, v in obj.items())
+    elif isinstance(obj, list):
+ return sorted(ordered_obj(x) for x in obj)
+ else:
+ return obj
+
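+# For example (hypothetical values): ordered_obj({'b': [2, 1], 'a': 0}) and
+# ordered_obj({'a': 0, 'b': [1, 2]}) yield the same ordered structure, so the
+# two objects compare as equal regardless of key or list ordering.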
+
+def set_api_sub_params(params):
+ """
+ Sets module sub-parameters to those expected by the boto3 API.
+
+ :param params:
+ :return:
+ """
+
+ api_params = dict()
+
+ for param in params.keys():
+ param_value = params.get(param, None)
+ if param_value:
+ api_params[pc(param)] = param_value
+
+ return api_params
+
+
+def validate_params(module, aws):
+ """
+ Performs basic parameter validation.
+
+ :param module:
+ :param aws:
+ :return:
+ """
+
+ function_name = module.params['lambda_function_arn']
+
+ # validate function name
+    if not re.search(r'^[\w\-:]+$', function_name):
+        module.fail_json(
+            msg='Function name {0} is invalid. Names must contain only alphanumeric characters, hyphens, underscores and colons.'.format(function_name)
+        )
+ if len(function_name) > 64:
+ module.fail_json(msg='Function name "{0}" exceeds 64 character limit'.format(function_name))
+
+ # check if 'function_name' needs to be expanded in full ARN format
+ if not module.params['lambda_function_arn'].startswith('arn:aws:lambda:'):
+ function_name = module.params['lambda_function_arn']
+ module.params['lambda_function_arn'] = 'arn:aws:lambda:{0}:{1}:function:{2}'.format(aws.region, aws.account_id, function_name)
+
+ qualifier = get_qualifier(module)
+ if qualifier:
+ function_arn = module.params['lambda_function_arn']
+ module.params['lambda_function_arn'] = '{0}:{1}'.format(function_arn, qualifier)
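+        # e.g. (hypothetical values): 'arn:aws:lambda:us-east-1:123456789012:function:myFunction:Dev'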
+
+ return
+
+
+def get_qualifier(module):
+ """
+ Returns the function qualifier as a version or alias or None.
+
+ :param module:
+ :return:
+ """
+
+ qualifier = None
+ if module.params['version'] > 0:
+ qualifier = str(module.params['version'])
+ elif module.params['alias']:
+ qualifier = str(module.params['alias'])
+
+ return qualifier
+
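+# Hypothetical outcomes: version=2 -> '2'; alias='Dev' with version=0 -> 'Dev';
+# neither set -> None (the unqualified function is used).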
+
+# ---------------------------------------------------------------------------------------------------
+#
+# Lambda Event Handlers
+#
+# This section defines a lambda_event_X function where X is an AWS service capable of initiating
+# the execution of a Lambda function (pull only).
+#
+# ---------------------------------------------------------------------------------------------------
+
+def lambda_event_stream(module, aws):
+ """
+ Adds, updates or deletes lambda stream (DynamoDb, Kinesis) event notifications.
+ :param module:
+ :param aws:
+ :return:
+ """
+
+ client = aws.client('lambda')
+ facts = dict()
+ changed = False
+ current_state = 'absent'
+ state = module.params['state']
+
+ api_params = dict(FunctionName=module.params['lambda_function_arn'])
+
+ # check if required sub-parameters are present and valid
+ source_params = module.params['source_params']
+
+ source_arn = source_params.get('source_arn')
+ if source_arn:
+ api_params.update(EventSourceArn=source_arn)
+ else:
+ module.fail_json(msg="Source parameter 'source_arn' is required for stream event notification.")
+
+ # check if optional sub-parameters are valid, if present
+ batch_size = source_params.get('batch_size')
+ if batch_size:
+ try:
+ source_params['batch_size'] = int(batch_size)
+ except ValueError:
+ module.fail_json(msg="Source parameter 'batch_size' must be an integer, found: {0}".format(source_params['batch_size']))
+
+ # optional boolean value needs special treatment as not present does not imply False
+ source_param_enabled = module.boolean(source_params.get('enabled', 'True'))
+
+ # check if event mapping exist
+ try:
+ facts = client.list_event_source_mappings(**api_params)['EventSourceMappings']
+ if facts:
+ current_state = 'present'
+ except ClientError as e:
+ module.fail_json(msg='Error retrieving stream event notification configuration: {0}'.format(e))
+
+ if state == 'present':
+ if current_state == 'absent':
+
+ starting_position = source_params.get('starting_position')
+ if starting_position:
+ api_params.update(StartingPosition=starting_position)
+ else:
+ module.fail_json(msg="Source parameter 'starting_position' is required for stream event notification.")
+
+ if source_arn:
+ api_params.update(Enabled=source_param_enabled)
+ if source_params.get('batch_size'):
+ api_params.update(BatchSize=source_params.get('batch_size'))
+
+ try:
+ if not module.check_mode:
+ facts = client.create_event_source_mapping(**api_params)
+ changed = True
+ except (ClientError, ParamValidationError, MissingParametersError) as e:
+ module.fail_json(msg='Error creating stream source event mapping: {0}'.format(e))
+
+ else:
+ # current_state is 'present'
+ api_params = dict(FunctionName=module.params['lambda_function_arn'])
+ current_mapping = facts[0]
+ api_params.update(UUID=current_mapping['UUID'])
+ mapping_changed = False
+
+ # check if anything changed
+ if source_params.get('batch_size') and source_params['batch_size'] != current_mapping['BatchSize']:
+ api_params.update(BatchSize=source_params['batch_size'])
+ mapping_changed = True
+
+ if source_param_enabled is not None:
+ if source_param_enabled:
+ if current_mapping['State'] not in ('Enabled', 'Enabling'):
+ api_params.update(Enabled=True)
+ mapping_changed = True
+ else:
+ if current_mapping['State'] not in ('Disabled', 'Disabling'):
+ api_params.update(Enabled=False)
+ mapping_changed = True
+
+ if mapping_changed:
+ try:
+ if not module.check_mode:
+ facts = client.update_event_source_mapping(**api_params)
+ changed = True
+ except (ClientError, ParamValidationError, MissingParametersError) as e:
+ module.fail_json(msg='Error updating stream source event mapping: {0}'.format(e))
+
+ else:
+ if current_state == 'present':
+ # remove the stream event mapping
+ api_params = dict(UUID=facts[0]['UUID'])
+
+ try:
+ if not module.check_mode:
+ facts = client.delete_event_source_mapping(**api_params)
+ changed = True
+ except (ClientError, ParamValidationError, MissingParametersError) as e:
+ module.fail_json(msg='Error removing stream source event mapping: {0}'.format(e))
+
+ return camel_dict_to_snake_dict(dict(changed=changed, events=facts))
+
+
+def main():
+ """Produce a list of function suffixes which handle lambda events."""
+ this_module = sys.modules[__name__]
+ source_choices = ["stream"]
+
+ argument_spec = ec2_argument_spec()
+ argument_spec.update(
+ dict(
+ state=dict(required=False, default='present', choices=['present', 'absent']),
+            lambda_function_arn=dict(required=True, aliases=['function_name', 'function_arn']),
+            event_source=dict(required=False, default="stream", choices=source_choices),
+            source_params=dict(type='dict', required=True),
+ alias=dict(required=False, default=None),
+ version=dict(type='int', required=False, default=0),
+ )
+ )
+
+ module = AnsibleModule(
+ argument_spec=argument_spec,
+ supports_check_mode=True,
+ mutually_exclusive=[['alias', 'version']],
+ required_together=[]
+ )
+
+ # validate dependencies
+ if not HAS_BOTO3:
+ module.fail_json(msg='boto3 is required for this module.')
+
+ aws = AWSConnection(module, ['lambda'])
+
+ validate_params(module, aws)
+
+ this_module_function = getattr(this_module, 'lambda_event_{}'.format(module.params['event_source'].lower()))
+
+ results = this_module_function(module, aws)
+
+ module.exit_json(**results)
+
+
+# ansible import module(s) kept at ~eof as recommended
+from ansible.module_utils.basic import *
+from ansible.module_utils.ec2 import *
+
+if __name__ == '__main__':
+ main()
diff --git a/lib/ansible/modules/extras/cloud/amazon/lambda_facts.py b/lib/ansible/modules/extras/cloud/amazon/lambda_facts.py
new file mode 100644
index 0000000000..9103f69df5
--- /dev/null
+++ b/lib/ansible/modules/extras/cloud/amazon/lambda_facts.py
@@ -0,0 +1,408 @@
+#!/usr/bin/python
+# This file is part of Ansible
+#
+# Ansible is free software: you can redistribute it and/or modify
+# it under the terms of the GNU General Public License as published by
+# the Free Software Foundation, either version 3 of the License, or
+# (at your option) any later version.
+#
+# Ansible is distributed in the hope that it will be useful,
+# but WITHOUT ANY WARRANTY; without even the implied warranty of
+# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+# GNU General Public License for more details.
+#
+# You should have received a copy of the GNU General Public License
+# along with Ansible. If not, see <http://www.gnu.org/licenses/>.
+
+import datetime
+import sys
+
+try:
+ import boto3
+ from botocore.exceptions import ClientError
+ HAS_BOTO3 = True
+except ImportError:
+ HAS_BOTO3 = False
+
+
+DOCUMENTATION = '''
+---
+module: lambda_facts
+short_description: Gathers AWS Lambda function details as Ansible facts
+description:
+ - Gathers various details related to Lambda functions, including aliases, versions and event source mappings.
+ Use module M(lambda) to manage the lambda function itself, M(lambda_alias) to manage function aliases and
+ M(lambda_event) to manage lambda event source mappings.
+
+version_added: "2.2"
+
+options:
+ query:
+ description:
+ - Specifies the resource type for which to gather facts. Leave blank to retrieve all facts.
+    required: false
+ choices: [ "aliases", "all", "config", "mappings", "policy", "versions" ]
+ default: "all"
+ function_name:
+ description:
+ - The name of the lambda function for which facts are requested.
+ required: false
+ default: null
+ aliases: [ "function", "name"]
+ event_source_arn:
+ description:
+ - For query type 'mappings', this is the Amazon Resource Name (ARN) of the Amazon Kinesis or DynamoDB stream.
+ default: null
+ required: false
+author: Pierre Jodouin (@pjodouin)
+requirements:
+ - boto3
+extends_documentation_fragment:
+ - aws
+
+'''
+
+EXAMPLES = '''
+---
+# Simple example of listing all info for a function
+- name: List all for a specific function
+ lambda_facts:
+ query: all
+ function_name: myFunction
+ register: my_function_details
+# List all versions of a function
+- name: List function versions
+ lambda_facts:
+ query: versions
+ function_name: myFunction
+ register: my_function_versions
+# List all lambda functions
+- name: List all functions
+ lambda_facts:
+ query: all
+ max_items: 20
+- name: show Lambda facts
+ debug: var=lambda_facts
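+# Hedged example: list event source mappings (the stream ARN is a placeholder)
+- name: List stream event mappings
+  lambda_facts:
+    query: mappings
+    event_source_arn: arn:aws:kinesis:us-east-1:123456789012:stream/exampleStream
+  register: my_mappings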
+'''
+
+RETURN = '''
+---
+lambda_facts:
+ description: lambda facts
+ returned: success
+ type: dict
+lambda_facts.function:
+ description: lambda function list
+ returned: success
+ type: dict
+lambda_facts.function.TheName:
+ description: lambda function information, including event, mapping, and version information
+ returned: success
+ type: dict
+'''
+
+
+def fix_return(node):
+ """
+ fixup returned dictionary
+
+ :param node:
+ :return:
+ """
+
+ if isinstance(node, datetime.datetime):
+ node_value = str(node)
+
+ elif isinstance(node, list):
+ node_value = [fix_return(item) for item in node]
+
+ elif isinstance(node, dict):
+ node_value = dict([(item, fix_return(node[item])) for item in node.keys()])
+
+ else:
+ node_value = node
+
+ return node_value
+
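+# Example (hypothetical): fix_return({'t': datetime.datetime(2016, 1, 1)})
+# returns {'t': '2016-01-01 00:00:00'}, making the value JSON-serializable.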
+
+def alias_details(client, module):
+ """
+ Returns list of aliases for a specified function.
+
+ :param client: AWS API client reference (boto3)
+ :param module: Ansible module reference
+ :return dict:
+ """
+
+ lambda_facts = dict()
+
+ function_name = module.params.get('function_name')
+ if function_name:
+ params = dict()
+ if module.params.get('max_items'):
+ params['MaxItems'] = module.params.get('max_items')
+
+ if module.params.get('next_marker'):
+ params['Marker'] = module.params.get('next_marker')
+ try:
+ lambda_facts.update(aliases=client.list_aliases(FunctionName=function_name, **params)['Aliases'])
+ except ClientError as e:
+ if e.response['Error']['Code'] == 'ResourceNotFoundException':
+ lambda_facts.update(aliases=[])
+ else:
+ module.fail_json(msg='Unable to get {0} aliases, error: {1}'.format(function_name, e))
+ else:
+ module.fail_json(msg='Parameter function_name required for query=aliases.')
+
+ return {function_name: camel_dict_to_snake_dict(lambda_facts)}
+
+
+def all_details(client, module):
+ """
+ Returns all lambda related facts.
+
+ :param client: AWS API client reference (boto3)
+ :param module: Ansible module reference
+ :return dict:
+ """
+
+ if module.params.get('max_items') or module.params.get('next_marker'):
+        module.fail_json(msg='Cannot specify max_items or next_marker when query=all.')
+
+ lambda_facts = dict()
+
+ function_name = module.params.get('function_name')
+ if function_name:
+ lambda_facts[function_name] = {}
+ lambda_facts[function_name].update(config_details(client, module)[function_name])
+ lambda_facts[function_name].update(alias_details(client, module)[function_name])
+ lambda_facts[function_name].update(policy_details(client, module)[function_name])
+ lambda_facts[function_name].update(version_details(client, module)[function_name])
+ lambda_facts[function_name].update(mapping_details(client, module)[function_name])
+ else:
+ lambda_facts.update(config_details(client, module))
+
+ return lambda_facts
+
+
+def config_details(client, module):
+ """
+ Returns configuration details for one or all lambda functions.
+
+ :param client: AWS API client reference (boto3)
+ :param module: Ansible module reference
+ :return dict:
+ """
+
+ lambda_facts = dict()
+
+ function_name = module.params.get('function_name')
+ if function_name:
+ try:
+ lambda_facts.update(client.get_function_configuration(FunctionName=function_name))
+ except ClientError as e:
+ if e.response['Error']['Code'] == 'ResourceNotFoundException':
+ lambda_facts.update(function={})
+ else:
+ module.fail_json(msg='Unable to get {0} configuration, error: {1}'.format(function_name, e))
+ else:
+ params = dict()
+ if module.params.get('max_items'):
+ params['MaxItems'] = module.params.get('max_items')
+
+ if module.params.get('next_marker'):
+ params['Marker'] = module.params.get('next_marker')
+
+ try:
+ lambda_facts.update(function_list=client.list_functions(**params)['Functions'])
+ except ClientError as e:
+ if e.response['Error']['Code'] == 'ResourceNotFoundException':
+ lambda_facts.update(function_list=[])
+ else:
+ module.fail_json(msg='Unable to get function list, error: {0}'.format(e))
+
+ functions = dict()
+ for func in lambda_facts.pop('function_list', []):
+ functions[func['FunctionName']] = camel_dict_to_snake_dict(func)
+ return functions
+
+ return {function_name: camel_dict_to_snake_dict(lambda_facts)}
+
+
+def mapping_details(client, module):
+ """
+ Returns all lambda event source mappings.
+
+ :param client: AWS API client reference (boto3)
+ :param module: Ansible module reference
+ :return dict:
+ """
+
+ lambda_facts = dict()
+ params = dict()
+ function_name = module.params.get('function_name')
+
+ if function_name:
+ params['FunctionName'] = module.params.get('function_name')
+
+ if module.params.get('event_source_arn'):
+ params['EventSourceArn'] = module.params.get('event_source_arn')
+
+ if module.params.get('max_items'):
+ params['MaxItems'] = module.params.get('max_items')
+
+ if module.params.get('next_marker'):
+ params['Marker'] = module.params.get('next_marker')
+
+ try:
+ lambda_facts.update(mappings=client.list_event_source_mappings(**params)['EventSourceMappings'])
+ except ClientError as e:
+ if e.response['Error']['Code'] == 'ResourceNotFoundException':
+ lambda_facts.update(mappings=[])
+ else:
+ module.fail_json(msg='Unable to get source event mappings, error: {0}'.format(e))
+
+ if function_name:
+ return {function_name: camel_dict_to_snake_dict(lambda_facts)}
+
+ return camel_dict_to_snake_dict(lambda_facts)
+
+
+def policy_details(client, module):
+ """
+ Returns policy attached to a lambda function.
+
+ :param client: AWS API client reference (boto3)
+ :param module: Ansible module reference
+ :return dict:
+ """
+
+ if module.params.get('max_items') or module.params.get('next_marker'):
+        module.fail_json(msg='Cannot specify max_items or next_marker when query=policy.')
+
+ lambda_facts = dict()
+
+ function_name = module.params.get('function_name')
+ if function_name:
+ try:
+ # get_policy returns a JSON string so must convert to dict before reassigning to its key
+ lambda_facts.update(policy=json.loads(client.get_policy(FunctionName=function_name)['Policy']))
+ except ClientError as e:
+ if e.response['Error']['Code'] == 'ResourceNotFoundException':
+ lambda_facts.update(policy={})
+ else:
+ module.fail_json(msg='Unable to get {0} policy, error: {1}'.format(function_name, e))
+ else:
+ module.fail_json(msg='Parameter function_name required for query=policy.')
+
+ return {function_name: camel_dict_to_snake_dict(lambda_facts)}
+
+
+def version_details(client, module):
+ """
+ Returns all lambda function versions.
+
+ :param client: AWS API client reference (boto3)
+ :param module: Ansible module reference
+ :return dict:
+ """
+
+ lambda_facts = dict()
+
+ function_name = module.params.get('function_name')
+ if function_name:
+ params = dict()
+ if module.params.get('max_items'):
+ params['MaxItems'] = module.params.get('max_items')
+
+ if module.params.get('next_marker'):
+ params['Marker'] = module.params.get('next_marker')
+
+ try:
+ lambda_facts.update(versions=client.list_versions_by_function(FunctionName=function_name, **params)['Versions'])
+ except ClientError as e:
+ if e.response['Error']['Code'] == 'ResourceNotFoundException':
+ lambda_facts.update(versions=[])
+ else:
+ module.fail_json(msg='Unable to get {0} versions, error: {1}'.format(function_name, e))
+ else:
+ module.fail_json(msg='Parameter function_name required for query=versions.')
+
+ return {function_name: camel_dict_to_snake_dict(lambda_facts)}
+
+
+def main():
+ """
+ Main entry point.
+
+ :return dict: ansible facts
+ """
+ argument_spec = ec2_argument_spec()
+ argument_spec.update(
+ dict(
+ function_name=dict(required=False, default=None, aliases=['function', 'name']),
+ query=dict(required=False, choices=['aliases', 'all', 'config', 'mappings', 'policy', 'versions'], default='all'),
+            event_source_arn=dict(required=False, default=None),
+            # max_items/next_marker are referenced by the query handlers above and in EXAMPLES
+            max_items=dict(type='int', required=False, default=None),
+            next_marker=dict(required=False, default=None)
+        )
+ )
+
+ module = AnsibleModule(
+ argument_spec=argument_spec,
+ supports_check_mode=True,
+ mutually_exclusive=[],
+ required_together=[]
+ )
+
+ # validate dependencies
+ if not HAS_BOTO3:
+ module.fail_json(msg='boto3 is required for this module.')
+
+ # validate function_name if present
+ function_name = module.params['function_name']
+ if function_name:
+ if not re.search("^[\w\-:]+$", function_name):
+ module.fail_json(
+ msg='Function name {0} is invalid. Names must contain only alphanumeric characters and hyphens.'.format(function_name)
+ )
+ if len(function_name) > 64:
+ module.fail_json(msg='Function name "{0}" exceeds 64 character limit'.format(function_name))
+
+ try:
+ region, endpoint, aws_connect_kwargs = get_aws_connection_info(module, boto3=True)
+ aws_connect_kwargs.update(dict(region=region,
+ endpoint=endpoint,
+ conn_type='client',
+ resource='lambda'
+ ))
+ client = boto3_conn(module, **aws_connect_kwargs)
+ except ClientError as e:
+ module.fail_json(msg="Can't authorize connection - {0}".format(e))
+
+ this_module = sys.modules[__name__]
+
+ invocations = dict(
+ aliases='alias_details',
+ all='all_details',
+ config='config_details',
+ mappings='mapping_details',
+ policy='policy_details',
+ versions='version_details',
+ )
+
+ this_module_function = getattr(this_module, invocations[module.params['query']])
+ all_facts = fix_return(this_module_function(client, module))
+
+ results = dict(ansible_facts={'lambda_facts': {'function': all_facts}}, changed=False)
+
+ if module.check_mode:
+ results['msg'] = 'Check mode set but ignored for fact gathering only.'
+
+ module.exit_json(**results)
+
+
+# ansible import module(s) kept at ~eof as recommended
+from ansible.module_utils.basic import *
+from ansible.module_utils.ec2 import *
+
+if __name__ == '__main__':
+ main()
diff --git a/lib/ansible/modules/extras/cloud/amazon/redshift.py b/lib/ansible/modules/extras/cloud/amazon/redshift.py
new file mode 100644
index 0000000000..8b4c942e4a
--- /dev/null
+++ b/lib/ansible/modules/extras/cloud/amazon/redshift.py
@@ -0,0 +1,497 @@
+#!/usr/bin/python
+
+# Copyright 2014 Jens Carl, Hothead Games Inc.
+#
+# This program is free software: you can redistribute it and/or modify
+# it under the terms of the GNU General Public License as published by
+# the Free Software Foundation, either version 3 of the License, or
+# (at your option) any later version.
+#
+# This program is distributed in the hope that it will be useful,
+# but WITHOUT ANY WARRANTY; without even the implied warranty of
+# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+# GNU General Public License for more details.
+#
+# You should have received a copy of the GNU General Public License
+# along with this program. If not, see <http://www.gnu.org/licenses/>.
+
+DOCUMENTATION = '''
+---
+author:
+ - "Jens Carl (@j-carl), Hothead Games Inc."
+module: redshift
+version_added: "2.1"
+short_description: create, delete, or modify an Amazon Redshift instance
+description:
+ - Creates, deletes, or modifies amazon Redshift cluster instances.
+options:
+ command:
+ description:
+ - Specifies the action to take.
+ required: true
+ choices: [ 'create', 'facts', 'delete', 'modify' ]
+ identifier:
+ description:
+ - Redshift cluster identifier.
+ required: true
+ node_type:
+ description:
+ - The node type of the cluster. Must be specified when command=create.
+ required: false
+ choices: ['dw1.xlarge', 'dw1.8xlarge', 'dw2.large', 'dw2.8xlarge', ]
+ username:
+ description:
+ - Master database username. Used only when command=create.
+ required: false
+ password:
+ description:
+ - Master database password. Used only when command=create.
+ required: false
+ cluster_type:
+ description:
+ - The type of cluster.
+ required: false
+ choices: ['multi-node', 'single-node' ]
+ default: 'single-node'
+ db_name:
+ description:
+ - Name of the database.
+ required: false
+ default: null
+ availability_zone:
+ description:
+      - Availability zone in which to launch the cluster.
+ required: false
+ aliases: ['zone', 'aws_zone']
+ number_of_nodes:
+ description:
+ - Number of nodes. Only used when cluster_type=multi-node.
+ required: false
+ default: null
+ cluster_subnet_group_name:
+ description:
+      - Name of the cluster subnet group the cluster should be placed in.
+ required: false
+ aliases: ['subnet']
+ cluster_security_groups:
+ description:
+      - List of cluster security groups the cluster should belong to.
+ required: false
+ default: null
+ aliases: ['security_groups']
+ vpc_security_group_ids:
+ description:
+      - List of VPC security group IDs to associate with the cluster.
+ required: false
+ aliases: ['vpc_security_groups']
+ default: null
+ preferred_maintenance_window:
+ description:
+      - Preferred maintenance window.
+ required: false
+ aliases: ['maintance_window', 'maint_window']
+ default: null
+ cluster_parameter_group_name:
+ description:
+      - Name of the cluster parameter group to associate with the cluster.
+ required: false
+ aliases: ['param_group_name']
+ default: null
+ automated_snapshot_retention_period:
+ description:
+      - Number of days that automated snapshots are retained.
+ required: false
+ aliases: ['retention_period']
+ default: null
+ port:
+ description:
+      - Port on which the cluster listens.
+ required: false
+ default: null
+ cluster_version:
+ description:
+      - Version of the cluster engine.
+ required: false
+ aliases: ['version']
+ choices: ['1.0']
+ default: null
+ allow_version_upgrade:
+ description:
+      - Whether version upgrades may be applied to the cluster.
+ required: false
+ aliases: ['version_upgrade']
+ default: null
+ publicly_accessible:
+ description:
+      - Whether the cluster can be accessed publicly.
+ required: false
+ default: null
+ encrypted:
+ description:
+      - Whether the cluster is encrypted.
+ required: false
+ default: null
+ elastic_ip:
+ description:
+      - The Elastic IP address to associate with the cluster.
+ required: false
+ default: null
+ new_cluster_identifier:
+ description:
+ - Only used when command=modify.
+ required: false
+ aliases: ['new_identifier']
+ default: null
+ wait:
+ description:
+      - When command=create or command=modify, wait for the cluster to enter the 'available' state. When command=delete, wait for the cluster to be terminated.
+ required: false
+ default: "no"
+ choices: [ "yes", "no" ]
+ wait_timeout:
+ description:
+      - How long to wait, in seconds, before giving up.
+ default: 300
+requirements: [ 'boto' ]
+extends_documentation_fragment: aws
+'''
+
+EXAMPLES = '''
+# Basic cluster provisioning example
+- redshift: >
+ command=create
+ node_type=dw1.xlarge
+ identifier=new_cluster
+ username=cluster_admin
+ password=1nsecure
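+
+# Hedged follow-up example (assumes the cluster created above exists): delete it and wait
+- redshift: >
+    command=delete
+    identifier=new_cluster
+    wait=yes
+    wait_timeout=600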
+'''
+
+RETURN = '''
+cluster:
+ description: dictionary containing all the cluster information
+ returned: success
+ type: dictionary
+ contains:
+ identifier:
+ description: Id of the cluster.
+ returned: success
+ type: string
+ sample: "new_redshift_cluster"
+ create_time:
+ description: Time of the cluster creation as timestamp.
+ returned: success
+ type: float
+ sample: 1430158536.308
+ status:
+      description: Status of the cluster.
+ returned: success
+ type: string
+ sample: "available"
+ db_name:
+ description: Name of the database.
+ returned: success
+ type: string
+ sample: "new_db_name"
+ availability_zone:
+ description: Amazon availability zone where the cluster is located.
+ returned: success
+ type: string
+ sample: "us-east-1b"
+ maintenance_window:
+ description: Time frame when maintenance/upgrade are done.
+ returned: success
+ type: string
+ sample: "sun:09:30-sun:10:00"
+ private_ip_address:
+ description: Private IP address of the main node.
+ returned: success
+ type: string
+ sample: "10.10.10.10"
+ public_ip_address:
+ description: Public IP address of the main node.
+ returned: success
+ type: string
+ sample: "0.0.0.0"
+ port:
+ description: Port of the cluster.
+ returned: success
+ type: int
+ sample: 5439
+ url:
+ description: FQDN of the main cluster node.
+ returned: success
+ type: string
+ sample: "new-redshift_cluster.jfkdjfdkj.us-east-1.redshift.amazonaws.com"
+'''
+
+import time
+
+try:
+ import boto
+ from boto import redshift
+ HAS_BOTO = True
+except ImportError:
+ HAS_BOTO = False
+
+
+def _collect_facts(resource):
+ """Transfrom cluster information to dict."""
+ facts = {
+ 'identifier' : resource['ClusterIdentifier'],
+ 'create_time' : resource['ClusterCreateTime'],
+ 'status' : resource['ClusterStatus'],
+ 'username' : resource['MasterUsername'],
+ 'db_name' : resource['DBName'],
+ 'availability_zone' : resource['AvailabilityZone'],
+ 'maintenance_window': resource['PreferredMaintenanceWindow'],
+ }
+
+ for node in resource['ClusterNodes']:
+ if node['NodeRole'] in ('SHARED', 'LEADER'):
+ facts['private_ip_address'] = node['PrivateIPAddress']
+ break
+
+ return facts
+
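+# A typical (hypothetical) result:
+#   {'identifier': 'new-cluster', 'create_time': 1430158536.308, 'status': 'available',
+#    'username': 'admin', 'db_name': 'dev', 'availability_zone': 'us-east-1b',
+#    'maintenance_window': 'sun:09:30-sun:10:00', 'private_ip_address': '10.10.10.10'}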
+
+def create_cluster(module, redshift):
+ """
+ Create a new cluster
+
+ module: AnsibleModule object
+ redshift: authenticated redshift connection object
+
+    Returns: tuple of (changed, cluster facts dict)
+ """
+
+ identifier = module.params.get('identifier')
+ node_type = module.params.get('node_type')
+ username = module.params.get('username')
+ password = module.params.get('password')
+ wait = module.params.get('wait')
+ wait_timeout = module.params.get('wait_timeout')
+
+ changed = True
+ # Package up the optional parameters
+ params = {}
+ for p in ('db_name', 'cluster_type', 'cluster_security_groups',
+ 'vpc_security_group_ids', 'cluster_subnet_group_name',
+ 'availability_zone', 'preferred_maintenance_window',
+ 'cluster_parameter_group_name',
+ 'automated_snapshot_retention_period', 'port',
+ 'cluster_version', 'allow_version_upgrade',
+ 'number_of_nodes', 'publicly_accessible',
+ 'encrypted', 'elastic_ip'):
+        if module.params.get(p):
+            params[p] = module.params.get(p)
+
+ try:
+ redshift.describe_clusters(identifier)['DescribeClustersResponse']['DescribeClustersResult']['Clusters'][0]
+ changed = False
+ except boto.exception.JSONResponseError as e:
+ try:
+ redshift.create_cluster(identifier, node_type, username, password, **params)
+ except boto.exception.JSONResponseError as e:
+ module.fail_json(msg=str(e))
+
+ try:
+ resource = redshift.describe_clusters(identifier)['DescribeClustersResponse']['DescribeClustersResult']['Clusters'][0]
+ except boto.exception.JSONResponseError as e:
+ module.fail_json(msg=str(e))
+
+ if wait:
+ try:
+ wait_timeout = time.time() + wait_timeout
+ time.sleep(5)
+
+ while wait_timeout > time.time() and resource['ClusterStatus'] != 'available':
+ time.sleep(5)
+ if wait_timeout <= time.time():
+                    module.fail_json(msg="Timeout waiting for resource %s" % identifier)
+
+ resource = redshift.describe_clusters(identifier)['DescribeClustersResponse']['DescribeClustersResult']['Clusters'][0]
+
+ except boto.exception.JSONResponseError as e:
+ module.fail_json(msg=str(e))
+
+ return(changed, _collect_facts(resource))
+
+
+def describe_cluster(module, redshift):
+ """
+ Collect data about the cluster.
+
+ module: Ansible module object
+ redshift: authenticated redshift connection object
+ """
+ identifier = module.params.get('identifier')
+
+ try:
+ resource = redshift.describe_clusters(identifier)['DescribeClustersResponse']['DescribeClustersResult']['Clusters'][0]
+ except boto.exception.JSONResponseError as e:
+ module.fail_json(msg=str(e))
+
+ return(True, _collect_facts(resource))
+
+
+def delete_cluster(module, redshift):
+ """
+ Delete a cluster.
+
+ module: Ansible module object
+ redshift: authenticated redshift connection object
+ """
+
+ identifier = module.params.get('identifier')
+ wait = module.params.get('wait')
+ wait_timeout = module.params.get('wait_timeout')
+
+ try:
+        redshift.delete_cluster(identifier)
+ except boto.exception.JSONResponseError as e:
+ module.fail_json(msg=str(e))
+
+ if wait:
+ try:
+ wait_timeout = time.time() + wait_timeout
+ resource = redshift.describe_clusters(identifier)['DescribeClustersResponse']['DescribeClustersResult']['Clusters'][0]
+
+ while wait_timeout > time.time() and resource['ClusterStatus'] != 'deleting':
+ time.sleep(5)
+ if wait_timeout <= time.time():
+                    module.fail_json(msg="Timeout waiting for resource %s" % identifier)
+
+ resource = redshift.describe_clusters(identifier)['DescribeClustersResponse']['DescribeClustersResult']['Clusters'][0]
+
+ except boto.exception.JSONResponseError as e:
+ module.fail_json(msg=str(e))
+
+ return(True, {})
+
+
+def modify_cluster(module, redshift):
+ """
+ Modify an existing cluster.
+
+ module: Ansible module object
+ redshift: authenticated redshift connection object
+ """
+
+ identifier = module.params.get('identifier')
+ wait = module.params.get('wait')
+ wait_timeout = module.params.get('wait_timeout')
+
+ # Package up the optional parameters
+ params = {}
+ for p in ('cluster_type', 'cluster_security_groups',
+ 'vpc_security_group_ids', 'cluster_subnet_group_name',
+ 'availability_zone', 'preferred_maintenance_window',
+ 'cluster_parameter_group_name',
+ 'automated_snapshot_retention_period', 'port', 'cluster_version',
+ 'allow_version_upgrade', 'number_of_nodes', 'new_cluster_identifier'):
+ if module.params.get(p):
+ params[p] = module.params.get(p)
+
+    # the cluster must already exist; fail early if it cannot be described
+    try:
+        redshift.describe_clusters(identifier)['DescribeClustersResponse']['DescribeClustersResult']['Clusters'][0]
+    except boto.exception.JSONResponseError as e:
+        module.fail_json(msg=str(e))
+
+    try:
+        redshift.modify_cluster(identifier, **params)
+    except boto.exception.JSONResponseError as e:
+        module.fail_json(msg=str(e))
+
+ try:
+ resource = redshift.describe_clusters(identifier)['DescribeClustersResponse']['DescribeClustersResult']['Clusters'][0]
+ except boto.exception.JSONResponseError as e:
+ module.fail_json(msg=str(e))
+
+ if wait:
+ try:
+ wait_timeout = time.time() + wait_timeout
+ time.sleep(5)
+
+ while wait_timeout > time.time() and resource['ClusterStatus'] != 'available':
+ time.sleep(5)
+ if wait_timeout <= time.time():
+                    module.fail_json(msg="Timeout waiting for resource %s" % identifier)
+
+ resource = redshift.describe_clusters(identifier)['DescribeClustersResponse']['DescribeClustersResult']['Clusters'][0]
+
+ except boto.exception.JSONResponseError as e:
+            # see https://github.com/boto/boto/issues/2776
+ module.fail_json(msg=str(e))
+
+ return(True, _collect_facts(resource))
+
+
+def main():
+ argument_spec = ec2_argument_spec()
+ argument_spec.update(dict(
+ command = dict(choices=['create', 'facts', 'delete', 'modify'], required=True),
+ identifier = dict(required=True),
+ node_type = dict(choices=['dw1.xlarge', 'dw1.8xlarge', 'dw2.large', 'dw2.8xlarge', ], required=False),
+ username = dict(required=False),
+ password = dict(no_log=True, required=False),
+        db_name = dict(required=False),
+ cluster_type = dict(choices=['multi-node', 'single-node', ], default='single-node'),
+ cluster_security_groups = dict(aliases=['security_groups'], type='list'),
+ vpc_security_group_ids = dict(aliases=['vpc_security_groups'], type='list'),
+ cluster_subnet_group_name = dict(aliases=['subnet']),
+ availability_zone = dict(aliases=['aws_zone', 'zone']),
+ preferred_maintenance_window = dict(aliases=['maintance_window', 'maint_window']),
+ cluster_parameter_group_name = dict(aliases=['param_group_name']),
+ automated_snapshot_retention_period = dict(aliases=['retention_period']),
+ port = dict(type='int'),
+ cluster_version = dict(aliases=['version'], choices=['1.0']),
+ allow_version_upgrade = dict(aliases=['version_upgrade'], type='bool'),
+ number_of_nodes = dict(type='int'),
+ publicly_accessible = dict(type='bool'),
+ encrypted = dict(type='bool'),
+ elastic_ip = dict(required=False),
+ new_cluster_identifier = dict(aliases=['new_identifier']),
+ wait = dict(type='bool', default=False),
+        wait_timeout = dict(type='int', default=300),
+ )
+ )
+
+ module = AnsibleModule(
+ argument_spec=argument_spec,
+ )
+
+ if not HAS_BOTO:
+ module.fail_json(msg='boto v2.9.0+ required for this module')
+
+ command = module.params.get('command')
+
+ region, ec2_url, aws_connect_params = get_aws_connection_info(module)
+ if not region:
+ module.fail_json(msg=str("region not specified and unable to determine region from EC2_REGION."))
+
+    # connect to the redshift endpoint
+ try:
+ conn = connect_to_aws(boto.redshift, region, **aws_connect_params)
+ except boto.exception.JSONResponseError as e:
+ module.fail_json(msg=str(e))
+
+ changed = True
+ if command == 'create':
+ (changed, cluster) = create_cluster(module, conn)
+
+ elif command == 'facts':
+ (changed, cluster) = describe_cluster(module, conn)
+
+ elif command == 'delete':
+ (changed, cluster) = delete_cluster(module, conn)
+
+ elif command == 'modify':
+ (changed, cluster) = modify_cluster(module, conn)
+
+ module.exit_json(changed=changed, cluster=cluster)
+
+# import module snippets
+from ansible.module_utils.basic import *
+from ansible.module_utils.ec2 import *
+
+if __name__ == '__main__':
+ main()
diff --git a/lib/ansible/modules/extras/cloud/amazon/redshift_subnet_group.py b/lib/ansible/modules/extras/cloud/amazon/redshift_subnet_group.py
new file mode 100644
index 0000000000..d47593c797
--- /dev/null
+++ b/lib/ansible/modules/extras/cloud/amazon/redshift_subnet_group.py
@@ -0,0 +1,182 @@
+#!/usr/bin/python
+
+# Copyright 2014 Jens Carl, Hothead Games Inc.
+#
+# This program is free software: you can redistribute it and/or modify
+# it under the terms of the GNU General Public License as published by
+# the Free Software Foundation, either version 3 of the License, or
+# (at your option) any later version.
+#
+# This program is distributed in the hope that it will be useful,
+# but WITHOUT ANY WARRANTY; without even the implied warranty of
+# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+# GNU General Public License for more details.
+#
+# You should have received a copy of the GNU General Public License
+# along with this program. If not, see <http://www.gnu.org/licenses/>.
+
+DOCUMENTATION = '''
+---
+author:
+ - "Jens Carl (@j-carl), Hothead Games Inc."
+module: redshift_subnet_group
+version_added: "2.1"
+short_description: manage Redshift cluster subnet groups
+description:
+  - Creates, modifies, and deletes Redshift cluster subnet groups.
+options:
+ state:
+ description:
+ - Specifies whether the subnet should be present or absent.
+ default: 'present'
+ choices: ['present', 'absent' ]
+ group_name:
+ description:
+ - Cluster subnet group name.
+ required: true
+ aliases: ['name']
+ group_description:
+ description:
+ - Database subnet group description.
+ required: false
+ default: null
+ aliases: ['description']
+ group_subnets:
+ description:
+ - List of subnet IDs that make up the cluster subnet group.
+ required: false
+ default: null
+ aliases: ['subnets']
+requirements: [ 'boto' ]
+extends_documentation_fragment: aws
+'''
+
+EXAMPLES = '''
+# Create a Redshift subnet group
+- local_action:
+ module: redshift_subnet_group
+ state: present
+ group_name: redshift-subnet
+ group_description: Redshift subnet
+ group_subnets:
+ - 'subnet-aaaaa'
+ - 'subnet-bbbbb'
+
+# Remove subnet group
+- redshift_subnet_group:
+    state: absent
+    group_name: redshift-subnet
+'''
+
+RETURN = '''
+group:
+ description: dictionary containing all Redshift subnet group information
+ returned: success
+ type: dictionary
+ contains:
+ name:
+ description: name of the Redshift subnet group
+ returned: success
+ type: string
+ sample: "redshift_subnet_group_name"
+ vpc_id:
+ description: Id of the VPC where the subnet is located
+ returned: success
+      type: string
+ sample: "vpc-aabb1122"
+'''
+
+try:
+ import boto
+ import boto.redshift
+ HAS_BOTO = True
+except ImportError:
+ HAS_BOTO = False
+
+
+def main():
+ argument_spec = ec2_argument_spec()
+ argument_spec.update(dict(
+        state=dict(required=False, default='present', choices=['present', 'absent']),
+ group_name=dict(required=True, aliases=['name']),
+ group_description=dict(required=False, aliases=['description']),
+ group_subnets=dict(required=False, aliases=['subnets'], type='list'),
+ ))
+ module = AnsibleModule(argument_spec=argument_spec)
+
+ if not HAS_BOTO:
+ module.fail_json(msg='boto v2.9.0+ required for this module')
+
+ state = module.params.get('state')
+ group_name = module.params.get('group_name')
+ group_description = module.params.get('group_description')
+ group_subnets = module.params.get('group_subnets')
+
+ if state == 'present':
+ for required in ('group_name', 'group_description', 'group_subnets'):
+ if not module.params.get(required):
+ module.fail_json(msg=str("parameter %s required for state='present'" % required))
+ else:
+ for not_allowed in ('group_description', 'group_subnets'):
+ if module.params.get(not_allowed):
+ module.fail_json(msg=str("parameter %s not allowed for state='absent'" % not_allowed))
+
+ region, ec2_url, aws_connect_params = get_aws_connection_info(module)
+ if not region:
+ module.fail_json(msg=str("region not specified and unable to determine region from EC2_REGION."))
+
+ # Connect to the Redshift endpoint.
+ try:
+ conn = connect_to_aws(boto.redshift, region, **aws_connect_params)
+ except boto.exception.JSONResponseError as e:
+ module.fail_json(msg=str(e))
+
+ try:
+ changed = False
+ exists = False
+ group = None
+
+ try:
+ matching_groups = conn.describe_cluster_subnet_groups(group_name, max_records=100)
+ exists = len(matching_groups) > 0
+ except boto.exception.JSONResponseError as e:
+ if e.body['Error']['Code'] != 'ClusterSubnetGroupNotFoundFault':
+ module.fail_json(msg=str(e))
+
+ if state == 'absent':
+ if exists:
+ conn.delete_cluster_subnet_group(group_name)
+ changed = True
+
+ else:
+ if not exists:
+ new_group = conn.create_cluster_subnet_group(group_name, group_description, group_subnets)
+ group = {
+ 'name': new_group['CreateClusterSubnetGroupResponse']['CreateClusterSubnetGroupResult']
+ ['ClusterSubnetGroup']['ClusterSubnetGroupName'],
+ 'vpc_id': new_group['CreateClusterSubnetGroupResponse']['CreateClusterSubnetGroupResult']
+ ['ClusterSubnetGroup']['VpcId'],
+ }
+ else:
+ changed_group = conn.modify_cluster_subnet_group(group_name, group_subnets, description=group_description)
+ group = {
+ 'name': changed_group['ModifyClusterSubnetGroupResponse']['ModifyClusterSubnetGroupResult']
+ ['ClusterSubnetGroup']['ClusterSubnetGroupName'],
+ 'vpc_id': changed_group['ModifyClusterSubnetGroupResponse']['ModifyClusterSubnetGroupResult']
+ ['ClusterSubnetGroup']['VpcId'],
+ }
+
+ changed = True
+
+ except boto.exception.JSONResponseError as e:
+ module.fail_json(msg=str(e))
+
+ module.exit_json(changed=changed, group=group)
+
+# import module snippets
+from ansible.module_utils.basic import *
+from ansible.module_utils.ec2 import *
+
+if __name__ == '__main__':
+ main()
diff --git a/lib/ansible/modules/extras/cloud/amazon/route53_facts.py b/lib/ansible/modules/extras/cloud/amazon/route53_facts.py
new file mode 100644
index 0000000000..95c1491a66
--- /dev/null
+++ b/lib/ansible/modules/extras/cloud/amazon/route53_facts.py
@@ -0,0 +1,436 @@
+#!/usr/bin/python
+# This file is part of Ansible
+#
+# Ansible is free software: you can redistribute it and/or modify
+# it under the terms of the GNU General Public License as published by
+# the Free Software Foundation, either version 3 of the License, or
+# (at your option) any later version.
+#
+# Ansible is distributed in the hope that it will be useful,
+# but WITHOUT ANY WARRANTY; without even the implied warranty of
+# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+# GNU General Public License for more details.
+#
+# You should have received a copy of the GNU General Public License
+# along with Ansible. If not, see <http://www.gnu.org/licenses/>.
+
+DOCUMENTATION = '''
+module: route53_facts
+short_description: Retrieves route53 details using AWS methods
+description:
+  - Gets various details related to Route53 zones, record sets or health checks.
+version_added: "2.0"
+options:
+ query:
+ description:
+ - specifies the query action to take
+ required: True
+ choices: [
+ 'change',
+ 'checker_ip_range',
+ 'health_check',
+ 'hosted_zone',
+ 'record_sets',
+ 'reusable_delegation_set',
+ ]
+ change_id:
+ description:
+ - The ID of the change batch request.
+ The value that you specify here is the value that
+ ChangeResourceRecordSets returned in the Id element
+ when you submitted the request.
+ required: false
+ hosted_zone_id:
+ description:
+ - The Hosted Zone ID of the DNS zone
+ required: false
+ max_items:
+ description:
+ - Maximum number of items to return for various get/list requests
+ required: false
+ next_marker:
+ description:
+ - "Some requests such as list_command: hosted_zones will return a maximum
+ number of entries - EG 100. If the number of entries exceeds this maximum
+ another request can be sent using the NextMarker entry from the first response
+ to get the next page of results"
+ required: false
+ delegation_set_id:
+ description:
+ - The DNS Zone delegation set ID
+ required: false
+ start_record_name:
+ description:
+ - "The first name in the lexicographic ordering of domain names that you want
+ the query: record_sets to start listing from"
+ required: false
+ type:
+ description:
+ - The type of DNS record
+ required: false
+ choices: [ 'A', 'CNAME', 'MX', 'AAAA', 'TXT', 'PTR', 'SRV', 'SPF', 'NS' ]
+ dns_name:
+ description:
+ - The first name in the lexicographic ordering of domain names that you want
+ the query to start listing from
+ required: false
+ resource_id:
+ description:
+ - The ID/s of the specified resource/s
+ required: false
+ aliases: ['resource_ids']
+ health_check_id:
+ description:
+ - The ID of the health check
+ required: false
+ hosted_zone_method:
+ description:
+ - "This is used in conjunction with query: hosted_zone.
+ It allows for listing details, counts or tags of various
+ hosted zone details."
+ required: false
+ choices: [
+ 'details',
+ 'list',
+ 'list_by_name',
+ 'count',
+ 'tags',
+ ]
+ default: 'list'
+ health_check_method:
+ description:
+ - "This is used in conjunction with query: health_check.
+ It allows for listing details, counts or tags of various
+ health check details."
+ required: false
+ choices: [
+ 'list',
+ 'details',
+ 'status',
+ 'failure_reason',
+ 'count',
+ 'tags',
+ ]
+ default: 'list'
+author: Karen Cheng (@Etherdaemon)
+extends_documentation_fragment: aws
+'''
+
+EXAMPLES = '''
+# Simple example of listing all hosted zones
+- name: List all hosted zones
+ route53_facts:
+ query: hosted_zone
+ register: hosted_zones
+
+# Getting a count of hosted zones
+- name: Return a count of all hosted zones
+ route53_facts:
+ query: hosted_zone
+ hosted_zone_method: count
+ register: hosted_zone_count
+
+- name: List the first 20 resource record sets in a given hosted zone
+ route53_facts:
+ profile: account_name
+ query: record_sets
+ hosted_zone_id: 'ZZZ1111112222'
+ max_items: 20
+ register: record_sets
+
+- name: List first 20 health checks
+ route53_facts:
+ query: health_check
+ health_check_method: list
+ max_items: 20
+ register: health_checks
+
+- name: Get health check last failure_reason
+ route53_facts:
+ query: health_check
+ health_check_method: failure_reason
+ health_check_id: '00000000-1111-2222-3333-12345678abcd'
+ register: health_check_failure_reason
+
+- name: Retrieve reusable delegation set details
+ route53_facts:
+ query: reusable_delegation_set
+ delegation_set_id: 'delegation id'
+ register: delegation_sets
+
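+# The examples below are illustrative sketches: the IDs are placeholders,
+# and NextMarker is assumed to be present on a truncated previous response.
+- name: List the next page of hosted zones using the marker from a previous run
+ route53_facts:
+ query: hosted_zone
+ next_marker: "{{ hosted_zones.NextMarker }}"
+ register: hosted_zones_page_2
+
+- name: List A record sets starting from a given name (type requires start_record_name)
+ route53_facts:
+ query: record_sets
+ hosted_zone_id: 'ZZZ1111112222'
+ start_record_name: host1.example.com
+ type: A
+ register: a_records
+
+- name: List tags on a health check
+ route53_facts:
+ query: health_check
+ health_check_method: tags
+ resource_id: '00000000-1111-2222-3333-12345678abcd'
+ register: health_check_tags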
+'''
+try:
+ import boto
+ import botocore
+ HAS_BOTO = True
+except ImportError:
+ HAS_BOTO = False
+
+try:
+ import boto3
+ HAS_BOTO3 = True
+except ImportError:
+ HAS_BOTO3 = False
+
+
+def get_hosted_zone(client, module):
+ params = dict()
+
+ if module.params.get('hosted_zone_id'):
+ params['Id'] = module.params.get('hosted_zone_id')
+ else:
+ module.fail_json(msg="Hosted Zone Id is required")
+
+ results = client.get_hosted_zone(**params)
+ return results
+
+
+def reusable_delegation_set_details(client, module):
+ params = dict()
+ if not module.params.get('delegation_set_id'):
+ if module.params.get('max_items'):
+ params['MaxItems'] = module.params.get('max_items')
+
+ if module.params.get('next_marker'):
+ params['Marker'] = module.params.get('next_marker')
+
+ results = client.list_reusable_delegation_sets(**params)
+ else:
+ params['DelegationSetId'] = module.params.get('delegation_set_id')
+ results = client.get_reusable_delegation_set(**params)
+
+ return results
+
+
+def list_hosted_zones(client, module):
+ params = dict()
+
+ if module.params.get('max_items'):
+ params['MaxItems'] = module.params.get('max_items')
+
+ if module.params.get('next_marker'):
+ params['Marker'] = module.params.get('next_marker')
+
+ if module.params.get('delegation_set_id'):
+ params['DelegationSetId'] = module.params.get('delegation_set_id')
+
+ results = client.list_hosted_zones(**params)
+ return results
+
+
+def list_hosted_zones_by_name(client, module):
+ params = dict()
+
+ if module.params.get('hosted_zone_id'):
+ params['HostedZoneId'] = module.params.get('hosted_zone_id')
+
+ if module.params.get('dns_name'):
+ params['DNSName'] = module.params.get('dns_name')
+
+ if module.params.get('max_items'):
+ params['MaxItems'] = module.params.get('max_items')
+
+ results = client.list_hosted_zones_by_name(**params)
+ return results
+
+
+def change_details(client, module):
+ params = dict()
+
+ if module.params.get('change_id'):
+ params['Id'] = module.params.get('change_id')
+ else:
+ module.fail_json(msg="change_id is required")
+
+ results = client.get_change(**params)
+ return results
+
+
+def checker_ip_range_details(client, module):
+ results = client.get_checker_ip_ranges()
+ return results
+
+
+def get_count(client, module):
+ if module.params.get('query') == 'health_check':
+ results = client.get_health_check_count()
+ else:
+ results = client.get_hosted_zone_count()
+
+ return results
+
+
+def get_health_check(client, module):
+ params = dict()
+
+ if not module.params.get('health_check_id'):
+ module.fail_json(msg="health_check_id is required")
+ else:
+ params['HealthCheckId'] = module.params.get('health_check_id')
+
+ if module.params.get('health_check_method') == 'details':
+ results = client.get_health_check(**params)
+ elif module.params.get('health_check_method') == 'failure_reason':
+ results = client.get_health_check_last_failure_reason(**params)
+ elif module.params.get('health_check_method') == 'status':
+ results = client.get_health_check_status(**params)
+
+ return results
+
+
+def get_resource_tags(client, module):
+ params = dict()
+
+ if module.params.get('resource_id'):
+ params['ResourceIds'] = module.params.get('resource_id')
+ else:
+ module.fail_json(msg="resource_id or resource_ids is required")
+
+ if module.params.get('query') == 'health_check':
+ params['ResourceType'] = 'healthcheck'
+ else:
+ params['ResourceType'] = 'hostedzone'
+
+ results = client.list_tags_for_resources(**params)
+ return results
+
+
+def list_health_checks(client, module):
+ params = dict()
+
+ if module.params.get('max_items'):
+ params['MaxItems'] = module.params.get('max_items')
+
+ if module.params.get('next_marker'):
+ params['Marker'] = module.params.get('next_marker')
+
+ results = client.list_health_checks(**params)
+ return results
+
+
+def record_sets_details(client, module):
+ params = dict()
+
+ if module.params.get('hosted_zone_id'):
+ params['HostedZoneId'] = module.params.get('hosted_zone_id')
+ else:
+ module.fail_json(msg="Hosted Zone Id is required")
+
+ if module.params.get('max_items'):
+ params['MaxItems'] = module.params.get('max_items')
+
+ if module.params.get('start_record_name'):
+ params['StartRecordName'] = module.params.get('start_record_name')
+
+ if module.params.get('type') and not module.params.get('start_record_name'):
+ module.fail_json(msg="start_record_name must be specified if type is set")
+ elif module.params.get('type'):
+ params['StartRecordType'] = module.params.get('type')
+
+ results = client.list_resource_record_sets(**params)
+ return results
+
+
+def health_check_details(client, module):
+ health_check_invocations = {
+ 'list': list_health_checks,
+ 'details': get_health_check,
+ 'status': get_health_check,
+ 'failure_reason': get_health_check,
+ 'count': get_count,
+ 'tags': get_resource_tags,
+ }
+
+ results = health_check_invocations[module.params.get('health_check_method')](client, module)
+ return results
+
+
+def hosted_zone_details(client, module):
+ hosted_zone_invocations = {
+ 'details': get_hosted_zone,
+ 'list': list_hosted_zones,
+ 'list_by_name': list_hosted_zones_by_name,
+ 'count': get_count,
+ 'tags': get_resource_tags,
+ }
+
+ results = hosted_zone_invocations[module.params.get('hosted_zone_method')](client, module)
+ return results
+
+
+def main():
+ argument_spec = ec2_argument_spec()
+ argument_spec.update(dict(
+ query=dict(choices=[
+ 'change',
+ 'checker_ip_range',
+ 'health_check',
+ 'hosted_zone',
+ 'record_sets',
+ 'reusable_delegation_set',
+ ], required=True),
+ change_id=dict(),
+ hosted_zone_id=dict(),
+ max_items=dict(type='str'),
+ next_marker=dict(),
+ delegation_set_id=dict(),
+ start_record_name=dict(),
+ type=dict(choices=[
+ 'A', 'CNAME', 'MX', 'AAAA', 'TXT', 'PTR', 'SRV', 'SPF', 'NS'
+ ]),
+ dns_name=dict(),
+ resource_id=dict(type='list', aliases=['resource_ids']),
+ health_check_id=dict(),
+ hosted_zone_method=dict(choices=[
+ 'details',
+ 'list',
+ 'list_by_name',
+ 'count',
+ 'tags'
+ ], default='list'),
+ health_check_method=dict(choices=[
+ 'list',
+ 'details',
+ 'status',
+ 'failure_reason',
+ 'count',
+ 'tags',
+ ], default='list'),
+ )
+ )
+
+ module = AnsibleModule(
+ argument_spec=argument_spec,
+ mutually_exclusive=[
+ ['hosted_zone_method', 'health_check_method'],
+ ],
+ )
+
+ # Validate Requirements
+ if not (HAS_BOTO or HAS_BOTO3):
+ module.fail_json(msg='boto or boto3 is required for this module.')
+
+ try:
+ region, ec2_url, aws_connect_kwargs = get_aws_connection_info(module, boto3=True)
+ route53 = boto3_conn(module, conn_type='client', resource='route53', region=region, endpoint=ec2_url, **aws_connect_kwargs)
+ except boto.exception.NoAuthHandlerFound as e:
+ module.fail_json(msg="Can't authorize connection - " + str(e))
+
+ invocations = {
+ 'change': change_details,
+ 'checker_ip_range': checker_ip_range_details,
+ 'health_check': health_check_details,
+ 'hosted_zone': hosted_zone_details,
+ 'record_sets': record_sets_details,
+ 'reusable_delegation_set': reusable_delegation_set_details,
+ }
+ results = invocations[module.params.get('query')](route53, module)
+
+ module.exit_json(**results)
+
+# import module snippets
+from ansible.module_utils.basic import *
+from ansible.module_utils.ec2 import *
+
+if __name__ == '__main__':
+ main()
diff --git a/lib/ansible/modules/extras/cloud/amazon/route53_health_check.py b/lib/ansible/modules/extras/cloud/amazon/route53_health_check.py
new file mode 100644
index 0000000000..9ad7f63d45
--- /dev/null
+++ b/lib/ansible/modules/extras/cloud/amazon/route53_health_check.py
@@ -0,0 +1,358 @@
+#!/usr/bin/python
+# This file is part of Ansible
+#
+# Ansible is free software: you can redistribute it and/or modify
+# it under the terms of the GNU General Public License as published by
+# the Free Software Foundation, either version 3 of the License, or
+# (at your option) any later version.
+#
+# Ansible is distributed in the hope that it will be useful,
+# but WITHOUT ANY WARRANTY; without even the implied warranty of
+# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+# GNU General Public License for more details.
+#
+# You should have received a copy of the GNU General Public License
+# along with Ansible. If not, see <http://www.gnu.org/licenses/>.
+
+DOCUMENTATION = '''
+---
+module: route53_health_check
+short_description: add or delete health-checks in Amazon's Route53 DNS service
+description:
+ - Creates and deletes DNS health checks in Amazon's Route53 service
+ - Only the port, resource_path, string_match and request_interval are
+ considered when updating existing health-checks.
+version_added: "2.0"
+options:
+ state:
+ description:
+ - Specifies the action to take.
+ required: true
+ choices: [ 'present', 'absent' ]
+ ip_address:
+ description:
+ - IP address of the end-point to check. Either this or `fqdn` has to be
+ provided.
+ required: false
+ default: null
+ port:
+ description:
+ - The port on the endpoint on which you want Amazon Route 53 to perform
+ health checks. Required for TCP checks.
+ required: false
+ default: null
+ type:
+ description:
+ - The type of health check that you want to create, which indicates how
+ Amazon Route 53 determines whether an endpoint is healthy.
+ required: true
+ choices: [ 'HTTP', 'HTTPS', 'HTTP_STR_MATCH', 'HTTPS_STR_MATCH', 'TCP' ]
+ resource_path:
+ description:
+ - The path that you want Amazon Route 53 to request when performing
+ health checks. The path can be any value for which your endpoint will
+ return an HTTP status code of 2xx or 3xx when the endpoint is healthy,
+ for example the file /docs/route53-health-check.html.
+ - Required for all checks except TCP.
+ - The path must begin with a /
+ - Maximum 255 characters.
+ required: false
+ default: null
+ fqdn:
+ description:
+ - Domain name of the endpoint to check. Either this or `ip_address` has
+ to be provided. When both are given the `fqdn` is used in the `Host:`
+ header of the HTTP request.
+ required: false
+ string_match:
+ description:
+ - If the check type is HTTP_STR_MATCH or HTTPS_STR_MATCH, the string
+ that you want Amazon Route 53 to search for in the response body from
+ the specified resource. If the string appears in the first 5120 bytes
+ of the response body, Amazon Route 53 considers the resource healthy.
+ required: false
+ default: null
+ request_interval:
+ description:
+ - The number of seconds between the time that Amazon Route 53 gets a
+ response from your endpoint and the time that it sends the next
+ health-check request.
+ required: false
+ default: 30
+ choices: [ 10, 30 ]
+ failure_threshold:
+ description:
+ - The number of consecutive health checks that an endpoint must pass or
+ fail for Amazon Route 53 to change the current status of the endpoint
+ from unhealthy to healthy or vice versa.
+ required: false
+ default: 3
+ choices: [ 1, 2, 3, 4, 5, 6, 7, 8, 9, 10 ]
+author: "zimbatm (@zimbatm)"
+extends_documentation_fragment:
+ - aws
+ - ec2
+'''
+
+EXAMPLES = '''
+# Create a health-check for host1.example.com and use it in record
+- route53_health_check:
+ state: present
+ fqdn: host1.example.com
+ type: HTTP_STR_MATCH
+ resource_path: /
+ string_match: "Hello"
+ request_interval: 10
+ failure_threshold: 2
+ register: my_health_check
+
+- route53:
+ action: create
+ zone: "example.com"
+ type: CNAME
+ record: "www.example.com"
+ value: host1.example.com
+ ttl: 30
+ # Routing policy
+ identifier: "host1@www"
+ weight: 100
+ health_check: "{{ my_health_check.health_check.id }}"
+
+# Delete health-check
+- route53_health_check:
+ state: absent
+ fqdn: host1.example.com
+
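+# Sketch of a plain TCP check: 'port' must be given explicitly here, since the
+# module only derives default ports for the HTTP(S) types; the IP is illustrative.
+- route53_health_check:
+ state: present
+ ip_address: 192.0.2.10
+ port: 5432
+ type: TCP
+ request_interval: 30
+ failure_threshold: 3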
+'''
+
+import time
+import uuid
+
+try:
+ import boto
+ import boto.ec2
+ from boto import route53
+ from boto.route53 import Route53Connection, exception
+ from boto.route53.healthcheck import HealthCheck
+ HAS_BOTO = True
+except ImportError:
+ HAS_BOTO = False
+
+# Things that can't get changed:
+# protocol
+# ip_address or domain
+# request_interval
+# string_match if not previously enabled
+def find_health_check(conn, wanted):
+ """Searches for health checks that have the exact same set of immutable values"""
+ for check in conn.get_list_health_checks().HealthChecks:
+ config = check.HealthCheckConfig
+ if config.get('IPAddress') == wanted.ip_addr and config.get('FullyQualifiedDomainName') == wanted.fqdn and config.get('Type') == wanted.hc_type and config.get('RequestInterval') == str(wanted.request_interval):
+ return check
+ return None
+
+def to_health_check(config):
+ return HealthCheck(
+ config.get('IPAddress'),
+ config.get('Port'),
+ config.get('Type'),
+ config.get('ResourcePath'),
+ fqdn=config.get('FullyQualifiedDomainName'),
+ string_match=config.get('SearchString'),
+ request_interval=int(config.get('RequestInterval')),
+ failure_threshold=int(config.get('FailureThreshold')),
+ )
+
+def health_check_diff(a, b):
+ a = a.__dict__
+ b = b.__dict__
+ if a == b:
+ return {}
+ diff = {}
+ for key in set(a.keys()) | set(b.keys()):
+ if a.get(key) != b.get(key):
+ diff[key] = b.get(key)
+ return diff
+
+def to_template_params(health_check):
+ params = {
+ 'ip_addr_part': '',
+ 'port': health_check.port,
+ 'type': health_check.hc_type,
+ 'resource_path_part': '',
+ 'fqdn_part': '',
+ 'string_match_part': '',
+ 'request_interval': health_check.request_interval,
+ 'failure_threshold': health_check.failure_threshold,
+ }
+ if health_check.ip_addr:
+ params['ip_addr_part'] = HealthCheck.XMLIpAddrPart % {'ip_addr': health_check.ip_addr}
+ if health_check.resource_path:
+ params['resource_path_part'] = XMLResourcePathPart % {'resource_path': health_check.resource_path}
+ if health_check.fqdn:
+ params['fqdn_part'] = HealthCheck.XMLFQDNPart % {'fqdn': health_check.fqdn}
+ if health_check.string_match:
+ params['string_match_part'] = HealthCheck.XMLStringMatchPart % {'string_match': health_check.string_match}
+ return params
+
+XMLResourcePathPart = """<ResourcePath>%(resource_path)s</ResourcePath>"""
+
+POSTXMLBody = """
+ <CreateHealthCheckRequest xmlns="%(xmlns)s">
+ <CallerReference>%(caller_ref)s</CallerReference>
+ <HealthCheckConfig>
+ %(ip_addr_part)s
+ <Port>%(port)s</Port>
+ <Type>%(type)s</Type>
+ %(resource_path_part)s
+ %(fqdn_part)s
+ %(string_match_part)s
+ <RequestInterval>%(request_interval)s</RequestInterval>
+ <FailureThreshold>%(failure_threshold)s</FailureThreshold>
+ </HealthCheckConfig>
+ </CreateHealthCheckRequest>
+ """
+
+UPDATEHCXMLBody = """
+ <UpdateHealthCheckRequest xmlns="%(xmlns)s">
+ <HealthCheckVersion>%(health_check_version)s</HealthCheckVersion>
+ %(ip_addr_part)s
+ <Port>%(port)s</Port>
+ %(resource_path_part)s
+ %(fqdn_part)s
+ %(string_match_part)s
+ <FailureThreshold>%(failure_threshold)i</FailureThreshold>
+ </UpdateHealthCheckRequest>
+ """
+
+def create_health_check(conn, health_check, caller_ref = None):
+ if caller_ref is None:
+ caller_ref = str(uuid.uuid4())
+ uri = '/%s/healthcheck' % conn.Version
+ params = to_template_params(health_check)
+ params.update(xmlns=conn.XMLNameSpace, caller_ref=caller_ref)
+
+ xml_body = POSTXMLBody % params
+ response = conn.make_request('POST', uri, {'Content-Type': 'text/xml'}, xml_body)
+ body = response.read()
+ boto.log.debug(body)
+ if response.status == 201:
+ e = boto.jsonresponse.Element()
+ h = boto.jsonresponse.XmlHandler(e, None)
+ h.parse(body)
+ return e
+ else:
+ raise exception.DNSServerError(response.status, response.reason, body)
+
+def update_health_check(conn, health_check_id, health_check_version, health_check):
+ uri = '/%s/healthcheck/%s' % (conn.Version, health_check_id)
+ params = to_template_params(health_check)
+ params.update(
+ xmlns=conn.XMLNameSpace,
+ health_check_version=health_check_version,
+ )
+ xml_body = UPDATEHCXMLBody % params
+ response = conn.make_request('POST', uri, {'Content-Type': 'text/xml'}, xml_body)
+ body = response.read()
+ boto.log.debug(body)
+ if response.status not in (200, 204):
+ raise exception.DNSServerError(response.status,
+ response.reason,
+ body)
+ e = boto.jsonresponse.Element()
+ h = boto.jsonresponse.XmlHandler(e, None)
+ h.parse(body)
+ return e
+
+def main():
+ argument_spec = ec2_argument_spec()
+ argument_spec.update(dict(
+ state = dict(choices=['present', 'absent'], default='present'),
+ ip_address = dict(),
+ port = dict(type='int'),
+ type = dict(required=True, choices=['HTTP', 'HTTPS', 'HTTP_STR_MATCH', 'HTTPS_STR_MATCH', 'TCP']),
+ resource_path = dict(),
+ fqdn = dict(),
+ string_match = dict(),
+ request_interval = dict(type='int', choices=[10, 30], default=30),
+ failure_threshold = dict(type='int', choices=[1, 2, 3, 4, 5, 6, 7, 8, 9, 10], default=3),
+ )
+ )
+ module = AnsibleModule(argument_spec=argument_spec)
+
+ if not HAS_BOTO:
+ module.fail_json(msg='boto 2.27.0+ required for this module')
+
+ state_in = module.params.get('state')
+ ip_addr_in = module.params.get('ip_address')
+ port_in = module.params.get('port')
+ type_in = module.params.get('type')
+ resource_path_in = module.params.get('resource_path')
+ fqdn_in = module.params.get('fqdn')
+ string_match_in = module.params.get('string_match')
+ request_interval_in = module.params.get('request_interval')
+ failure_threshold_in = module.params.get('failure_threshold')
+
+ if ip_addr_in is None and fqdn_in is None:
+ module.fail_json(msg="parameter 'ip_address' or 'fqdn' is required")
+
+ # Default port
+ if port_in is None:
+ if type_in in ['HTTP', 'HTTP_STR_MATCH']:
+ port_in = 80
+ elif type_in in ['HTTPS', 'HTTPS_STR_MATCH']:
+ port_in = 443
+ else:
+ module.fail_json(msg="parameter 'port' is required for 'type' TCP")
+
+ # string_match in relation with type
+ if type_in in ['HTTP_STR_MATCH', 'HTTPS_STR_MATCH']:
+ if string_match_in is None:
+ module.fail_json(msg="parameter 'string_match' is required for the HTTP(S)_STR_MATCH types")
+ elif len(string_match_in) > 255:
+ module.fail_json(msg="parameter 'string_match' is limited to 255 characters max")
+ elif string_match_in:
+ module.fail_json(msg="parameter 'string_match' argument is only for the HTTP(S)_STR_MATCH types")
+
+ region, ec2_url, aws_connect_kwargs = get_aws_connection_info(module)
+ # connect to the route53 endpoint
+ try:
+ conn = Route53Connection(**aws_connect_kwargs)
+ except boto.exception.BotoServerError as e:
+ module.fail_json(msg=e.error_message)
+
+ changed = False
+ action = None
+ check_id = None
+ wanted_config = HealthCheck(ip_addr_in, port_in, type_in, resource_path_in, fqdn_in, string_match_in, request_interval_in, failure_threshold_in)
+ existing_check = find_health_check(conn, wanted_config)
+ if existing_check:
+ check_id = existing_check.Id
+ existing_config = to_health_check(existing_check.HealthCheckConfig)
+
+ if state_in == 'present':
+ if existing_check is None:
+ action = "create"
+ check_id = create_health_check(conn, wanted_config).HealthCheck.Id
+ changed = True
+ else:
+ diff = health_check_diff(existing_config, wanted_config)
+ # only issue an update when the mutable fields actually differ
+ if diff:
+ action = "update"
+ update_health_check(conn, existing_check.Id, int(existing_check.HealthCheckVersion), wanted_config)
+ changed = True
+ elif state_in == 'absent':
+ if check_id:
+ action = "delete"
+ conn.delete_health_check(check_id)
+ changed = True
+ else:
+ module.fail_json(msg = "Logic Error: Unknown state")
+
+ module.exit_json(changed=changed, health_check=dict(id=check_id), action=action)
+
+# import module snippets
+from ansible.module_utils.basic import *
+from ansible.module_utils.ec2 import *
+
+if __name__ == '__main__':
+ main()
diff --git a/lib/ansible/modules/extras/cloud/amazon/route53_zone.py b/lib/ansible/modules/extras/cloud/amazon/route53_zone.py
new file mode 100644
index 0000000000..328d48dbf6
--- /dev/null
+++ b/lib/ansible/modules/extras/cloud/amazon/route53_zone.py
@@ -0,0 +1,224 @@
+#!/usr/bin/python
+# This file is part of Ansible
+#
+# Ansible is free software: you can redistribute it and/or modify
+# it under the terms of the GNU General Public License as published by
+# the Free Software Foundation, either version 3 of the License, or
+# (at your option) any later version.
+#
+# Ansible is distributed in the hope that it will be useful,
+# but WITHOUT ANY WARRANTY; without even the implied warranty of
+# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+# GNU General Public License for more details.
+#
+# You should have received a copy of the GNU General Public License
+# along with Ansible. If not, see <http://www.gnu.org/licenses/>.
+
+DOCUMENTATION = '''
+module: route53_zone
+short_description: add or delete Route53 zones
+description:
+ - Creates and deletes Route53 private and public zones
+version_added: "2.0"
+options:
+ zone:
+ description:
+ - "The DNS zone record (eg: foo.com.)"
+ required: true
+ state:
+ description:
+ - whether or not the zone should exist
+ required: false
+ default: present
+ choices: [ "present", "absent" ]
+ vpc_id:
+ description:
+ - The VPC ID the zone should be a part of (if this is going to be a private zone)
+ required: false
+ default: null
+ vpc_region:
+ description:
+ - The VPC Region the zone should be a part of (if this is going to be a private zone)
+ required: false
+ default: null
+ comment:
+ description:
+ - Comment associated with the zone
+ required: false
+ default: ''
+extends_documentation_fragment:
+ - aws
+ - ec2
+author: "Christopher Troup (@minichate)"
+'''
+
+EXAMPLES = '''
+# create a public zone
+- route53_zone: zone=example.com state=present comment="this is an example"
+
+# delete a public zone
+- route53_zone: zone=example.com state=absent
+
+- name: private zone for devel
+ route53_zone: zone=devel.example.com state=present vpc_id={{myvpc_id}} comment='developer domain'
+
+# more complex example
+- name: register output after creating zone in parameterized region
+ route53_zone:
+ vpc_id: "{{ vpc.vpc_id }}"
+ vpc_region: "{{ ec2_region }}"
+ zone: "{{ vpc_dns_zone }}"
+ state: present
+ register: zone_out
+
+- debug: var=zone_out
+
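+# Sketch: delete a private zone. The zone is matched against its attached VPC,
+# so the vpc_id used at creation time must be passed again; IDs are illustrative.
+- route53_zone:
+ zone: devel.example.com
+ state: absent
+ vpc_id: vpc-1d36c84f
+ vpc_region: eu-west-1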
+'''
+
+RETURN='''
+comment:
+ description: optional hosted zone comment
+ returned: when hosted zone exists
+ type: string
+ sample: "Private zone"
+name:
+ description: hosted zone name
+ returned: when hosted zone exists
+ type: string
+ sample: "private.local."
+private_zone:
+ description: whether hosted zone is private or public
+ returned: when hosted zone exists
+ type: bool
+ sample: true
+vpc_id:
+ description: id of vpc attached to private hosted zone
+ returned: for private hosted zone
+ type: string
+ sample: "vpc-1d36c84f"
+vpc_region:
+ description: region of vpc attached to private hosted zone
+ returned: for private hosted zone
+ type: string
+ sample: "eu-west-1"
+zone_id:
+ description: hosted zone id
+ returned: when hosted zone exists
+ type: string
+ sample: "Z6JQG9820BEFMW"
+'''
+
+import time
+
+try:
+ import boto
+ import boto.ec2
+ from boto import route53
+ from boto.route53 import Route53Connection
+ from boto.route53.zone import Zone
+ HAS_BOTO = True
+except ImportError:
+ HAS_BOTO = False
+
+
+def main():
+ argument_spec = ec2_argument_spec()
+ argument_spec.update(dict(
+ zone=dict(required=True),
+ state=dict(default='present', choices=['present', 'absent']),
+ vpc_id=dict(default=None),
+ vpc_region=dict(default=None),
+ comment=dict(default='')))
+ module = AnsibleModule(argument_spec=argument_spec)
+
+ if not HAS_BOTO:
+ module.fail_json(msg='boto required for this module')
+
+ zone_in = module.params.get('zone').lower()
+ state = module.params.get('state').lower()
+ vpc_id = module.params.get('vpc_id')
+ vpc_region = module.params.get('vpc_region')
+ comment = module.params.get('comment')
+
+ if zone_in[-1:] != '.':
+ zone_in += "."
+
+ private_zone = vpc_id is not None and vpc_region is not None
+
+ _, _, aws_connect_kwargs = get_aws_connection_info(module)
+
+ # connect to the route53 endpoint
+ try:
+ conn = Route53Connection(**aws_connect_kwargs)
+ except boto.exception.BotoServerError as e:
+ module.fail_json(msg=e.error_message)
+
+ results = conn.get_all_hosted_zones()
+ zones = {}
+
+ for r53zone in results['ListHostedZonesResponse']['HostedZones']:
+ zone_id = r53zone['Id'].replace('/hostedzone/', '')
+ zone_details = conn.get_hosted_zone(zone_id)['GetHostedZoneResponse']
+ if vpc_id and 'VPCs' in zone_details:
+ # this is to deal with this boto bug: https://github.com/boto/boto/pull/2882
+ if isinstance(zone_details['VPCs'], dict):
+ if zone_details['VPCs']['VPC']['VPCId'] == vpc_id:
+ zones[r53zone['Name']] = zone_id
+ else: # Forward compatibility for when boto fixes that bug
+ if vpc_id in [v['VPCId'] for v in zone_details['VPCs']]:
+ zones[r53zone['Name']] = zone_id
+ else:
+ zones[r53zone['Name']] = zone_id
+
+ record = {
+ 'private_zone': private_zone,
+ 'vpc_id': vpc_id,
+ 'vpc_region': vpc_region,
+ 'comment': comment,
+ }
+
+ if state == 'present' and zone_in in zones:
+ if private_zone:
+ details = conn.get_hosted_zone(zones[zone_in])
+
+ if 'VPCs' not in details['GetHostedZoneResponse']:
+ module.fail_json(
+ msg="Can't change VPC from public to private"
+ )
+
+ vpc_details = details['GetHostedZoneResponse']['VPCs']['VPC']
+ current_vpc_id = vpc_details['VPCId']
+ current_vpc_region = vpc_details['VPCRegion']
+
+ if current_vpc_id != vpc_id:
+ module.fail_json(
+ msg="Can't change VPC ID once a zone has been created"
+ )
+ if current_vpc_region != vpc_region:
+ module.fail_json(
+ msg="Can't change VPC Region once a zone has been created"
+ )
+
+ record['zone_id'] = zones[zone_in]
+ record['name'] = zone_in
+ module.exit_json(changed=False, set=record)
+
+ elif state == 'present':
+ result = conn.create_hosted_zone(zone_in, **record)
+ hosted_zone = result['CreateHostedZoneResponse']['HostedZone']
+ zone_id = hosted_zone['Id'].replace('/hostedzone/', '')
+ record['zone_id'] = zone_id
+ record['name'] = zone_in
+ module.exit_json(changed=True, set=record)
+
+ elif state == 'absent' and zone_in in zones:
+ conn.delete_hosted_zone(zones[zone_in])
+ module.exit_json(changed=True)
+
+ elif state == 'absent':
+ module.exit_json(changed=False)
+
+from ansible.module_utils.basic import *
+from ansible.module_utils.ec2 import *
+
+if __name__ == '__main__':
+ main()
diff --git a/lib/ansible/modules/extras/cloud/amazon/s3_bucket.py b/lib/ansible/modules/extras/cloud/amazon/s3_bucket.py
new file mode 100644
index 0000000000..704b6e73fe
--- /dev/null
+++ b/lib/ansible/modules/extras/cloud/amazon/s3_bucket.py
@@ -0,0 +1,437 @@
+#!/usr/bin/python
+#
+# This is a free software: you can redistribute it and/or modify
+# it under the terms of the GNU General Public License as published by
+# the Free Software Foundation, either version 3 of the License, or
+# (at your option) any later version.
+#
+# This Ansible library is distributed in the hope that it will be useful,
+# but WITHOUT ANY WARRANTY; without even the implied warranty of
+# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+# GNU General Public License for more details.
+#
+# You should have received a copy of the GNU General Public License
+# along with this library. If not, see <http://www.gnu.org/licenses/>.
+
+DOCUMENTATION = '''
+---
+module: s3_bucket
+short_description: Manage S3 buckets in AWS, Ceph, Walrus and FakeS3
+description:
+ - Manage S3 buckets in AWS, Ceph, Walrus and FakeS3
+version_added: "2.0"
+author: "Rob White (@wimnat)"
+options:
+ force:
+ description:
+ - When trying to delete a bucket, delete all keys in the bucket first (an s3 bucket must be empty for a successful deletion)
+ required: false
+ default: no
+ choices: [ 'yes', 'no' ]
+ name:
+ description:
+ - Name of the s3 bucket
+ required: true
+ policy:
+ description:
+ - The JSON policy as a string.
+ required: false
+ default: null
+ s3_url:
+ description:
+ - S3 URL endpoint for usage with Ceph, Eucalyptus, fakes3, etc. Otherwise assumes AWS
+ default: null
+ aliases: [ S3_URL ]
+ ceph:
+ description:
+ - Enable API compatibility with Ceph. Only the subset of the S3 API that Ceph supports is used, so the module behaves the same way where possible.
+ version_added: "2.2"
+ requester_pays:
+ description:
+ - With Requester Pays buckets, the requester instead of the bucket owner pays the cost of the request and the data download from the bucket.
+ required: false
+ default: no
+ choices: [ 'yes', 'no' ]
+ state:
+ description:
+ - Create or remove the s3 bucket
+ required: false
+ default: present
+ choices: [ 'present', 'absent' ]
+ tags:
+ description:
+ - tags dict to apply to bucket
+ required: false
+ default: null
+ versioning:
+ description:
+ - Whether versioning is enabled or disabled (note that once versioning is enabled, it can only be suspended)
+ required: false
+ default: null
+ choices: [ 'yes', 'no' ]
+extends_documentation_fragment:
+ - aws
+ - ec2
+'''
+
+EXAMPLES = '''
+# Note: These examples do not set authentication details, see the AWS Guide for details.
+
+# Create a simple s3 bucket
+- s3_bucket:
+ name: mys3bucket
+
+# Create a simple s3 bucket on Ceph Rados Gateway
+- s3_bucket:
+ name: mys3bucket
+ s3_url: http://your-ceph-rados-gateway-server.xxx
+ ceph: true
+
+# Remove an s3 bucket and any keys it contains
+- s3_bucket:
+ name: mys3bucket
+ state: absent
+ force: yes
+
+# Create a bucket, add a policy from a file, enable requester pays, enable versioning and tag
+- s3_bucket:
+ name: mys3bucket
+ policy: "{{ lookup('file','policy.json') }}"
+ requester_pays: yes
+ versioning: yes
+ tags:
+ example: tag1
+ another: tag2
+
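+# Sketch: create a bucket against a local fakes3 server (the endpoint is
+# illustrative; any fakes3:// or fakes3s:// scheme routes the module to the
+# fake S3 connection)
+- s3_bucket:
+ name: mytestbucket
+ s3_url: fakes3://localhost:4567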
+'''
+
+import os
+import xml.etree.ElementTree as ET
+import urlparse
+
+from ansible.module_utils.basic import *
+from ansible.module_utils.ec2 import *
+
+try:
+ import boto.ec2
+ from boto.s3.connection import OrdinaryCallingFormat, Location, S3Connection
+ from boto.s3.tagging import Tags, TagSet
+ from boto.exception import BotoServerError, S3CreateError, S3ResponseError
+ HAS_BOTO = True
+except ImportError:
+ HAS_BOTO = False
+
+
+def get_request_payment_status(bucket):
+
+ response = bucket.get_request_payment()
+ root = ET.fromstring(response)
+ for message in root.findall('.//{http://s3.amazonaws.com/doc/2006-03-01/}Payer'):
+ payer = message.text
+
+ return (payer != "BucketOwner")
+
+
+def create_tags_container(tags):
+
+ tag_set = TagSet()
+ tags_obj = Tags()
+ for key, val in tags.iteritems():
+ tag_set.add_tag(key, val)
+
+ tags_obj.add_tag_set(tag_set)
+ return tags_obj
+
+
+def _create_or_update_bucket(connection, module, location):
+
+ policy = module.params.get("policy")
+ name = module.params.get("name")
+ requester_pays = module.params.get("requester_pays")
+ tags = module.params.get("tags")
+ versioning = module.params.get("versioning")
+ changed = False
+
+ try:
+ bucket = connection.get_bucket(name)
+ except S3ResponseError as e:
+ try:
+ bucket = connection.create_bucket(name, location=location)
+ changed = True
+ except S3CreateError as e:
+ module.fail_json(msg=e.message)
+
+ # Versioning
+ versioning_status = bucket.get_versioning_status()
+ if versioning_status:
+ if versioning is not None:
+ if versioning and versioning_status['Versioning'] != "Enabled":
+ try:
+ bucket.configure_versioning(versioning)
+ changed = True
+ versioning_status = bucket.get_versioning_status()
+ except S3ResponseError as e:
+ module.fail_json(msg=e.message)
+ elif not versioning and versioning_status['Versioning'] == "Enabled":
+ try:
+ bucket.configure_versioning(versioning)
+ changed = True
+ versioning_status = bucket.get_versioning_status()
+ except S3ResponseError as e:
+ module.fail_json(msg=e.message)
+
+ # Requester pays
+ requester_pays_status = get_request_payment_status(bucket)
+ if requester_pays_status != requester_pays:
+ if requester_pays:
+ payer='Requester'
+ else:
+ payer='BucketOwner'
+ bucket.set_request_payment(payer=payer)
+ changed = True
+ requester_pays_status = get_request_payment_status(bucket)
+
+ # Policy
+ try:
+ current_policy = bucket.get_policy()
+ except S3ResponseError as e:
+ if e.error_code == "NoSuchBucketPolicy":
+ current_policy = None
+ else:
+ module.fail_json(msg=e.message)
+
+ if current_policy is not None:
+ if policy == {}:
+ try:
+ bucket.delete_policy()
+ changed = True
+ current_policy = bucket.get_policy()
+ except S3ResponseError as e:
+ if e.error_code == "NoSuchBucketPolicy":
+ current_policy = None
+ else:
+ module.fail_json(msg=e.message)
+ if policy is not None:
+ if json.loads(current_policy) != json.loads(policy):
+ try:
+ bucket.set_policy(policy)
+ changed = True
+ current_policy = bucket.get_policy()
+ except S3ResponseError as e:
+ module.fail_json(msg=e.message)
+
+ # Tags
+ try:
+ current_tags = bucket.get_tags()
+ except S3ResponseError as e:
+ if e.error_code == "NoSuchTagSet":
+ current_tags = None
+ else:
+ module.fail_json(msg=e.message)
+
+ if current_tags is None:
+ current_tags_dict = {}
+ else:
+ current_tags_dict = dict((t.key, t.value) for t in current_tags[0])
+
+ if tags is not None:
+ if current_tags_dict != tags:
+ try:
+ if tags:
+ bucket.set_tags(create_tags_container(tags))
+ else:
+ bucket.delete_tags()
+ current_tags_dict = tags
+ changed = True
+ except S3ResponseError as e:
+ module.fail_json(msg=e.message)
+
+ module.exit_json(changed=changed, name=bucket.name, versioning=versioning_status, requester_pays=requester_pays_status, policy=current_policy, tags=current_tags_dict)
+
+
+def _destroy_bucket(connection, module):
+
+ force = module.params.get("force")
+ name = module.params.get("name")
+ changed = False
+
+ try:
+ bucket = connection.get_bucket(name)
+ except S3ResponseError as e:
+ if e.error_code != "NoSuchBucket":
+ module.fail_json(msg=e.message)
+ else:
+ # Bucket already absent
+ module.exit_json(changed=changed)
+
+ if force:
+ try:
+ # Empty the bucket
+ for key in bucket.list():
+ key.delete()
+
+ except BotoServerError as e:
+ module.fail_json(msg=e.message)
+
+ try:
+ bucket = connection.delete_bucket(name)
+ changed = True
+ except S3ResponseError as e:
+ module.fail_json(msg=e.message)
+
+ module.exit_json(changed=changed)
+
+
+def _create_or_update_bucket_ceph(connection, module, location):
+ #TODO: add update
+
+ name = module.params.get("name")
+
+ changed = False
+
+ try:
+ bucket = connection.get_bucket(name)
+ except S3ResponseError as e:
+ try:
+ bucket = connection.create_bucket(name, location=location)
+ changed = True
+ except S3CreateError as e:
+ module.fail_json(msg=e.message)
+
+ if bucket:
+ module.exit_json(changed=changed)
+ else:
+ module.fail_json(msg='Unable to create bucket, no error from the API')
+
+
+def _destroy_bucket_ceph(connection, module):
+
+ _destroy_bucket(connection, module)
+
+
+def create_or_update_bucket(connection, module, location, flavour='aws'):
+ if flavour == 'ceph':
+ _create_or_update_bucket_ceph(connection, module, location)
+ else:
+ _create_or_update_bucket(connection, module, location)
+
+
+def destroy_bucket(connection, module, flavour='aws'):
+ if flavour == 'ceph':
+ _destroy_bucket_ceph(connection, module)
+ else:
+ _destroy_bucket(connection, module)
+
+
+def is_fakes3(s3_url):
+ """ Return True if s3_url has scheme fakes3:// """
+ if s3_url is not None:
+ return urlparse.urlparse(s3_url).scheme in ('fakes3', 'fakes3s')
+ else:
+ return False
+
+
+def is_walrus(s3_url):
+ """ Return True if it's Walrus endpoint, not S3
+
+ We assume anything other than *.amazonaws.com is Walrus"""
+ if s3_url is not None:
+ o = urlparse.urlparse(s3_url)
+ return not o.hostname.endswith('amazonaws.com')
+ else:
+ return False
+
+def main():
+
+ argument_spec = ec2_argument_spec()
+ argument_spec.update(
+ dict(
+ force=dict(required=False, default='no', type='bool'),
+ policy=dict(required=False, type='json'),
+ name=dict(required=True, type='str'),
+ requester_pays=dict(default='no', type='bool'),
+ s3_url=dict(aliases=['S3_URL'], type='str'),
+ state=dict(default='present', type='str', choices=['present', 'absent']),
+ tags=dict(required=False, default=None, type='dict'),
+ versioning=dict(default=None, type='bool'),
+ ceph=dict(default='no', type='bool')
+ )
+ )
+
+ module = AnsibleModule(argument_spec=argument_spec)
+
+ if not HAS_BOTO:
+ module.fail_json(msg='boto required for this module')
+
+ region, ec2_url, aws_connect_params = get_aws_connection_info(module)
+
+ if region in ('us-east-1', '', None):
+ # S3ism for the US Standard region
+ location = Location.DEFAULT
+ else:
+ # Boto uses symbolic names for locations but region strings will
+ # actually work fine for everything except us-east-1 (US Standard)
+ location = region
+
+ s3_url = module.params.get('s3_url')
+
+ # allow eucarc environment variables to be used if ansible vars aren't set
+ if not s3_url and 'S3_URL' in os.environ:
+ s3_url = os.environ['S3_URL']
+
+ ceph = module.params.get('ceph')
+
+ if ceph and not s3_url:
+ module.fail_json(msg='ceph flavour requires s3_url')
+
+ flavour = 'aws'
+
+ # Look at s3_url and tweak connection settings
+ # if connecting to Walrus or fakes3
+ try:
+ if s3_url and ceph:
+ ceph = urlparse.urlparse(s3_url)
+ connection = boto.connect_s3(
+ host=ceph.hostname,
+ port=ceph.port,
+ is_secure=ceph.scheme == 'https',
+ calling_format=OrdinaryCallingFormat(),
+ **aws_connect_params
+ )
+ flavour = 'ceph'
+ elif is_fakes3(s3_url):
+ fakes3 = urlparse.urlparse(s3_url)
+ connection = S3Connection(
+ is_secure=fakes3.scheme == 'fakes3s',
+ host=fakes3.hostname,
+ port=fakes3.port,
+ calling_format=OrdinaryCallingFormat(),
+ **aws_connect_params
+ )
+ elif is_walrus(s3_url):
+ walrus = urlparse.urlparse(s3_url).hostname
+ connection = boto.connect_walrus(walrus, **aws_connect_params)
+ else:
+ connection = boto.s3.connect_to_region(location, is_secure=True, calling_format=OrdinaryCallingFormat(), **aws_connect_params)
+ # fall back to connect_s3, because connect_to_region can fail with boto on non-'classic' AWS accounts in some cases
+ if connection is None:
+ connection = boto.connect_s3(**aws_connect_params)
+
+ except boto.exception.NoAuthHandlerFound as e:
+ module.fail_json(msg='No Authentication Handler found: %s ' % str(e))
+ except Exception as e:
+ module.fail_json(msg='Failed to connect to S3: %s' % str(e))
+
+ if connection is None: # this should never happen
+ module.fail_json(msg ='Unknown error, failed to create s3 connection, no information from boto.')
+
+ state = module.params.get("state")
+
+ if state == 'present':
+ create_or_update_bucket(connection, module, location, flavour=flavour)
+ elif state == 'absent':
+ destroy_bucket(connection, module, flavour=flavour)
+
+if __name__ == '__main__':
+ main()
diff --git a/lib/ansible/modules/extras/cloud/amazon/s3_lifecycle.py b/lib/ansible/modules/extras/cloud/amazon/s3_lifecycle.py
new file mode 100644
index 0000000000..2541539536
--- /dev/null
+++ b/lib/ansible/modules/extras/cloud/amazon/s3_lifecycle.py
@@ -0,0 +1,434 @@
+#!/usr/bin/python
+#
+# This is a free software: you can redistribute it and/or modify
+# it under the terms of the GNU General Public License as published by
+# the Free Software Foundation, either version 3 of the License, or
+# (at your option) any later version.
+#
+# This Ansible library is distributed in the hope that it will be useful,
+# but WITHOUT ANY WARRANTY; without even the implied warranty of
+# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+# GNU General Public License for more details.
+#
+# You should have received a copy of the GNU General Public License
+# along with this library. If not, see <http://www.gnu.org/licenses/>.
+
+DOCUMENTATION = '''
+---
+module: s3_lifecycle
+short_description: Manage s3 bucket lifecycle rules in AWS
+description:
+ - Manage s3 bucket lifecycle rules in AWS
+version_added: "2.0"
+author: "Rob White (@wimnat)"
+notes:
+ - If specifying expiration time as days then transition time must also be specified in days
+ - If specifying expiration time as a date then transition time must also be specified as a date
+requirements:
+ - python-dateutil
+options:
+ name:
+ description:
+ - "Name of the s3 bucket"
+ required: true
+ expiration_date:
+ description:
+ - "Indicates the lifetime of the objects that are subject to the rule by the date they will expire. The value must be ISO-8601 format, the time must be midnight and a GMT timezone must be specified."
+ required: false
+ default: null
+ expiration_days:
+ description:
+ - "Indicates the lifetime, in days, of the objects that are subject to the rule. The value must be a non-zero positive integer."
+ required: false
+ default: null
+ prefix:
+ description:
+ - "Prefix identifying one or more objects to which the rule applies. If no prefix is specified, the rule will apply to the whole bucket."
+ required: false
+ default: null
+ rule_id:
+ description:
+ - "Unique identifier for the rule. The value cannot be longer than 255 characters. A unique value for the rule will be generated if no value is provided."
+ required: false
+ default: null
+ state:
+ description:
+ - "Create or remove the lifecycle rule"
+ required: false
+ default: present
+ choices: [ 'present', 'absent' ]
+ status:
+ description:
+ - "If 'enabled', the rule is currently being applied. If 'disabled', the rule is not currently being applied."
+ required: false
+ default: enabled
+ choices: [ 'enabled', 'disabled' ]
+ storage_class:
+ description:
+ - "The storage class to transition to. Currently there are two supported values - 'glacier' or 'standard_ia'."
+ - "The 'standard_ia' class is only being available from Ansible version 2.2."
+ required: false
+ default: glacier
+ choices: [ 'glacier', 'standard_ia']
+ transition_date:
+ description:
+ - "Indicates the lifetime of the objects that are subject to the rule by the date they will transition to a different storage class. The value must be ISO-8601 format, the time must be midnight and a GMT timezone must be specified. If transition_days is not specified, this parameter is required."
+ required: false
+ default: null
+ transition_days:
+ description:
+ - "Indicates when, in days, an object transitions to a different storage class. If transition_date is not specified, this parameter is required."
+ required: false
+ default: null
+extends_documentation_fragment:
+ - aws
+ - ec2
+'''
+
+EXAMPLES = '''
+# Note: These examples do not set authentication details, see the AWS Guide for details.
+
+# Configure a lifecycle rule on a bucket to expire (delete) items with a prefix of /logs/ after 30 days
+- s3_lifecycle:
+ name: mybucket
+ expiration_days: 30
+ prefix: /logs/
+ status: enabled
+ state: present
+
+# Configure a lifecycle rule to transition all items with a prefix of /logs/ to glacier after 7 days and then delete after 90 days
+- s3_lifecycle:
+ name: mybucket
+ transition_days: 7
+ expiration_days: 90
+ prefix: /logs/
+ status: enabled
+ state: present
+
+# Configure a lifecycle rule to transition all items with a prefix of /logs/ to glacier on 30 Dec 2020 and then delete on 30 Dec 2030. Note that midnight GMT must be specified.
+# Be sure to quote your date strings
+- s3_lifecycle:
+ name: mybucket
+ transition_date: "2020-12-30T00:00:00.000Z"
+ expiration_date: "2030-12-30T00:00:00.000Z"
+ prefix: /logs/
+ status: enabled
+ state: present
+
+# Disable the rule created above
+- s3_lifecycle:
+ name: mybucket
+ prefix: /logs/
+ status: disabled
+ state: present
+
+# Delete the lifecycle rule created above
+- s3_lifecycle:
+ name: mybucket
+ prefix: /logs/
+ state: absent
+
+# Configure a lifecycle rule to transition all backup files older than 31 days in /backups/ to standard infrequent access class.
+- s3_lifecycle:
+ name: mybucket
+ prefix: /backups/
+ storage_class: standard_ia
+ transition_days: 31
+ state: present
+ status: enabled
+
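+# Sketch: remove a single rule by its unique rule_id instead of by prefix
+# (the id below is illustrative and must match an existing rule's id)
+- s3_lifecycle:
+ name: mybucket
+ rule_id: expire-logs-30d
+ state: absent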
+'''
+
+import xml.etree.ElementTree as ET
+import copy
+import datetime
+
+try:
+ import dateutil.parser
+ HAS_DATEUTIL = True
+except ImportError:
+ HAS_DATEUTIL = False
+
+try:
+ import boto
+ import boto.ec2
+ from boto.s3.connection import OrdinaryCallingFormat, Location
+ from boto.s3.lifecycle import Lifecycle, Rule, Expiration, Transition
+ from boto.exception import BotoServerError, S3CreateError, S3ResponseError
+ HAS_BOTO = True
+except ImportError:
+ HAS_BOTO = False
+
+def create_lifecycle_rule(connection, module):
+
+ name = module.params.get("name")
+ expiration_date = module.params.get("expiration_date")
+ expiration_days = module.params.get("expiration_days")
+ prefix = module.params.get("prefix")
+ rule_id = module.params.get("rule_id")
+ status = module.params.get("status")
+ storage_class = module.params.get("storage_class")
+ transition_date = module.params.get("transition_date")
+ transition_days = module.params.get("transition_days")
+ changed = False
+
+ try:
+ bucket = connection.get_bucket(name)
+ except S3ResponseError as e:
+ module.fail_json(msg=e.message)
+
+ # Get the bucket's current lifecycle rules
+ try:
+ current_lifecycle_obj = bucket.get_lifecycle_config()
+ except S3ResponseError as e:
+ if e.error_code == "NoSuchLifecycleConfiguration":
+ current_lifecycle_obj = Lifecycle()
+ else:
+ module.fail_json(msg=e.message)
+
+ # Create expiration
+ if expiration_days is not None:
+ expiration_obj = Expiration(days=expiration_days)
+ elif expiration_date is not None:
+ expiration_obj = Expiration(date=expiration_date)
+ else:
+ expiration_obj = None
+
+ # Create transition
+ if transition_days is not None:
+ transition_obj = Transition(days=transition_days, storage_class=storage_class.upper())
+ elif transition_date is not None:
+ transition_obj = Transition(date=transition_date, storage_class=storage_class.upper())
+ else:
+ transition_obj = None
+
+ # Create rule
+ rule = Rule(rule_id, prefix, status.title(), expiration_obj, transition_obj)
+
+ # Create lifecycle
+ lifecycle_obj = Lifecycle()
+
+ appended = False
+ # If current_lifecycle_obj is not None then we have rules to compare, otherwise just add the rule
+ if current_lifecycle_obj:
+ # If rule ID exists, use that for comparison otherwise compare based on prefix
+ for existing_rule in current_lifecycle_obj:
+ if rule.id == existing_rule.id:
+ if compare_rule(rule, existing_rule):
+ lifecycle_obj.append(rule)
+ appended = True
+ else:
+ lifecycle_obj.append(rule)
+ changed = True
+ appended = True
+ elif rule.prefix == existing_rule.prefix:
+ existing_rule.id = None
+ if compare_rule(rule, existing_rule):
+ lifecycle_obj.append(rule)
+ appended = True
+ else:
+ lifecycle_obj.append(rule)
+ changed = True
+ appended = True
+ else:
+ lifecycle_obj.append(existing_rule)
+ # If nothing appended then append now as the rule must not exist
+ if not appended:
+ lifecycle_obj.append(rule)
+ changed = True
+ else:
+ lifecycle_obj.append(rule)
+ changed = True
+
+ # Write lifecycle to bucket
+ try:
+ bucket.configure_lifecycle(lifecycle_obj)
+ except S3ResponseError as e:
+ module.fail_json(msg=e.message)
+
+ module.exit_json(changed=changed)
+
+def compare_rule(rule_a, rule_b):
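+ # Compares two boto Rule objects field-by-field: the nested Expiration and
+ # Transition objects are normalised (None becomes an empty instance) and
+ # compared separately from the remaining rule attributes.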
+
+ # Copy objects
+ rule1 = copy.deepcopy(rule_a)
+ rule2 = copy.deepcopy(rule_b)
+
+ # Delete Rule from Rule
+ try:
+ del rule1.Rule
+ except AttributeError:
+ pass
+
+ try:
+ del rule2.Rule
+ except AttributeError:
+ pass
+
+ # Extract Expiration and Transition objects
+ rule1_expiration = rule1.expiration
+ rule1_transition = rule1.transition
+ rule2_expiration = rule2.expiration
+ rule2_transition = rule2.transition
+
+ # Delete the Expiration and Transition objects from the Rule objects
+ del rule1.expiration
+ del rule1.transition
+ del rule2.expiration
+ del rule2.transition
+
+ # Compare
+ if rule1_transition is None:
+ rule1_transition = Transition()
+ if rule2_transition is None:
+ rule2_transition = Transition()
+ if rule1_expiration is None:
+ rule1_expiration = Expiration()
+ if rule2_expiration is None:
+ rule2_expiration = Expiration()
+
+ if (rule1.__dict__ == rule2.__dict__) and (rule1_expiration.__dict__ == rule2_expiration.__dict__) and (rule1_transition.__dict__ == rule2_transition.__dict__):
+ return True
+ else:
+ return False
+
+
+def destroy_lifecycle_rule(connection, module):
+
+ name = module.params.get("name")
+ prefix = module.params.get("prefix")
+ rule_id = module.params.get("rule_id")
+ changed = False
+
+ if prefix is None:
+ prefix = ""
+
+ try:
+ bucket = connection.get_bucket(name)
+ except S3ResponseError as e:
+ module.fail_json(msg=e.message)
+
+ # Get the bucket's current lifecycle rules
+ try:
+ current_lifecycle_obj = bucket.get_lifecycle_config()
+ except S3ResponseError as e:
+ if e.error_code == "NoSuchLifecycleConfiguration":
+ module.exit_json(changed=changed)
+ else:
+ module.fail_json(msg=e.message)
+
+ # Create lifecycle
+ lifecycle_obj = Lifecycle()
+
+ # Check if rule exists
+ # If an ID exists, use that otherwise compare based on prefix
+ if rule_id is not None:
+ for existing_rule in current_lifecycle_obj:
+ if rule_id == existing_rule.id:
+ # We're not keeping the rule (i.e. deleting) so mark as changed
+ changed = True
+ else:
+ lifecycle_obj.append(existing_rule)
+ else:
+ for existing_rule in current_lifecycle_obj:
+ if prefix == existing_rule.prefix:
+ # We're not keeping the rule (i.e. deleting) so mark as changed
+ changed = True
+ else:
+ lifecycle_obj.append(existing_rule)
+
+ # Write lifecycle to bucket or, if there are no rules left, delete the lifecycle configuration
+ try:
+ if lifecycle_obj:
+ bucket.configure_lifecycle(lifecycle_obj)
+ else:
+ bucket.delete_lifecycle_configuration()
+ except BotoServerError as e:
+ module.fail_json(msg=e.message)
+
+ module.exit_json(changed=changed)
+
+
+def main():
+
+ argument_spec = ec2_argument_spec()
+ argument_spec.update(
+ dict(
+ name = dict(required=True, type='str'),
+ expiration_days = dict(default=None, required=False, type='int'),
+ expiration_date = dict(default=None, required=False, type='str'),
+ prefix = dict(default=None, required=False),
+ rule_id = dict(required=False, type='str'),
+ state = dict(default='present', choices=['present', 'absent']),
+ status = dict(default='enabled', choices=['enabled', 'disabled']),
+ storage_class = dict(default='glacier', type='str', choices=['glacier', 'standard_ia']),
+ transition_days = dict(default=None, required=False, type='int'),
+ transition_date = dict(default=None, required=False, type='str')
+ )
+ )
+
+ module = AnsibleModule(argument_spec=argument_spec,
+ mutually_exclusive = [
+ [ 'expiration_days', 'expiration_date' ],
+ [ 'expiration_days', 'transition_date' ],
+ [ 'transition_days', 'transition_date' ],
+ [ 'transition_days', 'expiration_date' ]
+ ]
+ )
+
+ if not HAS_BOTO:
+ module.fail_json(msg='boto required for this module')
+
+ if not HAS_DATEUTIL:
+ module.fail_json(msg='dateutil required for this module')
+
+ region, ec2_url, aws_connect_params = get_aws_connection_info(module)
+
+ if region in ('us-east-1', '', None):
+ # S3ism for the US Standard region
+ location = Location.DEFAULT
+ else:
+ # Boto uses symbolic names for locations but region strings will
+ # actually work fine for everything except us-east-1 (US Standard)
+ location = region
+ try:
+ connection = boto.s3.connect_to_region(location, is_secure=True, calling_format=OrdinaryCallingFormat(), **aws_connect_params)
+ # fall back to connect_s3, because connect_to_region can fail with boto on non-'classic' AWS accounts in some cases
+ if connection is None:
+ connection = boto.connect_s3(**aws_connect_params)
+ except (boto.exception.NoAuthHandlerFound, AnsibleAWSError) as e:
+ module.fail_json(msg=str(e))
+
+ expiration_date = module.params.get("expiration_date")
+ transition_date = module.params.get("transition_date")
+ state = module.params.get("state")
+ storage_class = module.params.get("storage_class")
+
+ # If expiration_date set, check string is valid
+ if expiration_date is not None:
+ try:
+ datetime.datetime.strptime(expiration_date, "%Y-%m-%dT%H:%M:%S.000Z")
+ except ValueError:
+ module.fail_json(msg="expiration_date is not a valid ISO-8601 format. The time must be midnight and a timezone of GMT must be included")
+
+ if transition_date is not None:
+ try:
+ datetime.datetime.strptime(transition_date, "%Y-%m-%dT%H:%M:%S.000Z")
+ except ValueError:
+ module.fail_json(msg="transition_date is not a valid ISO-8601 format. The time must be midnight and a timezone of GMT must be included")
+
+ boto_required_version = (2,40,0)
+ if storage_class == 'standard_ia' and tuple(map(int, (boto.__version__.split(".")))) < boto_required_version:
+ module.fail_json(msg="'standard_ia' class requires boto >= 2.40.0")
+
+ if state == 'present':
+ create_lifecycle_rule(connection, module)
+ elif state == 'absent':
+ destroy_lifecycle_rule(connection, module)
+
+from ansible.module_utils.basic import *
+from ansible.module_utils.ec2 import *
+
+if __name__ == '__main__':
+ main()
diff --git a/lib/ansible/modules/extras/cloud/amazon/s3_logging.py b/lib/ansible/modules/extras/cloud/amazon/s3_logging.py
new file mode 100644
index 0000000000..dca2a28aca
--- /dev/null
+++ b/lib/ansible/modules/extras/cloud/amazon/s3_logging.py
@@ -0,0 +1,179 @@
+#!/usr/bin/python
+#
+# This is a free software: you can redistribute it and/or modify
+# it under the terms of the GNU General Public License as published by
+# the Free Software Foundation, either version 3 of the License, or
+# (at your option) any later version.
+#
+# This Ansible library is distributed in the hope that it will be useful,
+# but WITHOUT ANY WARRANTY; without even the implied warranty of
+# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+# GNU General Public License for more details.
+#
+# You should have received a copy of the GNU General Public License
+# along with this library. If not, see <http://www.gnu.org/licenses/>.
+
+DOCUMENTATION = '''
+---
+module: s3_logging
+short_description: Manage logging facility of an s3 bucket in AWS
+description:
+ - Manage logging facility of an s3 bucket in AWS
+version_added: "2.0"
+author: Rob White (@wimnat)
+options:
+ name:
+ description:
+ - "Name of the s3 bucket."
+ required: true
+ state:
+ description:
+ - "Enable or disable logging."
+ required: false
+ default: present
+ choices: [ 'present', 'absent' ]
+ target_bucket:
+ description:
+ - "The bucket to log to. Required when state=present."
+ required: false
+ default: null
+ target_prefix:
+ description:
+ - "The prefix that should be prepended to the generated log files written to the target_bucket."
+ required: false
+ default: ""
+extends_documentation_fragment:
+ - aws
+ - ec2
+'''
+
+EXAMPLES = '''
+# Note: These examples do not set authentication details, see the AWS Guide for details.
+
+- name: Enable logging of s3 bucket mywebsite.com to s3 bucket mylogs
+ s3_logging:
+ name: mywebsite.com
+ target_bucket: mylogs
+ target_prefix: logs/mywebsite.com
+ state: present
+
+- name: Remove logging on an s3 bucket
+ s3_logging:
+ name: mywebsite.com
+ state: absent
+
+'''
+
+try:
+ import boto.ec2
+ from boto.s3.connection import OrdinaryCallingFormat, Location
+ from boto.exception import BotoServerError, S3CreateError, S3ResponseError
+ HAS_BOTO = True
+except ImportError:
+ HAS_BOTO = False
+
+
+def compare_bucket_logging(bucket, target_bucket, target_prefix):
+
+    bucket_log_obj = bucket.get_logging_status()
+    return bucket_log_obj.target == target_bucket and bucket_log_obj.prefix == target_prefix
+
+
+def enable_bucket_logging(connection, module):
+
+ bucket_name = module.params.get("name")
+ target_bucket = module.params.get("target_bucket")
+ target_prefix = module.params.get("target_prefix")
+ changed = False
+
+ try:
+ bucket = connection.get_bucket(bucket_name)
+ except S3ResponseError as e:
+ module.fail_json(msg=e.message)
+
+ try:
+ if not compare_bucket_logging(bucket, target_bucket, target_prefix):
+ # Before we can enable logging we must give the log-delivery group WRITE and READ_ACP permissions to the target bucket
+ try:
+ target_bucket_obj = connection.get_bucket(target_bucket)
+ except S3ResponseError as e:
+ if e.status == 301:
+ module.fail_json(msg="the logging target bucket must be in the same region as the bucket being logged")
+ else:
+ module.fail_json(msg=e.message)
+ target_bucket_obj.set_as_logging_target()
+
+ bucket.enable_logging(target_bucket, target_prefix)
+ changed = True
+
+ except S3ResponseError as e:
+ module.fail_json(msg=e.message)
+
+ module.exit_json(changed=changed)
+
+
+def disable_bucket_logging(connection, module):
+
+ bucket_name = module.params.get("name")
+ changed = False
+
+ try:
+ bucket = connection.get_bucket(bucket_name)
+ if not compare_bucket_logging(bucket, None, None):
+ bucket.disable_logging()
+ changed = True
+ except S3ResponseError as e:
+ module.fail_json(msg=e.message)
+
+ module.exit_json(changed=changed)
+
+
+def main():
+
+ argument_spec = ec2_argument_spec()
+ argument_spec.update(
+ dict(
+ name = dict(required=True),
+ target_bucket = dict(required=False, default=None),
+ target_prefix = dict(required=False, default=""),
+ state = dict(required=False, default='present', choices=['present', 'absent'])
+ )
+ )
+
+ module = AnsibleModule(argument_spec=argument_spec)
+
+ if not HAS_BOTO:
+ module.fail_json(msg='boto required for this module')
+
+ region, ec2_url, aws_connect_params = get_aws_connection_info(module)
+
+ if region in ('us-east-1', '', None):
+ # S3ism for the US Standard region
+ location = Location.DEFAULT
+ else:
+ # Boto uses symbolic names for locations but region strings will
+ # actually work fine for everything except us-east-1 (US Standard)
+ location = region
+ try:
+ connection = boto.s3.connect_to_region(location, is_secure=True, calling_format=OrdinaryCallingFormat(), **aws_connect_params)
+ # use this as fallback because connect_to_region seems to fail in boto + non 'classic' aws accounts in some cases
+ if connection is None:
+ connection = boto.connect_s3(**aws_connect_params)
+    except (boto.exception.NoAuthHandlerFound, AnsibleAWSError) as e:
+ module.fail_json(msg=str(e))
+
+ state = module.params.get("state")
+
+ if state == 'present':
+ enable_bucket_logging(connection, module)
+ elif state == 'absent':
+ disable_bucket_logging(connection, module)
+
+from ansible.module_utils.basic import *
+from ansible.module_utils.ec2 import *
+
+if __name__ == '__main__':
+ main()
diff --git a/lib/ansible/modules/extras/cloud/amazon/s3_website.py b/lib/ansible/modules/extras/cloud/amazon/s3_website.py
new file mode 100644
index 0000000000..93de721095
--- /dev/null
+++ b/lib/ansible/modules/extras/cloud/amazon/s3_website.py
@@ -0,0 +1,293 @@
+#!/usr/bin/python
+#
+# This is a free software: you can redistribute it and/or modify
+# it under the terms of the GNU General Public License as published by
+# the Free Software Foundation, either version 3 of the License, or
+# (at your option) any later version.
+#
+# This Ansible library is distributed in the hope that it will be useful,
+# but WITHOUT ANY WARRANTY; without even the implied warranty of
+# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+# GNU General Public License for more details.
+#
+# You should have received a copy of the GNU General Public License
+# along with this library. If not, see <http://www.gnu.org/licenses/>.
+
+DOCUMENTATION = '''
+---
+module: s3_website
+short_description: Configure an s3 bucket as a website
+description:
+ - Configure an s3 bucket as a website
+version_added: "2.2"
+author: Rob White (@wimnat)
+options:
+ name:
+ description:
+ - "Name of the s3 bucket"
+ required: true
+ error_key:
+ description:
+ - "The object key name to use when a 4XX class error occurs. To remove an error key, set to None."
+ required: false
+ default: null
+ redirect_all_requests:
+ description:
+ - "Describes the redirect behavior for every request to this s3 bucket website endpoint"
+ required: false
+ default: null
+ region:
+ description:
+ - "AWS region to create the bucket in. If not set then the value of the AWS_REGION and EC2_REGION environment variables are checked, followed by the aws_region and ec2_region settings in the Boto config file. If none of those are set the region defaults to the S3 Location: US Standard."
+ required: false
+ default: null
+ state:
+ description:
+ - "Add or remove s3 website configuration"
+ required: false
+ default: present
+ choices: [ 'present', 'absent' ]
+ suffix:
+ description:
+ - "Suffix that is appended to a request that is for a directory on the website endpoint (e.g. if the suffix is index.html and you make a request to samplebucket/images/ the data that is returned will be for the object with the key name images/index.html). The suffix must not include a slash character."
+ required: false
+ default: index.html
+
+extends_documentation_fragment:
+ - aws
+ - ec2
+'''
+
+EXAMPLES = '''
+# Note: These examples do not set authentication details, see the AWS Guide for details.
+
+# Configure an s3 bucket to redirect all requests to example.com
+- s3_website:
+ name: mybucket.com
+ redirect_all_requests: example.com
+ state: present
+
+# Remove website configuration from an s3 bucket
+- s3_website:
+ name: mybucket.com
+ state: absent
+
+# Configure an s3 bucket as a website with index and error pages
+- s3_website:
+ name: mybucket.com
+ suffix: home.htm
+ error_key: errors/404.htm
+ state: present
+
+'''
+
+RETURN = '''
+index_document:
+ suffix:
+ description: suffix that is appended to a request that is for a directory on the website endpoint
+ returned: success
+ type: string
+ sample: index.html
+error_document:
+ key:
+ description: object key name to use when a 4XX class error occurs
+ returned: when error_document parameter set
+ type: string
+ sample: error.html
+redirect_all_requests_to:
+ host_name:
+ description: name of the host where requests will be redirected.
+ returned: when redirect all requests parameter set
+ type: string
+ sample: ansible.com
+routing_rules:
+ routing_rule:
+ host_name:
+ description: name of the host where requests will be redirected.
+ returned: when host name set as part of redirect rule
+ type: string
+ sample: ansible.com
+ condition:
+ key_prefix_equals:
+ description: object key name prefix when the redirect is applied. For example, to redirect requests for ExamplePage.html, the key prefix will be ExamplePage.html
+ returned: when routing rule present
+ type: string
+ sample: docs/
+ redirect:
+ replace_key_prefix_with:
+ description: object key prefix to use in the redirect request
+ returned: when routing rule present
+ type: string
+ sample: documents/
+
+'''
+
+import time
+
+try:
+ from botocore.exceptions import ClientError, ParamValidationError, NoCredentialsError
+ import boto3
+ HAS_BOTO3 = True
+except ImportError:
+ HAS_BOTO3 = False
+
+def _create_redirect_dict(url):
+
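+    # Accepts 'protocol://host' (e.g. 'https://example.com' -> {'Protocol': 'https', 'HostName': 'example.com'})
+    # or a bare host name (e.g. 'example.com' -> {'HostName': 'example.com'})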
+ redirect_dict = {}
+ url_split = url.split(':')
+
+ # Did we split anything?
+ if len(url_split) == 2:
+ redirect_dict[u'Protocol'] = url_split[0]
+ redirect_dict[u'HostName'] = url_split[1].replace('//', '')
+ elif len(url_split) == 1:
+ redirect_dict[u'HostName'] = url_split[0]
+ else:
+ raise ValueError('Redirect URL appears invalid')
+
+ return redirect_dict
+
+
+def _create_website_configuration(suffix, error_key, redirect_all_requests):
+
+ website_configuration = {}
+
+ if error_key is not None:
+ website_configuration['ErrorDocument'] = { 'Key': error_key }
+
+ if suffix is not None:
+ website_configuration['IndexDocument'] = { 'Suffix': suffix }
+
+ if redirect_all_requests is not None:
+ website_configuration['RedirectAllRequestsTo'] = _create_redirect_dict(redirect_all_requests)
+
+ return website_configuration
+
+
+def enable_or_update_bucket_as_website(client_connection, resource_connection, module):
+
+ bucket_name = module.params.get("name")
+ redirect_all_requests = module.params.get("redirect_all_requests")
+ # If redirect_all_requests is set then don't use the default suffix that has been set
+ if redirect_all_requests is not None:
+ suffix = None
+ else:
+ suffix = module.params.get("suffix")
+ error_key = module.params.get("error_key")
+ changed = False
+
+ try:
+ bucket_website = resource_connection.BucketWebsite(bucket_name)
+ except ClientError as e:
+ module.fail_json(msg=e.message, **camel_dict_to_snake_dict(e.response))
+
+ try:
+ website_config = client_connection.get_bucket_website(Bucket=bucket_name)
+ except ClientError as e:
+ if e.response['Error']['Code'] == 'NoSuchWebsiteConfiguration':
+ website_config = None
+ else:
+ module.fail_json(msg=e.message, **camel_dict_to_snake_dict(e.response))
+
+ if website_config is None:
+ try:
+ bucket_website.put(WebsiteConfiguration=_create_website_configuration(suffix, error_key, redirect_all_requests))
+ changed = True
+ except (ClientError, ParamValidationError) as e:
+ module.fail_json(msg=e.message, **camel_dict_to_snake_dict(e.response))
+ except ValueError as e:
+ module.fail_json(msg=str(e))
+ else:
+ try:
+ if (suffix is not None and website_config['IndexDocument']['Suffix'] != suffix) or \
+ (error_key is not None and website_config['ErrorDocument']['Key'] != error_key) or \
+ (redirect_all_requests is not None and website_config['RedirectAllRequestsTo'] != _create_redirect_dict(redirect_all_requests)):
+
+ try:
+ bucket_website.put(WebsiteConfiguration=_create_website_configuration(suffix, error_key, redirect_all_requests))
+ changed = True
+ except (ClientError, ParamValidationError) as e:
+ module.fail_json(msg=e.message, **camel_dict_to_snake_dict(e.response))
+ except KeyError as e:
+ try:
+ bucket_website.put(WebsiteConfiguration=_create_website_configuration(suffix, error_key, redirect_all_requests))
+ changed = True
+ except (ClientError, ParamValidationError) as e:
+ module.fail_json(msg=e.message, **camel_dict_to_snake_dict(e.response))
+ except ValueError as e:
+ module.fail_json(msg=str(e))
+
+ # Wait 5 secs before getting the website_config again to give it time to update
+ time.sleep(5)
+
+ website_config = client_connection.get_bucket_website(Bucket=bucket_name)
+ module.exit_json(changed=changed, **camel_dict_to_snake_dict(website_config))
+
+
+def disable_bucket_as_website(client_connection, module):
+
+ changed = False
+ bucket_name = module.params.get("name")
+
+ try:
+ client_connection.get_bucket_website(Bucket=bucket_name)
+ except ClientError as e:
+ if e.response['Error']['Code'] == 'NoSuchWebsiteConfiguration':
+ module.exit_json(changed=changed)
+ else:
+ module.fail_json(msg=e.message, **camel_dict_to_snake_dict(e.response))
+
+ try:
+ client_connection.delete_bucket_website(Bucket=bucket_name)
+ changed = True
+ except ClientError as e:
+ module.fail_json(msg=e.message, **camel_dict_to_snake_dict(e.response))
+
+ module.exit_json(changed=changed)
+
+
+def main():
+
+ argument_spec = ec2_argument_spec()
+ argument_spec.update(
+ dict(
+ name=dict(type='str', required=True),
+ state=dict(type='str', required=True, choices=['present', 'absent']),
+ suffix=dict(type='str', required=False, default='index.html'),
+ error_key=dict(type='str', required=False),
+ redirect_all_requests=dict(type='str', required=False)
+ )
+ )
+
+ module = AnsibleModule(
+ argument_spec=argument_spec,
+ mutually_exclusive = [
+ ['redirect_all_requests', 'suffix'],
+ ['redirect_all_requests', 'error_key']
+ ])
+
+ if not HAS_BOTO3:
+ module.fail_json(msg='boto3 required for this module')
+
+ region, ec2_url, aws_connect_params = get_aws_connection_info(module, boto3=True)
+
+ if region:
+ client_connection = boto3_conn(module, conn_type='client', resource='s3', region=region, endpoint=ec2_url, **aws_connect_params)
+ resource_connection = boto3_conn(module, conn_type='resource', resource='s3', region=region, endpoint=ec2_url, **aws_connect_params)
+ else:
+ module.fail_json(msg="region must be specified")
+
+ state = module.params.get("state")
+
+ if state == 'present':
+ enable_or_update_bucket_as_website(client_connection, resource_connection, module)
+ elif state == 'absent':
+ disable_bucket_as_website(client_connection, module)
+
+
+from ansible.module_utils.basic import *
+from ansible.module_utils.ec2 import *
+
+if __name__ == '__main__':
+ main()
diff --git a/lib/ansible/modules/extras/cloud/amazon/sns_topic.py b/lib/ansible/modules/extras/cloud/amazon/sns_topic.py
new file mode 100644
index 0000000000..f4916693ed
--- /dev/null
+++ b/lib/ansible/modules/extras/cloud/amazon/sns_topic.py
@@ -0,0 +1,407 @@
+#!/usr/bin/python
+# -*- coding: utf-8 -*-
+#
+# This is a free software: you can redistribute it and/or modify
+# it under the terms of the GNU General Public License as published by
+# the Free Software Foundation, either version 3 of the License, or
+# (at your option) any later version.
+#
+# This Ansible library is distributed in the hope that it will be useful,
+# but WITHOUT ANY WARRANTY; without even the implied warranty of
+# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+# GNU General Public License for more details.
+#
+# You should have received a copy of the GNU General Public License
+# along with this library. If not, see <http://www.gnu.org/licenses/>.
+
+
+DOCUMENTATION = """
+module: sns_topic
+short_description: Manages AWS SNS topics and subscriptions
+description:
+ - The M(sns_topic) module allows you to create, delete, and manage subscriptions for AWS SNS topics.
+version_added: 2.0
+author:
+ - "Joel Thompson (@joelthompson)"
+ - "Fernando Jose Pando (@nand0p)"
+options:
+ name:
+ description:
+ - The name or ARN of the SNS topic to converge
+ required: True
+ state:
+ description:
+ - Whether to create or destroy an SNS topic
+ required: False
+ default: present
+ choices: ["absent", "present"]
+ display_name:
+ description:
+ - Display name of the topic
+ required: False
+ default: None
+ policy:
+ description:
+ - Policy to apply to the SNS topic
+ required: False
+ default: None
+ delivery_policy:
+ description:
+ - Delivery policy to apply to the SNS topic
+ required: False
+ default: None
+ subscriptions:
+ description:
+ - List of subscriptions to apply to the topic. Note that AWS requires
+ subscriptions to be confirmed, so you will need to confirm any new
+ subscriptions.
+ required: False
+ default: []
+ purge_subscriptions:
+ description:
+ - "Whether to purge any subscriptions not listed here. NOTE: AWS does not
+ allow you to purge any PendingConfirmation subscriptions, so if any
+ exist and would be purged, they are silently skipped. This means that
+ somebody could come back later and confirm the subscription. Sorry.
+ Blame Amazon."
+ required: False
+ default: True
+extends_documentation_fragment: aws
+requirements: [ "boto" ]
+"""
+
+EXAMPLES = """
+
+- name: Create alarm SNS topic
+ sns_topic:
+ name: "alarms"
+ state: present
+ display_name: "alarm SNS topic"
+ delivery_policy:
+ http:
+ defaultHealthyRetryPolicy:
+ minDelayTarget: 2
+ maxDelayTarget: 4
+ numRetries: 3
+ numMaxDelayRetries: 5
+ backoffFunction: "<linear|arithmetic|geometric|exponential>"
+ disableSubscriptionOverrides: True
+ defaultThrottlePolicy:
+ maxReceivesPerSecond: 10
+ subscriptions:
+ - endpoint: "my_email_address@example.com"
+ protocol: "email"
+ - endpoint: "my_mobile_number"
+ protocol: "sms"
+
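+# A minimal sketch using only options documented above: remove the topic and
+# its confirmed subscriptions
+- name: Delete alarm SNS topic
+  sns_topic:
+    name: "alarms"
+    state: absent
+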
+"""
+
+RETURN = '''
+sns_arn:
+ description: The ARN of the topic you are modifying
+ type: string
+ sample: "arn:aws:sns:us-east-1:123456789012:my_topic_name"
+
+sns_topic:
+ description: Dict of sns topic details
+ type: dict
+ sample:
+ name: sns-topic-name
+ state: present
+ display_name: default
+ policy: {}
+ delivery_policy: {}
+ subscriptions_new: []
+ subscriptions_existing: []
+ subscriptions_deleted: []
+ subscriptions_added: []
+    subscriptions_purge: false
+ check_mode: false
+ topic_created: false
+ topic_deleted: false
+ attributes_set: []
+'''
+
+import sys
+import time
+import json
+import re
+
+try:
+ import boto.sns
+ from boto.exception import BotoServerError
+ HAS_BOTO = True
+except ImportError:
+ HAS_BOTO = False
+
+
+class SnsTopicManager(object):
+ """ Handles SNS Topic creation and destruction """
+
+ def __init__(self,
+ module,
+ name,
+ state,
+ display_name,
+ policy,
+ delivery_policy,
+ subscriptions,
+ purge_subscriptions,
+ check_mode,
+ region,
+ **aws_connect_params):
+
+ self.region = region
+ self.aws_connect_params = aws_connect_params
+ self.connection = self._get_boto_connection()
+ self.changed = False
+ self.module = module
+ self.name = name
+ self.state = state
+ self.display_name = display_name
+ self.policy = policy
+ self.delivery_policy = delivery_policy
+ self.subscriptions = subscriptions
+ self.subscriptions_existing = []
+ self.subscriptions_deleted = []
+ self.subscriptions_added = []
+ self.purge_subscriptions = purge_subscriptions
+ self.check_mode = check_mode
+ self.topic_created = False
+ self.topic_deleted = False
+ self.arn_topic = None
+ self.attributes_set = []
+
+ def _get_boto_connection(self):
+ try:
+ return connect_to_aws(boto.sns, self.region,
+ **self.aws_connect_params)
+        except BotoServerError as err:
+ self.module.fail_json(msg=err.message)
+
+ def _get_all_topics(self):
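+        # page through ListTopics; an empty NextToken marks the last page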
+ next_token = None
+ topics = []
+ while True:
+ try:
+ response = self.connection.get_all_topics(next_token)
+            except BotoServerError as err:
+                self.module.fail_json(msg=err.message)
+ topics.extend(response['ListTopicsResponse']['ListTopicsResult']['Topics'])
+ next_token = response['ListTopicsResponse']['ListTopicsResult']['NextToken']
+ if not next_token:
+ break
+ return [t['TopicArn'] for t in topics]
+
+
+ def _arn_topic_lookup(self):
+ # topic names cannot have colons, so this captures the full topic name
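+        # e.g. 'arn:aws:sns:us-east-1:123456789012:my_topic'.endswith(':my_topic')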
+ all_topics = self._get_all_topics()
+ lookup_topic = ':%s' % self.name
+ for topic in all_topics:
+ if topic.endswith(lookup_topic):
+ return topic
+
+
+ def _create_topic(self):
+ self.changed = True
+ self.topic_created = True
+ if not self.check_mode:
+ self.connection.create_topic(self.name)
+ self.arn_topic = self._arn_topic_lookup()
+ while not self.arn_topic:
+ time.sleep(3)
+ self.arn_topic = self._arn_topic_lookup()
+
+
+ def _set_topic_attrs(self):
+ topic_attributes = self.connection.get_topic_attributes(self.arn_topic) \
+ ['GetTopicAttributesResponse'] ['GetTopicAttributesResult'] \
+ ['Attributes']
+
+ if self.display_name and self.display_name != topic_attributes['DisplayName']:
+ self.changed = True
+ self.attributes_set.append('display_name')
+ if not self.check_mode:
+ self.connection.set_topic_attributes(self.arn_topic, 'DisplayName',
+ self.display_name)
+
+ if self.policy and self.policy != json.loads(topic_attributes['Policy']):
+ self.changed = True
+ self.attributes_set.append('policy')
+ if not self.check_mode:
+ self.connection.set_topic_attributes(self.arn_topic, 'Policy',
+ json.dumps(self.policy))
+
+ if self.delivery_policy and ('DeliveryPolicy' not in topic_attributes or \
+ self.delivery_policy != json.loads(topic_attributes['DeliveryPolicy'])):
+ self.changed = True
+ self.attributes_set.append('delivery_policy')
+ if not self.check_mode:
+ self.connection.set_topic_attributes(self.arn_topic, 'DeliveryPolicy',
+ json.dumps(self.delivery_policy))
+
+
+ def _canonicalize_endpoint(self, protocol, endpoint):
+ if protocol == 'sms':
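+            # SMS endpoints are compared digits-only, e.g. '+1 (555) 010-0001' -> '15550100001'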
+ return re.sub('[^0-9]*', '', endpoint)
+ return endpoint
+
+
+ def _get_topic_subs(self):
+ next_token = None
+ while True:
+ response = self.connection.get_all_subscriptions_by_topic(self.arn_topic, next_token)
+ self.subscriptions_existing.extend(response['ListSubscriptionsByTopicResponse'] \
+ ['ListSubscriptionsByTopicResult']['Subscriptions'])
+ next_token = response['ListSubscriptionsByTopicResponse'] \
+ ['ListSubscriptionsByTopicResult']['NextToken']
+ if not next_token:
+ break
+
+ def _set_topic_subs(self):
+ subscriptions_existing_list = []
+ desired_subscriptions = [(sub['protocol'],
+ self._canonicalize_endpoint(sub['protocol'], sub['endpoint'])) for sub in
+ self.subscriptions]
+
+ if self.subscriptions_existing:
+ for sub in self.subscriptions_existing:
+ sub_key = (sub['Protocol'], sub['Endpoint'])
+ subscriptions_existing_list.append(sub_key)
+ if self.purge_subscriptions and sub_key not in desired_subscriptions and \
+ sub['SubscriptionArn'] != 'PendingConfirmation':
+ self.changed = True
+ self.subscriptions_deleted.append(sub_key)
+ if not self.check_mode:
+ self.connection.unsubscribe(sub['SubscriptionArn'])
+
+ for (protocol, endpoint) in desired_subscriptions:
+ if (protocol, endpoint) not in subscriptions_existing_list:
+ self.changed = True
+                self.subscriptions_added.append((protocol, endpoint))
+ if not self.check_mode:
+ self.connection.subscribe(self.arn_topic, protocol, endpoint)
+
+
+ def _delete_subscriptions(self):
+ # NOTE: subscriptions in 'PendingConfirmation' timeout in 3 days
+ # https://forums.aws.amazon.com/thread.jspa?threadID=85993
+ for sub in self.subscriptions_existing:
+ if sub['SubscriptionArn'] != 'PendingConfirmation':
+ self.subscriptions_deleted.append(sub['SubscriptionArn'])
+ self.changed = True
+ if not self.check_mode:
+ self.connection.unsubscribe(sub['SubscriptionArn'])
+
+
+ def _delete_topic(self):
+ self.topic_deleted = True
+ self.changed = True
+ if not self.check_mode:
+ self.connection.delete_topic(self.arn_topic)
+
+
+ def ensure_ok(self):
+ self.arn_topic = self._arn_topic_lookup()
+ if not self.arn_topic:
+ self._create_topic()
+ self._set_topic_attrs()
+ self._get_topic_subs()
+ self._set_topic_subs()
+
+ def ensure_gone(self):
+ self.arn_topic = self._arn_topic_lookup()
+ if self.arn_topic:
+ self._get_topic_subs()
+ if self.subscriptions_existing:
+ self._delete_subscriptions()
+ self._delete_topic()
+
+
+ def get_info(self):
+ info = {
+ 'name': self.name,
+ 'state': self.state,
+ 'display_name': self.display_name,
+ 'policy': self.policy,
+ 'delivery_policy': self.delivery_policy,
+ 'subscriptions_new': self.subscriptions,
+ 'subscriptions_existing': self.subscriptions_existing,
+ 'subscriptions_deleted': self.subscriptions_deleted,
+ 'subscriptions_added': self.subscriptions_added,
+ 'subscriptions_purge': self.purge_subscriptions,
+ 'check_mode': self.check_mode,
+ 'topic_created': self.topic_created,
+ 'topic_deleted': self.topic_deleted,
+ 'attributes_set': self.attributes_set
+ }
+
+ return info
+
+
+
+def main():
+ argument_spec = ec2_argument_spec()
+ argument_spec.update(
+ dict(
+ name=dict(type='str', required=True),
+ state=dict(type='str', default='present', choices=['present',
+ 'absent']),
+ display_name=dict(type='str', required=False),
+ policy=dict(type='dict', required=False),
+ delivery_policy=dict(type='dict', required=False),
+ subscriptions=dict(default=[], type='list', required=False),
+ purge_subscriptions=dict(type='bool', default=True),
+ )
+ )
+
+ module = AnsibleModule(argument_spec=argument_spec,
+ supports_check_mode=True)
+
+ if not HAS_BOTO:
+ module.fail_json(msg='boto required for this module')
+
+ name = module.params.get('name')
+ state = module.params.get('state')
+ display_name = module.params.get('display_name')
+ policy = module.params.get('policy')
+ delivery_policy = module.params.get('delivery_policy')
+ subscriptions = module.params.get('subscriptions')
+ purge_subscriptions = module.params.get('purge_subscriptions')
+ check_mode = module.check_mode
+
+ region, ec2_url, aws_connect_params = get_aws_connection_info(module)
+ if not region:
+ module.fail_json(msg="region must be specified")
+
+ sns_topic = SnsTopicManager(module,
+ name,
+ state,
+ display_name,
+ policy,
+ delivery_policy,
+ subscriptions,
+ purge_subscriptions,
+ check_mode,
+ region,
+ **aws_connect_params)
+
+ if state == 'present':
+ sns_topic.ensure_ok()
+
+ elif state == 'absent':
+ sns_topic.ensure_gone()
+
+ sns_facts = dict(changed=sns_topic.changed,
+ sns_arn=sns_topic.arn_topic,
+ sns_topic=sns_topic.get_info())
+
+ module.exit_json(**sns_facts)
+
+
+from ansible.module_utils.basic import *
+from ansible.module_utils.ec2 import *
+
+if __name__ == '__main__':
+ main()
diff --git a/lib/ansible/modules/extras/cloud/amazon/sqs_queue.py b/lib/ansible/modules/extras/cloud/amazon/sqs_queue.py
new file mode 100644
index 0000000000..d00a3b638f
--- /dev/null
+++ b/lib/ansible/modules/extras/cloud/amazon/sqs_queue.py
@@ -0,0 +1,273 @@
+#!/usr/bin/python
+# This file is part of Ansible
+#
+# Ansible is free software: you can redistribute it and/or modify
+# it under the terms of the GNU General Public License as published by
+# the Free Software Foundation, either version 3 of the License, or
+# (at your option) any later version.
+#
+# Ansible is distributed in the hope that it will be useful,
+# but WITHOUT ANY WARRANTY; without even the implied warranty of
+# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+# GNU General Public License for more details.
+#
+# You should have received a copy of the GNU General Public License
+# along with Ansible. If not, see <http://www.gnu.org/licenses/>.
+
+DOCUMENTATION = """
+---
+module: sqs_queue
+short_description: Creates or deletes AWS SQS queues.
+description:
+ - Create or delete AWS SQS queues.
+ - Update attributes on existing queues.
+version_added: "2.0"
+author:
+ - Alan Loi (@loia)
+ - Fernando Jose Pando (@nand0p)
+ - Nadir Lloret (@nadirollo)
+requirements:
+ - "boto >= 2.33.0"
+options:
+ state:
+ description:
+ - Create or delete the queue
+ required: false
+ choices: ['present', 'absent']
+ default: 'present'
+ name:
+ description:
+ - Name of the queue.
+ required: true
+ default_visibility_timeout:
+ description:
+ - The default visibility timeout in seconds.
+ required: false
+ default: null
+ message_retention_period:
+ description:
+ - The message retention period in seconds.
+ required: false
+ default: null
+ maximum_message_size:
+ description:
+ - The maximum message size in bytes.
+ required: false
+ default: null
+ delivery_delay:
+ description:
+ - The delivery delay in seconds.
+ required: false
+ default: null
+ receive_message_wait_time:
+ description:
+ - The receive message wait time in seconds.
+ required: false
+ default: null
+ policy:
+ description:
+      - The JSON dict policy to attach to the queue.
+ required: false
+ default: null
+ version_added: "2.1"
+ redrive_policy:
+ description:
+      - JSON dict with the redrive_policy (see example).
+ required: false
+ default: null
+ version_added: "2.2"
+extends_documentation_fragment:
+ - aws
+ - ec2
+"""
+
+EXAMPLES = '''
+# Create SQS queue with redrive policy
+- sqs_queue:
+ name: my-queue
+ region: ap-southeast-2
+ default_visibility_timeout: 120
+ message_retention_period: 86400
+ maximum_message_size: 1024
+ delivery_delay: 30
+ receive_message_wait_time: 20
+ policy: "{{ json_dict }}"
+ redrive_policy:
+ maxReceiveCount: 5
+ deadLetterTargetArn: arn:aws:sqs:eu-west-1:123456789012:my-dead-queue
+
+# Delete SQS queue
+- sqs_queue:
+ name: my-queue
+ region: ap-southeast-2
+ state: absent
+'''
+
+import json
+import traceback
+
+try:
+    import boto.sqs
+    from boto.exception import BotoServerError, NoAuthHandlerFound
+    HAS_BOTO = True
+except ImportError:
+    HAS_BOTO = False
+
+
+def create_or_update_sqs_queue(connection, module):
+ queue_name = module.params.get('name')
+
+ queue_attributes = dict(
+ default_visibility_timeout=module.params.get('default_visibility_timeout'),
+ message_retention_period=module.params.get('message_retention_period'),
+ maximum_message_size=module.params.get('maximum_message_size'),
+ delivery_delay=module.params.get('delivery_delay'),
+ receive_message_wait_time=module.params.get('receive_message_wait_time'),
+ policy=module.params.get('policy'),
+ redrive_policy=module.params.get('redrive_policy')
+ )
+
+ result = dict(
+ region=module.params.get('region'),
+ name=queue_name,
+ )
+ result.update(queue_attributes)
+
+ try:
+ queue = connection.get_queue(queue_name)
+ if queue:
+ # Update existing
+ result['changed'] = update_sqs_queue(queue, check_mode=module.check_mode, **queue_attributes)
+
+ else:
+ # Create new
+ if not module.check_mode:
+ queue = connection.create_queue(queue_name)
+ update_sqs_queue(queue, **queue_attributes)
+ result['changed'] = True
+
+ except BotoServerError:
+ result['msg'] = 'Failed to create/update sqs queue due to error: ' + traceback.format_exc()
+ module.fail_json(**result)
+ else:
+ module.exit_json(**result)
+
+
+def update_sqs_queue(queue,
+ check_mode=False,
+ default_visibility_timeout=None,
+ message_retention_period=None,
+ maximum_message_size=None,
+ delivery_delay=None,
+ receive_message_wait_time=None,
+ policy=None,
+ redrive_policy=None):
+ changed = False
+
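+    # keep set_queue_attribute() on the left of 'or' so it is always evaluated
+    # and never short-circuited once 'changed' is True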
+ changed = set_queue_attribute(queue, 'VisibilityTimeout', default_visibility_timeout,
+ check_mode=check_mode) or changed
+ changed = set_queue_attribute(queue, 'MessageRetentionPeriod', message_retention_period,
+ check_mode=check_mode) or changed
+ changed = set_queue_attribute(queue, 'MaximumMessageSize', maximum_message_size,
+ check_mode=check_mode) or changed
+ changed = set_queue_attribute(queue, 'DelaySeconds', delivery_delay,
+ check_mode=check_mode) or changed
+ changed = set_queue_attribute(queue, 'ReceiveMessageWaitTimeSeconds', receive_message_wait_time,
+ check_mode=check_mode) or changed
+ changed = set_queue_attribute(queue, 'Policy', policy,
+ check_mode=check_mode) or changed
+ changed = set_queue_attribute(queue, 'RedrivePolicy', redrive_policy,
+ check_mode=check_mode) or changed
+ return changed
+
+
+def set_queue_attribute(queue, attribute, value, check_mode=False):
+ if not value:
+ return False
+
+    try:
+        existing_value = queue.get_attributes(attributes=attribute)[attribute]
+    except (BotoServerError, KeyError):
+        existing_value = ''
+
+ # convert dict attributes to JSON strings (sort keys for comparing)
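+    # (e.g. {'b': 1, 'a': 2} and {'a': 2, 'b': 1} both serialize to '{"a": 2, "b": 1}')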
+ if attribute in ['Policy', 'RedrivePolicy']:
+ value = json.dumps(value, sort_keys=True)
+ if existing_value:
+ existing_value = json.dumps(json.loads(existing_value), sort_keys=True)
+
+ if str(value) != existing_value:
+ if not check_mode:
+ queue.set_attribute(attribute, value)
+ return True
+
+ return False
+
+
+def delete_sqs_queue(connection, module):
+ queue_name = module.params.get('name')
+
+ result = dict(
+ region=module.params.get('region'),
+ name=queue_name,
+ )
+
+ try:
+ queue = connection.get_queue(queue_name)
+ if queue:
+ if not module.check_mode:
+ connection.delete_queue(queue)
+ result['changed'] = True
+
+ else:
+ result['changed'] = False
+
+ except BotoServerError:
+ result['msg'] = 'Failed to delete sqs queue due to error: ' + traceback.format_exc()
+ module.fail_json(**result)
+ else:
+ module.exit_json(**result)
+
+
+def main():
+ argument_spec = ec2_argument_spec()
+ argument_spec.update(dict(
+ state=dict(default='present', choices=['present', 'absent']),
+ name=dict(required=True, type='str'),
+ default_visibility_timeout=dict(type='int'),
+ message_retention_period=dict(type='int'),
+ maximum_message_size=dict(type='int'),
+ delivery_delay=dict(type='int'),
+ receive_message_wait_time=dict(type='int'),
+ policy=dict(type='dict', required=False),
+ redrive_policy=dict(type='dict', required=False),
+ ))
+
+ module = AnsibleModule(
+ argument_spec=argument_spec,
+ supports_check_mode=True)
+
+ if not HAS_BOTO:
+ module.fail_json(msg='boto required for this module')
+
+ region, ec2_url, aws_connect_params = get_aws_connection_info(module)
+ if not region:
+ module.fail_json(msg='region must be specified')
+
+ try:
+ connection = connect_to_aws(boto.sqs, region, **aws_connect_params)
+
+    except (NoAuthHandlerFound, AnsibleAWSError) as e:
+ module.fail_json(msg=str(e))
+
+ state = module.params.get('state')
+ if state == 'present':
+ create_or_update_sqs_queue(connection, module)
+ elif state == 'absent':
+ delete_sqs_queue(connection, module)
+
+
+# import module snippets
+from ansible.module_utils.basic import *
+from ansible.module_utils.ec2 import *
+
+if __name__ == '__main__':
+ main()
diff --git a/lib/ansible/modules/extras/cloud/amazon/sts_assume_role.py b/lib/ansible/modules/extras/cloud/amazon/sts_assume_role.py
new file mode 100644
index 0000000000..a3fab12137
--- /dev/null
+++ b/lib/ansible/modules/extras/cloud/amazon/sts_assume_role.py
@@ -0,0 +1,154 @@
+#!/usr/bin/python
+# This file is part of Ansible
+#
+# Ansible is free software: you can redistribute it and/or modify
+# it under the terms of the GNU General Public License as published by
+# the Free Software Foundation, either version 3 of the License, or
+# (at your option) any later version.
+#
+# Ansible is distributed in the hope that it will be useful,
+# but WITHOUT ANY WARRANTY; without even the implied warranty of
+# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+# GNU General Public License for more details.
+#
+# You should have received a copy of the GNU General Public License
+# along with Ansible. If not, see <http://www.gnu.org/licenses/>.
+
+DOCUMENTATION = '''
+---
+module: sts_assume_role
+short_description: Assume a role using AWS Security Token Service and obtain temporary credentials
+description:
+ - Assume a role using AWS Security Token Service and obtain temporary credentials
+version_added: "2.0"
+author: Boris Ekelchik (@bekelchik)
+options:
+ role_arn:
+ description:
+ - The Amazon Resource Name (ARN) of the role that the caller is assuming (http://docs.aws.amazon.com/IAM/latest/UserGuide/Using_Identifiers.html#Identifiers_ARNs)
+ required: true
+ role_session_name:
+ description:
+ - Name of the role's session - will be used by CloudTrail
+ required: true
+ policy:
+ description:
+ - Supplemental policy to use in addition to assumed role's policies.
+ required: false
+ default: null
+ duration_seconds:
+ description:
+ - The duration, in seconds, of the role session. The value can range from 900 seconds (15 minutes) to 3600 seconds (1 hour). By default, the value is set to 3600 seconds.
+ required: false
+ default: null
+ external_id:
+ description:
+ - A unique identifier that is used by third parties to assume a role in their customers' accounts.
+ required: false
+ default: null
+ mfa_serial_number:
+ description:
+      - The identification number of the MFA device that is associated with the user who is making the AssumeRole call.
+ required: false
+ default: null
+ mfa_token:
+ description:
+ - The value provided by the MFA device, if the trust policy of the role being assumed requires MFA.
+ required: false
+ default: null
+notes:
+  - In order to use the assumed role in a following playbook task you must pass the access_key, secret_key and session_token values from the registered result.
+extends_documentation_fragment:
+ - aws
+ - ec2
+'''
+
+EXAMPLES = '''
+# Note: These examples do not set authentication details, see the AWS Guide for details.
+
+# Assume an existing role (more details: http://docs.aws.amazon.com/STS/latest/APIReference/API_AssumeRole.html)
+sts_assume_role:
+ role_arn: "arn:aws:iam::123456789012:role/someRole"
+ role_session_name: "someRoleSession"
+register: assumed_role
+
+# Use the assumed role above to tag an instance in account 123456789012
+ec2_tag:
+ aws_access_key: "{{ assumed_role.sts_creds.access_key }}"
+ aws_secret_key: "{{ assumed_role.sts_creds.secret_key }}"
+ security_token: "{{ assumed_role.sts_creds.session_token }}"
+ resource: i-xyzxyz01
+ state: present
+ tags:
+ MyNewTag: value
+
+'''
+
+try:
+ import boto.sts
+ from boto.exception import BotoServerError
+ HAS_BOTO = True
+except ImportError:
+ HAS_BOTO = False
+
+
+def assume_role_policy(connection, module):
+
+ role_arn = module.params.get('role_arn')
+ role_session_name = module.params.get('role_session_name')
+ policy = module.params.get('policy')
+ duration_seconds = module.params.get('duration_seconds')
+ external_id = module.params.get('external_id')
+ mfa_serial_number = module.params.get('mfa_serial_number')
+ mfa_token = module.params.get('mfa_token')
+ changed = False
+
+ try:
+ assumed_role = connection.assume_role(role_arn, role_session_name, policy, duration_seconds, external_id, mfa_serial_number, mfa_token)
+ changed = True
+    except BotoServerError as e:
+        module.fail_json(msg=str(e))
+
+ module.exit_json(changed=changed, sts_creds=assumed_role.credentials.__dict__, sts_user=assumed_role.user.__dict__)
+
+def main():
+ argument_spec = ec2_argument_spec()
+ argument_spec.update(
+ dict(
+            role_arn = dict(required=True),
+            role_session_name = dict(required=True),
+ duration_seconds = dict(required=False, default=None, type='int'),
+ external_id = dict(required=False, default=None),
+ policy = dict(required=False, default=None),
+ mfa_serial_number = dict(required=False, default=None),
+ mfa_token = dict(required=False, default=None)
+ )
+ )
+
+ module = AnsibleModule(argument_spec=argument_spec)
+
+ if not HAS_BOTO:
+ module.fail_json(msg='boto required for this module')
+
+ region, ec2_url, aws_connect_params = get_aws_connection_info(module)
+
+ if region:
+ try:
+ connection = connect_to_aws(boto.sts, region, **aws_connect_params)
+        except (boto.exception.NoAuthHandlerFound, AnsibleAWSError) as e:
+ module.fail_json(msg=str(e))
+ else:
+ module.fail_json(msg="region must be specified")
+
+ try:
+ assume_role_policy(connection, module)
+    except BotoServerError as e:
+        module.fail_json(msg=str(e))
+
+
+# import module snippets
+from ansible.module_utils.basic import *
+from ansible.module_utils.ec2 import *
+
+if __name__ == '__main__':
+ main()
diff --git a/lib/ansible/modules/extras/cloud/amazon/sts_session_token.py b/lib/ansible/modules/extras/cloud/amazon/sts_session_token.py
new file mode 100644
index 0000000000..dc284deaec
--- /dev/null
+++ b/lib/ansible/modules/extras/cloud/amazon/sts_session_token.py
@@ -0,0 +1,159 @@
+#!/usr/bin/python
+# This file is part of Ansible
+#
+# Ansible is free software: you can redistribute it and/or modify
+# it under the terms of the GNU General Public License as published by
+# the Free Software Foundation, either version 3 of the License, or
+# (at your option) any later version.
+#
+# Ansible is distributed in the hope that it will be useful,
+# but WITHOUT ANY WARRANTY; without even the implied warranty of
+# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+# GNU General Public License for more details.
+#
+# You should have received a copy of the GNU General Public License
+# along with Ansible. If not, see <http://www.gnu.org/licenses/>.
+
+DOCUMENTATION = '''
+---
+module: sts_session_token
+short_description: Obtain a session token from the AWS Security Token Service
+description:
+ - Obtain a session token from the AWS Security Token Service
+version_added: "2.2"
+author: Victor Costan (@pwnall)
+options:
+ duration_seconds:
+ description:
+ - The duration, in seconds, of the session token. See http://docs.aws.amazon.com/STS/latest/APIReference/API_GetSessionToken.html#API_GetSessionToken_RequestParameters for acceptable and default values.
+ required: false
+ default: null
+ mfa_serial_number:
+ description:
+ - The identification number of the MFA device that is associated with the user who is making the GetSessionToken call.
+ required: false
+ default: null
+ mfa_token:
+ description:
+ - The value provided by the MFA device, if the trust policy of the user requires MFA.
+ required: false
+ default: null
+notes:
+  - In order to use the session token in a following playbook task you must pass the I(access_key), I(secret_key) and I(session_token) values from the registered result.
+extends_documentation_fragment:
+ - aws
+ - ec2
+requirements:
+ - boto3
+ - botocore
+'''
+
+RETURN = """
+sts_creds:
+ description: The Credentials object returned by the AWS Security Token Service
+ returned: always
+  type: dict
+ sample:
+ access_key: ASXXXXXXXXXXXXXXXXXX
+ expiration: "2016-04-08T11:59:47+00:00"
+ secret_key: XXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXX
+ session_token: XXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXX
+changed:
+ description: True if obtaining the credentials succeeds
+ type: bool
+ returned: always
+"""
+
+
+EXAMPLES = '''
+# Note: These examples do not set authentication details, see the AWS Guide for details.
+
+# Get a session token (more details: http://docs.aws.amazon.com/STS/latest/APIReference/API_GetSessionToken.html)
+sts_session_token:
+  duration_seconds: 3600
+register: session_credentials
+
+# Use the session token obtained above to tag an instance in account 123456789012
+ec2_tag:
+ aws_access_key: "{{ session_credentials.sts_creds.access_key }}"
+ aws_secret_key: "{{ session_credentials.sts_creds.secret_key }}"
+ security_token: "{{ session_credentials.sts_creds.session_token }}"
+ resource: i-xyzxyz01
+ state: present
+ tags:
+ MyNewTag: value
+
+'''
+
+try:
+ import boto3
+ from botocore.exceptions import ClientError
+ HAS_BOTO3 = True
+except ImportError:
+ HAS_BOTO3 = False
+
+def normalize_credentials(credentials):
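+    # map boto3's CamelCase credential keys to the snake_case names returned to Ansible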
+ access_key = credentials.get('AccessKeyId', None)
+ secret_key = credentials.get('SecretAccessKey', None)
+ session_token = credentials.get('SessionToken', None)
+ expiration = credentials.get('Expiration', None)
+ return {
+ 'access_key': access_key,
+ 'secret_key': secret_key,
+ 'session_token': session_token,
+ 'expiration': expiration
+ }
+
+def get_session_token(connection, module):
+ duration_seconds = module.params.get('duration_seconds')
+ mfa_serial_number = module.params.get('mfa_serial_number')
+ mfa_token = module.params.get('mfa_token')
+ changed = False
+
+ args = {}
+ if duration_seconds is not None:
+ args['DurationSeconds'] = duration_seconds
+ if mfa_serial_number is not None:
+ args['SerialNumber'] = mfa_serial_number
+ if mfa_token is not None:
+ args['TokenCode'] = mfa_token
+
+ try:
+ response = connection.get_session_token(**args)
+ changed = True
+    except ClientError as e:
+        module.fail_json(msg=str(e))
+
+ credentials = normalize_credentials(response.get('Credentials', {}))
+ module.exit_json(changed=changed, sts_creds=credentials)
+
+def main():
+ argument_spec = ec2_argument_spec()
+ argument_spec.update(
+ dict(
+ duration_seconds = dict(required=False, default=None, type='int'),
+ mfa_serial_number = dict(required=False, default=None),
+ mfa_token = dict(required=False, default=None)
+ )
+ )
+
+ module = AnsibleModule(argument_spec=argument_spec)
+
+ if not HAS_BOTO3:
+ module.fail_json(msg='boto3 and botocore are required.')
+
+ region, ec2_url, aws_connect_kwargs = get_aws_connection_info(module, boto3=True)
+ if region:
+ connection = boto3_conn(module, conn_type='client', resource='sts', region=region, endpoint=ec2_url, **aws_connect_kwargs)
+ else:
+ module.fail_json(msg="region must be specified")
+
+ get_session_token(connection, module)
+
+
+# import module snippets
+from ansible.module_utils.basic import *
+from ansible.module_utils.ec2 import *
+
+if __name__ == '__main__':
+ main()
diff --git a/lib/ansible/modules/extras/cloud/atomic/__init__.py b/lib/ansible/modules/extras/cloud/atomic/__init__.py
new file mode 100644
index 0000000000..e69de29bb2
--- /dev/null
+++ b/lib/ansible/modules/extras/cloud/atomic/__init__.py
diff --git a/lib/ansible/modules/extras/cloud/atomic/atomic_host.py b/lib/ansible/modules/extras/cloud/atomic/atomic_host.py
new file mode 100644
index 0000000000..dc098e6721
--- /dev/null
+++ b/lib/ansible/modules/extras/cloud/atomic/atomic_host.py
@@ -0,0 +1,105 @@
+#!/usr/bin/python
+# -*- coding: utf-8 -*-
+
+# This file is part of Ansible
+#
+# Ansible is free software: you can redistribute it and/or modify
+# it under the terms of the GNU General Public License as published by
+# the Free Software Foundation, either version 3 of the License, or
+# (at your option) any later version.
+#
+# Ansible is distributed in the hope that it will be useful,
+# but WITHOUT ANY WARRANTY; without even the implied warranty of
+# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+# GNU General Public License for more details.
+#
+# You should have received a copy of the GNU General Public licenses
+# along with Ansible. If not, see <http://www.gnu.org/licenses/>.
+
+DOCUMENTATION='''
+---
+module: atomic_host
+short_description: Manage the atomic host platform
+description:
+ - Manage the atomic host platform
+    - Rebooting of the Atomic host platform should be done outside this module.
+version_added: "2.2"
+author: "Saravanan KR @krsacme"
+notes:
+ - Host should be an atomic platform (verified by existence of '/run/ostree-booted' file)
+requirements:
+ - atomic
+ - "python >= 2.6"
+options:
+ revision:
+ description:
+      - The version number of the atomic host to be deployed. Providing C(latest) will upgrade to the latest available version.
+ required: false
+ default: latest
+ aliases: ["version"]
+'''
+
+EXAMPLES = '''
+
+# Upgrade the atomic host platform to the latest version (atomic host upgrade)
+- atomic_host: revision=latest
+
+# Deploy a specific revision as the atomic host (atomic host deploy 23.130)
+- atomic_host: revision=23.130
+
+'''
+
+RETURN = '''
+msg:
+ description: The command standard output
+ returned: always
+ type: string
+ sample: 'Already on latest'
+'''
+
+def core(module):
+ revision = module.params['revision']
+ args = []
+
+ module.run_command_environ_update = dict(LANG='C', LC_ALL='C', LC_MESSAGES='C')
+
+ if revision == 'latest':
+ args = ['atomic', 'host', 'upgrade']
+ else:
+ args = ['atomic', 'host', 'deploy', revision]
+
+ out = {}
+ err = {}
+ rc = 0
+
+ rc, out, err = module.run_command(args, check_rc=False)
+
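+    # exit status 77 from 'atomic host upgrade' is treated as 'no new revision available'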
+ if rc == 77 and revision == 'latest':
+ module.exit_json(msg="Already on latest", changed=False)
+ elif rc != 0:
+ module.fail_json(rc=rc, msg=err)
+ else:
+ module.exit_json(msg=out, changed=True)
+
+
+def main():
+ module = AnsibleModule(
+ argument_spec = dict(
+ revision = dict(default='latest', required=False, aliases=["version"]),
+ ),
+ )
+
+ # Verify that the platform is atomic host
+ if not os.path.exists("/run/ostree-booted"):
+ module.fail_json(msg="Module atomic_host is applicable for Atomic Host Platforms only")
+
+ try:
+ core(module)
+ except Exception as e:
+ module.fail_json(msg=str(e))
+
+
+# import module snippets
+from ansible.module_utils.basic import *
+if __name__ == '__main__':
+ main()
diff --git a/lib/ansible/modules/extras/cloud/atomic/atomic_image.py b/lib/ansible/modules/extras/cloud/atomic/atomic_image.py
new file mode 100644
index 0000000000..cebd97a7d4
--- /dev/null
+++ b/lib/ansible/modules/extras/cloud/atomic/atomic_image.py
@@ -0,0 +1,137 @@
+#!/usr/bin/python
+# -*- coding: utf-8 -*-
+
+# This file is part of Ansible
+#
+# Ansible is free software: you can redistribute it and/or modify
+# it under the terms of the GNU General Public License as published by
+# the Free Software Foundation, either version 3 of the License, or
+# (at your option) any later version.
+#
+# Ansible is distributed in the hope that it will be useful,
+# but WITHOUT ANY WARRANTY; without even the implied warranty of
+# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+# GNU General Public License for more details.
+#
+# You should have received a copy of the GNU General Public License
+# along with Ansible. If not, see <http://www.gnu.org/licenses/>.
+
+DOCUMENTATION='''
+---
+module: atomic_image
+short_description: Manage the container images on the atomic host platform
+description:
+ - Manage the container images on the atomic host platform
+    - Allows executing commands on the container images
+version_added: "2.2"
+author: "Saravanan KR @krsacme"
+notes:
+  - Host should support the C(atomic) command
+requirements:
+ - atomic
+ - "python >= 2.6"
+options:
+ name:
+ description:
+ - Name of the container image
+ required: True
+ state:
+ description:
+ - The state of the container image.
+      - The state C(latest) will ensure the container image is upgraded to the latest version, forcefully restarting the container if it is running.
+ required: False
+ choices: ["present", "absent", "latest"]
+ default: latest
+ started:
+ description:
+      - Start or stop the container.
+ required: False
+ choices: ["yes", "no"]
+ default: yes
+'''
+
+EXAMPLES = '''
+
+# Execute the run command on rsyslog container image (atomic run rhel7/rsyslog)
+- atomic_image: name=rhel7/rsyslog state=latest
+
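+# A minimal sketch using only options documented above: uninstall the image
+# (atomic uninstall rhel7/rsyslog)
+- atomic_image: name=rhel7/rsyslog state=absent
+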
+'''
+
+RETURN = '''
+msg:
+ description: The command standard output
+ returned: always
+ type: string
+ sample: [u'Using default tag: latest ...']
+'''
+
+def do_upgrade(module, image):
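+    # 'atomic update --force' pulls the latest image; returns False when it is already current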
+ args = ['atomic', 'update', '--force', image]
+ rc, out, err = module.run_command(args, check_rc=False)
+ if rc != 0: # something went wrong emit the msg
+ module.fail_json(rc=rc, msg=err)
+ elif 'Image is up to date' in out:
+ return False
+
+ return True
+
+
+def core(module):
+ image = module.params['name']
+ state = module.params['state']
+ started = module.params['started']
+ is_upgraded = False
+
+ module.run_command_environ_update = dict(LANG='C', LC_ALL='C', LC_MESSAGES='C')
+
+ if state == 'present' or state == 'latest':
+ if state == 'latest':
+ is_upgraded = do_upgrade(module, image)
+
+ if started:
+ args = ['atomic', 'run', image]
+ else:
+ args = ['atomic', 'install', image]
+ elif state == 'absent':
+ args = ['atomic', 'uninstall', image]
+
+ out = {}
+ err = {}
+ rc = 0
+ rc, out, err = module.run_command(args, check_rc=False)
+
+ if rc < 0:
+ module.fail_json(rc=rc, msg=err)
+ elif rc == 1 and 'already present' in err:
+        module.exit_json(result=err, changed=is_upgraded)
+ elif started and 'Container is running' in out:
+ module.exit_json(result=out, changed=is_upgraded)
+ else:
+ module.exit_json(msg=out, changed=True)
+
+
+def main():
+ module = AnsibleModule(
+ argument_spec = dict(
+            name = dict(required=True),
+ state = dict(default='latest', choices=['present', 'absent', 'latest']),
+ started = dict(default='yes', type='bool'),
+ ),
+ )
+
+ # Verify that the platform supports atomic command
+ rc, out, err = module.run_command('atomic -v', check_rc=False)
+ if rc != 0:
+ module.fail_json(msg="Error in running atomic command", err=err)
+
+ try:
+ core(module)
+ except Exception as e:
+ module.fail_json(msg=str(e))
+
+
+# import module snippets
+from ansible.module_utils.basic import *
+if __name__ == '__main__':
+ main()
diff --git a/lib/ansible/modules/extras/cloud/azure/__init__.py b/lib/ansible/modules/extras/cloud/azure/__init__.py
new file mode 100644
index 0000000000..e69de29bb2
--- /dev/null
+++ b/lib/ansible/modules/extras/cloud/azure/__init__.py
diff --git a/lib/ansible/modules/extras/cloud/azure/azure_rm_deployment.py b/lib/ansible/modules/extras/cloud/azure/azure_rm_deployment.py
new file mode 100644
index 0000000000..b9986207ab
--- /dev/null
+++ b/lib/ansible/modules/extras/cloud/azure/azure_rm_deployment.py
@@ -0,0 +1,661 @@
+#!/usr/bin/python
+#
+# This file is part of Ansible
+#
+# Ansible is free software: you can redistribute it and/or modify
+# it under the terms of the GNU General Public License as published by
+# the Free Software Foundation, either version 3 of the License, or
+# (at your option) any later version.
+#
+# Ansible is distributed in the hope that it will be useful,
+# but WITHOUT ANY WARRANTY; without even the implied warranty of
+# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+# GNU General Public License for more details.
+#
+# You should have received a copy of the GNU General Public License
+# along with Ansible. If not, see <http://www.gnu.org/licenses/>.
+DOCUMENTATION = '''
+---
+module: azure_rm_deployment
+
+short_description: Create or destroy Azure Resource Manager template deployments
+
+version_added: "2.1"
+
+description:
+ - "Create or destroy Azure Resource Manager template deployments via the Azure SDK for Python.
+ You can find some quick start templates in GitHub here https://github.com/azure/azure-quickstart-templates.
+ For more information on Azue resource manager templates see https://azure.microsoft.com/en-us/documentation/articles/resource-group-template-deploy/."
+
+options:
+ resource_group_name:
+ description:
+ - The resource group name to use or create to host the deployed template
+ required: true
+ location:
+ description:
+      - The geo-location in which the resource group will be located.
+ required: false
+ default: westus
+ deployment_mode:
+ description:
+ - In incremental mode, resources are deployed without deleting existing resources that are not included in the template.
+ In complete mode resources are deployed and existing resources in the resource group not included in the template are deleted.
+ required: false
+ default: complete
+ choices:
+ - complete
+ - incremental
+ state:
+ description:
+ - If state is "present", template will be created. If state is "present" and if deployment exists, it will be
+ updated. If state is "absent", stack will be removed.
+ default: present
+ required: false
+ choices:
+ - present
+ - absent
+ template:
+ description:
+ - A hash containing the templates inline. This parameter is mutually exclusive with 'template_link'.
+ Either one of them is required if "state" parameter is "present".
+ required: false
+ default: null
+ template_link:
+ description:
+ - Uri of file containing the template body. This parameter is mutually exclusive with 'template'. Either one
+ of them is required if "state" parameter is "present".
+ required: false
+ default: null
+ parameters:
+ description:
+ - A hash of all the required template variables for the deployment template. This parameter is mutually exclusive
+ with 'parameters_link'. Either one of them is required if "state" parameter is "present".
+ required: false
+ default: null
+ parameters_link:
+ description:
+ - Uri of file containing the parameters body. This parameter is mutually exclusive with 'parameters'. Either
+ one of them is required if "state" parameter is "present".
+ required: false
+ default: null
+ deployment_name:
+ description:
+ - The name of the deployment to be tracked in the resource group deployment history. Re-using a deployment name
+ will overwrite the previous value in the resource group's deployment history.
+ default: ansible-arm
+ wait_for_deployment_completion:
+ description:
+ - Whether or not to block until the deployment has completed.
+ default: yes
+ choices: ['yes', 'no']
+ wait_for_deployment_polling_period:
+ description:
+ - Time (in seconds) to wait between polls when waiting for deployment completion.
+ default: 10
+
+extends_documentation_fragment:
+ - azure
+
+author:
+ - David Justice (@devigned)
+ - Laurent Mazuel (@lmazuel)
+ - Andre Price (@obsoleted)
+
+'''
+
+EXAMPLES = '''
+# Destroy a template deployment
+- name: Destroy Azure Deploy
+ azure_rm_deployment:
+ state: absent
+ subscription_id: xxxxxxxx-xxxx-xxxx-xxxx-xxxxxxxxxxxx
+ resource_group_name: dev-ops-cle
+
+# Create or update a template deployment based on uris using parameter and template links
+- name: Create Azure Deploy
+ azure_rm_deployment:
+ state: present
+ resource_group_name: dev-ops-cle
+ template_link: 'https://raw.githubusercontent.com/Azure/azure-quickstart-templates/master/101-vm-simple-linux/azuredeploy.json'
+ parameters_link: 'https://raw.githubusercontent.com/Azure/azure-quickstart-templates/master/101-vm-simple-linux/azuredeploy.parameters.json'
+
+# Create or update a template deployment based on a uri to the template and parameters specified inline.
+# This deploys a VM with SSH support for a given public key, then stores the result in 'azure_vms'. The result is
+# then used to create a new host group, which is in turn used to wait for each instance to answer SSH on its public IP.
+---
+- hosts: localhost
+ connection: local
+ gather_facts: no
+ tasks:
+ - name: Destroy Azure Deploy
+ azure_rm_deployment:
+ state: absent
+ subscription_id: xxxxxxxx-xxxx-xxxx-xxxx-xxxxxxxxxxxx
+ resource_group_name: dev-ops-cle
+
+ - name: Create Azure Deploy
+ azure_rm_deployment:
+ state: present
+ subscription_id: xxxxxxxx-xxxx-xxxx-xxxx-xxxxxxxxxxxx
+ resource_group_name: dev-ops-cle
+ parameters:
+ newStorageAccountName:
+ value: devopsclestorage1
+ adminUsername:
+ value: devopscle
+ dnsNameForPublicIP:
+ value: devopscleazure
+ location:
+ value: West US
+ vmSize:
+ value: Standard_A2
+ vmName:
+ value: ansibleSshVm
+ sshKeyData:
+ value: YOUR_SSH_PUBLIC_KEY
+ template_link: 'https://raw.githubusercontent.com/Azure/azure-quickstart-templates/master/101-vm-sshkey/azuredeploy.json'
+ register: azure
+
+ - name: Add new instance to host group
+ add_host: hostname={{ item['ips'][0].public_ip }} groupname=azure_vms
+ with_items: "{{ azure.deployment.instances }}"
+
+- hosts: azure_vms
+ user: devopscle
+ tasks:
+ - name: Wait for SSH to come up
+ wait_for: port=22 timeout=2000 state=started
+ - name: echo the hostname of the vm
+ shell: hostname
+
+# Deploy an Azure WebApp running a hello world'ish node app
+- name: Create Azure WebApp Deployment at http://devopscleweb.azurewebsites.net/hello.js
+ azure_rm_deployment:
+ state: present
+ subscription_id: cbbdaed0-fea9-4693-bf0c-d446ac93c030
+ resource_group_name: dev-ops-cle-webapp
+ parameters:
+ repoURL:
+ value: 'https://github.com/devigned/az-roadshow-oss.git'
+ siteName:
+ value: devopscleweb
+ hostingPlanName:
+ value: someplan
+ siteLocation:
+ value: westus
+ sku:
+ value: Standard
+ template_link: 'https://raw.githubusercontent.com/azure/azure-quickstart-templates/master/201-web-app-github-deploy/azuredeploy.json'
+
+# Create or update a template deployment based on an inline template and parameters
+- name: Create Azure Deploy
+ azure_rm_deployment:
+ state: present
+ subscription_id: xxxxxxxx-xxxx-xxxx-xxxx-xxxxxxxxxxxx
+ resource_group_name: dev-ops-cle
+
+ template:
+ $schema: "https://schema.management.azure.com/schemas/2015-01-01/deploymentTemplate.json#"
+ contentVersion: "1.0.0.0"
+ parameters:
+ newStorageAccountName:
+ type: "string"
+ metadata:
+ description: "Unique DNS Name for the Storage Account where the Virtual Machine's disks will be placed."
+ adminUsername:
+ type: "string"
+ metadata:
+ description: "User name for the Virtual Machine."
+ adminPassword:
+ type: "securestring"
+ metadata:
+ description: "Password for the Virtual Machine."
+ dnsNameForPublicIP:
+ type: "string"
+ metadata:
+ description: "Unique DNS Name for the Public IP used to access the Virtual Machine."
+ ubuntuOSVersion:
+ type: "string"
+ defaultValue: "14.04.2-LTS"
+ allowedValues:
+ - "12.04.5-LTS"
+ - "14.04.2-LTS"
+ - "15.04"
+ metadata:
+ description: "The Ubuntu version for the VM. This will pick a fully patched image of this given Ubuntu version. Allowed values: 12.04.5-LTS, 14.04.2-LTS, 15.04."
+ variables:
+ location: "West US"
+ imagePublisher: "Canonical"
+ imageOffer: "UbuntuServer"
+ OSDiskName: "osdiskforlinuxsimple"
+ nicName: "myVMNic"
+ addressPrefix: "192.0.2.0/24"
+ subnetName: "Subnet"
+ subnetPrefix: "10.0.0.0/24"
+ storageAccountType: "Standard_LRS"
+ publicIPAddressName: "myPublicIP"
+ publicIPAddressType: "Dynamic"
+ vmStorageAccountContainerName: "vhds"
+ vmName: "MyUbuntuVM"
+ vmSize: "Standard_D1"
+ virtualNetworkName: "MyVNET"
+ vnetID: "[resourceId('Microsoft.Network/virtualNetworks',variables('virtualNetworkName'))]"
+ subnetRef: "[concat(variables('vnetID'),'/subnets/',variables('subnetName'))]"
+ resources:
+ -
+ type: "Microsoft.Storage/storageAccounts"
+ name: "[parameters('newStorageAccountName')]"
+ apiVersion: "2015-05-01-preview"
+ location: "[variables('location')]"
+ properties:
+ accountType: "[variables('storageAccountType')]"
+ -
+ apiVersion: "2015-05-01-preview"
+ type: "Microsoft.Network/publicIPAddresses"
+ name: "[variables('publicIPAddressName')]"
+ location: "[variables('location')]"
+ properties:
+ publicIPAllocationMethod: "[variables('publicIPAddressType')]"
+ dnsSettings:
+ domainNameLabel: "[parameters('dnsNameForPublicIP')]"
+ -
+ type: "Microsoft.Network/virtualNetworks"
+ apiVersion: "2015-05-01-preview"
+ name: "[variables('virtualNetworkName')]"
+ location: "[variables('location')]"
+ properties:
+ addressSpace:
+ addressPrefixes:
+ - "[variables('addressPrefix')]"
+ subnets:
+ -
+ name: "[variables('subnetName')]"
+ properties:
+ addressPrefix: "[variables('subnetPrefix')]"
+ -
+ type: "Microsoft.Network/networkInterfaces"
+ apiVersion: "2015-05-01-preview"
+ name: "[variables('nicName')]"
+ location: "[variables('location')]"
+ dependsOn:
+ - "[concat('Microsoft.Network/publicIPAddresses/', variables('publicIPAddressName'))]"
+ - "[concat('Microsoft.Network/virtualNetworks/', variables('virtualNetworkName'))]"
+ properties:
+ ipConfigurations:
+ -
+ name: "ipconfig1"
+ properties:
+ privateIPAllocationMethod: "Dynamic"
+ publicIPAddress:
+ id: "[resourceId('Microsoft.Network/publicIPAddresses',variables('publicIPAddressName'))]"
+ subnet:
+ id: "[variables('subnetRef')]"
+ -
+ type: "Microsoft.Compute/virtualMachines"
+ apiVersion: "2015-06-15"
+ name: "[variables('vmName')]"
+ location: "[variables('location')]"
+ dependsOn:
+ - "[concat('Microsoft.Storage/storageAccounts/', parameters('newStorageAccountName'))]"
+ - "[concat('Microsoft.Network/networkInterfaces/', variables('nicName'))]"
+ properties:
+ hardwareProfile:
+ vmSize: "[variables('vmSize')]"
+ osProfile:
+ computername: "[variables('vmName')]"
+ adminUsername: "[parameters('adminUsername')]"
+ adminPassword: "[parameters('adminPassword')]"
+ storageProfile:
+ imageReference:
+ publisher: "[variables('imagePublisher')]"
+ offer: "[variables('imageOffer')]"
+ sku: "[parameters('ubuntuOSVersion')]"
+ version: "latest"
+ osDisk:
+ name: "osdisk"
+ vhd:
+ uri: "[concat('http://',parameters('newStorageAccountName'),'.blob.core.windows.net/',variables('vmStorageAccountContainerName'),'/',variables('OSDiskName'),'.vhd')]"
+ caching: "ReadWrite"
+ createOption: "FromImage"
+ networkProfile:
+ networkInterfaces:
+ -
+ id: "[resourceId('Microsoft.Network/networkInterfaces',variables('nicName'))]"
+ diagnosticsProfile:
+ bootDiagnostics:
+ enabled: "true"
+ storageUri: "[concat('http://',parameters('newStorageAccountName'),'.blob.core.windows.net')]"
+ parameters:
+ newStorageAccountName:
+ value: devopsclestorage
+ adminUsername:
+ value: devopscle
+ adminPassword:
+ value: Password1!
+ dnsNameForPublicIP:
+ value: devopscleazure
+'''
+
+RETURN = '''
+deployment:
+ description: Deployment details
+ type: dict
+ returned: always
+ contains:
+ group_name:
+ description: Name of the resource group
+ type: string
+ returned: always
+ id:
+ description: The Azure ID of the deployment
+ type: string
+ returned: always
+ instances:
+ description: Provides the public IP addresses for each VM instance.
+ type: list
+ returned: always
+ name:
+ description: Name of the deployment
+ type: string
+ returned: always
+ outputs:
+ description: Dictionary of outputs received from the deployment
+ type: dict
+ returned: always
+'''
+
+PREREQ_IMPORT_ERROR = None
+
+try:
+ import time
+ import yaml
+except ImportError as exc:
+ IMPORT_ERROR = "Error importing module prerequisites: %s" % exc
+
+from ansible.module_utils.azure_rm_common import *
+
+try:
+ from itertools import chain
+ from azure.common.credentials import ServicePrincipalCredentials
+ from azure.common.exceptions import CloudError
+ from azure.mgmt.resource.resources.models import (DeploymentProperties,
+ ParametersLink,
+ TemplateLink,
+ Deployment,
+ ResourceGroup,
+ Dependency)
+ from azure.mgmt.resource.resources import ResourceManagementClient
+ from azure.mgmt.network import NetworkManagementClient
+
+except ImportError:
+ # This is handled in azure_rm_common
+ pass
+
+
+class AzureRMDeploymentManager(AzureRMModuleBase):
+
+ def __init__(self):
+
+ self.module_arg_spec = dict(
+ resource_group_name=dict(type='str', required=True, aliases=['resource_group']),
+ state=dict(type='str', default='present', choices=['present', 'absent']),
+ template=dict(type='dict', default=None),
+ parameters=dict(type='dict', default=None),
+ template_link=dict(type='str', default=None),
+ parameters_link=dict(type='str', default=None),
+ location=dict(type='str', default="westus"),
+ deployment_mode=dict(type='str', default='complete', choices=['complete', 'incremental']),
+ deployment_name=dict(type='str', default="ansible-arm"),
+ wait_for_deployment_completion=dict(type='bool', default=True),
+ wait_for_deployment_polling_period=dict(type='int', default=10)
+ )
+
+ mutually_exclusive = [('template', 'template_link'),
+ ('parameters', 'parameters_link')]
+
+ self.resource_group_name = None
+ self.state = None
+ self.template = None
+ self.parameters = None
+ self.template_link = None
+ self.parameters_link = None
+ self.location = None
+ self.deployment_mode = None
+ self.deployment_name = None
+ self.wait_for_deployment_completion = None
+ self.wait_for_deployment_polling_period = None
+ self.tags = None
+
+ self.results = dict(
+ deployment=dict(),
+ changed=False,
+ msg=""
+ )
+
+ super(AzureRMDeploymentManager, self).__init__(derived_arg_spec=self.module_arg_spec,
+ mutually_exclusive=mutually_exclusive,
+ supports_check_mode=False)
+
+ def exec_module(self, **kwargs):
+
+ if PREREQ_IMPORT_ERROR:
+ self.fail(PREREQ_IMPORT_ERROR)
+
+ for key in self.module_arg_spec.keys() + ['tags']:
+ setattr(self, key, kwargs[key])
+
+ if self.state == 'present':
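+ # A successful deploy is always reported as changed; the module does not
+ # attempt to diff against any existing deployment.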
+ deployment = self.deploy_template()
+ self.results['deployment'] = dict(
+ name=deployment.name,
+ group_name=self.resource_group_name,
+ id=deployment.id,
+ outputs=deployment.properties.outputs,
+ instances=self._get_instances(deployment)
+ )
+ self.results['changed'] = True
+ self.results['msg'] = 'deployment succeeded'
+ else:
+ if self.resource_group_exists(self.resource_group_name):
+ self.destroy_resource_group()
+ self.results['changed'] = True
+ self.results['msg'] = "deployment deleted"
+
+ return self.results
+
+ def deploy_template(self):
+ """
+ Deploy the targeted template and parameters
+ :param module: Ansible module containing the validated configuration for the deployment template
+ :param client: resource management client for azure
+ :param conn_info: connection info needed
+ :return:
+ """
+
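+ # Build the deployment properties from the module arguments, preferring the
+ # inline template/parameters over their *_link counterparts (each pair is
+ # mutually exclusive).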
+ deploy_parameter = DeploymentProperties(self.deployment_mode)
+ if not self.parameters_link:
+ deploy_parameter.parameters = self.parameters
+ else:
+ deploy_parameter.parameters_link = ParametersLink(
+ uri=self.parameters_link
+ )
+ if not self.template_link:
+ deploy_parameter.template = self.template
+ else:
+ deploy_parameter.template_link = TemplateLink(
+ uri=self.template_link
+ )
+
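+ # Make sure the target resource group exists (creating or updating it in
+ # place) before submitting the deployment to it.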
+ params = ResourceGroup(location=self.location, tags=self.tags)
+
+ try:
+ self.rm_client.resource_groups.create_or_update(self.resource_group_name, params)
+ except CloudError as exc:
+ self.fail("Resource group create_or_update failed with status code: %s and message: %s" %
+ (exc.status_code, exc.message))
+ try:
+ result = self.rm_client.deployments.create_or_update(self.resource_group_name,
+ self.deployment_name,
+ deploy_parameter)
+
+ deployment_result = self.get_poller_result(result)
+ if self.wait_for_deployment_completion:
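+ # Poll until the deployment reaches a terminal provisioning state.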
+ while deployment_result.properties is None or deployment_result.properties.provisioning_state not in ['Canceled', 'Failed', 'Deleted',
+ 'Succeeded']:
+ time.sleep(self.wait_for_deployment_polling_period)
+ deployment_result = self.rm_client.deployments.get(self.resource_group_name, self.deployment_name)
+ except CloudError as exc:
+ failed_deployment_operations = self._get_failed_deployment_operations(self.deployment_name)
+ self.log("Deployment failed %s: %s" % (exc.status_code, exc.message))
+ self.fail("Deployment failed with status code: %s and message: %s" % (exc.status_code, exc.message),
+ failed_deployment_operations=failed_deployment_operations)
+
+ if self.wait_for_deployment_completion and deployment_result.properties.provisioning_state != 'Succeeded':
+ self.log("provisioning state: %s" % deployment_result.properties.provisioning_state)
+ failed_deployment_operations = self._get_failed_deployment_operations(self.deployment_name)
+ self.fail('Deployment failed. Deployment id: %s' % deployment_result.id,
+ failed_deployment_operations=failed_deployment_operations)
+
+ return deployment_result
+
+ def destroy_resource_group(self):
+ """
+ Destroy the targeted resource group
+ """
+ try:
+ result = self.rm_client.resource_groups.delete(self.resource_group_name)
+ result.wait() # Blocking wait till the delete is finished
+ except CloudError as e:
+ if e.status_code == 404 or e.status_code == 204:
+ return
+ else:
+ self.fail("Delete resource group and deploy failed with status code: %s and message: %s" %
+ (e.status_code, e.message))
+
+ def resource_group_exists(self, resource_group):
+ '''
+ Return True/False based on existence of requested resource group.
+
+ :param resource_group: string. Name of a resource group.
+ :return: boolean
+ '''
+ try:
+ self.rm_client.resource_groups.get(resource_group)
+ except CloudError:
+ return False
+ return True
+
+ def _get_failed_nested_operations(self, current_operations):
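+ """
+ Recursively collect failed operations, following any operation whose target
+ resource is itself a nested Microsoft.Resources/deployments deployment.
+ """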
+ new_operations = []
+ for operation in current_operations:
+ if operation.properties.provisioning_state == 'Failed':
+ new_operations.append(operation)
+ if operation.properties.target_resource and \
+ 'Microsoft.Resources/deployments' in operation.properties.target_resource.id:
+ nested_deployment = operation.properties.target_resource.resource_name
+ try:
+ nested_operations = self.rm_client.deployment_operations.list(self.resource_group_name,
+ nested_deployment)
+ except CloudError as exc:
+ self.fail("List nested deployment operations failed with status code: %s and message: %s" %
+ (exc.status_code, exc.message))
+ new_nested_operations = self._get_failed_nested_operations(nested_operations)
+ new_operations += new_nested_operations
+ return new_operations
+
+ def _get_failed_deployment_operations(self, deployment_name):
+ results = []
+ # time.sleep(15) # there is a race condition between when we ask for deployment status and when the
+ # # status is available.
+
+ try:
+ operations = self.rm_client.deployment_operations.list(self.resource_group_name, deployment_name)
+ except CloudError as exc:
+ self.fail("Get deployment failed with status code: %s and message: %s" %
+ (exc.status_code, exc.message))
+ try:
+ results = [
+ dict(
+ id=op.id,
+ operation_id=op.operation_id,
+ status_code=op.properties.status_code,
+ status_message=op.properties.status_message,
+ target_resource=dict(
+ id=op.properties.target_resource.id,
+ resource_name=op.properties.target_resource.resource_name,
+ resource_type=op.properties.target_resource.resource_type
+ ) if op.properties.target_resource else None,
+ provisioning_state=op.properties.provisioning_state,
+ )
+ for op in self._get_failed_nested_operations(operations)
+ ]
+ except Exception:
+ # If we fail here, the original error gets lost and the user receives the wrong error message/stacktrace
+ pass
+ self.log(dict(failed_deployment_operations=results), pretty_print=True)
+ return results
+
+ def _get_instances(self, deployment):
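+ """
+ Walk the deployment dependency tree and pair each virtual machine with the
+ public IP addresses attached to its network interfaces.
+ """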
+ dep_tree = self._build_hierarchy(deployment.properties.dependencies)
+ vms = self._get_dependencies(dep_tree, resource_type="Microsoft.Compute/virtualMachines")
+ vms_and_nics = [(vm, self._get_dependencies(vm['children'], "Microsoft.Network/networkInterfaces"))
+ for vm in vms]
+ vms_and_ips = [(vm['dep'], self._nic_to_public_ips_instance(nics))
+ for vm, nics in vms_and_nics]
+ return [dict(vm_name=vm.resource_name, ips=[self._get_ip_dict(ip)
+ for ip in ips]) for vm, ips in vms_and_ips if len(ips) > 0]
+
+ def _get_dependencies(self, dep_tree, resource_type):
+ matches = [value for value in dep_tree.values() if value['dep'].resource_type == resource_type]
+ for child_tree in [value['children'] for value in dep_tree.values()]:
+ matches += self._get_dependencies(child_tree, resource_type)
+ return matches
+
+ def _build_hierarchy(self, dependencies, tree=None):
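+ """
+ Fold the flat dependency list into a tree keyed by resource name, nesting
+ each resource under the resources that depend on it.
+ """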
+ tree = dict(top=True) if tree is None else tree
+ for dep in dependencies:
+ if dep.resource_name not in tree:
+ tree[dep.resource_name] = dict(dep=dep, children=dict())
+ if isinstance(dep, Dependency) and dep.depends_on is not None and len(dep.depends_on) > 0:
+ self._build_hierarchy(dep.depends_on, tree[dep.resource_name]['children'])
+
+ if 'top' in tree:
+ tree.pop('top', None)
+ keys = list(tree.keys())
+ for key1 in keys:
+ for key2 in keys:
+ if key2 in tree and key1 in tree[key2]['children'] and key1 in tree:
+ tree[key2]['children'][key1] = tree[key1]
+ tree.pop(key1)
+ return tree
+
+ def _get_ip_dict(self, ip):
+ ip_dict = dict(name=ip.name,
+ id=ip.id,
+ public_ip=ip.ip_address,
+ public_ip_allocation_method=str(ip.public_ip_allocation_method)
+ )
+ if ip.dns_settings:
+ ip_dict['dns_settings'] = {
+ 'domain_name_label': ip.dns_settings.domain_name_label,
+ 'fqdn': ip.dns_settings.fqdn
+ }
+ return ip_dict
+
+ def _nic_to_public_ips_instance(self, nics):
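+ """
+ Resolve each NIC dependency to its public IP address objects: fetch the NIC,
+ walk its ip_configurations, and look up every attached public IP.
+ """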
+ return [self.network_client.public_ip_addresses.get(self.resource_group_name, public_ip_id.split('/')[-1])
+ for nic_obj in [self.network_client.network_interfaces.get(self.resource_group_name,
+ nic['dep'].resource_name) for nic in nics]
+ for public_ip_id in [ip_conf_instance.public_ip_address.id
+ for ip_conf_instance in nic_obj.ip_configurations
+ if ip_conf_instance.public_ip_address]]
+
+
+def main():
+ AzureRMDeploymentManager()
+
+from ansible.module_utils.basic import *
+if __name__ == '__main__':
+ main()
+
diff --git a/lib/ansible/modules/extras/cloud/centurylink/__init__.py b/lib/ansible/modules/extras/cloud/centurylink/__init__.py
new file mode 100644
index 0000000000..8b13789179
--- /dev/null
+++ b/lib/ansible/modules/extras/cloud/centurylink/__init__.py
@@ -0,0 +1 @@
+
diff --git a/lib/ansible/modules/extras/cloud/centurylink/clc_aa_policy.py b/lib/ansible/modules/extras/cloud/centurylink/clc_aa_policy.py
new file mode 100644
index 0000000000..681e71cb3a
--- /dev/null
+++ b/lib/ansible/modules/extras/cloud/centurylink/clc_aa_policy.py
@@ -0,0 +1,351 @@
+#!/usr/bin/python
+
+#
+# Copyright (c) 2015 CenturyLink
+#
+# This file is part of Ansible.
+#
+# Ansible is free software: you can redistribute it and/or modify
+# it under the terms of the GNU General Public License as published by
+# the Free Software Foundation, either version 3 of the License, or
+# (at your option) any later version.
+#
+# Ansible is distributed in the hope that it will be useful,
+# but WITHOUT ANY WARRANTY; without even the implied warranty of
+# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+# GNU General Public License for more details.
+#
+# You should have received a copy of the GNU General Public License
+# along with Ansible. If not, see <http://www.gnu.org/licenses/>
+#
+
+DOCUMENTATION = '''
+module: clc_aa_policy
+short_description: Create or Delete Anti Affinity Policies at CenturyLink Cloud.
+description:
+ - An Ansible module to Create or Delete Anti Affinity Policies at CenturyLink Cloud.
+version_added: "2.0"
+options:
+ name:
+ description:
+ - The name of the Anti Affinity Policy.
+ required: True
+ location:
+ description:
+ - Datacenter in which the policy lives/should live.
+ required: True
+ state:
+ description:
+ - Whether to create or delete the policy.
+ required: False
+ default: present
+ choices: ['present','absent']
+ wait:
+ description:
+ - Whether to wait for the tasks to finish before returning.
+ default: True
+ required: False
+ choices: [True, False]
+requirements:
+ - python = 2.7
+ - requests >= 2.5.0
+ - clc-sdk
+author: "CLC Runner (@clc-runner)"
+notes:
+ - To use this module, it is required to set the below environment variables, which enable access to
+ CenturyLink Cloud
+ - CLC_V2_API_USERNAME, the account login id for CenturyLink Cloud
+ - CLC_V2_API_PASSWD, the account password for CenturyLink Cloud
+ - Alternatively, the module accepts the API token and account alias. The API token can be generated using the
+ CLC account login and password via the HTTP API call at https://api.ctl.io/v2/authentication/login
+ - CLC_V2_API_TOKEN, the API token generated from https://api.ctl.io/v2/authentication/login
+ - CLC_ACCT_ALIAS, the account alias associated with the CenturyLink Cloud account
+ - Users can set CLC_V2_API_URL to point the module at a different CLC environment.
+'''
+
+EXAMPLES = '''
+# Note - You must set the CLC_V2_API_USERNAME and CLC_V2_API_PASSWD environment variables before running these examples
+
+---
+- name: Create AA Policy
+ hosts: localhost
+ gather_facts: False
+ connection: local
+ tasks:
+ - name: Create an Anti Affinity Policy
+ clc_aa_policy:
+ name: 'Hammer Time'
+ location: 'UK3'
+ state: present
+ register: policy
+
+ - name: debug
+ debug: var=policy
+
+---
+- name: Delete AA Policy
+ hosts: localhost
+ gather_facts: False
+ connection: local
+ tasks:
+ - name: Delete an Anti Affinity Policy
+ clc_aa_policy:
+ name: 'Hammer Time'
+ location: 'UK3'
+ state: absent
+ register: policy
+
+ - name: debug
+ debug: var=policy
+'''
+
+RETURN = '''
+policy:
+ description: The anti affinity policy information
+ returned: success
+ type: dict
+ sample:
+ {
+ "id":"1a28dd0988984d87b9cd61fa8da15424",
+ "name":"test_aa_policy",
+ "location":"UC1",
+ "links":[
+ {
+ "rel":"self",
+ "href":"/v2/antiAffinityPolicies/wfad/1a28dd0988984d87b9cd61fa8da15424",
+ "verbs":[
+ "GET",
+ "DELETE",
+ "PUT"
+ ]
+ },
+ {
+ "rel":"location",
+ "href":"/v2/datacenters/wfad/UC1",
+ "id":"uc1",
+ "name":"UC1 - US West (Santa Clara)"
+ }
+ ]
+ }
+'''
+
+__version__ = '${version}'
+
+from distutils.version import LooseVersion
+
+try:
+ import requests
+except ImportError:
+ REQUESTS_FOUND = False
+else:
+ REQUESTS_FOUND = True
+
+#
+# Requires the clc-python-sdk.
+# sudo pip install clc-sdk
+#
+try:
+ import clc as clc_sdk
+ from clc import CLCException
+except ImportError:
+ CLC_FOUND = False
+ clc_sdk = None
+else:
+ CLC_FOUND = True
+
+
+class ClcAntiAffinityPolicy:
+
+ clc = clc_sdk
+ module = None
+
+ def __init__(self, module):
+ """
+ Construct module
+ """
+ self.module = module
+ self.policy_dict = {}
+
+ if not CLC_FOUND:
+ self.module.fail_json(
+ msg='clc-python-sdk required for this module')
+ if not REQUESTS_FOUND:
+ self.module.fail_json(
+ msg='requests library is required for this module')
+ if requests.__version__ and LooseVersion(requests.__version__) < LooseVersion('2.5.0'):
+ self.module.fail_json(
+ msg='requests library version should be >= 2.5.0')
+
+ self._set_user_agent(self.clc)
+
+ @staticmethod
+ def _define_module_argument_spec():
+ """
+ Define the argument spec for the ansible module
+ :return: argument spec dictionary
+ """
+ argument_spec = dict(
+ name=dict(required=True),
+ location=dict(required=True),
+ wait=dict(default=True),
+ state=dict(default='present', choices=['present', 'absent']),
+ )
+ return argument_spec
+
+ # Module Behavior Goodness
+ def process_request(self):
+ """
+ Process the request - Main Code Path
+ :return: Returns with either an exit_json or fail_json
+ """
+ p = self.module.params
+
+ self._set_clc_credentials_from_env()
+ self.policy_dict = self._get_policies_for_datacenter(p)
+
+ if p['state'] == "absent":
+ changed, policy = self._ensure_policy_is_absent(p)
+ else:
+ changed, policy = self._ensure_policy_is_present(p)
+
+ if hasattr(policy, 'data'):
+ policy = policy.data
+ elif hasattr(policy, '__dict__'):
+ policy = policy.__dict__
+
+ self.module.exit_json(changed=changed, policy=policy)
+
+ def _set_clc_credentials_from_env(self):
+ """
+ Set the CLC Credentials on the sdk by reading environment variables
+ :return: none
+ """
+ env = os.environ
+ v2_api_token = env.get('CLC_V2_API_TOKEN', False)
+ v2_api_username = env.get('CLC_V2_API_USERNAME', False)
+ v2_api_passwd = env.get('CLC_V2_API_PASSWD', False)
+ clc_alias = env.get('CLC_ACCT_ALIAS', False)
+ api_url = env.get('CLC_V2_API_URL', False)
+
+ if api_url:
+ self.clc.defaults.ENDPOINT_URL_V2 = api_url
+
+ if v2_api_token and clc_alias:
+ self.clc._LOGIN_TOKEN_V2 = v2_api_token
+ self.clc._V2_ENABLED = True
+ self.clc.ALIAS = clc_alias
+ elif v2_api_username and v2_api_passwd:
+ self.clc.v2.SetCredentials(
+ api_username=v2_api_username,
+ api_passwd=v2_api_passwd)
+ else:
+ return self.module.fail_json(
+ msg="You must set the CLC_V2_API_USERNAME and CLC_V2_API_PASSWD "
+ "environment variables")
+
+ def _get_policies_for_datacenter(self, p):
+ """
+ Get the Policies for a datacenter by calling the CLC API.
+ :param p: module parameters; the datacenter is read from p['location']
+ :return: policies in the datacenter
+ """
+ response = {}
+
+ policies = self.clc.v2.AntiAffinity.GetAll(location=p['location'])
+
+ for policy in policies:
+ response[policy.name] = policy
+ return response
+
+ def _create_policy(self, p):
+ """
+ Create an Anti Affinity Policy using the CLC API.
+ :param p: module parameters containing the policy name and location
+ :return: response dictionary from the CLC API.
+ """
+ try:
+ return self.clc.v2.AntiAffinity.Create(
+ name=p['name'],
+ location=p['location'])
+ except CLCException as ex:
+ self.module.fail_json(msg='Failed to create anti affinity policy : {0}. {1}'.format(
+ p['name'], ex.response_text
+ ))
+
+ def _delete_policy(self, p):
+ """
+ Delete an Anti Affinity Policy using the CLC API.
+ :param p: datacenter to delete a policy from
+ :return: none
+ """
+ try:
+ policy = self.policy_dict[p['name']]
+ policy.Delete()
+ except CLCException as ex:
+ self.module.fail_json(msg='Failed to delete anti affinity policy : {0}. {1}'.format(
+ p['name'], ex.response_text
+ ))
+
+ def _policy_exists(self, policy_name):
+ """
+ Check to see if an Anti Affinity Policy exists
+ :param policy_name: name of the policy
+ :return: the policy object if it exists, otherwise False
+ """
+ if policy_name in self.policy_dict:
+ return self.policy_dict.get(policy_name)
+
+ return False
+
+ def _ensure_policy_is_absent(self, p):
+ """
+ Makes sure that a policy is absent
+ :param p: dictionary of policy name
+ :return: (changed, None) where changed indicates whether a deletion occurred
+ """
+ changed = False
+ if self._policy_exists(policy_name=p['name']):
+ changed = True
+ if not self.module.check_mode:
+ self._delete_policy(p)
+ return changed, None
+
+ def _ensure_policy_is_present(self, p):
+ """
+ Ensures that a policy is present
+ :param p: dictionary of a policy name
+ :return: (changed, policy) where changed indicates whether the policy was created
+ """
+ changed = False
+ policy = self._policy_exists(policy_name=p['name'])
+ if not policy:
+ changed = True
+ policy = None
+ if not self.module.check_mode:
+ policy = self._create_policy(p)
+ return changed, policy
+
+ @staticmethod
+ def _set_user_agent(clc):
+ if hasattr(clc, 'SetRequestsSession'):
+ agent_string = "ClcAnsibleModule/" + __version__
+ ses = requests.Session()
+ ses.headers.update({"Api-Client": agent_string})
+ ses.headers['User-Agent'] += " " + agent_string
+ clc.SetRequestsSession(ses)
+
+
+def main():
+ """
+ The main function. Instantiates the module and calls process_request.
+ :return: none
+ """
+ module = AnsibleModule(
+ argument_spec=ClcAntiAffinityPolicy._define_module_argument_spec(),
+ supports_check_mode=True)
+ clc_aa_policy = ClcAntiAffinityPolicy(module)
+ clc_aa_policy.process_request()
+
+from ansible.module_utils.basic import * # pylint: disable=W0614
+if __name__ == '__main__':
+ main()
diff --git a/lib/ansible/modules/extras/cloud/centurylink/clc_alert_policy.py b/lib/ansible/modules/extras/cloud/centurylink/clc_alert_policy.py
new file mode 100644
index 0000000000..b8817b6618
--- /dev/null
+++ b/lib/ansible/modules/extras/cloud/centurylink/clc_alert_policy.py
@@ -0,0 +1,537 @@
+#!/usr/bin/python
+
+#
+# Copyright (c) 2015 CenturyLink
+#
+# This file is part of Ansible.
+#
+# Ansible is free software: you can redistribute it and/or modify
+# it under the terms of the GNU General Public License as published by
+# the Free Software Foundation, either version 3 of the License, or
+# (at your option) any later version.
+#
+# Ansible is distributed in the hope that it will be useful,
+# but WITHOUT ANY WARRANTY; without even the implied warranty of
+# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+# GNU General Public License for more details.
+#
+# You should have received a copy of the GNU General Public License
+# along with Ansible. If not, see <http://www.gnu.org/licenses/>
+#
+
+DOCUMENTATION = '''
+module: clc_alert_policy
+short_description: Create or Delete Alert Policies at CenturyLink Cloud.
+description:
+ - An Ansible module to Create or Delete Alert Policies at CenturyLink Cloud.
+version_added: "2.0"
+options:
+ alias:
+ description:
+ - The alias of your CLC Account
+ required: True
+ name:
+ description:
+ - The name of the alert policy. This is mutually exclusive with id
+ required: False
+ default: None
+ id:
+ description:
+ - The alert policy id. This is mutually exclusive with name
+ required: False
+ default: None
+ alert_recipients:
+ description:
+ - A list of recipient email addresses to notify for the alert.
+ This is required for state 'present'
+ required: False
+ default: None
+ metric:
+ description:
+ - The metric on which to measure the condition that will trigger the alert.
+ This is required for state 'present'
+ required: False
+ default: None
+ choices: ['cpu','memory','disk']
+ duration:
+ description:
+ - The length of time in minutes that the condition must exceed the threshold.
+ This is required for state 'present'
+ required: False
+ default: None
+ threshold:
+ description:
+ - The threshold that will trigger the alert when the metric equals or exceeds it.
+ This is required for state 'present'
+ This number represents a percentage and must be a value between 5.0 and 95.0 that is a multiple of 5.0
+ required: False
+ default: None
+ state:
+ description:
+ - Whether to create or delete the policy.
+ required: False
+ default: present
+ choices: ['present','absent']
+requirements:
+ - python = 2.7
+ - requests >= 2.5.0
+ - clc-sdk
+author: "CLC Runner (@clc-runner)"
+notes:
+ - To use this module, it is required to set the below environment variables, which enable access to
+ CenturyLink Cloud
+ - CLC_V2_API_USERNAME, the account login id for CenturyLink Cloud
+ - CLC_V2_API_PASSWD, the account password for CenturyLink Cloud
+ - Alternatively, the module accepts the API token and account alias. The API token can be generated using the
+ CLC account login and password via the HTTP API call at https://api.ctl.io/v2/authentication/login
+ - CLC_V2_API_TOKEN, the API token generated from https://api.ctl.io/v2/authentication/login
+ - CLC_ACCT_ALIAS, the account alias associated with the CenturyLink Cloud account
+ - Users can set CLC_V2_API_URL to point the module at a different CLC environment.
+'''
+
+EXAMPLES = '''
+# Note - You must set the CLC_V2_API_USERNAME and CLC_V2_API_PASSWD environment variables before running these examples
+
+---
+- name: Create Alert Policy Example
+ hosts: localhost
+ gather_facts: False
+ connection: local
+ tasks:
+ - name: Create an Alert Policy for disk above 80% for 5 minutes
+ clc_alert_policy:
+ alias: wfad
+ name: 'alert for disk > 80%'
+ alert_recipients:
+ - test1@centurylink.com
+ - test2@centurylink.com
+ metric: 'disk'
+ duration: '00:05:00'
+ threshold: 80
+ state: present
+ register: policy
+
+ - name: debug
+ debug: var=policy
+
+---
+- name: Delete Alert Policy Example
+ hosts: localhost
+ gather_facts: False
+ connection: local
+ tasks:
+ - name: Delete an Alert Policy
+ clc_alert_policy:
+ alias: wfad
+ name: 'alert for disk > 80%'
+ state: absent
+ register: policy
+
+ - name: debug
+ debug: var=policy
+'''
+
+RETURN = '''
+policy:
+ description: The alert policy information
+ returned: success
+ type: dict
+ sample:
+ {
+ "actions": [
+ {
+ "action": "email",
+ "settings": {
+ "recipients": [
+ "user1@domain.com",
+ "user1@domain.com"
+ ]
+ }
+ }
+ ],
+ "id": "ba54ac54a60d4a4f1ed6d48c1ce240a7",
+ "links": [
+ {
+ "href": "/v2/alertPolicies/alias/ba54ac54a60d4a4fb1d6d48c1ce240a7",
+ "rel": "self",
+ "verbs": [
+ "GET",
+ "DELETE",
+ "PUT"
+ ]
+ }
+ ],
+ "name": "test_alert",
+ "triggers": [
+ {
+ "duration": "00:05:00",
+ "metric": "disk",
+ "threshold": 80.0
+ }
+ ]
+ }
+'''
+
+__version__ = '${version}'
+
+from distutils.version import LooseVersion
+
+try:
+ import requests
+except ImportError:
+ REQUESTS_FOUND = False
+else:
+ REQUESTS_FOUND = True
+
+#
+# Requires the clc-python-sdk.
+# sudo pip install clc-sdk
+#
+try:
+ import clc as clc_sdk
+ from clc import APIFailedResponse
+except ImportError:
+ CLC_FOUND = False
+ clc_sdk = None
+else:
+ CLC_FOUND = True
+
+
+class ClcAlertPolicy:
+
+ clc = clc_sdk
+ module = None
+
+ def __init__(self, module):
+ """
+ Construct module
+ """
+ self.module = module
+ self.policy_dict = {}
+
+ if not CLC_FOUND:
+ self.module.fail_json(
+ msg='clc-python-sdk required for this module')
+ if not REQUESTS_FOUND:
+ self.module.fail_json(
+ msg='requests library is required for this module')
+ if requests.__version__ and LooseVersion(requests.__version__) < LooseVersion('2.5.0'):
+ self.module.fail_json(
+ msg='requests library version should be >= 2.5.0')
+
+ self._set_user_agent(self.clc)
+
+ @staticmethod
+ def _define_module_argument_spec():
+ """
+ Define the argument spec for the ansible module
+ :return: argument spec dictionary
+ """
+ argument_spec = dict(
+ name=dict(default=None),
+ id=dict(default=None),
+ alias=dict(required=True),
+ alert_recipients=dict(type='list', default=None),
+ metric=dict(
+ choices=[
+ 'cpu',
+ 'memory',
+ 'disk'],
+ default=None),
+ duration=dict(type='str', default=None),
+ threshold=dict(type='int', default=None),
+ state=dict(default='present', choices=['present', 'absent'])
+ )
+ mutually_exclusive = [
+ ['name', 'id']
+ ]
+ return {'argument_spec': argument_spec,
+ 'mutually_exclusive': mutually_exclusive}
+
+ # Module Behavior Goodness
+ def process_request(self):
+ """
+ Process the request - Main Code Path
+ :return: Returns with either an exit_json or fail_json
+ """
+ p = self.module.params
+
+ self._set_clc_credentials_from_env()
+ self.policy_dict = self._get_alert_policies(p['alias'])
+
+ if p['state'] == 'present':
+ changed, policy = self._ensure_alert_policy_is_present()
+ else:
+ changed, policy = self._ensure_alert_policy_is_absent()
+
+ self.module.exit_json(changed=changed, policy=policy)
+
+ def _set_clc_credentials_from_env(self):
+ """
+ Set the CLC Credentials on the sdk by reading environment variables
+ :return: none
+ """
+ env = os.environ
+ v2_api_token = env.get('CLC_V2_API_TOKEN', False)
+ v2_api_username = env.get('CLC_V2_API_USERNAME', False)
+ v2_api_passwd = env.get('CLC_V2_API_PASSWD', False)
+ clc_alias = env.get('CLC_ACCT_ALIAS', False)
+ api_url = env.get('CLC_V2_API_URL', False)
+
+ if api_url:
+ self.clc.defaults.ENDPOINT_URL_V2 = api_url
+
+ if v2_api_token and clc_alias:
+ self.clc._LOGIN_TOKEN_V2 = v2_api_token
+ self.clc._V2_ENABLED = True
+ self.clc.ALIAS = clc_alias
+ elif v2_api_username and v2_api_passwd:
+ self.clc.v2.SetCredentials(
+ api_username=v2_api_username,
+ api_passwd=v2_api_passwd)
+ else:
+ return self.module.fail_json(
+ msg="You must set the CLC_V2_API_USERNAME and CLC_V2_API_PASSWD "
+ "environment variables")
+
+ def _ensure_alert_policy_is_present(self):
+ """
+ Ensures that the alert policy is present
+ :return: (changed, policy)
+ changed: A flag representing if anything is modified
+ policy: the created/updated alert policy
+ """
+ changed = False
+ p = self.module.params
+ policy_name = p.get('name')
+
+ if not policy_name:
+ self.module.fail_json(msg='Policy name is required')
+ policy = self._alert_policy_exists(policy_name)
+ if not policy:
+ changed = True
+ policy = None
+ if not self.module.check_mode:
+ policy = self._create_alert_policy()
+ else:
+ changed_u, policy = self._ensure_alert_policy_is_updated(policy)
+ if changed_u:
+ changed = True
+ return changed, policy
+
+ def _ensure_alert_policy_is_absent(self):
+ """
+ Ensures that the alert policy is absent
+ :return: (changed, None)
+ changed: A flag representing if anything is modified
+ """
+ changed = False
+ p = self.module.params
+ alert_policy_id = p.get('id')
+ alert_policy_name = p.get('name')
+ alias = p.get('alias')
+ if not alert_policy_id and not alert_policy_name:
+ self.module.fail_json(
+ msg='Either alert policy id or policy name is required')
+ if not alert_policy_id and alert_policy_name:
+ alert_policy_id = self._get_alert_policy_id(
+ self.module,
+ alert_policy_name)
+ if alert_policy_id and alert_policy_id in self.policy_dict:
+ changed = True
+ if not self.module.check_mode:
+ self._delete_alert_policy(alias, alert_policy_id)
+ return changed, None
+
+ def _ensure_alert_policy_is_updated(self, alert_policy):
+ """
+ Ensures the alert policy is updated if anything is changed in the alert policy configuration
+ :param alert_policy: the target alert policy
+ :return: (changed, policy)
+ changed: A flag representing if anything is modified
+ policy: the updated alert policy
+ """
+ changed = False
+ p = self.module.params
+ alert_policy_id = alert_policy.get('id')
+ email_list = p.get('alert_recipients')
+ metric = p.get('metric')
+ duration = p.get('duration')
+ threshold = p.get('threshold')
+ policy = alert_policy
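+ # An update is needed when any trigger field (metric, duration, threshold)
+ # differs, or, failing that, when the recipient list differs as a set.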
+ if (metric and metric != str(alert_policy.get('triggers')[0].get('metric'))) or \
+ (duration and duration != str(alert_policy.get('triggers')[0].get('duration'))) or \
+ (threshold and float(threshold) != float(alert_policy.get('triggers')[0].get('threshold'))):
+ changed = True
+ elif email_list:
+ t_email_list = list(
+ alert_policy.get('actions')[0].get('settings').get('recipients'))
+ if set(email_list) != set(t_email_list):
+ changed = True
+ if changed and not self.module.check_mode:
+ policy = self._update_alert_policy(alert_policy_id)
+ return changed, policy
+
+ def _get_alert_policies(self, alias):
+ """
+ Get the alert policies for account alias by calling the CLC API.
+ :param alias: the account alias
+ :return: the alert policies for the account alias
+ """
+ response = {}
+
+ policies = self.clc.v2.API.Call('GET',
+ '/v2/alertPolicies/%s'
+ % alias)
+
+ for policy in policies.get('items'):
+ response[policy.get('id')] = policy
+ return response
+
+ def _create_alert_policy(self):
+ """
+ Create an alert Policy using the CLC API.
+ :return: response dictionary from the CLC API.
+ """
+ p = self.module.params
+ alias = p['alias']
+ email_list = p['alert_recipients']
+ metric = p['metric']
+ duration = p['duration']
+ threshold = p['threshold']
+ policy_name = p['name']
+ arguments = json.dumps(
+ {
+ 'name': policy_name,
+ 'actions': [{
+ 'action': 'email',
+ 'settings': {
+ 'recipients': email_list
+ }
+ }],
+ 'triggers': [{
+ 'metric': metric,
+ 'duration': duration,
+ 'threshold': threshold
+ }]
+ }
+ )
+ try:
+ result = self.clc.v2.API.Call(
+ 'POST',
+ '/v2/alertPolicies/%s' % alias,
+ arguments)
+ except APIFailedResponse as e:
+ return self.module.fail_json(
+ msg='Unable to create alert policy "{0}". {1}'.format(
+ policy_name, str(e.response_text)))
+ return result
+
+ def _update_alert_policy(self, alert_policy_id):
+ """
+ Update alert policy using the CLC API.
+ :param alert_policy_id: The clc alert policy id
+ :return: response dictionary from the CLC API.
+ """
+ p = self.module.params
+ alias = p['alias']
+ email_list = p['alert_recipients']
+ metric = p['metric']
+ duration = p['duration']
+ threshold = p['threshold']
+ policy_name = p['name']
+ arguments = json.dumps(
+ {
+ 'name': policy_name,
+ 'actions': [{
+ 'action': 'email',
+ 'settings': {
+ 'recipients': email_list
+ }
+ }],
+ 'triggers': [{
+ 'metric': metric,
+ 'duration': duration,
+ 'threshold': threshold
+ }]
+ }
+ )
+ try:
+ result = self.clc.v2.API.Call(
+ 'PUT', '/v2/alertPolicies/%s/%s' %
+ (alias, alert_policy_id), arguments)
+ except APIFailedResponse as e:
+ return self.module.fail_json(
+ msg='Unable to update alert policy "{0}". {1}'.format(
+ policy_name, str(e.response_text)))
+ return result
+
+ def _delete_alert_policy(self, alias, policy_id):
+ """
+ Delete an alert policy using the CLC API.
+ :param alias : the account alias
+ :param policy_id: the alert policy id
+ :return: response dictionary from the CLC API.
+ """
+ try:
+ result = self.clc.v2.API.Call(
+ 'DELETE', '/v2/alertPolicies/%s/%s' %
+ (alias, policy_id), None)
+ except APIFailedResponse as e:
+ return self.module.fail_json(
+ msg='Unable to delete alert policy id "{0}". {1}'.format(
+ policy_id, str(e.response_text)))
+ return result
+
+ def _alert_policy_exists(self, policy_name):
+ """
+ Check to see if an alert policy exists
+ :param policy_name: name of the alert policy
+ :return: the alert policy dictionary if it exists, otherwise False
+ """
+ result = False
+ for policy_id in self.policy_dict:
+ if self.policy_dict.get(policy_id).get('name') == policy_name:
+ result = self.policy_dict.get(policy_id)
+ return result
+
+ def _get_alert_policy_id(self, module, alert_policy_name):
+ """
+ Retrieve the alert policy id of the account based on the name of the policy
+ :param module: the AnsibleModule object
+ :param alert_policy_name: the alert policy name
+ :return: alert_policy_id: The alert policy id
+ """
+ alert_policy_id = None
+ for policy_id in self.policy_dict:
+ if self.policy_dict.get(policy_id).get('name') == alert_policy_name:
+ if not alert_policy_id:
+ alert_policy_id = policy_id
+ else:
+ return module.fail_json(
+ msg='multiple alert policies were found with policy name : %s' % alert_policy_name)
+ return alert_policy_id
+
+ @staticmethod
+ def _set_user_agent(clc):
+ if hasattr(clc, 'SetRequestsSession'):
+ agent_string = "ClcAnsibleModule/" + __version__
+ ses = requests.Session()
+ ses.headers.update({"Api-Client": agent_string})
+ ses.headers['User-Agent'] += " " + agent_string
+ clc.SetRequestsSession(ses)
+
+
+def main():
+ """
+ The main function. Instantiates the module and calls process_request.
+ :return: none
+ """
+ argument_dict = ClcAlertPolicy._define_module_argument_spec()
+ module = AnsibleModule(supports_check_mode=True, **argument_dict)
+ clc_alert_policy = ClcAlertPolicy(module)
+ clc_alert_policy.process_request()
+
+from ansible.module_utils.basic import * # pylint: disable=W0614
+if __name__ == '__main__':
+ main()
diff --git a/lib/ansible/modules/extras/cloud/centurylink/clc_blueprint_package.py b/lib/ansible/modules/extras/cloud/centurylink/clc_blueprint_package.py
new file mode 100644
index 0000000000..4e8a392495
--- /dev/null
+++ b/lib/ansible/modules/extras/cloud/centurylink/clc_blueprint_package.py
@@ -0,0 +1,302 @@
+#!/usr/bin/python
+
+#
+# Copyright (c) 2015 CenturyLink
+#
+# This file is part of Ansible.
+#
+# Ansible is free software: you can redistribute it and/or modify
+# it under the terms of the GNU General Public License as published by
+# the Free Software Foundation, either version 3 of the License, or
+# (at your option) any later version.
+#
+# Ansible is distributed in the hope that it will be useful,
+# but WITHOUT ANY WARRANTY; without even the implied warranty of
+# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+# GNU General Public License for more details.
+#
+# You should have received a copy of the GNU General Public License
+# along with Ansible. If not, see <http://www.gnu.org/licenses/>
+#
+
+DOCUMENTATION = '''
+module: clc_blueprint_package
+short_description: deploys a blueprint package on a set of servers in CenturyLink Cloud.
+description:
+ - An Ansible module to deploy a blueprint package on a set of servers in CenturyLink Cloud.
+version_added: "2.0"
+options:
+ server_ids:
+ description:
+ - A list of server Ids on which to deploy the blueprint package.
+ required: True
+ package_id:
+ description:
+ - The package id of the blueprint.
+ required: True
+ package_params:
+ description:
+ - The dictionary of arguments required to deploy the blueprint.
+ default: {}
+ required: False
+ state:
+ description:
+ - Whether to install or uninstall the package. Currently only "present" (install) is supported.
+ required: False
+ default: present
+ choices: ['present']
+ wait:
+ description:
+ - Whether to wait for the tasks to finish before returning.
+ choices: [ True, False ]
+ default: True
+ required: False
+requirements:
+ - python = 2.7
+ - requests >= 2.5.0
+ - clc-sdk
+author: "CLC Runner (@clc-runner)"
+notes:
+ - To use this module, it is required to set the below environment variables, which enable access to
+ CenturyLink Cloud
+ - CLC_V2_API_USERNAME, the account login id for CenturyLink Cloud
+ - CLC_V2_API_PASSWD, the account password for CenturyLink Cloud
+ - Alternatively, the module accepts the API token and account alias. The API token can be generated using the
+ CLC account login and password via the HTTP API call at https://api.ctl.io/v2/authentication/login
+ - CLC_V2_API_TOKEN, the API token generated from https://api.ctl.io/v2/authentication/login
+ - CLC_ACCT_ALIAS, the account alias associated with the CenturyLink Cloud account
+ - Users can set CLC_V2_API_URL to point the module at a different CLC environment.
+'''
+
+EXAMPLES = '''
+# Note - You must set the CLC_V2_API_USERNAME and CLC_V2_API_PASSWD environment variables before running these examples
+
+- name: Deploy package
+ clc_blueprint_package:
+ server_ids:
+ - UC1TEST-SERVER1
+ - UC1TEST-SERVER2
+ package_id: 77abb844-579d-478d-3955-c69ab4a7ba1a
+ package_params: {}
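+
+# A minimal sketch of the same deployment when you do not want to block on the
+# package install requests (the wait option below is accepted by this module):
+- name: Deploy package without waiting
+ clc_blueprint_package:
+ server_ids:
+ - UC1TEST-SERVER1
+ package_id: 77abb844-579d-478d-3955-c69ab4a7ba1a
+ wait: False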
+'''
+
+RETURN = '''
+server_ids:
+ description: The list of server ids that are changed
+ returned: success
+ type: list
+ sample:
+ [
+ "UC1TEST-SERVER1",
+ "UC1TEST-SERVER2"
+ ]
+'''
+
+__version__ = '${version}'
+
+from distutils.version import LooseVersion
+
+try:
+ import requests
+except ImportError:
+ REQUESTS_FOUND = False
+else:
+ REQUESTS_FOUND = True
+
+#
+# Requires the clc-python-sdk.
+# sudo pip install clc-sdk
+#
+try:
+ import clc as clc_sdk
+ from clc import CLCException
+except ImportError:
+ CLC_FOUND = False
+ clc_sdk = None
+else:
+ CLC_FOUND = True
+
+
+class ClcBlueprintPackage:
+
+ clc = clc_sdk
+ module = None
+
+ def __init__(self, module):
+ """
+ Construct module
+ """
+ self.module = module
+ if not CLC_FOUND:
+ self.module.fail_json(
+ msg='clc-python-sdk required for this module')
+ if not REQUESTS_FOUND:
+ self.module.fail_json(
+ msg='requests library is required for this module')
+ if requests.__version__ and LooseVersion(
+ requests.__version__) < LooseVersion('2.5.0'):
+ self.module.fail_json(
+ msg='requests library version should be >= 2.5.0')
+
+ self._set_user_agent(self.clc)
+
+ def process_request(self):
+ """
+ Process the request - Main Code Path
+ :return: Returns with either an exit_json or fail_json
+ """
+ p = self.module.params
+ changed = False
+ changed_server_ids = []
+ self._set_clc_credentials_from_env()
+ server_ids = p['server_ids']
+ package_id = p['package_id']
+ package_params = p['package_params']
+ state = p['state']
+ if state == 'present':
+ changed, changed_server_ids, request_list = self.ensure_package_installed(
+ server_ids, package_id, package_params)
+ self._wait_for_requests_to_complete(request_list)
+ self.module.exit_json(changed=changed, server_ids=changed_server_ids)
+
+ @staticmethod
+ def define_argument_spec():
+ """
+ This function defines the dictionary object required for
+ package module
+ :return: the package dictionary object
+ """
+ argument_spec = dict(
+ server_ids=dict(type='list', required=True),
+ package_id=dict(required=True),
+ package_params=dict(type='dict', default={}),
+ wait=dict(default=True),
+ state=dict(default='present', choices=['present'])
+ )
+ return argument_spec
+
+ def ensure_package_installed(self, server_ids, package_id, package_params):
+ """
+ Ensure the package is installed in the given list of servers
+ :param server_ids: the server list where the package needs to be installed
+ :param package_id: the blueprint package id
+ :param package_params: the package arguments
+ :return: (changed, server_ids, request_list)
+ changed: A flag indicating if a change was made
+ server_ids: The list of servers modified
+ request_list: The list of request objects from clc-sdk
+ """
+ changed = False
+ request_list = []
+ servers = self._get_servers_from_clc(
+ server_ids,
+ 'Failed to get servers from CLC')
+ for server in servers:
+ if not self.module.check_mode:
+ request = self.clc_install_package(
+ server,
+ package_id,
+ package_params)
+ request_list.append(request)
+ changed = True
+ return changed, server_ids, request_list
+
+ def clc_install_package(self, server, package_id, package_params):
+ """
+ Install the package to a given clc server
+ :param server: The server object where the package needs to be installed
+ :param package_id: The blueprint package id
+ :param package_params: the required argument dict for the package installation
+ :return: The result object from the CLC API call
+ """
+ result = None
+ try:
+ result = server.ExecutePackage(
+ package_id=package_id,
+ parameters=package_params)
+ except CLCException as ex:
+ self.module.fail_json(msg='Failed to install package : {0} to server {1}. {2}'.format(
+ package_id, server.id, ex.message
+ ))
+ return result
+
+ def _wait_for_requests_to_complete(self, request_lst):
+ """
+ Waits until the CLC requests are complete if the wait argument is True
+ :param request_lst: The list of CLC request objects
+ :return: none
+ """
+ if not self.module.params['wait']:
+ return
+ for request in request_lst:
+ request.WaitUntilComplete()
+ for request_details in request.requests:
+ if request_details.Status() != 'succeeded':
+ self.module.fail_json(
+ msg='Unable to process package install request')
+
+ def _get_servers_from_clc(self, server_list, message):
+ """
+ Internal function to fetch list of CLC server objects from a list of server ids
+ :param server_list: the list of server ids
+ :param message: the error message to raise if there is any error
+ :return the list of CLC server objects
+ """
+ try:
+ return self.clc.v2.Servers(server_list).servers
+ except CLCException as ex:
+ self.module.fail_json(msg=message + ': %s' % ex)
+
+ def _set_clc_credentials_from_env(self):
+ """
+ Set the CLC Credentials on the sdk by reading environment variables
+ :return: none
+ """
+ env = os.environ
+ v2_api_token = env.get('CLC_V2_API_TOKEN', False)
+ v2_api_username = env.get('CLC_V2_API_USERNAME', False)
+ v2_api_passwd = env.get('CLC_V2_API_PASSWD', False)
+ clc_alias = env.get('CLC_ACCT_ALIAS', False)
+ api_url = env.get('CLC_V2_API_URL', False)
+
+ if api_url:
+ self.clc.defaults.ENDPOINT_URL_V2 = api_url
+
+ if v2_api_token and clc_alias:
+ self.clc._LOGIN_TOKEN_V2 = v2_api_token
+ self.clc._V2_ENABLED = True
+ self.clc.ALIAS = clc_alias
+ elif v2_api_username and v2_api_passwd:
+ self.clc.v2.SetCredentials(
+ api_username=v2_api_username,
+ api_passwd=v2_api_passwd)
+ else:
+ return self.module.fail_json(
+ msg="You must set the CLC_V2_API_USERNAME and CLC_V2_API_PASSWD "
+ "environment variables")
+
+ @staticmethod
+ def _set_user_agent(clc):
+ if hasattr(clc, 'SetRequestsSession'):
+ agent_string = "ClcAnsibleModule/" + __version__
+ ses = requests.Session()
+ ses.headers.update({"Api-Client": agent_string})
+ ses.headers['User-Agent'] += " " + agent_string
+ clc.SetRequestsSession(ses)
+
+
+def main():
+ """
+ Main function
+ :return: None
+ """
+ module = AnsibleModule(
+ argument_spec=ClcBlueprintPackage.define_argument_spec(),
+ supports_check_mode=True
+ )
+ clc_blueprint_package = ClcBlueprintPackage(module)
+ clc_blueprint_package.process_request()
+
+from ansible.module_utils.basic import *
+if __name__ == '__main__':
+ main()
diff --git a/lib/ansible/modules/extras/cloud/centurylink/clc_firewall_policy.py b/lib/ansible/modules/extras/cloud/centurylink/clc_firewall_policy.py
new file mode 100644
index 0000000000..c26128a40b
--- /dev/null
+++ b/lib/ansible/modules/extras/cloud/centurylink/clc_firewall_policy.py
@@ -0,0 +1,597 @@
+#!/usr/bin/python
+
+#
+# Copyright (c) 2015 CenturyLink
+#
+# This file is part of Ansible.
+#
+# Ansible is free software: you can redistribute it and/or modify
+# it under the terms of the GNU General Public License as published by
+# the Free Software Foundation, either version 3 of the License, or
+# (at your option) any later version.
+#
+# Ansible is distributed in the hope that it will be useful,
+# but WITHOUT ANY WARRANTY; without even the implied warranty of
+# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+# GNU General Public License for more details.
+#
+# You should have received a copy of the GNU General Public License
+# along with Ansible. If not, see <http://www.gnu.org/licenses/>
+#
+
+DOCUMENTATION = '''
+module: clc_firewall_policy
+short_description: Create/delete/update firewall policies
+description:
+ - Create, delete or update firewall policies on CenturyLink Cloud
+version_added: "2.0"
+options:
+ location:
+ description:
+ - Target datacenter for the firewall policy
+ required: True
+ state:
+ description:
+ - Whether to create or delete the firewall policy
+ default: present
+ required: False
+ choices: ['present', 'absent']
+ source:
+ description:
+ - The list of source addresses for traffic on the originating firewall.
+ This is required when state is 'present"
+ default: None
+ required: False
+ destination:
+ description:
+ - The list of destination addresses for traffic on the terminating firewall.
+ This is required when state is 'present'
+ default: None
+ required: False
+ ports:
+ description:
+ - The list of ports associated with the policy.
+ TCP and UDP can take in single ports or port ranges.
+ default: None
+ required: False
+ choices: ['any', 'icmp', 'TCP/123', 'UDP/123', 'TCP/123-456', 'UDP/123-456']
+ firewall_policy_id:
+ description:
+ - Id of the firewall policy. This is required to update or delete an existing firewall policy
+ default: None
+ required: False
+ source_account_alias:
+ description:
+ - CLC alias for the source account
+ required: True
+ destination_account_alias:
+ description:
+ - CLC alias for the destination account
+ default: None
+ required: False
+ wait:
+ description:
+ - Whether to wait for the provisioning tasks to finish before returning.
+ default: True
+ required: False
+ choices: [True, False]
+ enabled:
+ description:
+ - Whether the firewall policy is enabled or disabled
+ default: True
+ required: False
+ choices: [True, False]
+requirements:
+ - python = 2.7
+ - requests >= 2.5.0
+ - clc-sdk
+author: "CLC Runner (@clc-runner)"
+notes:
+ - To use this module, it is required to set the below environment variables, which enable access to
+ CenturyLink Cloud
+ - CLC_V2_API_USERNAME, the account login id for CenturyLink Cloud
+ - CLC_V2_API_PASSWD, the account password for CenturyLink Cloud
+ - Alternatively, the module accepts the API token and account alias. The API token can be generated using the
+ CLC account login and password via the HTTP API call at https://api.ctl.io/v2/authentication/login
+ - CLC_V2_API_TOKEN, the API token generated from https://api.ctl.io/v2/authentication/login
+ - CLC_ACCT_ALIAS, the account alias associated with the CenturyLink Cloud account
+ - Users can set CLC_V2_API_URL to point the module at a different CLC environment.
+'''
+
+EXAMPLES = '''
+---
+- name: Create Firewall Policy
+ hosts: localhost
+ gather_facts: False
+ connection: local
+ tasks:
+ - name: Create / Verify a Firewall Policy at CenturyLink Cloud
+ clc_firewall:
+ source_account_alias: WFAD
+ location: VA1
+ state: present
+ source: 10.128.216.0/24
+ destination: 10.128.216.0/24
+ ports: Any
+ destination_account_alias: WFAD
+
+---
+- name: Delete Firewall Policy
+ hosts: localhost
+ gather_facts: False
+ connection: local
+ tasks:
+ - name: Delete a Firewall Policy at CenturyLink Cloud
+ clc_firewall:
+ source_account_alias: WFAD
+ location: VA1
+ state: absent
+ firewall_policy_id: 'c62105233d7a4231bd2e91b9c791e43e1'
+'''
+
+RETURN = '''
+firewall_policy_id:
+ description: The firewall policy id
+ returned: success
+ type: string
+ sample: fc36f1bfd47242e488a9c44346438c05
+firewall_policy:
+ description: The firewall policy information
+ returned: success
+ type: dict
+ sample:
+ {
+ "destination":[
+ "10.1.1.0/24",
+ "10.2.2.0/24"
+ ],
+ "destinationAccount":"wfad",
+ "enabled":true,
+ "id":"fc36f1bfd47242e488a9c44346438c05",
+ "links":[
+ {
+ "href":"http://api.ctl.io/v2-experimental/firewallPolicies/wfad/uc1/fc36f1bfd47242e488a9c44346438c05",
+ "rel":"self",
+ "verbs":[
+ "GET",
+ "PUT",
+ "DELETE"
+ ]
+ }
+ ],
+ "ports":[
+ "any"
+ ],
+ "source":[
+ "10.1.1.0/24",
+ "10.2.2.0/24"
+ ],
+ "status":"active"
+ }
+'''
+
+__version__ = '${version}'
+
+import os
+import urlparse
+from time import sleep
+from distutils.version import LooseVersion
+
+try:
+ import requests
+except ImportError:
+ REQUESTS_FOUND = False
+else:
+ REQUESTS_FOUND = True
+
+try:
+ import clc as clc_sdk
+ from clc import CLCException
+ from clc import APIFailedResponse
+except ImportError:
+ CLC_FOUND = False
+ clc_sdk = None
+else:
+ CLC_FOUND = True
+
+
+class ClcFirewallPolicy:
+
+ clc = None
+
+ def __init__(self, module):
+ """
+ Construct module
+ """
+ self.clc = clc_sdk
+ self.module = module
+ self.firewall_dict = {}
+
+ if not CLC_FOUND:
+ self.module.fail_json(
+ msg='clc-python-sdk required for this module')
+ if not REQUESTS_FOUND:
+ self.module.fail_json(
+ msg='requests library is required for this module')
+ if requests.__version__ and LooseVersion(
+ requests.__version__) < LooseVersion('2.5.0'):
+ self.module.fail_json(
+ msg='requests library version should be >= 2.5.0')
+
+ self._set_user_agent(self.clc)
+
+ @staticmethod
+ def _define_module_argument_spec():
+ """
+ Define the argument spec for the ansible module
+ :return: argument spec dictionary
+ """
+ argument_spec = dict(
+ location=dict(required=True),
+ source_account_alias=dict(required=True),
+ destination_account_alias=dict(default=None),
+ firewall_policy_id=dict(default=None),
+ ports=dict(default=None, type='list'),
+ source=dict(default=None, type='list'),
+ destination=dict(default=None, type='list'),
+ wait=dict(default=True),
+ state=dict(default='present', choices=['present', 'absent']),
+ enabled=dict(default=True, choices=[True, False])
+ )
+ return argument_spec
+
+ def process_request(self):
+ """
+ Execute the main code path, and handle the request
+ :return: none
+ """
+ changed = False
+ firewall_policy = None
+ location = self.module.params.get('location')
+ source_account_alias = self.module.params.get('source_account_alias')
+ destination_account_alias = self.module.params.get(
+ 'destination_account_alias')
+ firewall_policy_id = self.module.params.get('firewall_policy_id')
+ ports = self.module.params.get('ports')
+ source = self.module.params.get('source')
+ destination = self.module.params.get('destination')
+ wait = self.module.params.get('wait')
+ state = self.module.params.get('state')
+ enabled = self.module.params.get('enabled')
+
+ self.firewall_dict = {
+ 'location': location,
+ 'source_account_alias': source_account_alias,
+ 'destination_account_alias': destination_account_alias,
+ 'firewall_policy_id': firewall_policy_id,
+ 'ports': ports,
+ 'source': source,
+ 'destination': destination,
+ 'wait': wait,
+ 'state': state,
+ 'enabled': enabled}
+
+ self._set_clc_credentials_from_env()
+
+ if state == 'absent':
+ changed, firewall_policy_id, firewall_policy = self._ensure_firewall_policy_is_absent(
+ source_account_alias, location, self.firewall_dict)
+
+ elif state == 'present':
+ changed, firewall_policy_id, firewall_policy = self._ensure_firewall_policy_is_present(
+ source_account_alias, location, self.firewall_dict)
+
+ return self.module.exit_json(
+ changed=changed,
+ firewall_policy_id=firewall_policy_id,
+ firewall_policy=firewall_policy)
+
+ @staticmethod
+ def _get_policy_id_from_response(response):
+ """
+ Method to parse out the policy id from creation response
+ :param response: response from firewall creation API call
+ :return: policy_id: firewall policy id from creation call
+ """
+ url = response.get('links')[0]['href']
+ path = urlparse.urlparse(url).path
+ path_list = os.path.split(path)
+ policy_id = path_list[-1]
+ return policy_id
+
+ def _set_clc_credentials_from_env(self):
+ """
+ Set the CLC Credentials on the sdk by reading environment variables
+ :return: none
+ """
+ env = os.environ
+ v2_api_token = env.get('CLC_V2_API_TOKEN', False)
+ v2_api_username = env.get('CLC_V2_API_USERNAME', False)
+ v2_api_passwd = env.get('CLC_V2_API_PASSWD', False)
+ clc_alias = env.get('CLC_ACCT_ALIAS', False)
+ api_url = env.get('CLC_V2_API_URL', False)
+
+ if api_url:
+ self.clc.defaults.ENDPOINT_URL_V2 = api_url
+
+ if v2_api_token and clc_alias:
+ self.clc._LOGIN_TOKEN_V2 = v2_api_token
+ self.clc._V2_ENABLED = True
+ self.clc.ALIAS = clc_alias
+ elif v2_api_username and v2_api_passwd:
+ self.clc.v2.SetCredentials(
+ api_username=v2_api_username,
+ api_passwd=v2_api_passwd)
+ else:
+ return self.module.fail_json(
+ msg="You must set the CLC_V2_API_USERNAME and CLC_V2_API_PASSWD "
+ "environment variables")
+
+ def _ensure_firewall_policy_is_present(
+ self,
+ source_account_alias,
+ location,
+ firewall_dict):
+ """
+ Ensures that a given firewall policy is present
+ :param source_account_alias: the source account alias for the firewall policy
+ :param location: datacenter of the firewall policy
+ :param firewall_dict: dictionary of request parameters for firewall policy
+ :return: (changed, firewall_policy_id, firewall_policy)
+ changed: flag for if a change occurred
+ firewall_policy_id: the firewall policy id that was created/updated
+ firewall_policy: The firewall_policy object
+ """
+ firewall_policy = None
+ firewall_policy_id = firewall_dict.get('firewall_policy_id')
+
+ if firewall_policy_id is None:
+ if not self.module.check_mode:
+ response = self._create_firewall_policy(
+ source_account_alias,
+ location,
+ firewall_dict)
+ firewall_policy_id = self._get_policy_id_from_response(
+ response)
+ changed = True
+ else:
+ firewall_policy = self._get_firewall_policy(
+ source_account_alias, location, firewall_policy_id)
+ if not firewall_policy:
+ return self.module.fail_json(
+ msg='Unable to find the firewall policy id : {0}'.format(
+ firewall_policy_id))
+ changed = self._compare_get_request_with_dict(
+ firewall_policy,
+ firewall_dict)
+ if not self.module.check_mode and changed:
+ self._update_firewall_policy(
+ source_account_alias,
+ location,
+ firewall_policy_id,
+ firewall_dict)
+ if changed and firewall_policy_id:
+ firewall_policy = self._wait_for_requests_to_complete(
+ source_account_alias,
+ location,
+ firewall_policy_id)
+ return changed, firewall_policy_id, firewall_policy
+
+ def _ensure_firewall_policy_is_absent(
+ self,
+ source_account_alias,
+ location,
+ firewall_dict):
+ """
+ Ensures that a given firewall policy is removed if present
+ :param source_account_alias: the source account alias for the firewall policy
+ :param location: datacenter of the firewall policy
+ :param firewall_dict: firewall policy to delete
+ :return: (changed, firewall_policy_id, response)
+ changed: flag for if a change occurred
+ firewall_policy_id: the firewall policy id that was deleted
+ response: response from CLC API call
+ """
+ changed = False
+ response = []
+ firewall_policy_id = firewall_dict.get('firewall_policy_id')
+ result = self._get_firewall_policy(
+ source_account_alias, location, firewall_policy_id)
+ if result:
+ if not self.module.check_mode:
+ response = self._delete_firewall_policy(
+ source_account_alias,
+ location,
+ firewall_policy_id)
+ changed = True
+ return changed, firewall_policy_id, response
+
+ def _create_firewall_policy(
+ self,
+ source_account_alias,
+ location,
+ firewall_dict):
+ """
+ Creates the firewall policy for the given account alias
+ :param source_account_alias: the source account alias for the firewall policy
+ :param location: datacenter of the firewall policy
+ :param firewall_dict: dictionary of request parameters for firewall policy
+ :return: response from CLC API call
+ """
+ payload = {
+ 'destinationAccount': firewall_dict.get('destination_account_alias'),
+ 'source': firewall_dict.get('source'),
+ 'destination': firewall_dict.get('destination'),
+ 'ports': firewall_dict.get('ports')}
+ try:
+ response = self.clc.v2.API.Call(
+ 'POST', '/v2-experimental/firewallPolicies/%s/%s' %
+ (source_account_alias, location), payload)
+ except APIFailedResponse as e:
+ return self.module.fail_json(
+ msg="Unable to create firewall policy. %s" %
+ str(e.response_text))
+ return response
+
+ def _delete_firewall_policy(
+ self,
+ source_account_alias,
+ location,
+ firewall_policy_id):
+ """
+ Deletes a given firewall policy for an account alias in a datacenter
+ :param source_account_alias: the source account alias for the firewall policy
+ :param location: datacenter of the firewall policy
+ :param firewall_policy_id: firewall policy id to delete
+ :return: response: response from CLC API call
+ """
+ try:
+ response = self.clc.v2.API.Call(
+ 'DELETE', '/v2-experimental/firewallPolicies/%s/%s/%s' %
+ (source_account_alias, location, firewall_policy_id))
+ except APIFailedResponse as e:
+ return self.module.fail_json(
+ msg="Unable to delete the firewall policy id : {0}. {1}".format(
+ firewall_policy_id, str(e.response_text)))
+ return response
+
+ def _update_firewall_policy(
+ self,
+ source_account_alias,
+ location,
+ firewall_policy_id,
+ firewall_dict):
+ """
+ Updates a firewall policy for a given datacenter and account alias
+ :param source_account_alias: the source account alias for the firewall policy
+ :param location: datacenter of the firewall policy
+ :param firewall_policy_id: firewall policy id to update
+ :param firewall_dict: dictionary of request parameters for firewall policy
+ :return: response: response from CLC API call
+ """
+ try:
+ response = self.clc.v2.API.Call(
+ 'PUT',
+ '/v2-experimental/firewallPolicies/%s/%s/%s' %
+ (source_account_alias,
+ location,
+ firewall_policy_id),
+ firewall_dict)
+ except APIFailedResponse as e:
+ return self.module.fail_json(
+ msg="Unable to update the firewall policy id : {0}. {1}".format(
+ firewall_policy_id, str(e.response_text)))
+ return response
+
+ @staticmethod
+ def _compare_get_request_with_dict(response, firewall_dict):
+ """
+ Helper method to compare the json response for getting the firewall policy with the request parameters
+ :param response: response from the get method
+ :param firewall_dict: dictionary of request parameters for firewall policy
+ :return: changed: Boolean that returns true if there are differences between
+ the response parameters and the playbook parameters
+ """
+
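+ # A populated field in the GET response that differs from the matching
+ # request parameter is enough to report the policy as changed.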
+ changed = False
+
+ response_dest_account_alias = response.get('destinationAccount')
+ response_enabled = response.get('enabled')
+ response_source = response.get('source')
+ response_dest = response.get('destination')
+ response_ports = response.get('ports')
+ request_dest_account_alias = firewall_dict.get(
+ 'destination_account_alias')
+ request_enabled = firewall_dict.get('enabled')
+ if request_enabled is None:
+ request_enabled = True
+ request_source = firewall_dict.get('source')
+ request_dest = firewall_dict.get('destination')
+ request_ports = firewall_dict.get('ports')
+
+ if (
+ response_dest_account_alias and str(response_dest_account_alias) != str(request_dest_account_alias)) or (
+ response_enabled != request_enabled) or (
+ response_source and response_source != request_source) or (
+ response_dest and response_dest != request_dest) or (
+ response_ports and response_ports != request_ports):
+ changed = True
+ return changed
+
+ def _get_firewall_policy(
+ self,
+ source_account_alias,
+ location,
+ firewall_policy_id):
+ """
+ Get back details for a particular firewall policy
+ :param source_account_alias: the source account alias for the firewall policy
+ :param location: datacenter of the firewall policy
+ :param firewall_policy_id: id of the firewall policy to get
+ :return: response - The response from CLC API call
+ """
+ response = None
+ try:
+ response = self.clc.v2.API.Call(
+ 'GET', '/v2-experimental/firewallPolicies/%s/%s/%s' %
+ (source_account_alias, location, firewall_policy_id))
+ except APIFailedResponse as e:
+ if e.response_status_code != 404:
+ self.module.fail_json(
+ msg="Unable to fetch the firewall policy with id : {0}. {1}".format(
+ firewall_policy_id, str(e.response_text)))
+ return response
+
+ def _wait_for_requests_to_complete(
+ self,
+ source_account_alias,
+ location,
+ firewall_policy_id,
+ wait_limit=50):
+ """
+ Waits until the CLC requests are complete if the wait argument is True
+ :param source_account_alias: The source account alias for the firewall policy
+ :param location: datacenter of the firewall policy
+ :param firewall_policy_id: The firewall policy id
+ :param wait_limit: The number of times to check the status for completion
+ :return: the firewall_policy object
+ """
+ wait = self.module.params.get('wait')
+ count = 0
+ firewall_policy = None
+ while wait:
+ count += 1
+ firewall_policy = self._get_firewall_policy(
+ source_account_alias, location, firewall_policy_id)
+ status = firewall_policy.get('status')
+ if status == 'active' or count > wait_limit:
+ wait = False
+ else:
+ # wait for 2 seconds
+ sleep(2)
+ return firewall_policy
+
+ @staticmethod
+ def _set_user_agent(clc):
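+ # Tag the SDK's requests session with this module's version so CLC can
+ # attribute API traffic to the Ansible module.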
+ if hasattr(clc, 'SetRequestsSession'):
+ agent_string = "ClcAnsibleModule/" + __version__
+ ses = requests.Session()
+ ses.headers.update({"Api-Client": agent_string})
+ ses.headers['User-Agent'] += " " + agent_string
+ clc.SetRequestsSession(ses)
+
+
+def main():
+ """
+ The main function. Instantiates the module and calls process_request.
+ :return: none
+ """
+ module = AnsibleModule(
+ argument_spec=ClcFirewallPolicy._define_module_argument_spec(),
+ supports_check_mode=True)
+
+ clc_firewall = ClcFirewallPolicy(module)
+ clc_firewall.process_request()
+
+from ansible.module_utils.basic import * # pylint: disable=W0614
+if __name__ == '__main__':
+ main()
diff --git a/lib/ansible/modules/extras/cloud/centurylink/clc_group.py b/lib/ansible/modules/extras/cloud/centurylink/clc_group.py
new file mode 100644
index 0000000000..f30b37d6ec
--- /dev/null
+++ b/lib/ansible/modules/extras/cloud/centurylink/clc_group.py
@@ -0,0 +1,513 @@
+#!/usr/bin/python
+
+#
+# Copyright (c) 2015 CenturyLink
+#
+# This file is part of Ansible.
+#
+# Ansible is free software: you can redistribute it and/or modify
+# it under the terms of the GNU General Public License as published by
+# the Free Software Foundation, either version 3 of the License, or
+# (at your option) any later version.
+#
+# Ansible is distributed in the hope that it will be useful,
+# but WITHOUT ANY WARRANTY; without even the implied warranty of
+# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+# GNU General Public License for more details.
+#
+# You should have received a copy of the GNU General Public License
+# along with Ansible. If not, see <http://www.gnu.org/licenses/>
+#
+
+DOCUMENTATION = '''
+module: clc_group
+short_description: Create/delete Server Groups at Centurylink Cloud
+description:
+ - Create or delete Server Groups at Centurylink Cloud
+version_added: "2.0"
+options:
+ name:
+ description:
+ - The name of the Server Group
+ required: True
+ description:
+ description:
+ - A description of the Server Group
+ required: False
+ parent:
+ description:
+ - The parent group of the server group. If parent is not provided, the group is created at the top level.
+ required: False
+ location:
+ description:
+ - Datacenter to create the group in. If location is not provided, the group gets created in the default datacenter
+ associated with the account
+ required: False
+ state:
+ description:
+ - Whether to create or delete the group
+ default: present
+ choices: ['present', 'absent']
+ wait:
+ description:
+ - Whether to wait for the tasks to finish before returning.
+ choices: [ True, False ]
+ default: True
+ required: False
+requirements:
+ - python = 2.7
+ - requests >= 2.5.0
+ - clc-sdk
+author: "CLC Runner (@clc-runner)"
+notes:
+ - To use this module, it is required to set the below environment variables which enable access to the
+ Centurylink Cloud
+ - CLC_V2_API_USERNAME, the account login id for the centurylink cloud
+ - CLC_V2_API_PASSWD, the account password for the centurylink cloud
+ - Alternatively, the module accepts the API token and account alias. The API token can be generated using the
+ CLC account login and password via the HTTP api call @ https://api.ctl.io/v2/authentication/login
+ - CLC_V2_API_TOKEN, the API token generated from https://api.ctl.io/v2/authentication/login
+ - CLC_ACCT_ALIAS, the account alias associated with the centurylink cloud
+ - Users can set CLC_V2_API_URL to specify an endpoint for pointing to a different CLC environment.
+'''
+
+EXAMPLES = '''
+
+# Create a Server Group
+
+---
+- name: Create Server Group
+ hosts: localhost
+ gather_facts: False
+ connection: local
+ tasks:
+ - name: Create / Verify a Server Group at CenturyLink Cloud
+ clc_group:
+ name: 'My Cool Server Group'
+ parent: 'Default Group'
+ state: present
+ register: clc
+
+ - name: debug
+ debug: var=clc
+
+# Delete a Server Group
+
+---
+- name: Delete Server Group
+ hosts: localhost
+ gather_facts: False
+ connection: local
+ tasks:
+ - name: Delete / Verify Absent a Server Group at CenturyLink Cloud
+ clc_group:
+ name: 'My Cool Server Group'
+ parent: 'Default Group'
+ state: absent
+ register: clc
+
+ - name: debug
+ debug: var=clc
+
+'''
+
+RETURN = '''
+group:
+ description: The group information
+ returned: success
+ type: dict
+ sample:
+ {
+ "changeInfo":{
+ "createdBy":"service.wfad",
+ "createdDate":"2015-07-29T18:52:47Z",
+ "modifiedBy":"service.wfad",
+ "modifiedDate":"2015-07-29T18:52:47Z"
+ },
+ "customFields":[
+
+ ],
+ "description":"test group",
+ "groups":[
+
+ ],
+ "id":"bb5f12a3c6044ae4ad0a03e73ae12cd1",
+ "links":[
+ {
+ "href":"/v2/groups/wfad",
+ "rel":"createGroup",
+ "verbs":[
+ "POST"
+ ]
+ },
+ {
+ "href":"/v2/servers/wfad",
+ "rel":"createServer",
+ "verbs":[
+ "POST"
+ ]
+ },
+ {
+ "href":"/v2/groups/wfad/bb5f12a3c6044ae4ad0a03e73ae12cd1",
+ "rel":"self",
+ "verbs":[
+ "GET",
+ "PATCH",
+ "DELETE"
+ ]
+ },
+ {
+ "href":"/v2/groups/wfad/086ac1dfe0b6411989e8d1b77c4065f0",
+ "id":"086ac1dfe0b6411989e8d1b77c4065f0",
+ "rel":"parentGroup"
+ },
+ {
+ "href":"/v2/groups/wfad/bb5f12a3c6044ae4ad0a03e73ae12cd1/defaults",
+ "rel":"defaults",
+ "verbs":[
+ "GET",
+ "POST"
+ ]
+ },
+ {
+ "href":"/v2/groups/wfad/bb5f12a3c6044ae4ad0a03e73ae12cd1/billing",
+ "rel":"billing"
+ },
+ {
+ "href":"/v2/groups/wfad/bb5f12a3c6044ae4ad0a03e73ae12cd1/archive",
+ "rel":"archiveGroupAction"
+ },
+ {
+ "href":"/v2/groups/wfad/bb5f12a3c6044ae4ad0a03e73ae12cd1/statistics",
+ "rel":"statistics"
+ },
+ {
+ "href":"/v2/groups/wfad/bb5f12a3c6044ae4ad0a03e73ae12cd1/upcomingScheduledActivities",
+ "rel":"upcomingScheduledActivities"
+ },
+ {
+ "href":"/v2/groups/wfad/bb5f12a3c6044ae4ad0a03e73ae12cd1/horizontalAutoscalePolicy",
+ "rel":"horizontalAutoscalePolicyMapping",
+ "verbs":[
+ "GET",
+ "PUT",
+ "DELETE"
+ ]
+ },
+ {
+ "href":"/v2/groups/wfad/bb5f12a3c6044ae4ad0a03e73ae12cd1/scheduledActivities",
+ "rel":"scheduledActivities",
+ "verbs":[
+ "GET",
+ "POST"
+ ]
+ }
+ ],
+ "locationId":"UC1",
+ "name":"test group",
+ "status":"active",
+ "type":"default"
+ }
+'''
+
+__version__ = '${version}'
+
+import os
+from distutils.version import LooseVersion
+
+try:
+ import requests
+except ImportError:
+ REQUESTS_FOUND = False
+else:
+ REQUESTS_FOUND = True
+
+#
+# Requires the clc-python-sdk.
+# sudo pip install clc-sdk
+#
+try:
+ import clc as clc_sdk
+ from clc import CLCException
+except ImportError:
+ CLC_FOUND = False
+ clc_sdk = None
+else:
+ CLC_FOUND = True
+
+
+class ClcGroup(object):
+
+ clc = None
+ root_group = None
+
+ def __init__(self, module):
+ """
+ Construct module
+ """
+ self.clc = clc_sdk
+ self.module = module
+ self.group_dict = {}
+
+ if not CLC_FOUND:
+ self.module.fail_json(
+ msg='clc-python-sdk required for this module')
+ if not REQUESTS_FOUND:
+ self.module.fail_json(
+ msg='requests library is required for this module')
+ if requests.__version__ and LooseVersion(requests.__version__) < LooseVersion('2.5.0'):
+ self.module.fail_json(
+ msg='requests library version should be >= 2.5.0')
+
+ self._set_user_agent(self.clc)
+
+ def process_request(self):
+ """
+ Execute the main code path, and handle the request
+ :return: none
+ """
+ location = self.module.params.get('location')
+ group_name = self.module.params.get('name')
+ parent_name = self.module.params.get('parent')
+ group_description = self.module.params.get('description')
+ state = self.module.params.get('state')
+
+ self._set_clc_credentials_from_env()
+ self.group_dict = self._get_group_tree_for_datacenter(
+ datacenter=location)
+
+ if state == "absent":
+ changed, group, requests_lst = self._ensure_group_is_absent(
+ group_name=group_name, parent_name=parent_name)
+ if requests_lst:
+ self._wait_for_requests_to_complete(requests_lst)
+ else:
+ changed, group = self._ensure_group_is_present(
+ group_name=group_name, parent_name=parent_name, group_description=group_description)
+ try:
+ group = group.data
+ except AttributeError:
+ group = group_name
+ self.module.exit_json(changed=changed, group=group)
+
+ @staticmethod
+ def _define_module_argument_spec():
+ """
+ Define the argument spec for the ansible module
+ :return: argument spec dictionary
+ """
+ argument_spec = dict(
+ name=dict(required=True),
+ description=dict(default=None),
+ parent=dict(default=None),
+ location=dict(default=None),
+ state=dict(default='present', choices=['present', 'absent']),
+ wait=dict(type='bool', default=True))
+
+ return argument_spec
+
+ def _set_clc_credentials_from_env(self):
+ """
+ Set the CLC Credentials on the sdk by reading environment variables
+ :return: none
+ """
+ env = os.environ
+ v2_api_token = env.get('CLC_V2_API_TOKEN', False)
+ v2_api_username = env.get('CLC_V2_API_USERNAME', False)
+ v2_api_passwd = env.get('CLC_V2_API_PASSWD', False)
+ clc_alias = env.get('CLC_ACCT_ALIAS', False)
+ api_url = env.get('CLC_V2_API_URL', False)
+
+ if api_url:
+ self.clc.defaults.ENDPOINT_URL_V2 = api_url
+
+ if v2_api_token and clc_alias:
+ self.clc._LOGIN_TOKEN_V2 = v2_api_token
+ self.clc._V2_ENABLED = True
+ self.clc.ALIAS = clc_alias
+ elif v2_api_username and v2_api_passwd:
+ self.clc.v2.SetCredentials(
+ api_username=v2_api_username,
+ api_passwd=v2_api_passwd)
+ else:
+ return self.module.fail_json(
+ msg="You must set the CLC_V2_API_USERNAME and CLC_V2_API_PASSWD "
+ "environment variables")
+
+ def _ensure_group_is_absent(self, group_name, parent_name):
+ """
+ Ensure that group_name is absent by deleting it if necessary
+ :param group_name: string - the name of the clc server group to delete
+ :param parent_name: string - the name of the parent group for group_name
+ :return: changed, group
+ """
+ changed = False
+ group = []
+ results = []
+
+ if self._group_exists(group_name=group_name, parent_name=parent_name):
+ if not self.module.check_mode:
+ group.append(group_name)
+ result = self._delete_group(group_name)
+ results.append(result)
+ changed = True
+ return changed, group, results
+
+ def _delete_group(self, group_name):
+ """
+ Delete the provided server group
+ :param group_name: string - the server group to delete
+ :return: none
+ """
+ response = None
+ group, parent = self.group_dict.get(group_name)
+ try:
+ response = group.Delete()
+ except CLCException as ex:
+ self.module.fail_json(msg='Failed to delete group :{0}. {1}'.format(
+ group_name, ex.response_text
+ ))
+ return response
+
+ def _ensure_group_is_present(
+ self,
+ group_name,
+ parent_name,
+ group_description):
+ """
+ Checks to see if a server group exists, creates it if it doesn't.
+ :param group_name: the name of the group to validate/create
+ :param parent_name: the name of the parent group for group_name
+ :param group_description: a short description of the server group (used when creating)
+ :return: (changed, group) -
+ changed: Boolean- whether a change was made,
+ group: A clc group object for the group
+ """
+ assert self.root_group, "Implementation Error: Root Group not set"
+ parent = parent_name if parent_name is not None else self.root_group.name
+ description = group_description
+ changed = False
+ group = group_name
+
+ parent_exists = self._group_exists(group_name=parent, parent_name=None)
+ child_exists = self._group_exists(
+ group_name=group_name,
+ parent_name=parent)
+
+ if parent_exists and child_exists:
+ group, parent = self.group_dict[group_name]
+ changed = False
+ elif parent_exists and not child_exists:
+ if not self.module.check_mode:
+ group = self._create_group(
+ group=group,
+ parent=parent,
+ description=description)
+ changed = True
+ else:
+ self.module.fail_json(
+ msg="parent group: " +
+ parent +
+ " does not exist")
+
+ return changed, group
+
+ def _create_group(self, group, parent, description):
+ """
+ Create the provided server group
+ :param group: clc_sdk.Group - the group to create
+ :param parent: clc_sdk.Parent - the parent group for {group}
+ :param description: string - a text description of the group
+ :return: clc_sdk.Group - the created group
+ """
+ response = None
+ (parent, grandparent) = self.group_dict[parent]
+ try:
+ response = parent.Create(name=group, description=description)
+ except CLCException as ex:
+ self.module.fail_json(msg='Failed to create group :{0}. {1}'.format(
+ group, ex.response_text))
+ return response
+
+ def _group_exists(self, group_name, parent_name):
+ """
+ Check to see if a group exists
+ :param group_name: string - the group to check
+ :param parent_name: string - the parent of group_name
+ :return: boolean - whether the group exists
+ """
+ result = False
+ if group_name in self.group_dict:
+ (group, parent) = self.group_dict[group_name]
+ if parent_name is None or parent_name == parent.name:
+ result = True
+ return result
+
+ def _get_group_tree_for_datacenter(self, datacenter=None):
+ """
+ Walk the tree of groups for a datacenter
+ :param datacenter: string - the datacenter to walk (ex: 'UC1')
+ :return: a dictionary of groups and parents
+ """
+ self.root_group = self.clc.v2.Datacenter(
+ location=datacenter).RootGroup()
+ return self._walk_groups_recursive(
+ parent_group=None,
+ child_group=self.root_group)
+
+ def _walk_groups_recursive(self, parent_group, child_group):
+ """
+ Walk a parent-child tree of groups, starting with the provided child group
+ :param parent_group: clc_sdk.Group - the parent group to start the walk
+ :param child_group: clc_sdk.Group - the child group to start the walk
+ :return: a dictionary of groups and parents
+ """
+ result = {str(child_group): (child_group, parent_group)}
+ groups = child_group.Subgroups().groups
+ if len(groups) > 0:
+ for group in groups:
+ if group.type != 'default':
+ continue
+
+ result.update(self._walk_groups_recursive(child_group, group))
+ return result
+
+ def _wait_for_requests_to_complete(self, requests_lst):
+ """
+ Waits until the CLC requests are complete if the wait argument is True
+ :param requests_lst: The list of CLC request objects
+ :return: none
+ """
+ if not self.module.params['wait']:
+ return
+ for request in requests_lst:
+ request.WaitUntilComplete()
+ for request_details in request.requests:
+ if request_details.Status() != 'succeeded':
+ self.module.fail_json(
+ msg='Unable to process group request')
+
+ @staticmethod
+ def _set_user_agent(clc):
+ if hasattr(clc, 'SetRequestsSession'):
+ agent_string = "ClcAnsibleModule/" + __version__
+ ses = requests.Session()
+ ses.headers.update({"Api-Client": agent_string})
+ ses.headers['User-Agent'] += " " + agent_string
+ clc.SetRequestsSession(ses)
+
+
+def main():
+ """
+ The main function. Instantiates the module and calls process_request.
+ :return: none
+ """
+ module = AnsibleModule(
+ argument_spec=ClcGroup._define_module_argument_spec(),
+ supports_check_mode=True)
+
+ clc_group = ClcGroup(module)
+ clc_group.process_request()
+
+from ansible.module_utils.basic import * # pylint: disable=W0614
+if __name__ == '__main__':
+ main()
diff --git a/lib/ansible/modules/extras/cloud/centurylink/clc_loadbalancer.py b/lib/ansible/modules/extras/cloud/centurylink/clc_loadbalancer.py
new file mode 100644
index 0000000000..abb421c755
--- /dev/null
+++ b/lib/ansible/modules/extras/cloud/centurylink/clc_loadbalancer.py
@@ -0,0 +1,936 @@
+#!/usr/bin/python
+
+#
+# Copyright (c) 2015 CenturyLink
+#
+# This file is part of Ansible.
+#
+# Ansible is free software: you can redistribute it and/or modify
+# it under the terms of the GNU General Public License as published by
+# the Free Software Foundation, either version 3 of the License, or
+# (at your option) any later version.
+#
+# Ansible is distributed in the hope that it will be useful,
+# but WITHOUT ANY WARRANTY; without even the implied warranty of
+# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+# GNU General Public License for more details.
+#
+# You should have received a copy of the GNU General Public License
+# along with Ansible. If not, see <http://www.gnu.org/licenses/>
+#
+
+DOCUMENTATION = '''
+module: clc_loadbalancer
+short_description: Create or delete shared loadbalancers in CenturyLink Cloud.
+description:
+ - An Ansible module to create or delete shared loadbalancers in CenturyLink Cloud.
+version_added: "2.0"
+options:
+ name:
+ description:
+ - The name of the loadbalancer
+ required: True
+ description:
+ description:
+ - A description for the loadbalancer
+ required: False
+ default: None
+ alias:
+ description:
+ - The alias of your CLC Account
+ required: True
+ location:
+ description:
+ - The location of the datacenter where the load balancer resides
+ required: True
+ method:
+ description:
+ - The balancing method for the load balancer pool
+ required: False
+ default: None
+ choices: ['leastConnection', 'roundRobin']
+ persistence:
+ description:
+ - The persistence method for the load balancer
+ required: False
+ default: None
+ choices: ['standard', 'sticky']
+ port:
+ description:
+ - Port to configure on the public-facing side of the load balancer pool
+ required: False
+ default: None
+ choices: [80, 443]
+ nodes:
+ description:
+ - A list of nodes that need to be added to the load balancer pool
+ required: False
+ default: []
+ status:
+ description:
+ - The status of the loadbalancer
+ required: False
+ default: enabled
+ choices: ['enabled', 'disabled']
+ state:
+ description:
+ - Whether to create or delete the load balancer pool
+ required: False
+ default: present
+ choices: ['present', 'absent', 'port_absent', 'nodes_present', 'nodes_absent']
+requirements:
+ - python = 2.7
+ - requests >= 2.5.0
+ - clc-sdk
+author: "CLC Runner (@clc-runner)"
+notes:
+ - To use this module, it is required to set the below environment variables which enable access to the
+ Centurylink Cloud
+ - CLC_V2_API_USERNAME, the account login id for the centurylink cloud
+ - CLC_V2_API_PASSWD, the account password for the centurylink cloud
+ - Alternatively, the module accepts the API token and account alias. The API token can be generated using the
+ CLC account login and password via the HTTP api call @ https://api.ctl.io/v2/authentication/login
+ - CLC_V2_API_TOKEN, the API token generated from https://api.ctl.io/v2/authentication/login
+ - CLC_ACCT_ALIAS, the account alias associated with the centurylink cloud
+ - Users can set CLC_V2_API_URL to specify an endpoint for pointing to a different CLC environment.
+'''
+
+EXAMPLES = '''
+# Note - You must set the CLC_V2_API_USERNAME and CLC_V2_API_PASSWD environment variables before running these examples
+- name: Create Loadbalancer
+ hosts: localhost
+ connection: local
+ tasks:
+ - name: Actually Create things
+ clc_loadbalancer:
+ name: test
+ description: test
+ alias: TEST
+ location: WA1
+ port: 443
+ nodes:
+ - { 'ipAddress': '10.11.22.123', 'privatePort': 80 }
+ state: present
+
+- name: Add node to an existing loadbalancer pool
+ hosts: localhost
+ connection: local
+ tasks:
+ - name: Actually Create things
+ clc_loadbalancer:
+ name: test
+ description: test
+ alias: TEST
+ location: WA1
+ port: 443
+ nodes:
+ - { 'ipAddress': '10.11.22.234', 'privatePort': 80 }
+ state: nodes_present
+
+- name: Remove node from an existing loadbalancer pool
+ hosts: localhost
+ connection: local
+ tasks:
+ - name: Actually Create things
+ clc_loadbalancer:
+ name: test
+ description: test
+ alias: TEST
+ location: WA1
+ port: 443
+ nodes:
+ - { 'ipAddress': '10.11.22.234', 'privatePort': 80 }
+ state: nodes_absent
+
+- name: Delete LoadbalancerPool
+ hosts: localhost
+ connection: local
+ tasks:
+ - name: Actually Delete things
+ clc_loadbalancer:
+ name: test
+ description: test
+ alias: TEST
+ location: WA1
+ port: 443
+ nodes:
+ - { 'ipAddress': '10.11.22.123', 'privatePort': 80 }
+ state: port_absent
+
+- name: Delete Loadbalancer
+ hosts: localhost
+ connection: local
+ tasks:
+ - name: Actually Delete things
+ clc_loadbalancer:
+ name: test
+ description: test
+ alias: TEST
+ location: WA1
+ port: 443
+ nodes:
+ - { 'ipAddress': '10.11.22.123', 'privatePort': 80 }
+ state: absent
+'''
+
+RETURN = '''
+loadbalancer:
+ description: The load balancer result object from CLC
+ returned: success
+ type: dict
+ sample:
+ {
+ "description":"test-lb",
+ "id":"ab5b18cb81e94ab9925b61d1ca043fb5",
+ "ipAddress":"66.150.174.197",
+ "links":[
+ {
+ "href":"/v2/sharedLoadBalancers/wfad/wa1/ab5b18cb81e94ab9925b61d1ca043fb5",
+ "rel":"self",
+ "verbs":[
+ "GET",
+ "PUT",
+ "DELETE"
+ ]
+ },
+ {
+ "href":"/v2/sharedLoadBalancers/wfad/wa1/ab5b18cb81e94ab9925b61d1ca043fb5/pools",
+ "rel":"pools",
+ "verbs":[
+ "GET",
+ "POST"
+ ]
+ }
+ ],
+ "name":"test-lb",
+ "pools":[
+
+ ],
+ "status":"enabled"
+ }
+'''
+
+__version__ = '${version}'
+
+import os
+import json
+from time import sleep
+from distutils.version import LooseVersion
+
+try:
+ import requests
+except ImportError:
+ REQUESTS_FOUND = False
+else:
+ REQUESTS_FOUND = True
+
+#
+# Requires the clc-python-sdk.
+# sudo pip install clc-sdk
+#
+try:
+ import clc as clc_sdk
+ from clc import APIFailedResponse
+except ImportError:
+ CLC_FOUND = False
+ clc_sdk = None
+else:
+ CLC_FOUND = True
+
+
+class ClcLoadBalancer:
+
+ clc = None
+
+ def __init__(self, module):
+ """
+ Construct module
+ """
+ self.clc = clc_sdk
+ self.module = module
+ self.lb_dict = {}
+
+ if not CLC_FOUND:
+ self.module.fail_json(
+ msg='clc-python-sdk required for this module')
+ if not REQUESTS_FOUND:
+ self.module.fail_json(
+ msg='requests library is required for this module')
+ if requests.__version__ and LooseVersion(
+ requests.__version__) < LooseVersion('2.5.0'):
+ self.module.fail_json(
+ msg='requests library version should be >= 2.5.0')
+
+ self._set_user_agent(self.clc)
+
+ def process_request(self):
+ """
+ Execute the main code path, and handle the request
+ :return: none
+ """
+ changed = False
+ result_lb = None
+ loadbalancer_name = self.module.params.get('name')
+ loadbalancer_alias = self.module.params.get('alias')
+ loadbalancer_location = self.module.params.get('location')
+ loadbalancer_description = self.module.params.get('description')
+ loadbalancer_port = self.module.params.get('port')
+ loadbalancer_method = self.module.params.get('method')
+ loadbalancer_persistence = self.module.params.get('persistence')
+ loadbalancer_nodes = self.module.params.get('nodes')
+ loadbalancer_status = self.module.params.get('status')
+ state = self.module.params.get('state')
+
+ if loadbalancer_description is None:
+ loadbalancer_description = loadbalancer_name
+
+ self._set_clc_credentials_from_env()
+
+ self.lb_dict = self._get_loadbalancer_list(
+ alias=loadbalancer_alias,
+ location=loadbalancer_location)
+
+ if state == 'present':
+ changed, result_lb, lb_id = self.ensure_loadbalancer_present(
+ name=loadbalancer_name,
+ alias=loadbalancer_alias,
+ location=loadbalancer_location,
+ description=loadbalancer_description,
+ status=loadbalancer_status)
+ if loadbalancer_port:
+ changed, result_pool, pool_id = self.ensure_loadbalancerpool_present(
+ lb_id=lb_id,
+ alias=loadbalancer_alias,
+ location=loadbalancer_location,
+ method=loadbalancer_method,
+ persistence=loadbalancer_persistence,
+ port=loadbalancer_port)
+
+ if loadbalancer_nodes:
+ changed, result_nodes = self.ensure_lbpool_nodes_set(
+ alias=loadbalancer_alias,
+ location=loadbalancer_location,
+ name=loadbalancer_name,
+ port=loadbalancer_port,
+ nodes=loadbalancer_nodes)
+ elif state == 'absent':
+ changed, result_lb = self.ensure_loadbalancer_absent(
+ name=loadbalancer_name,
+ alias=loadbalancer_alias,
+ location=loadbalancer_location)
+
+ elif state == 'port_absent':
+ changed, result_lb = self.ensure_loadbalancerpool_absent(
+ alias=loadbalancer_alias,
+ location=loadbalancer_location,
+ name=loadbalancer_name,
+ port=loadbalancer_port)
+
+ elif state == 'nodes_present':
+ changed, result_lb = self.ensure_lbpool_nodes_present(
+ alias=loadbalancer_alias,
+ location=loadbalancer_location,
+ name=loadbalancer_name,
+ port=loadbalancer_port,
+ nodes=loadbalancer_nodes)
+
+ elif state == 'nodes_absent':
+ changed, result_lb = self.ensure_lbpool_nodes_absent(
+ alias=loadbalancer_alias,
+ location=loadbalancer_location,
+ name=loadbalancer_name,
+ port=loadbalancer_port,
+ nodes=loadbalancer_nodes)
+
+ self.module.exit_json(changed=changed, loadbalancer=result_lb)
+
+ def ensure_loadbalancer_present(
+ self, name, alias, location, description, status):
+ """
+ Checks to see if a load balancer exists and creates one if it does not.
+ :param name: Name of loadbalancer
+ :param alias: Alias of account
+ :param location: Datacenter
+ :param description: Description of loadbalancer
+ :param status: Enabled / Disabled
+ :return: (changed, result, lb_id)
+ changed: Boolean whether a change was made
+ result: The result object from the CLC load balancer request
+ lb_id: The load balancer id
+ """
+ changed = False
+ result = name
+ lb_id = self._loadbalancer_exists(name=name)
+ if not lb_id:
+ if not self.module.check_mode:
+ result = self.create_loadbalancer(name=name,
+ alias=alias,
+ location=location,
+ description=description,
+ status=status)
+ lb_id = result.get('id')
+ changed = True
+
+ return changed, result, lb_id
+
+ def ensure_loadbalancerpool_present(
+ self, lb_id, alias, location, method, persistence, port):
+ """
+ Checks to see if a load balancer pool exists and creates one if it does not.
+ :param lb_id: The loadbalancer id
+ :param alias: The account alias
+ :param location: the datacenter the load balancer resides in
+ :param method: the load balancing method
+ :param persistence: the load balancing persistence type
+ :param port: the port that the load balancer will listen on
+ :return: (changed, group, pool_id) -
+ changed: Boolean whether a change was made
+ result: The result from the CLC API call
+ pool_id: The string id of the load balancer pool
+ """
+ changed = False
+ result = port
+ if not lb_id:
+ return changed, None, None
+ pool_id = self._loadbalancerpool_exists(
+ alias=alias,
+ location=location,
+ port=port,
+ lb_id=lb_id)
+ if not pool_id:
+ if not self.module.check_mode:
+ result = self.create_loadbalancerpool(
+ alias=alias,
+ location=location,
+ lb_id=lb_id,
+ method=method,
+ persistence=persistence,
+ port=port)
+ pool_id = result.get('id')
+ changed = True
+
+ return changed, result, pool_id
+
+ def ensure_loadbalancer_absent(self, name, alias, location):
+ """
+ Checks to see if a load balancer exists and deletes it if it does
+ :param name: Name of the load balancer
+ :param alias: Alias of account
+ :param location: Datacenter
+ :return: (changed, result)
+ changed: Boolean whether a change was made
+ result: The result from the CLC API Call
+ """
+ changed = False
+ result = name
+ lb_exists = self._loadbalancer_exists(name=name)
+ if lb_exists:
+ if not self.module.check_mode:
+ result = self.delete_loadbalancer(alias=alias,
+ location=location,
+ name=name)
+ changed = True
+ return changed, result
+
+ def ensure_loadbalancerpool_absent(self, alias, location, name, port):
+ """
+ Checks to see if a load balancer pool exists and deletes it if it does
+ :param alias: The account alias
+ :param location: the datacenter the load balancer resides in
+ :param name: the name of the load balancer
+ :param port: the port that the load balancer listens on
+ :return: (changed, result) -
+ changed: Boolean whether a change was made
+ result: The result from the CLC API call
+ """
+ changed = False
+ result = None
+ lb_exists = self._loadbalancer_exists(name=name)
+ if lb_exists:
+ lb_id = self._get_loadbalancer_id(name=name)
+ pool_id = self._loadbalancerpool_exists(
+ alias=alias,
+ location=location,
+ port=port,
+ lb_id=lb_id)
+ if pool_id:
+ changed = True
+ if not self.module.check_mode:
+ result = self.delete_loadbalancerpool(
+ alias=alias,
+ location=location,
+ lb_id=lb_id,
+ pool_id=pool_id)
+ else:
+ result = "Pool doesn't exist"
+ else:
+ result = "LB Doesn't Exist"
+ return changed, result
+
+ def ensure_lbpool_nodes_set(self, alias, location, name, port, nodes):
+ """
+ Checks to see if the provided list of nodes exists for the pool
+ and updates the pool when any node in the list is missing
+ :param alias: The account alias
+ :param location: the datacenter the load balancer resides in
+ :param name: the name of the load balancer
+ :param port: the port that the load balancer will listen on
+ :param nodes: The list of nodes to be updated to the pool
+ :return: (changed, result) -
+ changed: Boolean whether a change was made
+ result: The result from the CLC API call
+ """
+ result = {}
+ changed = False
+ lb_exists = self._loadbalancer_exists(name=name)
+ if lb_exists:
+ lb_id = self._get_loadbalancer_id(name=name)
+ pool_id = self._loadbalancerpool_exists(
+ alias=alias,
+ location=location,
+ port=port,
+ lb_id=lb_id)
+ if pool_id:
+ nodes_exist = self._loadbalancerpool_nodes_exists(alias=alias,
+ location=location,
+ lb_id=lb_id,
+ pool_id=pool_id,
+ nodes_to_check=nodes)
+ if not nodes_exist:
+ changed = True
+ result = self.set_loadbalancernodes(alias=alias,
+ location=location,
+ lb_id=lb_id,
+ pool_id=pool_id,
+ nodes=nodes)
+ else:
+ result = "Pool doesn't exist"
+ else:
+ result = "Load balancer doesn't Exist"
+ return changed, result
+
+ def ensure_lbpool_nodes_present(self, alias, location, name, port, nodes):
+ """
+ Checks to see if the provided list of nodes exist for the pool and add the missing nodes to the pool
+ :param alias: The account alias
+ :param location: the datacenter the load balancer resides in
+ :param name: the name of the load balancer
+ :param port: the port that the load balancer will listen on
+ :param nodes: the list of nodes to be added
+ :return: (changed, result) -
+ changed: Boolean whether a change was made
+ result: The result from the CLC API call
+ """
+ changed = False
+ lb_exists = self._loadbalancer_exists(name=name)
+ if lb_exists:
+ lb_id = self._get_loadbalancer_id(name=name)
+ pool_id = self._loadbalancerpool_exists(
+ alias=alias,
+ location=location,
+ port=port,
+ lb_id=lb_id)
+ if pool_id:
+ changed, result = self.add_lbpool_nodes(alias=alias,
+ location=location,
+ lb_id=lb_id,
+ pool_id=pool_id,
+ nodes_to_add=nodes)
+ else:
+ result = "Pool doesn't exist"
+ else:
+ result = "Load balancer doesn't Exist"
+ return changed, result
+
+ def ensure_lbpool_nodes_absent(self, alias, location, name, port, nodes):
+ """
+ Checks to see if the provided list of nodes exists for the pool and removes any that are found
+ :param alias: The account alias
+ :param location: the datacenter the load balancer resides in
+ :param name: the name of the load balancer
+ :param port: the port that the load balancer will listen on
+ :param nodes: the list of nodes to be removed
+ :return: (changed, result) -
+ changed: Boolean whether a change was made
+ result: The result from the CLC API call
+ """
+ changed = False
+ lb_exists = self._loadbalancer_exists(name=name)
+ if lb_exists:
+ lb_id = self._get_loadbalancer_id(name=name)
+ pool_id = self._loadbalancerpool_exists(
+ alias=alias,
+ location=location,
+ port=port,
+ lb_id=lb_id)
+ if pool_id:
+ changed, result = self.remove_lbpool_nodes(alias=alias,
+ location=location,
+ lb_id=lb_id,
+ pool_id=pool_id,
+ nodes_to_remove=nodes)
+ else:
+ result = "Pool doesn't exist"
+ else:
+ result = "Load balancer doesn't Exist"
+ return changed, result
+
+ def create_loadbalancer(self, name, alias, location, description, status):
+ """
+ Create a loadbalancer w/ params
+ :param name: Name of loadbalancer
+ :param alias: Alias of account
+ :param location: Datacenter
+ :param description: Description for loadbalancer to be created
+ :param status: Enabled / Disabled
+ :return: result: The result from the CLC API call
+ """
+ result = None
+ try:
+ result = self.clc.v2.API.Call('POST',
+ '/v2/sharedLoadBalancers/%s/%s' % (alias,
+ location),
+ json.dumps({"name": name,
+ "description": description,
+ "status": status}))
+ sleep(1)
+ except APIFailedResponse as e:
+ self.module.fail_json(
+ msg='Unable to create load balancer "{0}". {1}'.format(
+ name, str(e.response_text)))
+ return result
+
+ def create_loadbalancerpool(
+ self, alias, location, lb_id, method, persistence, port):
+ """
+ Creates a pool on the provided load balancer
+ :param alias: the account alias
+ :param location: the datacenter the load balancer resides in
+ :param lb_id: the id string of the load balancer
+ :param method: the load balancing method
+ :param persistence: the load balancing persistence type
+ :param port: the port that the load balancer will listen on
+ :return: result: The result from the create API call
+ """
+ result = None
+ try:
+ result = self.clc.v2.API.Call(
+ 'POST', '/v2/sharedLoadBalancers/%s/%s/%s/pools' %
+ (alias, location, lb_id), json.dumps(
+ {
+ "port": port, "method": method, "persistence": persistence
+ }))
+ except APIFailedResponse as e:
+ self.module.fail_json(
+ msg='Unable to create pool for load balancer id "{0}". {1}'.format(
+ lb_id, str(e.response_text)))
+ return result
+
+ def delete_loadbalancer(self, alias, location, name):
+ """
+ Delete CLC loadbalancer
+ :param alias: Alias for account
+ :param location: Datacenter
+ :param name: Name of the loadbalancer to delete
+ :return: result: The result from the CLC API call
+ """
+ result = None
+ lb_id = self._get_loadbalancer_id(name=name)
+ try:
+ result = self.clc.v2.API.Call(
+ 'DELETE', '/v2/sharedLoadBalancers/%s/%s/%s' %
+ (alias, location, lb_id))
+ except APIFailedResponse as e:
+ self.module.fail_json(
+ msg='Unable to delete load balancer "{0}". {1}'.format(
+ name, str(e.response_text)))
+ return result
+
+ def delete_loadbalancerpool(self, alias, location, lb_id, pool_id):
+ """
+ Delete the pool on the provided load balancer
+ :param alias: The account alias
+ :param location: the datacenter the load balancer resides in
+ :param lb_id: the id string of the load balancer
+ :param pool_id: the id string of the load balancer pool
+ :return: result: The result from the delete API call
+ """
+ result = None
+ try:
+ result = self.clc.v2.API.Call(
+ 'DELETE', '/v2/sharedLoadBalancers/%s/%s/%s/pools/%s' %
+ (alias, location, lb_id, pool_id))
+ except APIFailedResponse as e:
+ self.module.fail_json(
+ msg='Unable to delete pool for load balancer id "{0}". {1}'.format(
+ lb_id, str(e.response_text)))
+ return result
+
+ def _get_loadbalancer_id(self, name):
+ """
+ Retrieves unique ID of loadbalancer
+ :param name: Name of loadbalancer
+ :return: Unique ID of the loadbalancer
+ """
+ lb_id = None
+ for lb in self.lb_dict:
+ if lb.get('name') == name:
+ lb_id = lb.get('id')
+ return lb_id
+
+ def _get_loadbalancer_list(self, alias, location):
+ """
+ Retrieve a list of loadbalancers
+ :param alias: Alias for account
+ :param location: Datacenter
+ :return: JSON data for all loadbalancers at datacenter
+ """
+ result = None
+ try:
+ result = self.clc.v2.API.Call(
+ 'GET', '/v2/sharedLoadBalancers/%s/%s' % (alias, location))
+ except APIFailedResponse as e:
+ self.module.fail_json(
+ msg='Unable to fetch load balancers for account: {0}. {1}'.format(
+ alias, str(e.response_text)))
+ return result
+
+ def _loadbalancer_exists(self, name):
+ """
+ Verify a loadbalancer exists
+ :param name: Name of loadbalancer
+ :return: False or the ID of the existing loadbalancer
+ """
+ result = False
+
+ for lb in self.lb_dict:
+ if lb.get('name') == name:
+ result = lb.get('id')
+ return result
+
+ def _loadbalancerpool_exists(self, alias, location, port, lb_id):
+ """
+ Checks to see if a pool exists on the specified port on the provided load balancer
+ :param alias: the account alias
+ :param location: the datacenter the load balancer resides in
+ :param port: the port to check and see if it exists
+ :param lb_id: the id string of the provided load balancer
+ :return: result: The id string of the pool or False
+ """
+ result = False
+ try:
+ pool_list = self.clc.v2.API.Call(
+ 'GET', '/v2/sharedLoadBalancers/%s/%s/%s/pools' %
+ (alias, location, lb_id))
+ except APIFailedResponse as e:
+ return self.module.fail_json(
+ msg='Unable to fetch the load balancer pools for load balancer id: {0}. {1}'.format(
+ lb_id, str(e.response_text)))
+ for pool in pool_list:
+ if int(pool.get('port')) == int(port):
+ result = pool.get('id')
+ return result
+
+ def _loadbalancerpool_nodes_exists(
+ self, alias, location, lb_id, pool_id, nodes_to_check):
+ """
+ Checks to see if a set of nodes exists on the specified port on the provided load balancer
+ :param alias: the account alias
+ :param location: the datacenter the load balancer resides in
+ :param lb_id: the id string of the provided load balancer
+ :param pool_id: the id string of the load balancer pool
+ :param nodes_to_check: the list of nodes to check for
+ :return: result: True / False indicating if the given nodes exist
+ """
+ result = True
+ nodes = self._get_lbpool_nodes(alias, location, lb_id, pool_id)
+ for node in nodes_to_check:
+ if not node.get('status'):
+ node['status'] = 'enabled'
+ if node not in nodes:
+ # A single missing node means the requested set is not present.
+ result = False
+ break
+ return result
+
+ def set_loadbalancernodes(self, alias, location, lb_id, pool_id, nodes):
+ """
+ Updates nodes to the provided pool
+ :param alias: the account alias
+ :param location: the datacenter the load balancer resides in
+ :param lb_id: the id string of the load balancer
+ :param pool_id: the id string of the pool
+ :param nodes: a list of dictionaries containing the nodes to set
+ :return: result: The result from the CLC API call
+ """
+ result = None
+ if not lb_id:
+ return result
+ if not self.module.check_mode:
+ try:
+ result = self.clc.v2.API.Call('PUT',
+ '/v2/sharedLoadBalancers/%s/%s/%s/pools/%s/nodes'
+ % (alias, location, lb_id, pool_id), json.dumps(nodes))
+ except APIFailedResponse as e:
+ self.module.fail_json(
+ msg='Unable to set nodes for the load balancer pool id "{0}". {1}'.format(
+ pool_id, str(e.response_text)))
+ return result
+
+ def add_lbpool_nodes(self, alias, location, lb_id, pool_id, nodes_to_add):
+ """
+ Add nodes to the provided pool
+ :param alias: the account alias
+ :param location: the datacenter the load balancer resides in
+ :param lb_id: the id string of the load balancer
+ :param pool_id: the id string of the pool
+ :param nodes_to_add: a list of dictionaries containing the nodes to add
+ :return: (changed, result) -
+ changed: Boolean whether a change was made
+ result: The result from the CLC API call
+ """
+ changed = False
+ result = {}
+ nodes = self._get_lbpool_nodes(alias, location, lb_id, pool_id)
+ for node in nodes_to_add:
+ if not node.get('status'):
+ node['status'] = 'enabled'
+ if node not in nodes:
+ changed = True
+ nodes.append(node)
+ if changed and not self.module.check_mode:
+ result = self.set_loadbalancernodes(
+ alias,
+ location,
+ lb_id,
+ pool_id,
+ nodes)
+ return changed, result
+
+ def remove_lbpool_nodes(
+ self, alias, location, lb_id, pool_id, nodes_to_remove):
+ """
+ Removes nodes from the provided pool
+ :param alias: the account alias
+ :param location: the datacenter the load balancer resides in
+ :param lb_id: the id string of the load balancer
+ :param pool_id: the id string of the pool
+ :param nodes_to_remove: a list of dictionaries containing the nodes to remove
+ :return: (changed, result) -
+ changed: Boolean whether a change was made
+ result: The result from the CLC API call
+ """
+ changed = False
+ result = {}
+ nodes = self._get_lbpool_nodes(alias, location, lb_id, pool_id)
+ for node in nodes_to_remove:
+ if not node.get('status'):
+ node['status'] = 'enabled'
+ if node in nodes:
+ changed = True
+ nodes.remove(node)
+ if changed and not self.module.check_mode:
+ result = self.set_loadbalancernodes(
+ alias,
+ location,
+ lb_id,
+ pool_id,
+ nodes)
+ return changed, result
+
+ def _get_lbpool_nodes(self, alias, location, lb_id, pool_id):
+ """
+ Return the list of nodes available to the provided load balancer pool
+ :param alias: the account alias
+ :param location: the datacenter the load balancer resides in
+ :param lb_id: the id string of the load balancer
+ :param pool_id: the id string of the pool
+ :return: result: The list of nodes
+ """
+ result = None
+ try:
+ result = self.clc.v2.API.Call('GET',
+ '/v2/sharedLoadBalancers/%s/%s/%s/pools/%s/nodes'
+ % (alias, location, lb_id, pool_id))
+ except APIFailedResponse as e:
+ self.module.fail_json(
+ msg='Unable to fetch list of available nodes for load balancer pool id: {0}. {1}'.format(
+ pool_id, str(e.response_text)))
+ return result
+
+ @staticmethod
+ def define_argument_spec():
+ """
+ Define the argument spec for the ansible module
+ :return: argument spec dictionary
+ """
+ argument_spec = dict(
+ name=dict(required=True),
+ description=dict(default=None),
+ location=dict(required=True),
+ alias=dict(required=True),
+ port=dict(choices=[80, 443]),
+ method=dict(choices=['leastConnection', 'roundRobin']),
+ persistence=dict(choices=['standard', 'sticky']),
+ nodes=dict(type='list', default=[]),
+ status=dict(default='enabled', choices=['enabled', 'disabled']),
+ state=dict(
+ default='present',
+ choices=[
+ 'present',
+ 'absent',
+ 'port_absent',
+ 'nodes_present',
+ 'nodes_absent'])
+ )
+ return argument_spec
+
+ def _set_clc_credentials_from_env(self):
+ """
+ Set the CLC Credentials on the sdk by reading environment variables
+ :return: none
+ """
+ env = os.environ
+ v2_api_token = env.get('CLC_V2_API_TOKEN', False)
+ v2_api_username = env.get('CLC_V2_API_USERNAME', False)
+ v2_api_passwd = env.get('CLC_V2_API_PASSWD', False)
+ clc_alias = env.get('CLC_ACCT_ALIAS', False)
+ api_url = env.get('CLC_V2_API_URL', False)
+
+ if api_url:
+ self.clc.defaults.ENDPOINT_URL_V2 = api_url
+
+ if v2_api_token and clc_alias:
+ self.clc._LOGIN_TOKEN_V2 = v2_api_token
+ self.clc._V2_ENABLED = True
+ self.clc.ALIAS = clc_alias
+ elif v2_api_username and v2_api_passwd:
+ self.clc.v2.SetCredentials(
+ api_username=v2_api_username,
+ api_passwd=v2_api_passwd)
+ else:
+ return self.module.fail_json(
+ msg="You must set the CLC_V2_API_USERNAME and CLC_V2_API_PASSWD "
+ "environment variables")
+
+ @staticmethod
+ def _set_user_agent(clc):
+ if hasattr(clc, 'SetRequestsSession'):
+ agent_string = "ClcAnsibleModule/" + __version__
+ ses = requests.Session()
+ ses.headers.update({"Api-Client": agent_string})
+ ses.headers['User-Agent'] += " " + agent_string
+ clc.SetRequestsSession(ses)
+
+
+def main():
+ """
+ The main function. Instantiates the module and calls process_request.
+ :return: none
+ """
+ module = AnsibleModule(argument_spec=ClcLoadBalancer.define_argument_spec(),
+ supports_check_mode=True)
+ clc_loadbalancer = ClcLoadBalancer(module)
+ clc_loadbalancer.process_request()
+
+from ansible.module_utils.basic import * # pylint: disable=W0614
+if __name__ == '__main__':
+ main()
diff --git a/lib/ansible/modules/extras/cloud/centurylink/clc_modify_server.py b/lib/ansible/modules/extras/cloud/centurylink/clc_modify_server.py
new file mode 100644
index 0000000000..a676248ffd
--- /dev/null
+++ b/lib/ansible/modules/extras/cloud/centurylink/clc_modify_server.py
@@ -0,0 +1,977 @@
+#!/usr/bin/python
+
+#
+# Copyright (c) 2015 CenturyLink
+#
+# This file is part of Ansible.
+#
+# Ansible is free software: you can redistribute it and/or modify
+# it under the terms of the GNU General Public License as published by
+# the Free Software Foundation, either version 3 of the License, or
+# (at your option) any later version.
+#
+# Ansible is distributed in the hope that it will be useful,
+# but WITHOUT ANY WARRANTY; without even the implied warranty of
+# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+# GNU General Public License for more details.
+#
+# You should have received a copy of the GNU General Public License
+# along with Ansible. If not, see <http://www.gnu.org/licenses/>
+#
+
+DOCUMENTATION = '''
+module: clc_modify_server
+short_description: modify servers in CenturyLink Cloud.
+description:
+ - An Ansible module to modify servers in CenturyLink Cloud.
+version_added: "2.0"
+options:
+ server_ids:
+ description:
+ - A list of server Ids to modify.
+ required: True
+ cpu:
+ description:
+ - How many CPUs to update on the server
+ required: False
+ default: None
+ memory:
+ description:
+ - Memory (in GB) to set to the server.
+ required: False
+ default: None
+ anti_affinity_policy_id:
+ description:
+ - The anti affinity policy id to be set for a hyperscale server.
+ This is mutually exclusive with 'anti_affinity_policy_name'
+ required: False
+ default: None
+ anti_affinity_policy_name:
+ description:
+ - The anti affinity policy name to be set for a hyperscale server.
+ This is mutually exclusive with 'anti_affinity_policy_id'
+ required: False
+ default: None
+ alert_policy_id:
+ description:
+ - The alert policy id to be associated to the server.
+ This is mutually exclusive with 'alert_policy_name'
+ required: False
+ default: None
+ alert_policy_name:
+ description:
+ - The alert policy name to be associated to the server.
+ This is mutually exclusive with 'alert_policy_id'
+ required: False
+ default: None
+ state:
+ description:
+ - The state to ensure that the provided resources are in.
+ default: 'present'
+ required: False
+ choices: ['present', 'absent']
+ wait:
+ description:
+ - Whether to wait for the provisioning tasks to finish before returning.
+ default: True
+ required: False
+ choices: [ True, False]
+requirements:
+ - python = 2.7
+ - requests >= 2.5.0
+ - clc-sdk
+author: "CLC Runner (@clc-runner)"
+notes:
+ - To use this module, it is required to set the below environment variables, which enable access to the
+ Centurylink Cloud
+ - CLC_V2_API_USERNAME, the account login id for the centurylink cloud
+ - CLC_V2_API_PASSWD, the account password for the centurylink cloud
+ - Alternatively, the module accepts the API token and account alias. The API token can be generated using the
+ CLC account login and password via the HTTP api call @ https://api.ctl.io/v2/authentication/login
+ - CLC_V2_API_TOKEN, the API token generated from https://api.ctl.io/v2/authentication/login
+ - CLC_ACCT_ALIAS, the account alias associated with the centurylink cloud
+ - Users can set CLC_V2_API_URL to specify an endpoint for pointing to a different CLC environment.
+'''
+
+EXAMPLES = '''
+# Note - You must set the CLC_V2_API_USERNAME and CLC_V2_API_PASSWD environment variables before running these examples
+
+- name: set the cpu count to 4 on a server
+ clc_modify_server:
+ server_ids:
+ - UC1TESTSVR01
+ - UC1TESTSVR02
+ cpu: 4
+ state: present
+
+- name: set the memory to 8GB on a server
+ clc_modify_server:
+ server_ids:
+ - UC1TESTSVR01
+ - UC1TESTSVR02
+ memory: 8
+ state: present
+
+- name: set the anti affinity policy on a server
+ clc_modify_server:
+ server_ids:
+ - UC1TESTSVR01
+ - UC1TESTSVR02
+ anti_affinity_policy_name: 'aa_policy'
+ state: present
+
+- name: remove the anti affinity policy on a server
+ clc_modify_server:
+ server_ids:
+ - UC1TESTSVR01
+ - UC1TESTSVR02
+ anti_affinity_policy_name: 'aa_policy'
+ state: absent
+
+- name: add the alert policy on a server
+ clc_modify_server:
+ server_ids:
+ - UC1TESTSVR01
+ - UC1TESTSVR02
+ alert_policy_name: 'alert_policy'
+ state: present
+
+- name: remove the alert policy on a server
+ clc_modify_server:
+ server_ids:
+ - UC1TESTSVR01
+ - UC1TESTSVR02
+ alert_policy_name: 'alert_policy'
+ state: absent
+
+- name: set the memory to 16GB and cpu to 8 cores on a list of servers
+ clc_modify_server:
+ server_ids:
+ - UC1TESTSVR01
+ - UC1TESTSVR02
+ cpu: 8
+ memory: 16
+ state: present
+'''
+
+RETURN = '''
+server_ids:
+ description: The list of server ids that are changed
+ returned: success
+ type: list
+ sample:
+ [
+ "UC1TEST-SVR01",
+ "UC1TEST-SVR02"
+ ]
+servers:
+ description: The list of server objects that are changed
+ returned: success
+ type: list
+ sample:
+ [
+ {
+ "changeInfo":{
+ "createdBy":"service.wfad",
+ "createdDate":1438196820,
+ "modifiedBy":"service.wfad",
+ "modifiedDate":1438196820
+ },
+ "description":"test-server",
+ "details":{
+ "alertPolicies":[
+
+ ],
+ "cpu":1,
+ "customFields":[
+
+ ],
+ "diskCount":3,
+ "disks":[
+ {
+ "id":"0:0",
+ "partitionPaths":[
+
+ ],
+ "sizeGB":1
+ },
+ {
+ "id":"0:1",
+ "partitionPaths":[
+
+ ],
+ "sizeGB":2
+ },
+ {
+ "id":"0:2",
+ "partitionPaths":[
+
+ ],
+ "sizeGB":14
+ }
+ ],
+ "hostName":"",
+ "inMaintenanceMode":false,
+ "ipAddresses":[
+ {
+ "internal":"10.1.1.1"
+ }
+ ],
+ "memoryGB":1,
+ "memoryMB":1024,
+ "partitions":[
+
+ ],
+ "powerState":"started",
+ "snapshots":[
+
+ ],
+ "storageGB":17
+ },
+ "groupId":"086ac1dfe0b6411989e8d1b77c4065f0",
+ "id":"test-server",
+ "ipaddress":"10.120.45.23",
+ "isTemplate":false,
+ "links":[
+ {
+ "href":"/v2/servers/wfad/test-server",
+ "id":"test-server",
+ "rel":"self",
+ "verbs":[
+ "GET",
+ "PATCH",
+ "DELETE"
+ ]
+ },
+ {
+ "href":"/v2/groups/wfad/086ac1dfe0b6411989e8d1b77c4065f0",
+ "id":"086ac1dfe0b6411989e8d1b77c4065f0",
+ "rel":"group"
+ },
+ {
+ "href":"/v2/accounts/wfad",
+ "id":"wfad",
+ "rel":"account"
+ },
+ {
+ "href":"/v2/billing/wfad/serverPricing/test-server",
+ "rel":"billing"
+ },
+ {
+ "href":"/v2/servers/wfad/test-server/publicIPAddresses",
+ "rel":"publicIPAddresses",
+ "verbs":[
+ "POST"
+ ]
+ },
+ {
+ "href":"/v2/servers/wfad/test-server/credentials",
+ "rel":"credentials"
+ },
+ {
+ "href":"/v2/servers/wfad/test-server/statistics",
+ "rel":"statistics"
+ },
+ {
+ "href":"/v2/servers/wfad/510ec21ae82d4dc89d28479753bf736a/upcomingScheduledActivities",
+ "rel":"upcomingScheduledActivities"
+ },
+ {
+ "href":"/v2/servers/wfad/510ec21ae82d4dc89d28479753bf736a/scheduledActivities",
+ "rel":"scheduledActivities",
+ "verbs":[
+ "GET",
+ "POST"
+ ]
+ },
+ {
+ "href":"/v2/servers/wfad/test-server/capabilities",
+ "rel":"capabilities"
+ },
+ {
+ "href":"/v2/servers/wfad/test-server/alertPolicies",
+ "rel":"alertPolicyMappings",
+ "verbs":[
+ "POST"
+ ]
+ },
+ {
+ "href":"/v2/servers/wfad/test-server/antiAffinityPolicy",
+ "rel":"antiAffinityPolicyMapping",
+ "verbs":[
+ "PUT",
+ "DELETE"
+ ]
+ },
+ {
+ "href":"/v2/servers/wfad/test-server/cpuAutoscalePolicy",
+ "rel":"cpuAutoscalePolicyMapping",
+ "verbs":[
+ "PUT",
+ "DELETE"
+ ]
+ }
+ ],
+ "locationId":"UC1",
+ "name":"test-server",
+ "os":"ubuntu14_64Bit",
+ "osType":"Ubuntu 14 64-bit",
+ "status":"active",
+ "storageType":"standard",
+ "type":"standard"
+ }
+ ]
+'''
+
+__version__ = '${version}'
+
+from distutils.version import LooseVersion
+
+try:
+ import requests
+except ImportError:
+ REQUESTS_FOUND = False
+else:
+ REQUESTS_FOUND = True
+
+#
+# Requires the clc-python-sdk.
+# sudo pip install clc-sdk
+#
+try:
+ import clc as clc_sdk
+ from clc import CLCException
+ from clc import APIFailedResponse
+except ImportError:
+ CLC_FOUND = False
+ clc_sdk = None
+else:
+ CLC_FOUND = True
+
+
+class ClcModifyServer:
+ clc = clc_sdk
+
+ def __init__(self, module):
+ """
+ Construct module
+ """
+ self.clc = clc_sdk
+ self.module = module
+
+ if not CLC_FOUND:
+ self.module.fail_json(
+ msg='clc-python-sdk required for this module')
+ if not REQUESTS_FOUND:
+ self.module.fail_json(
+ msg='requests library is required for this module')
+ if requests.__version__ and LooseVersion(
+ requests.__version__) < LooseVersion('2.5.0'):
+ self.module.fail_json(
+ msg='requests library version should be >= 2.5.0')
+
+ self._set_user_agent(self.clc)
+
+ def process_request(self):
+ """
+ Process the request - Main Code Path
+ :return: Returns with either an exit_json or fail_json
+ """
+ self._set_clc_credentials_from_env()
+
+ p = self.module.params
+ cpu = p.get('cpu')
+ memory = p.get('memory')
+ state = p.get('state')
+ if state == 'absent' and (cpu or memory):
+ return self.module.fail_json(
+ msg='\'absent\' state is not supported for \'cpu\' and \'memory\' arguments')
+
+ server_ids = p['server_ids']
+ if not isinstance(server_ids, list):
+ return self.module.fail_json(
+ msg='server_ids needs to be a list of instances to modify: %s' %
+ server_ids)
+
+ (changed, server_dict_array, changed_server_ids) = self._modify_servers(
+ server_ids=server_ids)
+
+ self.module.exit_json(
+ changed=changed,
+ server_ids=changed_server_ids,
+ servers=server_dict_array)
+
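+ # The dict returned by _define_module_argument_spec below is splatted
+ # straight into AnsibleModule in main() via
+ #   AnsibleModule(supports_check_mode=True, **argument_dict)
+ # so the mutually_exclusive pairs are enforced by Ansible itself.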
+ @staticmethod
+ def _define_module_argument_spec():
+ """
+ Define the argument spec for the ansible module
+ :return: argument spec dictionary
+ """
+ argument_spec = dict(
+ server_ids=dict(type='list', required=True),
+ state=dict(default='present', choices=['present', 'absent']),
+ cpu=dict(),
+ memory=dict(),
+ anti_affinity_policy_id=dict(),
+ anti_affinity_policy_name=dict(),
+ alert_policy_id=dict(),
+ alert_policy_name=dict(),
+ wait=dict(type='bool', default=True)
+ )
+ mutually_exclusive = [
+ ['anti_affinity_policy_id', 'anti_affinity_policy_name'],
+ ['alert_policy_id', 'alert_policy_name']
+ ]
+ return {"argument_spec": argument_spec,
+ "mutually_exclusive": mutually_exclusive}
+
+ def _set_clc_credentials_from_env(self):
+ """
+ Set the CLC Credentials on the sdk by reading environment variables
+ :return: none
+ """
+ env = os.environ
+ v2_api_token = env.get('CLC_V2_API_TOKEN', False)
+ v2_api_username = env.get('CLC_V2_API_USERNAME', False)
+ v2_api_passwd = env.get('CLC_V2_API_PASSWD', False)
+ clc_alias = env.get('CLC_ACCT_ALIAS', False)
+ api_url = env.get('CLC_V2_API_URL', False)
+
+ if api_url:
+ self.clc.defaults.ENDPOINT_URL_V2 = api_url
+
+ if v2_api_token and clc_alias:
+ self.clc._LOGIN_TOKEN_V2 = v2_api_token
+ self.clc._V2_ENABLED = True
+ self.clc.ALIAS = clc_alias
+ elif v2_api_username and v2_api_passwd:
+ self.clc.v2.SetCredentials(
+ api_username=v2_api_username,
+ api_passwd=v2_api_passwd)
+ else:
+ return self.module.fail_json(
+ msg="You must set the CLC_V2_API_USERNAME and CLC_V2_API_PASSWD "
+ "environment variables")
+
+ def _get_servers_from_clc(self, server_list, message):
+ """
+ Internal function to fetch list of CLC server objects from a list of server ids
+ :param server_list: The list of server ids
+ :param message: the error message to throw in case of any error
+ :return the list of CLC server objects
+ """
+ try:
+ return self.clc.v2.Servers(server_list).servers
+ except CLCException as ex:
+ return self.module.fail_json(msg=message + ': %s' % ex.message)
+
+ def _modify_servers(self, server_ids):
+ """
+ modify the servers configuration on the provided list
+ :param server_ids: list of servers to modify
+ :return: a list of dictionaries with server information about the servers that were modified
+ """
+ p = self.module.params
+ state = p.get('state')
+ server_params = {
+ 'cpu': p.get('cpu'),
+ 'memory': p.get('memory'),
+ 'anti_affinity_policy_id': p.get('anti_affinity_policy_id'),
+ 'anti_affinity_policy_name': p.get('anti_affinity_policy_name'),
+ 'alert_policy_id': p.get('alert_policy_id'),
+ 'alert_policy_name': p.get('alert_policy_name'),
+ }
+ changed = False
+ server_changed = False
+ aa_changed = False
+ ap_changed = False
+ server_dict_array = []
+ result_server_ids = []
+ request_list = []
+ changed_servers = []
+
+ if not isinstance(server_ids, list) or len(server_ids) < 1:
+ return self.module.fail_json(
+ msg='server_ids should be a list of servers, aborting')
+
+ servers = self._get_servers_from_clc(
+ server_ids,
+ 'Failed to obtain server list from the CLC API')
+ for server in servers:
+ if state == 'present':
+ server_changed, server_result = self._ensure_server_config(
+ server, server_params)
+ if server_result:
+ request_list.append(server_result)
+ aa_changed = self._ensure_aa_policy_present(
+ server,
+ server_params)
+ ap_changed = self._ensure_alert_policy_present(
+ server,
+ server_params)
+ elif state == 'absent':
+ aa_changed = self._ensure_aa_policy_absent(
+ server,
+ server_params)
+ ap_changed = self._ensure_alert_policy_absent(
+ server,
+ server_params)
+ if server_changed or aa_changed or ap_changed:
+ changed_servers.append(server)
+ changed = True
+
+ self._wait_for_requests(self.module, request_list)
+ self._refresh_servers(self.module, changed_servers)
+
+ for server in changed_servers:
+ server_dict_array.append(server.data)
+ result_server_ids.append(server.id)
+
+ return changed, server_dict_array, result_server_ids
+
+ def _ensure_server_config(
+ self, server, server_params):
+ """
+ ensures the server is updated with the provided cpu and memory
+ :param server: the CLC server object
+ :param server_params: the dictionary of server parameters
+ :return: (changed, result) -
+ changed: Boolean whether a change was made
+ result: The result from the CLC API call
+ """
+ cpu = server_params.get('cpu')
+ memory = server_params.get('memory')
+ changed = False
+ result = None
+
+ if not cpu:
+ cpu = server.cpu
+ if not memory:
+ memory = server.memory
+ if memory != server.memory or cpu != server.cpu:
+ if not self.module.check_mode:
+ result = self._modify_clc_server(
+ self.clc,
+ self.module,
+ server.id,
+ cpu,
+ memory)
+ changed = True
+ return changed, result
+
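+ # The CLC v2 API applies config changes as a list of "set" operations in
+ # a single PATCH request; the body built below looks like (illustrative
+ # values):
+ #   [{"op": "set", "member": "memory", "value": 8},
+ #    {"op": "set", "member": "cpu", "value": 4}]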
+ @staticmethod
+ def _modify_clc_server(clc, module, server_id, cpu, memory):
+ """
+ Modify the memory or CPU of a clc server.
+ :param clc: the clc-sdk instance to use
+ :param module: the AnsibleModule object
+ :param server_id: id of the server to modify
+ :param cpu: the new cpu value
+ :param memory: the new memory value
+ :return: the result of CLC API call
+ """
+ result = None
+ acct_alias = clc.v2.Account.GetAlias()
+ try:
+ # Update the server configuration
+ job_obj = clc.v2.API.Call('PATCH',
+ 'servers/%s/%s' % (acct_alias,
+ server_id),
+ json.dumps([{"op": "set",
+ "member": "memory",
+ "value": memory},
+ {"op": "set",
+ "member": "cpu",
+ "value": cpu}]))
+ result = clc.v2.Requests(job_obj)
+ except APIFailedResponse as ex:
+ module.fail_json(
+ msg='Unable to update the server configuration for server : "{0}". {1}'.format(
+ server_id, str(ex.response_text)))
+ return result
+
+ @staticmethod
+ def _wait_for_requests(module, request_list):
+ """
+ Block until server provisioning requests are completed.
+ :param module: the AnsibleModule object
+ :param request_list: a list of clc-sdk.Request instances
+ :return: none
+ """
+ wait = module.params.get('wait')
+ if wait:
+ # Requests.WaitUntilComplete() returns the count of failed requests
+ failed_requests_count = sum(
+ [request.WaitUntilComplete() for request in request_list])
+
+ if failed_requests_count > 0:
+ module.fail_json(
+ msg='Unable to process modify server request')
+
+ @staticmethod
+ def _refresh_servers(module, servers):
+ """
+ Loop through a list of servers and refresh them.
+ :param module: the AnsibleModule object
+ :param servers: list of clc-sdk.Server instances to refresh
+ :return: none
+ """
+ for server in servers:
+ try:
+ server.Refresh()
+ except CLCException as ex:
+ module.fail_json(msg='Unable to refresh the server {0}. {1}'.format(
+ server.id, ex.message
+ ))
+
+ def _ensure_aa_policy_present(
+ self, server, server_params):
+ """
+ ensures the server is updated with the provided anti affinity policy
+ :param server: the CLC server object
+ :param server_params: the dictionary of server parameters
+ :return: changed - Boolean whether a change was made
+ """
+ changed = False
+ acct_alias = self.clc.v2.Account.GetAlias()
+
+ aa_policy_id = server_params.get('anti_affinity_policy_id')
+ aa_policy_name = server_params.get('anti_affinity_policy_name')
+ if not aa_policy_id and aa_policy_name:
+ aa_policy_id = self._get_aa_policy_id_by_name(
+ self.clc,
+ self.module,
+ acct_alias,
+ aa_policy_name)
+ current_aa_policy_id = self._get_aa_policy_id_of_server(
+ self.clc,
+ self.module,
+ acct_alias,
+ server.id)
+
+ if aa_policy_id and aa_policy_id != current_aa_policy_id:
+ self._modify_aa_policy(
+ self.clc,
+ self.module,
+ acct_alias,
+ server.id,
+ aa_policy_id)
+ changed = True
+ return changed
+
+ def _ensure_aa_policy_absent(
+ self, server, server_params):
+ """
+ ensures the provided anti affinity policy is removed from the server
+ :param server: the CLC server object
+ :param server_params: the dictionary of server parameters
+ :return: changed - Boolean whether a change was made
+ """
+ changed = False
+ acct_alias = self.clc.v2.Account.GetAlias()
+ aa_policy_id = server_params.get('anti_affinity_policy_id')
+ aa_policy_name = server_params.get('anti_affinity_policy_name')
+ if not aa_policy_id and aa_policy_name:
+ aa_policy_id = self._get_aa_policy_id_by_name(
+ self.clc,
+ self.module,
+ acct_alias,
+ aa_policy_name)
+ current_aa_policy_id = self._get_aa_policy_id_of_server(
+ self.clc,
+ self.module,
+ acct_alias,
+ server.id)
+
+ if aa_policy_id and aa_policy_id == current_aa_policy_id:
+ self._delete_aa_policy(
+ self.clc,
+ self.module,
+ acct_alias,
+ server.id)
+ changed = True
+ return changed
+
+ @staticmethod
+ def _modify_aa_policy(clc, module, acct_alias, server_id, aa_policy_id):
+ """
+ modifies the anti affinity policy of the CLC server
+ :param clc: the clc-sdk instance to use
+ :param module: the AnsibleModule object
+ :param acct_alias: the CLC account alias
+ :param server_id: the CLC server id
+ :param aa_policy_id: the anti affinity policy id
+ :return: result: The result from the CLC API call
+ """
+ result = None
+ if not module.check_mode:
+ try:
+ result = clc.v2.API.Call('PUT',
+ 'servers/%s/%s/antiAffinityPolicy' % (
+ acct_alias,
+ server_id),
+ json.dumps({"id": aa_policy_id}))
+ except APIFailedResponse as ex:
+ module.fail_json(
+ msg='Unable to modify anti affinity policy for server : "{0}". {1}'.format(
+ server_id, str(ex.response_text)))
+ return result
+
+ @staticmethod
+ def _delete_aa_policy(clc, module, acct_alias, server_id):
+ """
+ Delete the anti affinity policy of the CLC server
+ :param clc: the clc-sdk instance to use
+ :param module: the AnsibleModule object
+ :param acct_alias: the CLC account alias
+ :param server_id: the CLC server id
+ :return: result: The result from the CLC API call
+ """
+ result = None
+ if not module.check_mode:
+ try:
+ result = clc.v2.API.Call('DELETE',
+ 'servers/%s/%s/antiAffinityPolicy' % (
+ acct_alias,
+ server_id),
+ json.dumps({}))
+ except APIFailedResponse as ex:
+ module.fail_json(
+ msg='Unable to delete anti affinity policy from server : "{0}". {1}'.format(
+ server_id, str(ex.response_text)))
+ return result
+
+ @staticmethod
+ def _get_aa_policy_id_by_name(clc, module, alias, aa_policy_name):
+ """
+ retrieves the anti affinity policy id of the server based on the name of the policy
+ :param clc: the clc-sdk instance to use
+ :param module: the AnsibleModule object
+ :param alias: the CLC account alias
+ :param aa_policy_name: the anti affinity policy name
+ :return: aa_policy_id: The anti affinity policy id
+ """
+ aa_policy_id = None
+ try:
+ aa_policies = clc.v2.API.Call(method='GET',
+ url='antiAffinityPolicies/%s' % alias)
+ except APIFailedResponse as ex:
+ return module.fail_json(
+ msg='Unable to fetch anti affinity policies from account alias : "{0}". {1}'.format(
+ alias, str(ex.response_text)))
+ for aa_policy in aa_policies.get('items'):
+ if aa_policy.get('name') == aa_policy_name:
+ if not aa_policy_id:
+ aa_policy_id = aa_policy.get('id')
+ else:
+ return module.fail_json(
+ msg='multiple anti affinity policies were found with policy name : %s' % aa_policy_name)
+ if not aa_policy_id:
+ module.fail_json(
+ msg='No anti affinity policy was found with policy name : %s' % aa_policy_name)
+ return aa_policy_id
+
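+ # NOTE: a 404 from the antiAffinityPolicy endpoint below simply means the
+ # server has no policy attached, so it is swallowed and None is returned
+ # instead of failing the module.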
+ @staticmethod
+ def _get_aa_policy_id_of_server(clc, module, alias, server_id):
+ """
+ retrieves the anti affinity policy id of the server based on the CLC server id
+ :param clc: the clc-sdk instance to use
+ :param module: the AnsibleModule object
+ :param alias: the CLC account alias
+ :param server_id: the CLC server id
+ :return: aa_policy_id: The anti affinity policy id
+ """
+ aa_policy_id = None
+ try:
+ result = clc.v2.API.Call(
+ method='GET', url='servers/%s/%s/antiAffinityPolicy' %
+ (alias, server_id))
+ aa_policy_id = result.get('id')
+ except APIFailedResponse as ex:
+ if ex.response_status_code != 404:
+ module.fail_json(msg='Unable to fetch anti affinity policy for server "{0}". {1}'.format(
+ server_id, str(ex.response_text)))
+ return aa_policy_id
+
+ def _ensure_alert_policy_present(
+ self, server, server_params):
+ """
+ ensures the server is updated with the provided alert policy
+ :param server: the CLC server object
+ :param server_params: the dictionary of server parameters
+ :return: changed - Boolean whether a change was made
+ """
+ changed = False
+ acct_alias = self.clc.v2.Account.GetAlias()
+ alert_policy_id = server_params.get('alert_policy_id')
+ alert_policy_name = server_params.get('alert_policy_name')
+ if not alert_policy_id and alert_policy_name:
+ alert_policy_id = self._get_alert_policy_id_by_name(
+ self.clc,
+ self.module,
+ acct_alias,
+ alert_policy_name)
+ if alert_policy_id and not self._alert_policy_exists(
+ server, alert_policy_id):
+ self._add_alert_policy_to_server(
+ self.clc,
+ self.module,
+ acct_alias,
+ server.id,
+ alert_policy_id)
+ changed = True
+ return changed
+
+ def _ensure_alert_policy_absent(
+ self, server, server_params):
+ """
+ ensures the alert policy is removed from the server
+ :param server: the CLC server object
+ :param server_params: the dictionary of server parameters
+ :return: changed - Boolean whether a change was made
+ """
+ changed = False
+
+ acct_alias = self.clc.v2.Account.GetAlias()
+ alert_policy_id = server_params.get('alert_policy_id')
+ alert_policy_name = server_params.get('alert_policy_name')
+ if not alert_policy_id and alert_policy_name:
+ alert_policy_id = self._get_alert_policy_id_by_name(
+ self.clc,
+ self.module,
+ acct_alias,
+ alert_policy_name)
+
+ if alert_policy_id and self._alert_policy_exists(
+ server, alert_policy_id):
+ self._remove_alert_policy_to_server(
+ self.clc,
+ self.module,
+ acct_alias,
+ server.id,
+ alert_policy_id)
+ changed = True
+ return changed
+
+ @staticmethod
+ def _add_alert_policy_to_server(
+ clc, module, acct_alias, server_id, alert_policy_id):
+ """
+ add the alert policy to CLC server
+ :param clc: the clc-sdk instance to use
+ :param module: the AnsibleModule object
+ :param acct_alias: the CLC account alias
+ :param server_id: the CLC server id
+ :param alert_policy_id: the alert policy id
+ :return: result: The result from the CLC API call
+ """
+ result = None
+ if not module.check_mode:
+ try:
+ result = clc.v2.API.Call('POST',
+ 'servers/%s/%s/alertPolicies' % (
+ acct_alias,
+ server_id),
+ json.dumps({"id": alert_policy_id}))
+ except APIFailedResponse as ex:
+ module.fail_json(msg='Unable to set alert policy on the server : "{0}". {1}'.format(
+ server_id, str(ex.response_text)))
+ return result
+
+ @staticmethod
+ def _remove_alert_policy_to_server(
+ clc, module, acct_alias, server_id, alert_policy_id):
+ """
+ remove the alert policy from the CLC server
+ :param clc: the clc-sdk instance to use
+ :param module: the AnsibleModule object
+ :param acct_alias: the CLC account alias
+ :param server_id: the CLC server id
+ :param alert_policy_id: the alert policy id
+ :return: result: The result from the CLC API call
+ """
+ result = None
+ if not module.check_mode:
+ try:
+ result = clc.v2.API.Call('DELETE',
+ 'servers/%s/%s/alertPolicies/%s'
+ % (acct_alias, server_id, alert_policy_id))
+ except APIFailedResponse as ex:
+ module.fail_json(msg='Unable to remove alert policy from the server : "{0}". {1}'.format(
+ server_id, str(ex.response_text)))
+ return result
+
+ @staticmethod
+ def _get_alert_policy_id_by_name(clc, module, alias, alert_policy_name):
+ """
+ retrieves the alert policy id of the server based on the name of the policy
+ :param clc: the clc-sdk instance to use
+ :param module: the AnsibleModule object
+ :param alias: the CLC account alias
+ :param alert_policy_name: the alert policy name
+ :return: alert_policy_id: The alert policy id
+ """
+ alert_policy_id = None
+ try:
+ alert_policies = clc.v2.API.Call(method='GET',
+ url='alertPolicies/%s' % alias)
+ except APIFailedResponse as ex:
+ return module.fail_json(msg='Unable to fetch alert policies for account : "{0}". {1}'.format(
+ alias, str(ex.response_text)))
+ for alert_policy in alert_policies.get('items'):
+ if alert_policy.get('name') == alert_policy_name:
+ if not alert_policy_id:
+ alert_policy_id = alert_policy.get('id')
+ else:
+ return module.fail_json(
+ msg='multiple alert policies were found with policy name : %s' % alert_policy_name)
+ return alert_policy_id
+
+ @staticmethod
+ def _alert_policy_exists(server, alert_policy_id):
+ """
+ Checks if the alert policy exists for the server
+ :param server: the clc server object
+ :param alert_policy_id: the alert policy
+ :return: True if the given alert policy id is associated with the server, False otherwise
+ """
+ result = False
+ alert_policies = server.alertPolicies
+ if alert_policies:
+ for alert_policy in alert_policies:
+ if alert_policy.get('id') == alert_policy_id:
+ result = True
+ return result
+
+ @staticmethod
+ def _set_user_agent(clc):
+ if hasattr(clc, 'SetRequestsSession'):
+ agent_string = "ClcAnsibleModule/" + __version__
+ ses = requests.Session()
+ ses.headers.update({"Api-Client": agent_string})
+ ses.headers['User-Agent'] += " " + agent_string
+ clc.SetRequestsSession(ses)
+
+
+def main():
+ """
+ The main function. Instantiates the module and calls process_request.
+ :return: none
+ """
+
+ argument_dict = ClcModifyServer._define_module_argument_spec()
+ module = AnsibleModule(supports_check_mode=True, **argument_dict)
+ clc_modify_server = ClcModifyServer(module)
+ clc_modify_server.process_request()
+
+from ansible.module_utils.basic import * # pylint: disable=W0614
+if __name__ == '__main__':
+ main()
diff --git a/lib/ansible/modules/extras/cloud/centurylink/clc_publicip.py b/lib/ansible/modules/extras/cloud/centurylink/clc_publicip.py
new file mode 100644
index 0000000000..9c21a9a615
--- /dev/null
+++ b/lib/ansible/modules/extras/cloud/centurylink/clc_publicip.py
@@ -0,0 +1,363 @@
+#!/usr/bin/python
+
+#
+# Copyright (c) 2015 CenturyLink
+#
+# This file is part of Ansible.
+#
+# Ansible is free software: you can redistribute it and/or modify
+# it under the terms of the GNU General Public License as published by
+# the Free Software Foundation, either version 3 of the License, or
+# (at your option) any later version.
+#
+# Ansible is distributed in the hope that it will be useful,
+# but WITHOUT ANY WARRANTY; without even the implied warranty of
+# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+# GNU General Public License for more details.
+#
+# You should have received a copy of the GNU General Public License
+# along with Ansible. If not, see <http://www.gnu.org/licenses/>
+#
+
+DOCUMENTATION = '''
+module: clc_publicip
+short_description: Add and Delete public ips on servers in CenturyLink Cloud.
+description:
+ - An Ansible module to add or delete public ip addresses on an existing server or servers in CenturyLink Cloud.
+version_added: "2.0"
+options:
+ protocol:
+ description:
+ - The protocol that the public IP will listen for.
+ default: TCP
+ choices: ['TCP', 'UDP', 'ICMP']
+ required: False
+ ports:
+ description:
+ - A list of ports to expose. This is required when state is 'present'
+ required: False
+ default: None
+ server_ids:
+ description:
+ - A list of servers to create public ips on.
+ required: True
+ state:
+ description:
+ - Determine whether to create or delete public IPs. If 'present', the module will not create a second public
+ ip if one already exists.
+ default: present
+ choices: ['present', 'absent']
+ required: False
+ wait:
+ description:
+ - Whether to wait for the tasks to finish before returning.
+ choices: [ True, False ]
+ default: True
+ required: False
+requirements:
+ - python = 2.7
+ - requests >= 2.5.0
+ - clc-sdk
+author: "CLC Runner (@clc-runner)"
+notes:
+ - To use this module, it is required to set the below environment variables, which enable access to the
+ Centurylink Cloud
+ - CLC_V2_API_USERNAME, the account login id for the centurylink cloud
+ - CLC_V2_API_PASSWD, the account password for the centurylink cloud
+ - Alternatively, the module accepts the API token and account alias. The API token can be generated using the
+ CLC account login and password via the HTTP api call @ https://api.ctl.io/v2/authentication/login
+ - CLC_V2_API_TOKEN, the API token generated from https://api.ctl.io/v2/authentication/login
+ - CLC_ACCT_ALIAS, the account alias associated with the centurylink cloud
+ - Users can set CLC_V2_API_URL to specify an endpoint for pointing to a different CLC environment.
+'''
+
+EXAMPLES = '''
+# Note - You must set the CLC_V2_API_USERNAME and CLC_V2_API_PASSWD environment variables before running these examples
+
+- name: Add Public IP to Server
+ hosts: localhost
+ gather_facts: False
+ connection: local
+ tasks:
+ - name: Create Public IP For Servers
+ clc_publicip:
+ protocol: 'TCP'
+ ports:
+ - 80
+ server_ids:
+ - UC1TEST-SVR01
+ - UC1TEST-SVR02
+ state: present
+ register: clc
+
+ - name: debug
+ debug: var=clc
+
+- name: Delete Public IP from Server
+ hosts: localhost
+ gather_facts: False
+ connection: local
+ tasks:
+ - name: Delete Public IP From Servers
+ clc_publicip:
+ server_ids:
+ - UC1TEST-SVR01
+ - UC1TEST-SVR02
+ state: absent
+ register: clc
+
+ - name: debug
+ debug: var=clc
+'''
+
+RETURN = '''
+server_ids:
+ description: The list of server ids that are changed
+ returned: success
+ type: list
+ sample:
+ [
+ "UC1TEST-SVR01",
+ "UC1TEST-SVR02"
+ ]
+'''
+
+__version__ = '${version}'
+
+from distutils.version import LooseVersion
+
+try:
+ import requests
+except ImportError:
+ REQUESTS_FOUND = False
+else:
+ REQUESTS_FOUND = True
+
+#
+# Requires the clc-python-sdk.
+# sudo pip install clc-sdk
+#
+try:
+ import clc as clc_sdk
+ from clc import CLCException
+except ImportError:
+ CLC_FOUND = False
+ clc_sdk = None
+else:
+ CLC_FOUND = True
+
+
+class ClcPublicIp(object):
+ clc = clc_sdk
+ module = None
+
+ def __init__(self, module):
+ """
+ Construct module
+ """
+ self.module = module
+ if not CLC_FOUND:
+ self.module.fail_json(
+ msg='clc-python-sdk required for this module')
+ if not REQUESTS_FOUND:
+ self.module.fail_json(
+ msg='requests library is required for this module')
+ if requests.__version__ and LooseVersion(requests.__version__) < LooseVersion('2.5.0'):
+ self.module.fail_json(
+ msg='requests library version should be >= 2.5.0')
+
+ self._set_user_agent(self.clc)
+
+ def process_request(self):
+ """
+ Process the request - Main Code Path
+ :return: Returns with either an exit_json or fail_json
+ """
+ self._set_clc_credentials_from_env()
+ params = self.module.params
+ server_ids = params['server_ids']
+ ports = params['ports']
+ protocol = params['protocol']
+ state = params['state']
+
+ if state == 'present':
+ changed, changed_server_ids, requests = self.ensure_public_ip_present(
+ server_ids=server_ids, protocol=protocol, ports=ports)
+ elif state == 'absent':
+ changed, changed_server_ids, requests = self.ensure_public_ip_absent(
+ server_ids=server_ids)
+ else:
+ return self.module.fail_json(msg="Unknown State: " + state)
+ self._wait_for_requests_to_complete(requests)
+ return self.module.exit_json(changed=changed,
+ server_ids=changed_server_ids)
+
+ @staticmethod
+ def _define_module_argument_spec():
+ """
+ Define the argument spec for the ansible module
+ :return: argument spec dictionary
+ """
+ argument_spec = dict(
+ server_ids=dict(type='list', required=True),
+ protocol=dict(default='TCP', choices=['TCP', 'UDP', 'ICMP']),
+ ports=dict(type='list'),
+ wait=dict(type='bool', default=True),
+ state=dict(default='present', choices=['present', 'absent']),
+ )
+ return argument_spec
+
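+ # Idempotence note: only servers that currently expose no public ip are
+ # changed, so a second run with state=present reports changed=False. The
+ # protocol/ports params expand into one port spec per port, e.g.
+ # protocol='TCP', ports=[80, 443] becomes
+ #   [{'protocol': 'TCP', 'port': 80}, {'protocol': 'TCP', 'port': 443}]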
+ def ensure_public_ip_present(self, server_ids, protocol, ports):
+ """
+ Ensures that the given servers have a public ip available
+ :param server_ids: the list of server ids
+ :param protocol: the ip protocol
+ :param ports: the list of ports to expose
+ :return: (changed, changed_server_ids, results)
+ changed: A flag indicating if there is any change
+ changed_server_ids : the list of server ids that are changed
+ results: The result list from clc public ip call
+ """
+ changed = False
+ results = []
+ changed_server_ids = []
+ servers = self._get_servers_from_clc(
+ server_ids,
+ 'Failed to obtain server list from the CLC API')
+ servers_to_change = [
+ server for server in servers if len(
+ server.PublicIPs().public_ips) == 0]
+ ports_to_expose = [{'protocol': protocol, 'port': port}
+ for port in ports]
+ for server in servers_to_change:
+ if not self.module.check_mode:
+ result = self._add_publicip_to_server(server, ports_to_expose)
+ results.append(result)
+ changed_server_ids.append(server.id)
+ changed = True
+ return changed, changed_server_ids, results
+
+ def _add_publicip_to_server(self, server, ports_to_expose):
+ result = None
+ try:
+ result = server.PublicIPs().Add(ports_to_expose)
+ except CLCException as ex:
+ self.module.fail_json(msg='Failed to add public ip to the server : {0}. {1}'.format(
+ server.id, ex.response_text
+ ))
+ return result
+
+ def ensure_public_ip_absent(self, server_ids):
+ """
+ Ensures that any public ips are removed from the given servers
+ :param server_ids: the list of server ids
+ :return: (changed, changed_server_ids, results)
+ changed: A flag indicating if there is any change
+ changed_server_ids : the list of server ids that are changed
+ results: The result list from clc public ip call
+ """
+ changed = False
+ results = []
+ changed_server_ids = []
+ servers = self._get_servers_from_clc(
+ server_ids,
+ 'Failed to obtain server list from the CLC API')
+ servers_to_change = [
+ server for server in servers if len(
+ server.PublicIPs().public_ips) > 0]
+ for server in servers_to_change:
+ if not self.module.check_mode:
+ result = self._remove_publicip_from_server(server)
+ results.append(result)
+ changed_server_ids.append(server.id)
+ changed = True
+ return changed, changed_server_ids, results
+
+ def _remove_publicip_from_server(self, server):
+ result = None
+ try:
+ for ip_address in server.PublicIPs().public_ips:
+ result = ip_address.Delete()
+ except CLCException as ex:
+ self.module.fail_json(msg='Failed to remove public ip from the server : {0}. {1}'.format(
+ server.id, ex.response_text
+ ))
+ return result
+
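+ # A clc-sdk Request can fan out into several sub-requests (one per
+ # server); the check below fails the module if any sub-request ends in a
+ # status other than 'succeeded'.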
+ def _wait_for_requests_to_complete(self, requests_lst):
+ """
+ Waits until the CLC requests are complete if the wait argument is True
+ :param requests_lst: The list of CLC request objects
+ :return: none
+ """
+ if not self.module.params['wait']:
+ return
+ for request in requests_lst:
+ request.WaitUntilComplete()
+ for request_details in request.requests:
+ if request_details.Status() != 'succeeded':
+ self.module.fail_json(
+ msg='Unable to process public ip request')
+
+ def _set_clc_credentials_from_env(self):
+ """
+ Set the CLC Credentials on the sdk by reading environment variables
+ :return: none
+ """
+ env = os.environ
+ v2_api_token = env.get('CLC_V2_API_TOKEN', False)
+ v2_api_username = env.get('CLC_V2_API_USERNAME', False)
+ v2_api_passwd = env.get('CLC_V2_API_PASSWD', False)
+ clc_alias = env.get('CLC_ACCT_ALIAS', False)
+ api_url = env.get('CLC_V2_API_URL', False)
+
+ if api_url:
+ self.clc.defaults.ENDPOINT_URL_V2 = api_url
+
+ if v2_api_token and clc_alias:
+ self.clc._LOGIN_TOKEN_V2 = v2_api_token
+ self.clc._V2_ENABLED = True
+ self.clc.ALIAS = clc_alias
+ elif v2_api_username and v2_api_passwd:
+ self.clc.v2.SetCredentials(
+ api_username=v2_api_username,
+ api_passwd=v2_api_passwd)
+ else:
+ return self.module.fail_json(
+ msg="You must set the CLC_V2_API_USERNAME and CLC_V2_API_PASSWD "
+ "environment variables")
+
+ def _get_servers_from_clc(self, server_ids, message):
+ """
+ Gets a list of servers from the CLC API
+ """
+ try:
+ return self.clc.v2.Servers(server_ids).servers
+ except CLCException as exception:
+ self.module.fail_json(msg=message + ': %s' % exception)
+
+ @staticmethod
+ def _set_user_agent(clc):
+ if hasattr(clc, 'SetRequestsSession'):
+ agent_string = "ClcAnsibleModule/" + __version__
+ ses = requests.Session()
+ ses.headers.update({"Api-Client": agent_string})
+ ses.headers['User-Agent'] += " " + agent_string
+ clc.SetRequestsSession(ses)
+
+
+def main():
+ """
+ The main function. Instantiates the module and calls process_request.
+ :return: none
+ """
+ module = AnsibleModule(
+ argument_spec=ClcPublicIp._define_module_argument_spec(),
+ supports_check_mode=True
+ )
+ clc_public_ip = ClcPublicIp(module)
+ clc_public_ip.process_request()
+
+from ansible.module_utils.basic import * # pylint: disable=W0614
+if __name__ == '__main__':
+ main()
diff --git a/lib/ansible/modules/extras/cloud/centurylink/clc_server.py b/lib/ansible/modules/extras/cloud/centurylink/clc_server.py
new file mode 100644
index 0000000000..3cb4404012
--- /dev/null
+++ b/lib/ansible/modules/extras/cloud/centurylink/clc_server.py
@@ -0,0 +1,1585 @@
+#!/usr/bin/python
+
+#
+# Copyright (c) 2015 CenturyLink
+#
+# This file is part of Ansible.
+#
+# Ansible is free software: you can redistribute it and/or modify
+# it under the terms of the GNU General Public License as published by
+# the Free Software Foundation, either version 3 of the License, or
+# (at your option) any later version.
+#
+# Ansible is distributed in the hope that it will be useful,
+# but WITHOUT ANY WARRANTY; without even the implied warranty of
+# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+# GNU General Public License for more details.
+#
+# You should have received a copy of the GNU General Public License
+# along with Ansible. If not, see <http://www.gnu.org/licenses/>
+#
+
+DOCUMENTATION = '''
+module: clc_server
+short_description: Create, Delete, Start and Stop servers in CenturyLink Cloud.
+description:
+ - An Ansible module to Create, Delete, Start and Stop servers in CenturyLink Cloud.
+version_added: "2.0"
+options:
+ additional_disks:
+ description:
+ - The list of additional disks for the server
+ required: False
+ default: []
+ add_public_ip:
+ description:
+ - Whether to add a public ip to the server
+ required: False
+ default: False
+ choices: [False, True]
+ alias:
+ description:
+ - The account alias to provision the servers under.
+ required: False
+ default: None
+ anti_affinity_policy_id:
+ description:
+ - The anti-affinity policy to assign to the server. This is mutually exclusive with 'anti_affinity_policy_name'.
+ required: False
+ default: None
+ anti_affinity_policy_name:
+ description:
+ - The anti-affinity policy to assign to the server. This is mutually exclusive with 'anti_affinity_policy_id'.
+ required: False
+ default: None
+ alert_policy_id:
+ description:
+ - The alert policy to assign to the server. This is mutually exclusive with 'alert_policy_name'.
+ required: False
+ default: None
+ alert_policy_name:
+ description:
+ - The alert policy to assign to the server. This is mutually exclusive with 'alert_policy_id'.
+ required: False
+ default: None
+ count:
+ description:
+ - The number of servers to build (mutually exclusive with exact_count)
+ required: False
+ default: 1
+ count_group:
+ description:
+ - Required when exact_count is specified. The Server Group used to determine how many servers to deploy.
+ required: False
+ default: None
+ cpu:
+ description:
+ - How many CPUs to provision on the server
+ default: 1
+ required: False
+ cpu_autoscale_policy_id:
+ description:
+ - The autoscale policy to assign to the server.
+ default: None
+ required: False
+ custom_fields:
+ description:
+ - The list of custom fields to set on the server.
+ default: []
+ required: False
+ description:
+ description:
+ - The description to set for the server.
+ default: None
+ required: False
+ exact_count:
+ description:
+ - Run in idempotent mode. Will ensure that this exact number of servers is running in the provided group,
+ creating and deleting them to reach that count. Requires count_group to be set.
+ default: None
+ required: False
+ group:
+ description:
+ - The Server Group to create servers under.
+ default: 'Default Group'
+ required: False
+ ip_address:
+ description:
+ - The IP Address for the server. One is assigned if not provided.
+ default: None
+ required: False
+ location:
+ description:
+ - The Datacenter to create servers in.
+ default: None
+ required: False
+ managed_os:
+ description:
+ - Whether to create the server as 'Managed' or not.
+ default: False
+ required: False
+ choices: [True, False]
+ memory:
+ description:
+ - Memory in GB.
+ default: 1
+ required: False
+ name:
+ description:
+ - A 1 to 6 character identifier to use for the server. This is required when state is 'present'
+ default: None
+ required: False
+ network_id:
+ description:
+ - The network UUID on which to create servers.
+ default: None
+ required: False
+ packages:
+ description:
+ - The list of blueprint packages to run on the server after it is created.
+ default: []
+ required: False
+ password:
+ description:
+ - Password for the administrator / root user
+ default: None
+ required: False
+ primary_dns:
+ description:
+ - Primary DNS used by the server.
+ default: None
+ required: False
+ public_ip_protocol:
+ description:
+ - The protocol to use for the public ip if add_public_ip is set to True.
+ default: 'TCP'
+ choices: ['TCP', 'UDP', 'ICMP']
+ required: False
+ public_ip_ports:
+ description:
+ - A list of ports to allow on the firewall to the servers public ip, if add_public_ip is set to True.
+ default: []
+ required: False
+ secondary_dns:
+ description:
+ - Secondary DNS used by the server.
+ default: None
+ required: False
+ server_ids:
+ description:
+ - Required for started, stopped, and absent states.
+ A list of server ids to ensure are started, stopped, or absent.
+ default: []
+ required: False
+ source_server_password:
+ description:
+ - The password for the source server if a clone is specified.
+ default: None
+ required: False
+ state:
+ description:
+ - The state to ensure that the provided resources are in.
+ default: 'present'
+ required: False
+ choices: ['present', 'absent', 'started', 'stopped']
+ storage_type:
+ description:
+ - The type of storage to attach to the server.
+ default: 'standard'
+ required: False
+ choices: ['standard', 'hyperscale']
+ template:
+ description:
+ - The template to use for server creation. Will search for a template if a partial string is provided.
+ This is required when state is 'present'
+ default: None
+ required: False
+ ttl:
+ description:
+ - The time to live for the server in seconds. The server will be deleted when this time expires.
+ default: None
+ required: False
+ type:
+ description:
+ - The type of server to create.
+ default: 'standard'
+ required: False
+ choices: ['standard', 'hyperscale', 'bareMetal']
+ configuration_id:
+ description:
+ - Only required for bare metal servers.
+ Specifies the identifier for the specific configuration type of bare metal server to deploy.
+ default: None
+ required: False
+ os_type:
+ description:
+ - Only required for bare metal servers.
+ Specifies the OS to provision with the bare metal server.
+ default: None
+ required: False
+ choices: ['redHat6_64Bit', 'centOS6_64Bit', 'windows2012R2Standard_64Bit', 'ubuntu14_64Bit']
+ wait:
+ description:
+ - Whether to wait for the provisioning tasks to finish before returning.
+ default: True
+ required: False
+ choices: [True, False]
+requirements:
+ - python = 2.7
+ - requests >= 2.5.0
+ - clc-sdk
+author: "CLC Runner (@clc-runner)"
+notes:
+ - To use this module, it is required to set the below environment variables, which enable access to the
+ Centurylink Cloud
+ - CLC_V2_API_USERNAME, the account login id for the centurylink cloud
+ - CLC_V2_API_PASSWD, the account password for the centurylink cloud
+ - Alternatively, the module accepts the API token and account alias. The API token can be generated using the
+ CLC account login and password via the HTTP api call @ https://api.ctl.io/v2/authentication/login
+ - CLC_V2_API_TOKEN, the API token generated from https://api.ctl.io/v2/authentication/login
+ - CLC_ACCT_ALIAS, the account alias associated with the centurylink cloud
+ - Users can set CLC_V2_API_URL to specify an endpoint for pointing to a different CLC environment.
+'''
+
+EXAMPLES = '''
+# Note - You must set the CLC_V2_API_USERNAME and CLC_V2_API_PASSWD environment variables before running these examples
+
+- name: Provision a single Ubuntu Server
+ clc_server:
+ name: test
+ template: ubuntu-14-64
+ count: 1
+ group: 'Default Group'
+ state: present
+
+- name: Ensure 'Default Group' has exactly 5 servers
+ clc_server:
+ name: test
+ template: ubuntu-14-64
+ exact_count: 5
+ count_group: 'Default Group'
+ group: 'Default Group'
+
+- name: Stop a Server
+ clc_server:
+ server_ids: ['UC1ACCT-TEST01']
+ state: stopped
+
+- name: Start a Server
+ clc_server:
+ server_ids: ['UC1ACCT-TEST01']
+ state: started
+
+- name: Delete a Server
+ clc_server:
+ server_ids: ['UC1ACCT-TEST01']
+ state: absent
+'''
+
+RETURN = '''
+server_ids:
+ description: The list of server ids that are created
+ returned: success
+ type: list
+ sample:
+ [
+ "UC1TEST-SVR01",
+ "UC1TEST-SVR02"
+ ]
+partially_created_server_ids:
+ description: The list of server ids that are partially created
+ returned: success
+ type: list
+ sample:
+ [
+ "UC1TEST-SVR01",
+ "UC1TEST-SVR02"
+ ]
+servers:
+ description: The list of server objects returned from CLC
+ returned: success
+ type: list
+ sample:
+ [
+ {
+ "changeInfo":{
+ "createdBy":"service.wfad",
+ "createdDate":1438196820,
+ "modifiedBy":"service.wfad",
+ "modifiedDate":1438196820
+ },
+ "description":"test-server",
+ "details":{
+ "alertPolicies":[
+
+ ],
+ "cpu":1,
+ "customFields":[
+
+ ],
+ "diskCount":3,
+ "disks":[
+ {
+ "id":"0:0",
+ "partitionPaths":[
+
+ ],
+ "sizeGB":1
+ },
+ {
+ "id":"0:1",
+ "partitionPaths":[
+
+ ],
+ "sizeGB":2
+ },
+ {
+ "id":"0:2",
+ "partitionPaths":[
+
+ ],
+ "sizeGB":14
+ }
+ ],
+ "hostName":"",
+ "inMaintenanceMode":false,
+ "ipAddresses":[
+ {
+ "internal":"10.1.1.1"
+ }
+ ],
+ "memoryGB":1,
+ "memoryMB":1024,
+ "partitions":[
+
+ ],
+ "powerState":"started",
+ "snapshots":[
+
+ ],
+ "storageGB":17
+ },
+ "groupId":"086ac1dfe0b6411989e8d1b77c4065f0",
+ "id":"test-server",
+ "ipaddress":"10.120.45.23",
+ "isTemplate":false,
+ "links":[
+ {
+ "href":"/v2/servers/wfad/test-server",
+ "id":"test-server",
+ "rel":"self",
+ "verbs":[
+ "GET",
+ "PATCH",
+ "DELETE"
+ ]
+ },
+ {
+ "href":"/v2/groups/wfad/086ac1dfe0b6411989e8d1b77c4065f0",
+ "id":"086ac1dfe0b6411989e8d1b77c4065f0",
+ "rel":"group"
+ },
+ {
+ "href":"/v2/accounts/wfad",
+ "id":"wfad",
+ "rel":"account"
+ },
+ {
+ "href":"/v2/billing/wfad/serverPricing/test-server",
+ "rel":"billing"
+ },
+ {
+ "href":"/v2/servers/wfad/test-server/publicIPAddresses",
+ "rel":"publicIPAddresses",
+ "verbs":[
+ "POST"
+ ]
+ },
+ {
+ "href":"/v2/servers/wfad/test-server/credentials",
+ "rel":"credentials"
+ },
+ {
+ "href":"/v2/servers/wfad/test-server/statistics",
+ "rel":"statistics"
+ },
+ {
+ "href":"/v2/servers/wfad/510ec21ae82d4dc89d28479753bf736a/upcomingScheduledActivities",
+ "rel":"upcomingScheduledActivities"
+ },
+ {
+ "href":"/v2/servers/wfad/510ec21ae82d4dc89d28479753bf736a/scheduledActivities",
+ "rel":"scheduledActivities",
+ "verbs":[
+ "GET",
+ "POST"
+ ]
+ },
+ {
+ "href":"/v2/servers/wfad/test-server/capabilities",
+ "rel":"capabilities"
+ },
+ {
+ "href":"/v2/servers/wfad/test-server/alertPolicies",
+ "rel":"alertPolicyMappings",
+ "verbs":[
+ "POST"
+ ]
+ },
+ {
+ "href":"/v2/servers/wfad/test-server/antiAffinityPolicy",
+ "rel":"antiAffinityPolicyMapping",
+ "verbs":[
+ "PUT",
+ "DELETE"
+ ]
+ },
+ {
+ "href":"/v2/servers/wfad/test-server/cpuAutoscalePolicy",
+ "rel":"cpuAutoscalePolicyMapping",
+ "verbs":[
+ "PUT",
+ "DELETE"
+ ]
+ }
+ ],
+ "locationId":"UC1",
+ "name":"test-server",
+ "os":"ubuntu14_64Bit",
+ "osType":"Ubuntu 14 64-bit",
+ "status":"active",
+ "storageType":"standard",
+ "type":"standard"
+ }
+ ]
+'''
+
+__version__ = '${version}'
+
+from time import sleep
+from distutils.version import LooseVersion
+
+try:
+ import requests
+except ImportError:
+ REQUESTS_FOUND = False
+else:
+ REQUESTS_FOUND = True
+
+#
+# Requires the clc-python-sdk.
+# sudo pip install clc-sdk
+#
+try:
+ import clc as clc_sdk
+ from clc import CLCException
+ from clc import APIFailedResponse
+except ImportError:
+ CLC_FOUND = False
+ clc_sdk = None
+else:
+ CLC_FOUND = True
+
+
+class ClcServer:
+ clc = clc_sdk
+
+ def __init__(self, module):
+ """
+ Construct module
+ """
+ self.clc = clc_sdk
+ self.module = module
+ self.group_dict = {}
+
+ if not CLC_FOUND:
+ self.module.fail_json(
+ msg='clc-python-sdk required for this module')
+ if not REQUESTS_FOUND:
+ self.module.fail_json(
+ msg='requests library is required for this module')
+ if requests.__version__ and LooseVersion(
+ requests.__version__) < LooseVersion('2.5.0'):
+ self.module.fail_json(
+ msg='requests library version should be >= 2.5.0')
+
+ self._set_user_agent(self.clc)
+
+ def process_request(self):
+ """
+ Process the request - Main Code Path
+ :return: Returns with either an exit_json or fail_json
+ """
+ changed = False
+ new_server_ids = []
+ server_dict_array = []
+
+ self._set_clc_credentials_from_env()
+ self.module.params = self._validate_module_params(
+ self.clc,
+ self.module)
+ p = self.module.params
+ state = p.get('state')
+
+ #
+ # Handle each state
+ #
+ partial_servers_ids = []
+ if state == 'absent':
+ server_ids = p['server_ids']
+ if not isinstance(server_ids, list):
+ return self.module.fail_json(
+ msg='server_ids needs to be a list of instances to delete: %s' %
+ server_ids)
+
+ (changed,
+ server_dict_array,
+ new_server_ids) = self._delete_servers(module=self.module,
+ clc=self.clc,
+ server_ids=server_ids)
+
+ elif state in ('started', 'stopped'):
+ server_ids = p.get('server_ids')
+ if not isinstance(server_ids, list):
+ return self.module.fail_json(
+ msg='server_ids needs to be a list of servers to run: %s' %
+ server_ids)
+
+ (changed,
+ server_dict_array,
+ new_server_ids) = self._start_stop_servers(self.module,
+ self.clc,
+ server_ids)
+
+ elif state == 'present':
+ # Changed is always set to true when provisioning new instances
+ if not p.get('template') and p.get('type') != 'bareMetal':
+ return self.module.fail_json(
+ msg='template parameter is required for new instance')
+
+ if p.get('exact_count') is None:
+ (server_dict_array,
+ new_server_ids,
+ partial_servers_ids,
+ changed) = self._create_servers(self.module,
+ self.clc)
+ else:
+ (server_dict_array,
+ new_server_ids,
+ partial_servers_ids,
+ changed) = self._enforce_count(self.module,
+ self.clc)
+
+ self.module.exit_json(
+ changed=changed,
+ server_ids=new_server_ids,
+ partially_created_server_ids=partial_servers_ids,
+ servers=server_dict_array)
+
+ @staticmethod
+ def _define_module_argument_spec():
+ """
+ Define the argument spec for the ansible module
+ :return: argument spec dictionary
+ """
+ argument_spec = dict(
+ name=dict(),
+ template=dict(),
+ group=dict(default='Default Group'),
+ network_id=dict(),
+ location=dict(default=None),
+ cpu=dict(default=1),
+ memory=dict(default=1),
+ alias=dict(default=None),
+ password=dict(default=None, no_log=True),
+ ip_address=dict(default=None),
+ storage_type=dict(
+ default='standard',
+ choices=[
+ 'standard',
+ 'hyperscale']),
+ type=dict(default='standard', choices=['standard', 'hyperscale', 'bareMetal']),
+ primary_dns=dict(default=None),
+ secondary_dns=dict(default=None),
+ additional_disks=dict(type='list', default=[]),
+ custom_fields=dict(type='list', default=[]),
+ ttl=dict(default=None),
+ managed_os=dict(type='bool', default=False),
+ description=dict(default=None),
+ source_server_password=dict(default=None),
+ cpu_autoscale_policy_id=dict(default=None),
+ anti_affinity_policy_id=dict(default=None),
+ anti_affinity_policy_name=dict(default=None),
+ alert_policy_id=dict(default=None),
+ alert_policy_name=dict(default=None),
+ packages=dict(type='list', default=[]),
+ state=dict(
+ default='present',
+ choices=[
+ 'present',
+ 'absent',
+ 'started',
+ 'stopped']),
+ count=dict(type='int', default=1),
+ exact_count=dict(type='int', default=None),
+ count_group=dict(),
+ server_ids=dict(type='list', default=[]),
+ add_public_ip=dict(type='bool', default=False),
+ public_ip_protocol=dict(
+ default='TCP',
+ choices=[
+ 'TCP',
+ 'UDP',
+ 'ICMP']),
+ public_ip_ports=dict(type='list', default=[]),
+ configuration_id=dict(default=None),
+ os_type=dict(default=None,
+ choices=[
+ 'redHat6_64Bit',
+ 'centOS6_64Bit',
+ 'windows2012R2Standard_64Bit',
+ 'ubuntu14_64Bit'
+ ]),
+ wait=dict(type='bool', default=True))
+
+ mutually_exclusive = [
+ ['exact_count', 'count'],
+ ['exact_count', 'state'],
+ ['anti_affinity_policy_id', 'anti_affinity_policy_name'],
+ ['alert_policy_id', 'alert_policy_name'],
+ ]
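+ # Note the ['exact_count', 'state'] pair above: exact_count is only
+ # meaningful with the implicit default state 'present', so combining it
+ # with an explicit state is rejected by Ansible's argument handling.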
+ return {"argument_spec": argument_spec,
+ "mutually_exclusive": mutually_exclusive}
+
+ def _set_clc_credentials_from_env(self):
+ """
+ Set the CLC Credentials on the sdk by reading environment variables
+ :return: none
+ """
+ env = os.environ
+ v2_api_token = env.get('CLC_V2_API_TOKEN', False)
+ v2_api_username = env.get('CLC_V2_API_USERNAME', False)
+ v2_api_passwd = env.get('CLC_V2_API_PASSWD', False)
+ clc_alias = env.get('CLC_ACCT_ALIAS', False)
+ api_url = env.get('CLC_V2_API_URL', False)
+ if api_url:
+ self.clc.defaults.ENDPOINT_URL_V2 = api_url
+
+ if v2_api_token and clc_alias:
+ self.clc._LOGIN_TOKEN_V2 = v2_api_token
+ self.clc._V2_ENABLED = True
+ self.clc.ALIAS = clc_alias
+ elif v2_api_username and v2_api_passwd:
+ self.clc.v2.SetCredentials(
+ api_username=v2_api_username,
+ api_passwd=v2_api_passwd)
+ else:
+ return self.module.fail_json(
+ msg="You must set the CLC_V2_API_USERNAME and CLC_V2_API_PASSWD "
+ "environment variables")
+
+ @staticmethod
+ def _validate_module_params(clc, module):
+ """
+ Validate the module params, and lookup default values.
+ :param clc: clc-sdk instance to use
+ :param module: module to validate
+ :return: dictionary of validated params
+ """
+ params = module.params
+ datacenter = ClcServer._find_datacenter(clc, module)
+
+ ClcServer._validate_types(module)
+ ClcServer._validate_name(module)
+
+ params['alias'] = ClcServer._find_alias(clc, module)
+ params['cpu'] = ClcServer._find_cpu(clc, module)
+ params['memory'] = ClcServer._find_memory(clc, module)
+ params['description'] = ClcServer._find_description(module)
+ params['ttl'] = ClcServer._find_ttl(clc, module)
+ params['template'] = ClcServer._find_template_id(module, datacenter)
+ params['group'] = ClcServer._find_group(module, datacenter).id
+ params['network_id'] = ClcServer._find_network_id(module, datacenter)
+ params['anti_affinity_policy_id'] = ClcServer._find_aa_policy_id(
+ clc,
+ module)
+ params['alert_policy_id'] = ClcServer._find_alert_policy_id(
+ clc,
+ module)
+
+ return params
+
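+ # Each _find_* helper below resolves one parameter, preferring the value
+ # supplied by the user and falling back to an account or group default
+ # fetched from the CLC API (primary datacenter, group cpu/memory
+ # defaults, account alias, ...).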
+ @staticmethod
+ def _find_datacenter(clc, module):
+ """
+ Find the datacenter by calling the CLC API.
+ :param clc: clc-sdk instance to use
+ :param module: module to validate
+ :return: clc-sdk.Datacenter instance
+ """
+ location = module.params.get('location')
+ try:
+ if not location:
+ account = clc.v2.Account()
+ location = account.data.get('primaryDataCenter')
+ data_center = clc.v2.Datacenter(location)
+ return data_center
+ except CLCException as ex:
+ module.fail_json(
+ msg=str(
+ "Unable to find location: {0}".format(location)))
+
+ @staticmethod
+ def _find_alias(clc, module):
+ """
+ Find or Validate the Account Alias by calling the CLC API
+ :param clc: clc-sdk instance to use
+ :param module: module to validate
+ :return: clc-sdk.Account instance
+ """
+ alias = module.params.get('alias')
+ if not alias:
+ try:
+ alias = clc.v2.Account.GetAlias()
+ except CLCException as ex:
+ module.fail_json(msg='Unable to find account alias. {0}'.format(
+ ex.message
+ ))
+ return alias
+
+ @staticmethod
+ def _find_cpu(clc, module):
+ """
+ Find or validate the CPU value by calling the CLC API
+ :param clc: clc-sdk instance to use
+ :param module: module to validate
+ :return: Int value for CPU
+ """
+ cpu = module.params.get('cpu')
+ group_id = module.params.get('group_id')
+ alias = module.params.get('alias')
+ state = module.params.get('state')
+
+ if not cpu and state == 'present':
+ group = clc.v2.Group(id=group_id,
+ alias=alias)
+ if group.Defaults("cpu"):
+ cpu = group.Defaults("cpu")
+ else:
+                module.fail_json(
+                    msg="Can't determine a default cpu value. Please provide a value for cpu.")
+ return cpu
+
+ @staticmethod
+ def _find_memory(clc, module):
+ """
+ Find or validate the Memory value by calling the CLC API
+ :param clc: clc-sdk instance to use
+ :param module: module to validate
+ :return: Int value for Memory
+ """
+ memory = module.params.get('memory')
+ group_id = module.params.get('group_id')
+ alias = module.params.get('alias')
+ state = module.params.get('state')
+
+ if not memory and state == 'present':
+ group = clc.v2.Group(id=group_id,
+ alias=alias)
+ if group.Defaults("memory"):
+ memory = group.Defaults("memory")
+ else:
+                module.fail_json(
+                    msg="Can't determine a default memory value. Please provide a value for memory.")
+ return memory
+
+ @staticmethod
+ def _find_description(module):
+ """
+ Set the description module param to name if description is blank
+ :param module: the module to validate
+ :return: string description
+ """
+ description = module.params.get('description')
+ if not description:
+ description = module.params.get('name')
+ return description
+
+ @staticmethod
+ def _validate_types(module):
+ """
+ Validate that type and storage_type are set appropriately, and fail if not
+ :param module: the module to validate
+ :return: none
+ """
+ state = module.params.get('state')
+ server_type = module.params.get(
+ 'type').lower() if module.params.get('type') else None
+ storage_type = module.params.get(
+ 'storage_type').lower() if module.params.get('storage_type') else None
+
+ if state == "present":
+ if server_type == "standard" and storage_type not in (
+ "standard", "premium"):
+                module.fail_json(
+                    msg="Standard VMs must have storage_type = 'standard' or 'premium'")
+
+ if server_type == "hyperscale" and storage_type != "hyperscale":
+                module.fail_json(
+                    msg="Hyperscale VMs must have storage_type = 'hyperscale'")
+
+ @staticmethod
+ def _validate_name(module):
+ """
+ Validate that name is the correct length if provided, fail if it's not
+ :param module: the module to validate
+ :return: none
+ """
+ server_name = module.params.get('name')
+ state = module.params.get('state')
+
+ if state == 'present' and (
+ len(server_name) < 1 or len(server_name) > 6):
+            module.fail_json(
+                msg="When state = 'present', name must be a string with a minimum length of 1 and a maximum length of 6")
+
+ @staticmethod
+ def _find_ttl(clc, module):
+ """
+ Validate that TTL is > 3600 if set, and fail if not
+ :param clc: clc-sdk instance to use
+ :param module: module to validate
+ :return: validated ttl
+ """
+ ttl = module.params.get('ttl')
+
+ if ttl:
+ if ttl <= 3600:
+ return module.fail_json(msg=str("Ttl cannot be <= 3600"))
+ else:
+ ttl = clc.v2.time_utils.SecondsToZuluTS(int(time.time()) + ttl)
+ return ttl
+
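+    # Note on the conversion above (assuming clc-sdk semantics): 'ttl' arrives
+    # as a relative number of seconds, while the CLC API expects an absolute
+    # Zulu (UTC) timestamp; SecondsToZuluTS() turns "now + ttl" into something
+    # like '2015-06-01T14:00:00Z'.
+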
+ @staticmethod
+ def _find_template_id(module, datacenter):
+ """
+ Find the template id by calling the CLC API.
+ :param module: the module to validate
+ :param datacenter: the datacenter to search for the template
+ :return: a valid clc template id
+ """
+ lookup_template = module.params.get('template')
+ state = module.params.get('state')
+        server_type = module.params.get('type')
+ result = None
+
+        if state == 'present' and server_type != 'bareMetal':
+ try:
+ result = datacenter.Templates().Search(lookup_template)[0].id
+ except CLCException:
+                module.fail_json(
+                    msg="Unable to find a template: {0} in location: {1}".format(
+                        lookup_template, datacenter.id))
+ return result
+
+ @staticmethod
+ def _find_network_id(module, datacenter):
+ """
+ Validate the provided network id or return a default.
+ :param module: the module to validate
+ :param datacenter: the datacenter to search for a network id
+ :return: a valid network id
+ """
+ network_id = module.params.get('network_id')
+
+ if not network_id:
+ try:
+ network_id = datacenter.Networks().networks[0].id
+ # -- added for clc-sdk 2.23 compatibility
+ # datacenter_networks = clc_sdk.v2.Networks(
+ # networks_lst=datacenter._DeploymentCapabilities()['deployableNetworks'])
+ # network_id = datacenter_networks.networks[0].id
+ # -- end
+ except CLCException:
+                module.fail_json(
+                    msg="Unable to find a network in location: {0}".format(
+                        datacenter.id))
+
+ return network_id
+
+ @staticmethod
+ def _find_aa_policy_id(clc, module):
+ """
+        Validate that the anti affinity policy exists for the given name, and fail if not
+ :param clc: the clc-sdk instance
+ :param module: the module to validate
+ :return: aa_policy_id: the anti affinity policy id of the given name.
+ """
+ aa_policy_id = module.params.get('anti_affinity_policy_id')
+ aa_policy_name = module.params.get('anti_affinity_policy_name')
+ if not aa_policy_id and aa_policy_name:
+ alias = module.params.get('alias')
+ aa_policy_id = ClcServer._get_anti_affinity_policy_id(
+ clc,
+ module,
+ alias,
+ aa_policy_name)
+ if not aa_policy_id:
+ module.fail_json(
+                msg='No anti affinity policy was found with policy name: %s' % aa_policy_name)
+ return aa_policy_id
+
+ @staticmethod
+ def _find_alert_policy_id(clc, module):
+ """
+        Validate that the alert policy exists for the given name, and fail if not
+ :param clc: the clc-sdk instance
+ :param module: the module to validate
+ :return: alert_policy_id: the alert policy id of the given name.
+ """
+ alert_policy_id = module.params.get('alert_policy_id')
+ alert_policy_name = module.params.get('alert_policy_name')
+ if not alert_policy_id and alert_policy_name:
+ alias = module.params.get('alias')
+ alert_policy_id = ClcServer._get_alert_policy_id_by_name(
+ clc=clc,
+ module=module,
+ alias=alias,
+ alert_policy_name=alert_policy_name
+ )
+ if not alert_policy_id:
+ module.fail_json(
+                msg='No alert policy exists with name: %s' % alert_policy_name)
+ return alert_policy_id
+
+ def _create_servers(self, module, clc, override_count=None):
+ """
+ Create New Servers in CLC cloud
+ :param module: the AnsibleModule object
+ :param clc: the clc-sdk instance to use
+        :param override_count: if set, create this many servers instead of the module's 'count' param
+        :return: a list of dictionaries with server information about the servers that were created
+ """
+ p = module.params
+ request_list = []
+ servers = []
+ server_dict_array = []
+ created_server_ids = []
+ partial_created_servers_ids = []
+
+ add_public_ip = p.get('add_public_ip')
+ public_ip_protocol = p.get('public_ip_protocol')
+ public_ip_ports = p.get('public_ip_ports')
+
+ params = {
+ 'name': p.get('name'),
+ 'template': p.get('template'),
+ 'group_id': p.get('group'),
+ 'network_id': p.get('network_id'),
+ 'cpu': p.get('cpu'),
+ 'memory': p.get('memory'),
+ 'alias': p.get('alias'),
+ 'password': p.get('password'),
+ 'ip_address': p.get('ip_address'),
+ 'storage_type': p.get('storage_type'),
+ 'type': p.get('type'),
+ 'primary_dns': p.get('primary_dns'),
+ 'secondary_dns': p.get('secondary_dns'),
+ 'additional_disks': p.get('additional_disks'),
+ 'custom_fields': p.get('custom_fields'),
+ 'ttl': p.get('ttl'),
+ 'managed_os': p.get('managed_os'),
+ 'description': p.get('description'),
+ 'source_server_password': p.get('source_server_password'),
+ 'cpu_autoscale_policy_id': p.get('cpu_autoscale_policy_id'),
+ 'anti_affinity_policy_id': p.get('anti_affinity_policy_id'),
+ 'packages': p.get('packages'),
+ 'configuration_id': p.get('configuration_id'),
+ 'os_type': p.get('os_type')
+ }
+
+ count = override_count if override_count else p.get('count')
+
+        changed = count > 0
+
+        if not changed:
+            return server_dict_array, created_server_ids, partial_created_servers_ids, changed
+        for _ in range(count):
+ if not module.check_mode:
+ req = self._create_clc_server(clc=clc,
+ module=module,
+ server_params=params)
+ server = req.requests[0].Server()
+ request_list.append(req)
+ servers.append(server)
+
+ self._wait_for_requests(module, request_list)
+ self._refresh_servers(module, servers)
+
+ ip_failed_servers = self._add_public_ip_to_servers(
+ module=module,
+ should_add_public_ip=add_public_ip,
+ servers=servers,
+ public_ip_protocol=public_ip_protocol,
+ public_ip_ports=public_ip_ports)
+ ap_failed_servers = self._add_alert_policy_to_servers(clc=clc,
+ module=module,
+ servers=servers)
+
+ for server in servers:
+ if server in ip_failed_servers or server in ap_failed_servers:
+ partial_created_servers_ids.append(server.id)
+ else:
+ # reload server details
+ server = clc.v2.Server(server.id)
+ server.data['ipaddress'] = server.details[
+ 'ipAddresses'][0]['internal']
+
+ if add_public_ip and len(server.PublicIPs().public_ips) > 0:
+ server.data['publicip'] = str(
+ server.PublicIPs().public_ips[0])
+ created_server_ids.append(server.id)
+ server_dict_array.append(server.data)
+
+ return server_dict_array, created_server_ids, partial_created_servers_ids, changed
+
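+    # Bookkeeping note for the method above: a server that provisioned but then
+    # failed the public-ip or alert-policy step lands in
+    # partial_created_servers_ids rather than created_server_ids, so callers
+    # can report partially configured servers separately.
+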
+ def _enforce_count(self, module, clc):
+ """
+ Enforce that there is the right number of servers in the provided group.
+        Creates or deletes servers as necessary.
+ :param module: the AnsibleModule object
+ :param clc: the clc-sdk instance to use
+ :return: a list of dictionaries with server information about the servers that were created or deleted
+ """
+ p = module.params
+ changed = False
+ count_group = p.get('count_group')
+ datacenter = ClcServer._find_datacenter(clc, module)
+ exact_count = p.get('exact_count')
+ server_dict_array = []
+ partial_servers_ids = []
+ changed_server_ids = []
+
+ # fail here if the exact count was specified without filtering
+        # on a group, as this may lead to an undesired removal of instances
+ if exact_count and count_group is None:
+ return module.fail_json(
+ msg="you must use the 'count_group' option with exact_count")
+
+ servers, running_servers = ClcServer._find_running_servers_by_group(
+ module, datacenter, count_group)
+
+ if len(running_servers) == exact_count:
+ changed = False
+
+ elif len(running_servers) < exact_count:
+ to_create = exact_count - len(running_servers)
+ server_dict_array, changed_server_ids, partial_servers_ids, changed \
+ = self._create_servers(module, clc, override_count=to_create)
+
+ for server in server_dict_array:
+ running_servers.append(server)
+
+ elif len(running_servers) > exact_count:
+ to_remove = len(running_servers) - exact_count
+ all_server_ids = sorted([x.id for x in running_servers])
+ remove_ids = all_server_ids[0:to_remove]
+
+ (changed, server_dict_array, changed_server_ids) \
+ = ClcServer._delete_servers(module, clc, remove_ids)
+
+ return server_dict_array, changed_server_ids, partial_servers_ids, changed
+
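+    # Scale-down note for the method above: when more servers are running than
+    # exact_count, the server ids are sorted and the lowest-sorting ids are
+    # removed first, so repeated runs delete servers deterministically.
+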
+ @staticmethod
+ def _wait_for_requests(module, request_list):
+ """
+ Block until server provisioning requests are completed.
+ :param module: the AnsibleModule object
+ :param request_list: a list of clc-sdk.Request instances
+ :return: none
+ """
+ wait = module.params.get('wait')
+ if wait:
+ # Requests.WaitUntilComplete() returns the count of failed requests
+ failed_requests_count = sum(
+ [request.WaitUntilComplete() for request in request_list])
+
+ if failed_requests_count > 0:
+ module.fail_json(
+ msg='Unable to process server request')
+
+ @staticmethod
+ def _refresh_servers(module, servers):
+ """
+ Loop through a list of servers and refresh them.
+ :param module: the AnsibleModule object
+ :param servers: list of clc-sdk.Server instances to refresh
+ :return: none
+ """
+ for server in servers:
+ try:
+ server.Refresh()
+ except CLCException as ex:
+ module.fail_json(msg='Unable to refresh the server {0}. {1}'.format(
+ server.id, ex.message
+ ))
+
+ @staticmethod
+ def _add_public_ip_to_servers(
+ module,
+ should_add_public_ip,
+ servers,
+ public_ip_protocol,
+ public_ip_ports):
+ """
+ Create a public IP for servers
+ :param module: the AnsibleModule object
+ :param should_add_public_ip: boolean - whether or not to provision a public ip for servers. Skipped if False
+ :param servers: List of servers to add public ips to
+ :param public_ip_protocol: a protocol to allow for the public ips
+ :param public_ip_ports: list of ports to allow for the public ips
+ :return: none
+ """
+ failed_servers = []
+ if not should_add_public_ip:
+ return failed_servers
+
+ ports_lst = []
+ request_list = []
+ server = None
+
+ for port in public_ip_ports:
+ ports_lst.append(
+ {'protocol': public_ip_protocol, 'port': port})
+ try:
+ if not module.check_mode:
+ for server in servers:
+ request = server.PublicIPs().Add(ports_lst)
+ request_list.append(request)
+ except APIFailedResponse:
+ failed_servers.append(server)
+ ClcServer._wait_for_requests(module, request_list)
+ return failed_servers
+
+ @staticmethod
+ def _add_alert_policy_to_servers(clc, module, servers):
+ """
+ Associate the alert policy to servers
+ :param clc: the clc-sdk instance to use
+ :param module: the AnsibleModule object
+ :param servers: List of servers to add alert policy to
+ :return: failed_servers: the list of servers which failed while associating alert policy
+ """
+ failed_servers = []
+ p = module.params
+ alert_policy_id = p.get('alert_policy_id')
+ alias = p.get('alias')
+
+ if alert_policy_id and not module.check_mode:
+ for server in servers:
+ try:
+ ClcServer._add_alert_policy_to_server(
+ clc=clc,
+ alias=alias,
+ server_id=server.id,
+ alert_policy_id=alert_policy_id)
+ except CLCException:
+ failed_servers.append(server)
+ return failed_servers
+
+ @staticmethod
+ def _add_alert_policy_to_server(
+ clc, alias, server_id, alert_policy_id):
+ """
+ Associate an alert policy to a clc server
+ :param clc: the clc-sdk instance to use
+ :param alias: the clc account alias
+ :param server_id: The clc server id
+ :param alert_policy_id: the alert policy id to be associated to the server
+ :return: none
+ """
+ try:
+ clc.v2.API.Call(
+ method='POST',
+ url='servers/%s/%s/alertPolicies' % (alias, server_id),
+ payload=json.dumps(
+ {
+ 'id': alert_policy_id
+ }))
+ except APIFailedResponse as e:
+ raise CLCException(
+                'Failed to associate alert policy to the server: {0} with error: {1}'.format(
+ server_id, str(e.response_text)))
+
+ @staticmethod
+ def _get_alert_policy_id_by_name(clc, module, alias, alert_policy_name):
+ """
+ Returns the alert policy id for the given alert policy name
+ :param clc: the clc-sdk instance to use
+ :param module: the AnsibleModule object
+ :param alias: the clc account alias
+ :param alert_policy_name: the name of the alert policy
+ :return: alert_policy_id: the alert policy id
+ """
+ alert_policy_id = None
+ policies = clc.v2.API.Call('GET', '/v2/alertPolicies/%s' % alias)
+ if not policies:
+ return alert_policy_id
+ for policy in policies.get('items'):
+ if policy.get('name') == alert_policy_name:
+ if not alert_policy_id:
+ alert_policy_id = policy.get('id')
+ else:
+ return module.fail_json(
+                    msg='Multiple alert policies were found with policy name: %s' % alert_policy_name)
+ return alert_policy_id
+
+ @staticmethod
+ def _delete_servers(module, clc, server_ids):
+ """
+ Delete the servers on the provided list
+ :param module: the AnsibleModule object
+ :param clc: the clc-sdk instance to use
+ :param server_ids: list of servers to delete
+ :return: a list of dictionaries with server information about the servers that were deleted
+ """
+ terminated_server_ids = []
+ server_dict_array = []
+ request_list = []
+
+ if not isinstance(server_ids, list) or len(server_ids) < 1:
+ return module.fail_json(
+ msg='server_ids should be a list of servers, aborting')
+
+ servers = clc.v2.Servers(server_ids).Servers()
+ for server in servers:
+ if not module.check_mode:
+ request_list.append(server.Delete())
+ ClcServer._wait_for_requests(module, request_list)
+
+ for server in servers:
+ terminated_server_ids.append(server.id)
+
+ return True, server_dict_array, terminated_server_ids
+
+ @staticmethod
+ def _start_stop_servers(module, clc, server_ids):
+ """
+ Start or Stop the servers on the provided list
+ :param module: the AnsibleModule object
+ :param clc: the clc-sdk instance to use
+ :param server_ids: list of servers to start or stop
+ :return: a list of dictionaries with server information about the servers that were started or stopped
+ """
+ p = module.params
+ state = p.get('state')
+ changed = False
+ changed_servers = []
+ server_dict_array = []
+ result_server_ids = []
+ request_list = []
+
+ if not isinstance(server_ids, list) or len(server_ids) < 1:
+ return module.fail_json(
+ msg='server_ids should be a list of servers, aborting')
+
+ servers = clc.v2.Servers(server_ids).Servers()
+ for server in servers:
+ if server.powerState != state:
+ changed_servers.append(server)
+ if not module.check_mode:
+ request_list.append(
+ ClcServer._change_server_power_state(
+ module,
+ server,
+ state))
+ changed = True
+
+ ClcServer._wait_for_requests(module, request_list)
+ ClcServer._refresh_servers(module, changed_servers)
+
+ for server in set(changed_servers + servers):
+ try:
+ server.data['ipaddress'] = server.details[
+ 'ipAddresses'][0]['internal']
+ server.data['publicip'] = str(
+ server.PublicIPs().public_ips[0])
+ except (KeyError, IndexError):
+ pass
+
+ server_dict_array.append(server.data)
+ result_server_ids.append(server.id)
+
+ return changed, server_dict_array, result_server_ids
+
+ @staticmethod
+ def _change_server_power_state(module, server, state):
+ """
+ Change the server powerState
+ :param module: the module to check for intended state
+ :param server: the server to start or stop
+ :param state: the intended powerState for the server
+ :return: the request object from clc-sdk call
+ """
+ result = None
+ try:
+ if state == 'started':
+ result = server.PowerOn()
+ else:
+ # Try to shut down the server and fall back to power off when unable to shut down.
+ result = server.ShutDown()
+ if result and hasattr(result, 'requests') and result.requests[0]:
+ return result
+ else:
+ result = server.PowerOff()
+ except CLCException:
+ module.fail_json(
+ msg='Unable to change power state for server {0}'.format(
+ server.id))
+ return result
+
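+    # Shutdown fallback sketch for the method above: ShutDown() requests a
+    # graceful guest OS shutdown; when it does not yield a request object
+    # (e.g. the guest cannot honor it), the code falls back to the hard
+    # PowerOff(). Example call (names from this module):
+    #   req = ClcServer._change_server_power_state(module, server, 'stopped')
+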
+ @staticmethod
+ def _find_running_servers_by_group(module, datacenter, count_group):
+ """
+ Find a list of running servers in the provided group
+ :param module: the AnsibleModule object
+ :param datacenter: the clc-sdk.Datacenter instance to use to lookup the group
+ :param count_group: the group to count the servers
+ :return: list of servers, and list of running servers
+ """
+ group = ClcServer._find_group(
+ module=module,
+ datacenter=datacenter,
+ lookup_group=count_group)
+
+ servers = group.Servers().Servers()
+ running_servers = []
+
+ for server in servers:
+ if server.status == 'active' and server.powerState == 'started':
+ running_servers.append(server)
+
+ return servers, running_servers
+
+ @staticmethod
+ def _find_group(module, datacenter, lookup_group=None):
+ """
+ Find a server group in a datacenter by calling the CLC API
+ :param module: the AnsibleModule instance
+ :param datacenter: clc-sdk.Datacenter instance to search for the group
+ :param lookup_group: string name of the group to search for
+ :return: clc-sdk.Group instance
+ """
+ if not lookup_group:
+ lookup_group = module.params.get('group')
+ try:
+ return datacenter.Groups().Get(lookup_group)
+ except CLCException:
+ pass
+
+        # The search above only acts on the top level of the group tree;
+        # fall back to a recursive search through all subgroups
+ result = ClcServer._find_group_recursive(
+ module,
+ datacenter.Groups(),
+ lookup_group)
+
+ if result is None:
+            module.fail_json(
+                msg="Unable to find group: {0} in location: {1}".format(
+                    lookup_group, datacenter.id))
+
+ return result
+
+ @staticmethod
+ def _find_group_recursive(module, group_list, lookup_group):
+ """
+ Find a server group by recursively walking the tree
+ :param module: the AnsibleModule instance to use
+ :param group_list: a list of groups to search
+ :param lookup_group: the group to look for
+        :return: the matching clc-sdk.Group instance, or None if no group matches
+ """
+ result = None
+ for group in group_list.groups:
+ subgroups = group.Subgroups()
+ try:
+ return subgroups.Get(lookup_group)
+ except CLCException:
+ result = ClcServer._find_group_recursive(
+ module,
+ subgroups,
+ lookup_group)
+
+ if result is not None:
+ break
+
+ return result
+
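+    # The recursion above is a depth-first search: each group's Subgroups() is
+    # probed with Get(), the first match found anywhere in the tree is
+    # returned, and None propagates up when no subgroup matches.
+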
+ @staticmethod
+ def _create_clc_server(
+ clc,
+ module,
+ server_params):
+ """
+ Call the CLC Rest API to Create a Server
+ :param clc: the clc-python-sdk instance to use
+ :param module: the AnsibleModule instance to use
+ :param server_params: a dictionary of params to use to create the servers
+ :return: clc-sdk.Request object linked to the queued server request
+ """
+
+ try:
+ res = clc.v2.API.Call(
+ method='POST',
+ url='servers/%s' %
+ (server_params.get('alias')),
+ payload=json.dumps(
+ {
+ 'name': server_params.get('name'),
+ 'description': server_params.get('description'),
+ 'groupId': server_params.get('group_id'),
+ 'sourceServerId': server_params.get('template'),
+ 'isManagedOS': server_params.get('managed_os'),
+ 'primaryDNS': server_params.get('primary_dns'),
+ 'secondaryDNS': server_params.get('secondary_dns'),
+ 'networkId': server_params.get('network_id'),
+ 'ipAddress': server_params.get('ip_address'),
+ 'password': server_params.get('password'),
+ 'sourceServerPassword': server_params.get('source_server_password'),
+ 'cpu': server_params.get('cpu'),
+ 'cpuAutoscalePolicyId': server_params.get('cpu_autoscale_policy_id'),
+ 'memoryGB': server_params.get('memory'),
+ 'type': server_params.get('type'),
+ 'storageType': server_params.get('storage_type'),
+ 'antiAffinityPolicyId': server_params.get('anti_affinity_policy_id'),
+ 'customFields': server_params.get('custom_fields'),
+ 'additionalDisks': server_params.get('additional_disks'),
+ 'ttl': server_params.get('ttl'),
+ 'packages': server_params.get('packages'),
+ 'configurationId': server_params.get('configuration_id'),
+ 'osType': server_params.get('os_type')}))
+
+ result = clc.v2.Requests(res)
+ except APIFailedResponse as ex:
+ return module.fail_json(msg='Unable to create the server: {0}. {1}'.format(
+ server_params.get('name'),
+ ex.response_text
+ ))
+
+        # Patch the Request object so that it returns a valid server
+
+ # Find the server's UUID from the API response
+ server_uuid = [obj['id']
+ for obj in res['links'] if obj['rel'] == 'self'][0]
+
+        # Replace the request's Server() method with a closure that resolves the
+        # server by UUID, the only identifier available while it is provisioning
+ result.requests[0].Server = lambda: ClcServer._find_server_by_uuid_w_retry(
+ clc,
+ module,
+ server_uuid,
+ server_params.get('alias'))
+
+ return result
+
+ @staticmethod
+ def _get_anti_affinity_policy_id(clc, module, alias, aa_policy_name):
+ """
+        Retrieve the anti affinity policy id for the given policy name
+ :param clc: the clc-sdk instance to use
+ :param module: the AnsibleModule object
+ :param alias: the CLC account alias
+ :param aa_policy_name: the anti affinity policy name
+ :return: aa_policy_id: The anti affinity policy id
+ """
+ aa_policy_id = None
+ try:
+ aa_policies = clc.v2.API.Call(method='GET',
+ url='antiAffinityPolicies/%s' % alias)
+ except APIFailedResponse as ex:
+ return module.fail_json(msg='Unable to fetch anti affinity policies for account: {0}. {1}'.format(
+ alias, ex.response_text))
+ for aa_policy in aa_policies.get('items'):
+ if aa_policy.get('name') == aa_policy_name:
+ if not aa_policy_id:
+ aa_policy_id = aa_policy.get('id')
+ else:
+ return module.fail_json(
+                    msg='Multiple anti affinity policies were found with policy name: %s' % aa_policy_name)
+ return aa_policy_id
+
+ #
+    # This is the function that gets patched onto the Request object's Server method using a lambda closure
+ #
+
+ @staticmethod
+ def _find_server_by_uuid_w_retry(
+ clc, module, svr_uuid, alias=None, retries=5, back_out=2):
+ """
+ Find the clc server by the UUID returned from the provisioning request. Retry the request if a 404 is returned.
+ :param clc: the clc-sdk instance to use
+ :param module: the AnsibleModule object
+ :param svr_uuid: UUID of the server
+        :param alias: the Account Alias to search
+        :param retries: the number of retry attempts to make before failing. default is 5
+        :param back_out: the initial wait in seconds between retries; doubled after each attempt
+ :return: a clc-sdk.Server instance
+ """
+ if not alias:
+ alias = clc.v2.Account.GetAlias()
+
+ # Wait and retry if the api returns a 404
+ while True:
+ retries -= 1
+ try:
+ server_obj = clc.v2.API.Call(
+ method='GET', url='servers/%s/%s?uuid=true' %
+ (alias, svr_uuid))
+ server_id = server_obj['id']
+ server = clc.v2.Server(
+ id=server_id,
+ alias=alias,
+ server_obj=server_obj)
+ return server
+
+ except APIFailedResponse as e:
+ if e.response_status_code != 404:
+ return module.fail_json(
+ msg='A failure response was received from CLC API when '
+ 'attempting to get details for a server: UUID=%s, Code=%i, Message=%s' %
+ (svr_uuid, e.response_status_code, e.message))
+ if retries == 0:
+ return module.fail_json(
+ msg='Unable to reach the CLC API after 5 attempts')
+ sleep(back_out)
+ back_out *= 2
+
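+    # With the defaults (retries=5, back_out=2) the loop above makes five
+    # attempts, sleeping 2, 4, 8 and 16 seconds between them: an exponential
+    # backoff while the freshly queued server still returns 404 from the API.
+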
+ @staticmethod
+ def _set_user_agent(clc):
+ if hasattr(clc, 'SetRequestsSession'):
+ agent_string = "ClcAnsibleModule/" + __version__
+ ses = requests.Session()
+ ses.headers.update({"Api-Client": agent_string})
+ ses.headers['User-Agent'] += " " + agent_string
+ clc.SetRequestsSession(ses)
+
+
+def main():
+ """
+ The main function. Instantiates the module and calls process_request.
+ :return: none
+ """
+ argument_dict = ClcServer._define_module_argument_spec()
+ module = AnsibleModule(supports_check_mode=True, **argument_dict)
+ clc_server = ClcServer(module)
+ clc_server.process_request()
+
+from ansible.module_utils.basic import * # pylint: disable=W0614
+if __name__ == '__main__':
+ main()
diff --git a/lib/ansible/modules/extras/cloud/centurylink/clc_server_snapshot.py b/lib/ansible/modules/extras/cloud/centurylink/clc_server_snapshot.py
new file mode 100644
index 0000000000..6c7e8920e4
--- /dev/null
+++ b/lib/ansible/modules/extras/cloud/centurylink/clc_server_snapshot.py
@@ -0,0 +1,413 @@
+#!/usr/bin/python
+
+#
+# Copyright (c) 2015 CenturyLink
+#
+# This file is part of Ansible.
+#
+# Ansible is free software: you can redistribute it and/or modify
+# it under the terms of the GNU General Public License as published by
+# the Free Software Foundation, either version 3 of the License, or
+# (at your option) any later version.
+#
+# Ansible is distributed in the hope that it will be useful,
+# but WITHOUT ANY WARRANTY; without even the implied warranty of
+# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+# GNU General Public License for more details.
+#
+# You should have received a copy of the GNU General Public License
+# along with Ansible. If not, see <http://www.gnu.org/licenses/>
+#
+
+DOCUMENTATION = '''
+module: clc_server_snapshot
+short_description: Create, Delete and Restore server snapshots in CenturyLink Cloud.
+description:
+ - An Ansible module to Create, Delete and Restore server snapshots in CenturyLink Cloud.
+version_added: "2.0"
+options:
+ server_ids:
+ description:
+ - The list of CLC server Ids.
+ required: True
+ expiration_days:
+ description:
+ - The number of days to keep the server snapshot before it expires.
+ default: 7
+ required: False
+ state:
+ description:
+      - The state to ensure that the provided resources are in.
+ default: 'present'
+ required: False
+ choices: ['present', 'absent', 'restore']
+ wait:
+ description:
+ - Whether to wait for the provisioning tasks to finish before returning.
+ default: True
+ required: False
+ choices: [True, False]
+requirements:
+ - python = 2.7
+ - requests >= 2.5.0
+ - clc-sdk
+author: "CLC Runner (@clc-runner)"
+notes:
+ - To use this module, it is required to set the below environment variables which enables access to the
+ Centurylink Cloud
+ - CLC_V2_API_USERNAME, the account login id for the centurylink cloud
+ - CLC_V2_API_PASSWORD, the account password for the centurylink cloud
+ - Alternatively, the module accepts the API token and account alias. The API token can be generated using the
+ CLC account login and password via the HTTP api call @ https://api.ctl.io/v2/authentication/login
+ - CLC_V2_API_TOKEN, the API token generated from https://api.ctl.io/v2/authentication/login
+ - CLC_ACCT_ALIAS, the account alias associated with the centurylink cloud
+ - Users can set CLC_V2_API_URL to specify an endpoint for pointing to a different CLC environment.
+'''
+
+EXAMPLES = '''
+# Note - You must set the CLC_V2_API_USERNAME and CLC_V2_API_PASSWD environment variables before running these examples
+
+- name: Create server snapshot
+ clc_server_snapshot:
+ server_ids:
+ - UC1TEST-SVR01
+ - UC1TEST-SVR02
+ expiration_days: 10
+ wait: True
+ state: present
+
+- name: Restore server snapshot
+ clc_server_snapshot:
+ server_ids:
+ - UC1TEST-SVR01
+ - UC1TEST-SVR02
+ wait: True
+ state: restore
+
+- name: Delete server snapshot
+ clc_server_snapshot:
+ server_ids:
+ - UC1TEST-SVR01
+ - UC1TEST-SVR02
+ wait: True
+ state: absent
+'''
+
+RETURN = '''
+server_ids:
+ description: The list of server ids that are changed
+ returned: success
+ type: list
+ sample:
+ [
+ "UC1TEST-SVR01",
+ "UC1TEST-SVR02"
+ ]
+'''
+
+__version__ = '${version}'
+
+from distutils.version import LooseVersion
+
+try:
+ import requests
+except ImportError:
+ REQUESTS_FOUND = False
+else:
+ REQUESTS_FOUND = True
+
+#
+# Requires the clc-python-sdk.
+# sudo pip install clc-sdk
+#
+try:
+ import clc as clc_sdk
+ from clc import CLCException
+except ImportError:
+ CLC_FOUND = False
+ clc_sdk = None
+else:
+ CLC_FOUND = True
+
+
+class ClcSnapshot:
+
+ clc = clc_sdk
+ module = None
+
+ def __init__(self, module):
+ """
+ Construct module
+ """
+ self.module = module
+
+ if not CLC_FOUND:
+ self.module.fail_json(
+ msg='clc-python-sdk required for this module')
+ if not REQUESTS_FOUND:
+ self.module.fail_json(
+ msg='requests library is required for this module')
+ if requests.__version__ and LooseVersion(
+ requests.__version__) < LooseVersion('2.5.0'):
+ self.module.fail_json(
+ msg='requests library version should be >= 2.5.0')
+
+ self._set_user_agent(self.clc)
+
+ def process_request(self):
+ """
+ Process the request - Main Code Path
+ :return: Returns with either an exit_json or fail_json
+ """
+ p = self.module.params
+ server_ids = p['server_ids']
+ expiration_days = p['expiration_days']
+ state = p['state']
+ request_list = []
+ changed = False
+ changed_servers = []
+
+ self._set_clc_credentials_from_env()
+ if state == 'present':
+ changed, request_list, changed_servers = self.ensure_server_snapshot_present(
+ server_ids=server_ids,
+ expiration_days=expiration_days)
+ elif state == 'absent':
+ changed, request_list, changed_servers = self.ensure_server_snapshot_absent(
+ server_ids=server_ids)
+ elif state == 'restore':
+ changed, request_list, changed_servers = self.ensure_server_snapshot_restore(
+ server_ids=server_ids)
+
+ self._wait_for_requests_to_complete(request_list)
+ return self.module.exit_json(
+ changed=changed,
+ server_ids=changed_servers)
+
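+    # Dispatch note for process_request() above: 'present' creates snapshots,
+    # 'absent' deletes them and 'restore' rolls servers back; each branch
+    # returns the same (changed, request_list, changed_servers) triple.
+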
+ def ensure_server_snapshot_present(self, server_ids, expiration_days):
+ """
+ Ensures the given set of server_ids have the snapshots created
+ :param server_ids: The list of server_ids to create the snapshot
+ :param expiration_days: The number of days to keep the snapshot
+ :return: (changed, request_list, changed_servers)
+ changed: A flag indicating whether any change was made
+ request_list: the list of clc request objects from CLC API call
+            changed_servers: The list of server ids that are modified
+ """
+ request_list = []
+ changed = False
+ servers = self._get_servers_from_clc(
+ server_ids,
+ 'Failed to obtain server list from the CLC API')
+ servers_to_change = [
+ server for server in servers if len(
+ server.GetSnapshots()) == 0]
+ for server in servers_to_change:
+ changed = True
+ if not self.module.check_mode:
+ request = self._create_server_snapshot(server, expiration_days)
+ request_list.append(request)
+ changed_servers = [
+ server.id for server in servers_to_change if server.id]
+ return changed, request_list, changed_servers
+
+ def _create_server_snapshot(self, server, expiration_days):
+ """
+ Create the snapshot for the CLC server
+ :param server: the CLC server object
+ :param expiration_days: The number of days to keep the snapshot
+ :return: the create request object from CLC API Call
+ """
+ result = None
+ try:
+ result = server.CreateSnapshot(
+ delete_existing=True,
+ expiration_days=expiration_days)
+ except CLCException as ex:
+ self.module.fail_json(msg='Failed to create snapshot for server : {0}. {1}'.format(
+ server.id, ex.response_text
+ ))
+ return result
+
+ def ensure_server_snapshot_absent(self, server_ids):
+ """
+ Ensures the given set of server_ids have the snapshots removed
+ :param server_ids: The list of server_ids to delete the snapshot
+ :return: (changed, request_list, changed_servers)
+ changed: A flag indicating whether any change was made
+ request_list: the list of clc request objects from CLC API call
+            changed_servers: The list of server ids that are modified
+ """
+ request_list = []
+ changed = False
+ servers = self._get_servers_from_clc(
+ server_ids,
+ 'Failed to obtain server list from the CLC API')
+ servers_to_change = [
+ server for server in servers if len(
+ server.GetSnapshots()) > 0]
+ for server in servers_to_change:
+ changed = True
+ if not self.module.check_mode:
+ request = self._delete_server_snapshot(server)
+ request_list.append(request)
+ changed_servers = [
+ server.id for server in servers_to_change if server.id]
+ return changed, request_list, changed_servers
+
+ def _delete_server_snapshot(self, server):
+ """
+ Delete snapshot for the CLC server
+ :param server: the CLC server object
+ :return: the delete snapshot request object from CLC API
+ """
+ result = None
+ try:
+ result = server.DeleteSnapshot()
+ except CLCException as ex:
+ self.module.fail_json(msg='Failed to delete snapshot for server : {0}. {1}'.format(
+ server.id, ex.response_text
+ ))
+ return result
+
+ def ensure_server_snapshot_restore(self, server_ids):
+ """
+ Ensures the given set of server_ids have the snapshots restored
+        :param server_ids: The list of server_ids to restore the snapshot
+ :return: (changed, request_list, changed_servers)
+ changed: A flag indicating whether any change was made
+ request_list: the list of clc request objects from CLC API call
+            changed_servers: The list of server ids that are modified
+ """
+ request_list = []
+ changed = False
+ servers = self._get_servers_from_clc(
+ server_ids,
+ 'Failed to obtain server list from the CLC API')
+ servers_to_change = [
+ server for server in servers if len(
+ server.GetSnapshots()) > 0]
+ for server in servers_to_change:
+ changed = True
+ if not self.module.check_mode:
+ request = self._restore_server_snapshot(server)
+ request_list.append(request)
+ changed_servers = [
+ server.id for server in servers_to_change if server.id]
+ return changed, request_list, changed_servers
+
+ def _restore_server_snapshot(self, server):
+ """
+ Restore snapshot for the CLC server
+ :param server: the CLC server object
+ :return: the restore snapshot request object from CLC API
+ """
+ result = None
+ try:
+ result = server.RestoreSnapshot()
+ except CLCException as ex:
+ self.module.fail_json(msg='Failed to restore snapshot for server : {0}. {1}'.format(
+ server.id, ex.response_text
+ ))
+ return result
+
+ def _wait_for_requests_to_complete(self, requests_lst):
+ """
+ Waits until the CLC requests are complete if the wait argument is True
+ :param requests_lst: The list of CLC request objects
+ :return: none
+ """
+ if not self.module.params['wait']:
+ return
+ for request in requests_lst:
+ request.WaitUntilComplete()
+ for request_details in request.requests:
+ if request_details.Status() != 'succeeded':
+ self.module.fail_json(
+ msg='Unable to process server snapshot request')
+
+ @staticmethod
+ def define_argument_spec():
+ """
+        This function defines the argument spec dictionary required by
+        this module
+        :return: the argument spec dictionary
+ """
+ argument_spec = dict(
+ server_ids=dict(type='list', required=True),
+            expiration_days=dict(type='int', default=7),
+            wait=dict(type='bool', default=True),
+ state=dict(
+ default='present',
+ choices=[
+ 'present',
+ 'absent',
+ 'restore']),
+ )
+ return argument_spec
+
+ def _get_servers_from_clc(self, server_list, message):
+ """
+ Internal function to fetch list of CLC server objects from a list of server ids
+ :param server_list: The list of server ids
+ :param message: The error message to throw in case of any error
+        :return: the list of CLC server objects
+ """
+ try:
+ return self.clc.v2.Servers(server_list).servers
+ except CLCException as ex:
+ return self.module.fail_json(msg=message + ': %s' % ex)
+
+ def _set_clc_credentials_from_env(self):
+ """
+ Set the CLC Credentials on the sdk by reading environment variables
+ :return: none
+ """
+ env = os.environ
+ v2_api_token = env.get('CLC_V2_API_TOKEN', False)
+ v2_api_username = env.get('CLC_V2_API_USERNAME', False)
+ v2_api_passwd = env.get('CLC_V2_API_PASSWD', False)
+ clc_alias = env.get('CLC_ACCT_ALIAS', False)
+ api_url = env.get('CLC_V2_API_URL', False)
+
+ if api_url:
+ self.clc.defaults.ENDPOINT_URL_V2 = api_url
+
+ if v2_api_token and clc_alias:
+ self.clc._LOGIN_TOKEN_V2 = v2_api_token
+ self.clc._V2_ENABLED = True
+ self.clc.ALIAS = clc_alias
+ elif v2_api_username and v2_api_passwd:
+ self.clc.v2.SetCredentials(
+ api_username=v2_api_username,
+ api_passwd=v2_api_passwd)
+ else:
+ return self.module.fail_json(
+ msg="You must set the CLC_V2_API_USERNAME and CLC_V2_API_PASSWD "
+ "environment variables")
+
+ @staticmethod
+ def _set_user_agent(clc):
+ if hasattr(clc, 'SetRequestsSession'):
+ agent_string = "ClcAnsibleModule/" + __version__
+ ses = requests.Session()
+ ses.headers.update({"Api-Client": agent_string})
+ ses.headers['User-Agent'] += " " + agent_string
+ clc.SetRequestsSession(ses)
+
+
+def main():
+ """
+ Main function
+ :return: None
+ """
+ module = AnsibleModule(
+ argument_spec=ClcSnapshot.define_argument_spec(),
+ supports_check_mode=True
+ )
+ clc_snapshot = ClcSnapshot(module)
+ clc_snapshot.process_request()
+
+from ansible.module_utils.basic import *
+if __name__ == '__main__':
+ main()
diff --git a/lib/ansible/modules/extras/cloud/cloudstack/__init__.py b/lib/ansible/modules/extras/cloud/cloudstack/__init__.py
new file mode 100644
index 0000000000..e69de29bb2
--- /dev/null
+++ b/lib/ansible/modules/extras/cloud/cloudstack/__init__.py
diff --git a/lib/ansible/modules/extras/cloud/cloudstack/cs_account.py b/lib/ansible/modules/extras/cloud/cloudstack/cs_account.py
new file mode 100644
index 0000000000..d4b27dea79
--- /dev/null
+++ b/lib/ansible/modules/extras/cloud/cloudstack/cs_account.py
@@ -0,0 +1,381 @@
+#!/usr/bin/python
+# -*- coding: utf-8 -*-
+#
+# (c) 2015, René Moser <mail@renemoser.net>
+#
+# This file is part of Ansible
+#
+# Ansible is free software: you can redistribute it and/or modify
+# it under the terms of the GNU General Public License as published by
+# the Free Software Foundation, either version 3 of the License, or
+# (at your option) any later version.
+#
+# Ansible is distributed in the hope that it will be useful,
+# but WITHOUT ANY WARRANTY; without even the implied warranty of
+# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+# GNU General Public License for more details.
+#
+# You should have received a copy of the GNU General Public License
+# along with Ansible. If not, see <http://www.gnu.org/licenses/>.
+
+DOCUMENTATION = '''
+---
+module: cs_account
+short_description: Manages accounts on Apache CloudStack based clouds.
+description:
+ - Create, disable, lock, enable and remove accounts.
+version_added: '2.0'
+author: "René Moser (@resmo)"
+options:
+ name:
+ description:
+ - Name of account.
+ required: true
+ username:
+ description:
+ - Username of the user to be created if account did not exist.
+ - Required on C(state=present).
+ required: false
+ default: null
+ password:
+ description:
+ - Password of the user to be created if account did not exist.
+ - Required on C(state=present).
+ required: false
+ default: null
+ first_name:
+ description:
+ - First name of the user to be created if account did not exist.
+ - Required on C(state=present).
+ required: false
+ default: null
+ last_name:
+ description:
+ - Last name of the user to be created if account did not exist.
+ - Required on C(state=present).
+ required: false
+ default: null
+ email:
+ description:
+ - Email of the user to be created if account did not exist.
+ - Required on C(state=present).
+ required: false
+ default: null
+ timezone:
+ description:
+ - Timezone of the user to be created if account did not exist.
+ required: false
+ default: null
+ network_domain:
+ description:
+ - Network domain of the account.
+ required: false
+ default: null
+ account_type:
+ description:
+ - Type of the account.
+ required: false
+ default: 'user'
+ choices: [ 'user', 'root_admin', 'domain_admin' ]
+ domain:
+ description:
+ - Domain the account is related to.
+ required: false
+ default: 'ROOT'
+ state:
+ description:
+ - State of the account.
+ - C(unlocked) is an alias for C(enabled).
+ required: false
+ default: 'present'
+ choices: [ 'present', 'absent', 'enabled', 'disabled', 'locked', 'unlocked' ]
+ poll_async:
+ description:
+ - Poll async jobs until job has finished.
+ required: false
+ default: true
+extends_documentation_fragment: cloudstack
+'''
+
+EXAMPLES = '''
+# create an account in domain 'CUSTOMERS'
+local_action:
+ module: cs_account
+ name: customer_xy
+ username: customer_xy
+ password: S3Cur3
+ last_name: Doe
+ first_name: John
+ email: john.doe@example.com
+ domain: CUSTOMERS
+
+# Lock an existing account in domain 'CUSTOMERS'
+local_action:
+ module: cs_account
+ name: customer_xy
+ domain: CUSTOMERS
+ state: locked
+
+# Disable an existing account in domain 'CUSTOMERS'
+local_action:
+ module: cs_account
+ name: customer_xy
+ domain: CUSTOMERS
+ state: disabled
+
+# Enable an existing account in domain 'CUSTOMERS'
+local_action:
+ module: cs_account
+ name: customer_xy
+ domain: CUSTOMERS
+ state: enabled
+
+# Remove an account in domain 'CUSTOMERS'
+local_action:
+ module: cs_account
+ name: customer_xy
+ domain: CUSTOMERS
+ state: absent
+'''
+
+RETURN = '''
+---
+id:
+ description: UUID of the account.
+ returned: success
+ type: string
+ sample: 87b1e0ce-4e01-11e4-bb66-0050569e64b8
+name:
+ description: Name of the account.
+ returned: success
+ type: string
+ sample: linus@example.com
+account_type:
+ description: Type of the account.
+ returned: success
+ type: string
+ sample: user
+state:
+ description: State of the account.
+ returned: success
+ type: string
+ sample: enabled
+network_domain:
+ description: Network domain of the account.
+ returned: success
+ type: string
+ sample: example.local
+domain:
+  description: Domain the account is related to.
+ returned: success
+ type: string
+ sample: ROOT
+'''
+
+# import cloudstack common
+from ansible.module_utils.cloudstack import *
+
+
+class AnsibleCloudStackAccount(AnsibleCloudStack):
+
+ def __init__(self, module):
+ super(AnsibleCloudStackAccount, self).__init__(module)
+ self.returns = {
+ 'networkdomain': 'network_domain',
+ }
+ self.account = None
+ self.account_types = {
+ 'user': 0,
+ 'root_admin': 1,
+ 'domain_admin': 2,
+ }
+
+ def get_account_type(self):
+ account_type = self.module.params.get('account_type')
+ return self.account_types[account_type]
+
+ def get_account(self):
+ if not self.account:
+ args = {
+ 'listall': True,
+ 'domainid': self.get_domain(key='id'),
+ }
+ accounts = self.cs.listAccounts(**args)
+ if accounts:
+ account_name = self.module.params.get('name')
+ for a in accounts['account']:
+ if account_name == a['name']:
+ self.account = a
+ break
+
+ return self.account
+
+ def enable_account(self):
+ account = self.get_account()
+ if not account:
+ account = self.present_account()
+
+ if account['state'].lower() != 'enabled':
+ self.result['changed'] = True
+ args = {
+ 'id': account['id'],
+ 'account': self.module.params.get('name'),
+ 'domainid': self.get_domain(key='id')
+ }
+ if not self.module.check_mode:
+ res = self.cs.enableAccount(**args)
+ if 'errortext' in res:
+ self.module.fail_json(msg="Failed: '%s'" % res['errortext'])
+ account = res['account']
+ return account
+
+ def lock_account(self):
+ return self.lock_or_disable_account(lock=True)
+
+ def disable_account(self):
+ return self.lock_or_disable_account()
+
+ def lock_or_disable_account(self, lock=False):
+ account = self.get_account()
+ if not account:
+ account = self.present_account()
+
+ # we need to enable the account to lock it.
+ if lock and account['state'].lower() == 'disabled':
+ account = self.enable_account()
+
+ if (lock and account['state'].lower() != 'locked' or
+ not lock and account['state'].lower() != 'disabled'):
+ self.result['changed'] = True
+ args = {
+ 'id': account['id'],
+ 'account': self.module.params.get('name'),
+ 'domainid': self.get_domain(key='id'),
+ 'lock': lock,
+ }
+ if not self.module.check_mode:
+ account = self.cs.disableAccount(**args)
+
+ if 'errortext' in account:
+ self.module.fail_json(msg="Failed: '%s'" % account['errortext'])
+
+ poll_async = self.module.params.get('poll_async')
+ if poll_async:
+ account = self.poll_job(account, 'account')
+ return account
+
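+    # Flow note for the method above: CloudStack can only lock an enabled
+    # account, so a disabled account is re-enabled first; locking and disabling
+    # then share the same disableAccount() call, differing only in the 'lock'
+    # argument.
+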
+ def present_account(self):
+ required_params = [
+ 'email',
+ 'username',
+ 'password',
+ 'first_name',
+ 'last_name',
+ ]
+ self.module.fail_on_missing_params(required_params=required_params)
+
+ account = self.get_account()
+
+ if not account:
+ self.result['changed'] = True
+
+ args = {
+ 'account': self.module.params.get('name'),
+ 'domainid': self.get_domain(key='id'),
+ 'accounttype': self.get_account_type(),
+ 'networkdomain': self.module.params.get('network_domain'),
+ 'username': self.module.params.get('username'),
+ 'password': self.module.params.get('password'),
+ 'firstname': self.module.params.get('first_name'),
+ 'lastname': self.module.params.get('last_name'),
+ 'email': self.module.params.get('email'),
+ 'timezone': self.module.params.get('timezone')
+ }
+ if not self.module.check_mode:
+ res = self.cs.createAccount(**args)
+ if 'errortext' in res:
+ self.module.fail_json(msg="Failed: '%s'" % res['errortext'])
+ account = res['account']
+ return account
+
+ def absent_account(self):
+ account = self.get_account()
+ if account:
+ self.result['changed'] = True
+
+ if not self.module.check_mode:
+ res = self.cs.deleteAccount(id=account['id'])
+
+ if 'errortext' in res:
+ self.module.fail_json(msg="Failed: '%s'" % res['errortext'])
+
+ poll_async = self.module.params.get('poll_async')
+ if poll_async:
+ self.poll_job(res, 'account')
+ return account
+
+ def get_result(self, account):
+ super(AnsibleCloudStackAccount, self).get_result(account)
+ if account:
+ if 'accounttype' in account:
+ for key, value in self.account_types.items():
+ if value == account['accounttype']:
+ self.result['account_type'] = key
+ break
+ return self.result
+
+
+def main():
+ argument_spec = cs_argument_spec()
+ argument_spec.update(dict(
+ name=dict(required=True),
+ state=dict(choices=['present', 'absent', 'enabled', 'disabled', 'locked', 'unlocked'], default='present'),
+ account_type=dict(choices=['user', 'root_admin', 'domain_admin'], default='user'),
+ network_domain=dict(default=None),
+ domain=dict(default='ROOT'),
+ email=dict(default=None),
+ first_name=dict(default=None),
+ last_name=dict(default=None),
+ username=dict(default=None),
+ password=dict(default=None, no_log=True),
+ timezone=dict(default=None),
+ poll_async=dict(type='bool', default=True),
+ ))
+
+ module = AnsibleModule(
+ argument_spec=argument_spec,
+ required_together=cs_required_together(),
+ supports_check_mode=True
+ )
+
+ try:
+ acs_acc = AnsibleCloudStackAccount(module)
+
+ state = module.params.get('state')
+
+ if state in ['absent']:
+ account = acs_acc.absent_account()
+
+ elif state in ['enabled', 'unlocked']:
+ account = acs_acc.enable_account()
+
+ elif state in ['disabled']:
+ account = acs_acc.disable_account()
+
+ elif state in ['locked']:
+ account = acs_acc.lock_account()
+
+ else:
+ account = acs_acc.present_account()
+
+ result = acs_acc.get_result(account)
+
+ except CloudStackException as e:
+ module.fail_json(msg='CloudStackException: %s' % str(e))
+
+ module.exit_json(**result)
+
+# import module snippets
+from ansible.module_utils.basic import *
+if __name__ == '__main__':
+ main()
diff --git a/lib/ansible/modules/extras/cloud/cloudstack/cs_affinitygroup.py b/lib/ansible/modules/extras/cloud/cloudstack/cs_affinitygroup.py
new file mode 100644
index 0000000000..2ffe2bace1
--- /dev/null
+++ b/lib/ansible/modules/extras/cloud/cloudstack/cs_affinitygroup.py
@@ -0,0 +1,251 @@
+#!/usr/bin/python
+# -*- coding: utf-8 -*-
+#
+# (c) 2015, René Moser <mail@renemoser.net>
+#
+# This file is part of Ansible
+#
+# Ansible is free software: you can redistribute it and/or modify
+# it under the terms of the GNU General Public License as published by
+# the Free Software Foundation, either version 3 of the License, or
+# (at your option) any later version.
+#
+# Ansible is distributed in the hope that it will be useful,
+# but WITHOUT ANY WARRANTY; without even the implied warranty of
+# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+# GNU General Public License for more details.
+#
+# You should have received a copy of the GNU General Public License
+# along with Ansible. If not, see <http://www.gnu.org/licenses/>.
+
+DOCUMENTATION = '''
+---
+module: cs_affinitygroup
+short_description: Manages affinity groups on Apache CloudStack based clouds.
+description:
+ - Create and remove affinity groups.
+version_added: '2.0'
+author: "René Moser (@resmo)"
+options:
+ name:
+ description:
+ - Name of the affinity group.
+ required: true
+ affinty_type:
+ description:
+      - Type of the affinity group. If not specified, the first affinity type found is used.
+ required: false
+ default: null
+ description:
+ description:
+ - Description of the affinity group.
+ required: false
+ default: null
+ state:
+ description:
+ - State of the affinity group.
+ required: false
+ default: 'present'
+ choices: [ 'present', 'absent' ]
+ domain:
+ description:
+ - Domain the affinity group is related to.
+ required: false
+ default: null
+ account:
+ description:
+ - Account the affinity group is related to.
+ required: false
+ default: null
+ project:
+ description:
+ - Name of the project the affinity group is related to.
+ required: false
+ default: null
+ poll_async:
+ description:
+ - Poll async jobs until job has finished.
+ required: false
+ default: true
+extends_documentation_fragment: cloudstack
+'''
+
+EXAMPLES = '''
+# Create an affinity group
+- local_action:
+ module: cs_affinitygroup
+ name: haproxy
+ affinty_type: host anti-affinity
+
+# Remove an affinity group
+- local_action:
+ module: cs_affinitygroup
+ name: haproxy
+ state: absent
+'''
+
+RETURN = '''
+---
+id:
+ description: UUID of the affinity group.
+ returned: success
+ type: string
+ sample: 87b1e0ce-4e01-11e4-bb66-0050569e64b8
+name:
+ description: Name of affinity group.
+ returned: success
+ type: string
+ sample: app
+description:
+ description: Description of affinity group.
+ returned: success
+ type: string
+ sample: application affinity group
+affinity_type:
+ description: Type of affinity group.
+ returned: success
+ type: string
+ sample: host anti-affinity
+project:
+ description: Name of project the affinity group is related to.
+ returned: success
+ type: string
+ sample: Production
+domain:
+ description: Domain the affinity group is related to.
+ returned: success
+ type: string
+ sample: example domain
+account:
+ description: Account the affinity group is related to.
+ returned: success
+ type: string
+ sample: example account
+'''
+
+# import cloudstack common
+from ansible.module_utils.cloudstack import *
+
+
+class AnsibleCloudStackAffinityGroup(AnsibleCloudStack):
+
+ def __init__(self, module):
+ super(AnsibleCloudStackAffinityGroup, self).__init__(module)
+ self.returns = {
+ 'type': 'affinity_type',
+ }
+ self.affinity_group = None
+
+ def get_affinity_group(self):
+ if not self.affinity_group:
+
+ args = {
+ 'projectid': self.get_project(key='id'),
+ 'account': self.get_account(key='name'),
+ 'domainid': self.get_domain(key='id'),
+ 'name': self.module.params.get('name'),
+ }
+ affinity_groups = self.cs.listAffinityGroups(**args)
+ if affinity_groups:
+ self.affinity_group = affinity_groups['affinitygroup'][0]
+ return self.affinity_group
+
+ def get_affinity_type(self):
+ affinity_type = self.module.params.get('affinty_type')
+
+ affinity_types = self.cs.listAffinityGroupTypes()
+ if affinity_types:
+ if not affinity_type:
+ return affinity_types['affinityGroupType'][0]['type']
+
+ for a in affinity_types['affinityGroupType']:
+ if a['type'] == affinity_type:
+ return a['type']
+ self.module.fail_json(msg="affinity group type '%s' not found" % affinity_type)
+
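+    # Lookup note for the method above: listAffinityGroupTypes() returns the
+    # types this cloud supports (e.g. 'host anti-affinity'); when affinty_type
+    # is unset, the first type reported is used as the default.
+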
+ def create_affinity_group(self):
+ affinity_group = self.get_affinity_group()
+ if not affinity_group:
+ self.result['changed'] = True
+
+ args = {
+ 'name': self.module.params.get('name'),
+ 'type': self.get_affinity_type(),
+ 'description': self.module.params.get('description'),
+ 'projectid': self.get_project(key='id'),
+ 'account': self.get_account(key='name'),
+ 'domainid': self.get_domain(key='id'),
+ }
+ if not self.module.check_mode:
+ res = self.cs.createAffinityGroup(**args)
+
+ if 'errortext' in res:
+ self.module.fail_json(msg="Failed: '%s'" % res['errortext'])
+
+ poll_async = self.module.params.get('poll_async')
+ if res and poll_async:
+ affinity_group = self.poll_job(res, 'affinitygroup')
+ return affinity_group
+
+ def remove_affinity_group(self):
+ affinity_group = self.get_affinity_group()
+ if affinity_group:
+ self.result['changed'] = True
+
+ args = {
+ 'name': self.module.params.get('name'),
+ 'projectid': self.get_project(key='id'),
+ 'account': self.get_account(key='name'),
+ 'domainid': self.get_domain(key='id'),
+ }
+ if not self.module.check_mode:
+ res = self.cs.deleteAffinityGroup(**args)
+
+ if 'errortext' in res:
+ self.module.fail_json(msg="Failed: '%s'" % res['errortext'])
+
+ poll_async = self.module.params.get('poll_async')
+ if res and poll_async:
+ self.poll_job(res, 'affinitygroup')
+ return affinity_group
+
+
+def main():
+ argument_spec = cs_argument_spec()
+ argument_spec.update(dict(
+ name=dict(required=True),
+ affinty_type=dict(default=None),
+ description=dict(default=None),
+ state=dict(choices=['present', 'absent'], default='present'),
+ domain=dict(default=None),
+ account=dict(default=None),
+ project=dict(default=None),
+ poll_async=dict(type='bool', default=True),
+ ))
+
+ module = AnsibleModule(
+ argument_spec=argument_spec,
+ required_together=cs_required_together(),
+ supports_check_mode=True
+ )
+
+ try:
+ acs_ag = AnsibleCloudStackAffinityGroup(module)
+
+ state = module.params.get('state')
+ if state in ['absent']:
+ affinity_group = acs_ag.remove_affinity_group()
+ else:
+ affinity_group = acs_ag.create_affinity_group()
+
+ result = acs_ag.get_result(affinity_group)
+
+ except CloudStackException as e:
+ module.fail_json(msg='CloudStackException: %s' % str(e))
+
+ module.exit_json(**result)
+
+# import module snippets
+from ansible.module_utils.basic import *
+if __name__ == '__main__':
+ main()
diff --git a/lib/ansible/modules/extras/cloud/cloudstack/cs_cluster.py b/lib/ansible/modules/extras/cloud/cloudstack/cs_cluster.py
new file mode 100644
index 0000000000..4834c07b65
--- /dev/null
+++ b/lib/ansible/modules/extras/cloud/cloudstack/cs_cluster.py
@@ -0,0 +1,417 @@
+#!/usr/bin/python
+# -*- coding: utf-8 -*-
+#
+# (c) 2016, René Moser <mail@renemoser.net>
+#
+# This file is part of Ansible
+#
+# Ansible is free software: you can redistribute it and/or modify
+# it under the terms of the GNU General Public License as published by
+# the Free Software Foundation, either version 3 of the License, or
+# (at your option) any later version.
+#
+# Ansible is distributed in the hope that it will be useful,
+# but WITHOUT ANY WARRANTY; without even the implied warranty of
+# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+# GNU General Public License for more details.
+#
+# You should have received a copy of the GNU General Public License
+# along with Ansible. If not, see <http://www.gnu.org/licenses/>.
+
+DOCUMENTATION = '''
+---
+module: cs_cluster
+short_description: Manages host clusters on Apache CloudStack based clouds.
+description:
+ - Create, update and remove clusters.
+version_added: "2.1"
+author: "René Moser (@resmo)"
+options:
+ name:
+ description:
+      - Name of the cluster.
+ required: true
+ zone:
+ description:
+      - Name of the zone the cluster belongs to.
+ - If not set, default zone is used.
+ required: false
+ default: null
+ pod:
+ description:
+      - Name of the pod the cluster belongs to.
+ required: false
+ default: null
+ cluster_type:
+ description:
+ - Type of the cluster.
+      - Required if C(state=present).
+ required: false
+ default: null
+ choices: [ 'CloudManaged', 'ExternalManaged' ]
+ hypervisor:
+ description:
+      - Name of the hypervisor to be used.
+ - Required if C(state=present).
+ required: false
+    default: null
+ choices: [ 'KVM', 'VMware', 'BareMetal', 'XenServer', 'LXC', 'HyperV', 'UCS', 'OVM' ]
+ url:
+ description:
+      - URL for the cluster.
+ required: false
+ default: null
+ username:
+ description:
+ - Username for the cluster.
+ required: false
+ default: null
+ password:
+ description:
+ - Password for the cluster.
+ required: false
+ default: null
+ guest_vswitch_name:
+ description:
+ - Name of virtual switch used for guest traffic in the cluster.
+ - This would override zone wide traffic label setting.
+ required: false
+ default: null
+ guest_vswitch_type:
+ description:
+ - Type of virtual switch used for guest traffic in the cluster.
+      - Allowed values are C(vmwaresvs) (for VMware standard vSwitch) and C(vmwaredvs) (for VMware distributed vSwitch).
+ required: false
+ default: null
+ choices: [ 'vmwaresvs', 'vmwaredvs' ]
+ public_vswitch_name:
+ description:
+ - Name of virtual switch used for public traffic in the cluster.
+ - This would override zone wide traffic label setting.
+ required: false
+ default: null
+ public_vswitch_type:
+ description:
+ - Type of virtual switch used for public traffic in the cluster.
+      - Allowed values are C(vmwaresvs) (for VMware standard vSwitch) and C(vmwaredvs) (for VMware distributed vSwitch).
+ required: false
+ default: null
+ choices: [ 'vmwaresvs', 'vmwaredvs' ]
+ vms_ip_address:
+ description:
+ - IP address of the VSM associated with this cluster.
+ required: false
+ default: null
+ vms_username:
+ description:
+ - Username for the VSM associated with this cluster.
+ required: false
+ default: null
+ vms_password:
+ description:
+ - Password for the VSM associated with this cluster.
+ required: false
+ default: null
+ ovm3_cluster:
+ description:
+ - Ovm3 native OCFS2 clustering enabled for cluster.
+ required: false
+ default: null
+ ovm3_pool:
+ description:
+ - Ovm3 native pooling enabled for cluster.
+ required: false
+ default: null
+ ovm3_vip:
+ description:
+ - Ovm3 vip to use for pool (and cluster).
+ required: false
+ default: null
+ state:
+ description:
+ - State of the cluster.
+ required: false
+ default: 'present'
+ choices: [ 'present', 'absent', 'disabled', 'enabled' ]
+extends_documentation_fragment: cloudstack
+'''
+
+EXAMPLES = '''
+# Ensure a cluster is present
+- local_action:
+ module: cs_cluster
+ name: kvm-cluster-01
+ zone: ch-zrh-ix-01
+ hypervisor: KVM
+ cluster_type: CloudManaged
+
+# Ensure a cluster is disabled
+- local_action:
+ module: cs_cluster
+ name: kvm-cluster-01
+ zone: ch-zrh-ix-01
+ state: disabled
+
+# Ensure a cluster is enabled
+- local_action:
+ module: cs_cluster
+ name: kvm-cluster-01
+ zone: ch-zrh-ix-01
+ state: enabled
+
+# Ensure a cluster is absent
+- local_action:
+ module: cs_cluster
+ name: kvm-cluster-01
+ zone: ch-zrh-ix-01
+ state: absent
+'''
+
+RETURN = '''
+---
+id:
+ description: UUID of the cluster.
+ returned: success
+ type: string
+ sample: 04589590-ac63-4ffc-93f5-b698b8ac38b6
+name:
+ description: Name of the cluster.
+ returned: success
+ type: string
+ sample: cluster01
+allocation_state:
+ description: State of the cluster.
+ returned: success
+ type: string
+ sample: Enabled
+cluster_type:
+ description: Type of the cluster.
+ returned: success
+ type: string
+ sample: ExternalManaged
+cpu_overcommit_ratio:
+ description: The CPU overcommit ratio of the cluster.
+ returned: success
+ type: string
+ sample: 1.0
+memory_overcommit_ratio:
+ description: The memory overcommit ratio of the cluster.
+ returned: success
+ type: string
+ sample: 1.0
+managed_state:
+ description: Whether this cluster is managed by CloudStack.
+ returned: success
+ type: string
+ sample: Managed
+ovm3_vip:
+  description: Ovm3 VIP to use for pooling and/or clustering.
+ returned: success
+ type: string
+ sample: 10.10.10.101
+hypervisor:
+  description: Hypervisor of the cluster.
+ returned: success
+ type: string
+ sample: VMware
+zone:
+ description: Name of zone the cluster is in.
+ returned: success
+ type: string
+ sample: ch-gva-2
+pod:
+ description: Name of pod the cluster is in.
+ returned: success
+ type: string
+ sample: pod01
+'''
+
+# import cloudstack common
+from ansible.module_utils.cloudstack import *
+
+
+class AnsibleCloudStackCluster(AnsibleCloudStack):
+
+ def __init__(self, module):
+ super(AnsibleCloudStackCluster, self).__init__(module)
+ self.returns = {
+ 'allocationstate': 'allocation_state',
+ 'hypervisortype': 'hypervisor',
+ 'clustertype': 'cluster_type',
+ 'podname': 'pod',
+ 'managedstate': 'managed_state',
+ 'memoryovercommitratio': 'memory_overcommit_ratio',
+ 'cpuovercommitratio': 'cpu_overcommit_ratio',
+ 'ovm3vip': 'ovm3_vip',
+ }
+ self.cluster = None
+
+ def _get_common_cluster_args(self):
+ args = {
+ 'clustername': self.module.params.get('name'),
+ 'hypervisor': self.module.params.get('hypervisor'),
+ 'clustertype': self.module.params.get('cluster_type'),
+ }
+ state = self.module.params.get('state')
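+        # the API expects capitalized values, e.g. state=disabled becomes allocationstate='Disabled'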
+ if state in ['enabled', 'disabled']:
+ args['allocationstate'] = state.capitalize()
+ return args
+
+ def get_pod(self, key=None):
+ args = {
+ 'name': self.module.params.get('pod'),
+ 'zoneid': self.get_zone(key='id'),
+ }
+ pods = self.cs.listPods(**args)
+ if pods:
+ return self._get_by_key(key, pods['pod'][0])
+ self.module.fail_json(msg="Pod %s not found in zone %s." % (self.module.params.get('pod'), self.get_zone(key='name')))
+
+ def get_cluster(self):
+ if not self.cluster:
+ args = {}
+
+ uuid = self.module.params.get('id')
+ if uuid:
+ args['id'] = uuid
+ clusters = self.cs.listClusters(**args)
+ if clusters:
+ self.cluster = clusters['cluster'][0]
+ return self.cluster
+
+ args['name'] = self.module.params.get('name')
+ clusters = self.cs.listClusters(**args)
+ if clusters:
+ self.cluster = clusters['cluster'][0]
+                # normalize the API return, which uses different keys than the request arguments
+ self.cluster['hypervisor'] = self.cluster['hypervisortype']
+ self.cluster['clustername'] = self.cluster['name']
+ return self.cluster
+
+ def present_cluster(self):
+ cluster = self.get_cluster()
+ if cluster:
+ cluster = self._update_cluster()
+ else:
+ cluster = self._create_cluster()
+ return cluster
+
+ def _create_cluster(self):
+ required_params = [
+ 'cluster_type',
+ 'hypervisor',
+ ]
+ self.module.fail_on_missing_params(required_params=required_params)
+
+ args = self._get_common_cluster_args()
+ args['zoneid'] = self.get_zone(key='id')
+ args['podid'] = self.get_pod(key='id')
+ args['url'] = self.module.params.get('url')
+ args['username'] = self.module.params.get('username')
+ args['password'] = self.module.params.get('password')
+ args['guestvswitchname'] = self.module.params.get('guest_vswitch_name')
+ args['guestvswitchtype'] = self.module.params.get('guest_vswitch_type')
+        args['publicvswitchname'] = self.module.params.get('public_vswitch_name')
+ args['publicvswitchtype'] = self.module.params.get('public_vswitch_type')
+ args['vsmipaddress'] = self.module.params.get('vms_ip_address')
+ args['vsmusername'] = self.module.params.get('vms_username')
+        args['vsmpassword'] = self.module.params.get('vms_password')
+ args['ovm3cluster'] = self.module.params.get('ovm3_cluster')
+ args['ovm3pool'] = self.module.params.get('ovm3_pool')
+ args['ovm3vip'] = self.module.params.get('ovm3_vip')
+
+ self.result['changed'] = True
+
+ cluster = None
+ if not self.module.check_mode:
+ res = self.cs.addCluster(**args)
+ if 'errortext' in res:
+ self.module.fail_json(msg="Failed: '%s'" % res['errortext'])
+ # API returns a list as result CLOUDSTACK-9205
+ if isinstance(res['cluster'], list):
+ cluster = res['cluster'][0]
+ else:
+ cluster = res['cluster']
+ return cluster
+
+ def _update_cluster(self):
+ cluster = self.get_cluster()
+
+ args = self._get_common_cluster_args()
+ args['id'] = cluster['id']
+
+ if self.has_changed(args, cluster):
+ self.result['changed'] = True
+
+ if not self.module.check_mode:
+ res = self.cs.updateCluster(**args)
+ if 'errortext' in res:
+ self.module.fail_json(msg="Failed: '%s'" % res['errortext'])
+ cluster = res['cluster']
+ return cluster
+
+ def absent_cluster(self):
+ cluster = self.get_cluster()
+ if cluster:
+ self.result['changed'] = True
+
+ args = {
+ 'id': cluster['id'],
+ }
+ if not self.module.check_mode:
+ res = self.cs.deleteCluster(**args)
+ if 'errortext' in res:
+ self.module.fail_json(msg="Failed: '%s'" % res['errortext'])
+ return cluster
+
+
+def main():
+ argument_spec = cs_argument_spec()
+ argument_spec.update(dict(
+ name=dict(required=True),
+ zone=dict(default=None),
+ pod=dict(default=None),
+ cluster_type=dict(choices=['CloudManaged', 'ExternalManaged'], default=None),
+ hypervisor=dict(choices=CS_HYPERVISORS, default=None),
+ state=dict(choices=['present', 'enabled', 'disabled', 'absent'], default='present'),
+ url=dict(default=None),
+ username=dict(default=None),
+ password=dict(default=None, no_log=True),
+ guest_vswitch_name=dict(default=None),
+ guest_vswitch_type=dict(choices=['vmwaresvs', 'vmwaredvs'], default=None),
+ public_vswitch_name=dict(default=None),
+ public_vswitch_type=dict(choices=['vmwaresvs', 'vmwaredvs'], default=None),
+ vms_ip_address=dict(default=None),
+ vms_username=dict(default=None),
+ vms_password=dict(default=None, no_log=True),
+ ovm3_cluster=dict(default=None),
+ ovm3_pool=dict(default=None),
+ ovm3_vip=dict(default=None),
+ ))
+
+ module = AnsibleModule(
+ argument_spec=argument_spec,
+ required_together=cs_required_together(),
+ supports_check_mode=True
+ )
+
+ try:
+ acs_cluster = AnsibleCloudStackCluster(module)
+
+ state = module.params.get('state')
+ if state in ['absent']:
+ cluster = acs_cluster.absent_cluster()
+ else:
+ cluster = acs_cluster.present_cluster()
+
+ result = acs_cluster.get_result(cluster)
+
+ except CloudStackException as e:
+ module.fail_json(msg='CloudStackException: %s' % str(e))
+
+ module.exit_json(**result)
+
+# import module snippets
+from ansible.module_utils.basic import *
+if __name__ == '__main__':
+ main()
diff --git a/lib/ansible/modules/extras/cloud/cloudstack/cs_configuration.py b/lib/ansible/modules/extras/cloud/cloudstack/cs_configuration.py
new file mode 100644
index 0000000000..9c62daeba7
--- /dev/null
+++ b/lib/ansible/modules/extras/cloud/cloudstack/cs_configuration.py
@@ -0,0 +1,288 @@
+#!/usr/bin/python
+# -*- coding: utf-8 -*-
+#
+# (c) 2016, René Moser <mail@renemoser.net>
+#
+# This file is part of Ansible
+#
+# Ansible is free software: you can redistribute it and/or modify
+# it under the terms of the GNU General Public License as published by
+# the Free Software Foundation, either version 3 of the License, or
+# (at your option) any later version.
+#
+# Ansible is distributed in the hope that it will be useful,
+# but WITHOUT ANY WARRANTY; without even the implied warranty of
+# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+# GNU General Public License for more details.
+#
+# You should have received a copy of the GNU General Public License
+# along with Ansible. If not, see <http://www.gnu.org/licenses/>.
+
+DOCUMENTATION = '''
+---
+module: cs_configuration
+short_description: Manages configuration on Apache CloudStack based clouds.
+description:
+ - Manages global, zone, account, storage and cluster configurations.
+version_added: "2.1"
+author: "René Moser (@resmo)"
+options:
+ name:
+ description:
+ - Name of the configuration.
+ required: true
+ value:
+ description:
+ - Value of the configuration.
+ required: true
+ account:
+ description:
+ - Ensure the value for corresponding account.
+ required: false
+ default: null
+ domain:
+ description:
+ - Domain the account is related to.
+ - Only considered if C(account) is used.
+ required: false
+ default: ROOT
+ zone:
+ description:
+ - Ensure the value for corresponding zone.
+ required: false
+ default: null
+ storage:
+ description:
+ - Ensure the value for corresponding storage pool.
+ required: false
+ default: null
+ cluster:
+ description:
+ - Ensure the value for corresponding cluster.
+ required: false
+ default: null
+extends_documentation_fragment: cloudstack
+'''
+
+EXAMPLES = '''
+# Ensure global configuration
+- local_action:
+ module: cs_configuration
+ name: router.reboot.when.outofband.migrated
+ value: false
+
+# Ensure zone configuration
+- local_action:
+ module: cs_configuration
+ name: router.reboot.when.outofband.migrated
+ zone: ch-gva-01
+ value: true
+
+# Ensure storage configuration
+- local_action:
+ module: cs_configuration
+ name: storage.overprovisioning.factor
+ storage: storage01
+ value: 2.0
+
+# Ensure account configuration
+- local_action:
+    module: cs_configuration
+ name: allow.public.user.templates
+ value: false
+ account: acme inc
+ domain: customers
+'''
+
+RETURN = '''
+---
+category:
+ description: Category of the configuration.
+ returned: success
+ type: string
+ sample: Advanced
+scope:
+ description: Scope (zone/cluster/storagepool/account) of the parameter that needs to be updated.
+ returned: success
+ type: string
+ sample: storagepool
+description:
+ description: Description of the configuration.
+ returned: success
+ type: string
+ sample: Setup the host to do multipath
+name:
+ description: Name of the configuration.
+ returned: success
+ type: string
+ sample: zone.vlan.capacity.notificationthreshold
+value:
+ description: Value of the configuration.
+ returned: success
+ type: string
+ sample: "0.75"
+account:
+ description: Account of the configuration.
+ returned: success
+ type: string
+ sample: admin
+domain:
+  description: Domain of the account of the configuration.
+ returned: success
+ type: string
+ sample: ROOT
+zone:
+ description: Zone of the configuration.
+ returned: success
+ type: string
+ sample: ch-gva-01
+cluster:
+ description: Cluster of the configuration.
+ returned: success
+ type: string
+ sample: cluster01
+storage:
+ description: Storage of the configuration.
+ returned: success
+ type: string
+ sample: storage01
+'''
+
+# import cloudstack common
+from ansible.module_utils.cloudstack import *
+
+class AnsibleCloudStackConfiguration(AnsibleCloudStack):
+
+ def __init__(self, module):
+ super(AnsibleCloudStackConfiguration, self).__init__(module)
+ self.returns = {
+ 'category': 'category',
+ 'scope': 'scope',
+ 'value': 'value',
+ }
+ self.storage = None
+ self.account = None
+ self.cluster = None
+
+
+ def _get_common_configuration_args(self):
+ args = {}
+ args['name'] = self.module.params.get('name')
+ args['accountid'] = self.get_account(key='id')
+ args['storageid'] = self.get_storage(key='id')
+ args['zoneid'] = self.get_zone(key='id')
+ args['clusterid'] = self.get_cluster(key='id')
+ return args
+
+
+ def get_zone(self, key=None):
+        # make sure we do not use the default zone
+ zone = self.module.params.get('zone')
+ if zone:
+ return super(AnsibleCloudStackConfiguration, self).get_zone(key=key)
+
+
+ def get_cluster(self, key=None):
+ if not self.cluster:
+ cluster_name = self.module.params.get('cluster')
+ if not cluster_name:
+ return None
+ args = {}
+ args['name'] = cluster_name
+ clusters = self.cs.listClusters(**args)
+ if clusters:
+ self.cluster = clusters['cluster'][0]
+ self.result['cluster'] = self.cluster['name']
+ else:
+ self.module.fail_json(msg="Cluster %s not found." % cluster_name)
+ return self._get_by_key(key=key, my_dict=self.cluster)
+
+
+ def get_storage(self, key=None):
+ if not self.storage:
+ storage_pool_name = self.module.params.get('storage')
+ if not storage_pool_name:
+ return None
+ args = {}
+ args['name'] = storage_pool_name
+ storage_pools = self.cs.listStoragePools(**args)
+ if storage_pools:
+ self.storage = storage_pools['storagepool'][0]
+ self.result['storage'] = self.storage['name']
+ else:
+ self.module.fail_json(msg="Storage pool %s not found." % storage_pool_name)
+ return self._get_by_key(key=key, my_dict=self.storage)
+
+
+ def get_configuration(self):
+ configuration = None
+ args = self._get_common_configuration_args()
+ configurations = self.cs.listConfigurations(**args)
+ if not configurations:
+ self.module.fail_json(msg="Configuration %s not found." % args['name'])
+ configuration = configurations['configuration'][0]
+ return configuration
+
+
+ def get_value(self):
+ value = str(self.module.params.get('value'))
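+        # booleans are rendered as 'True'/'False'; lowercase them to match the API's 'true'/'false'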
+ if value in ('True', 'False'):
+ value = value.lower()
+ return value
+
+
+ def present_configuration(self):
+ configuration = self.get_configuration()
+ args = self._get_common_configuration_args()
+ args['value'] = self.get_value()
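+        # only the value is compared for change detection; the other args just identify the configuration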
+ if self.has_changed(args, configuration, ['value']):
+ self.result['changed'] = True
+ if not self.module.check_mode:
+ res = self.cs.updateConfiguration(**args)
+ if 'errortext' in res:
+ self.module.fail_json(msg="Failed: '%s'" % res['errortext'])
+ configuration = res['configuration']
+ return configuration
+
+
+ def get_result(self, configuration):
+ self.result = super(AnsibleCloudStackConfiguration, self).get_result(configuration)
+ if self.account:
+ self.result['account'] = self.account['name']
+ self.result['domain'] = self.domain['path']
+ elif self.zone:
+ self.result['zone'] = self.zone['name']
+ return self.result
+
+def main():
+ argument_spec = cs_argument_spec()
+ argument_spec.update(dict(
+ name = dict(required=True),
+ value = dict(type='str', required=True),
+ zone = dict(default=None),
+ storage = dict(default=None),
+ cluster = dict(default=None),
+ account = dict(default=None),
+ domain = dict(default='ROOT')
+ ))
+
+ module = AnsibleModule(
+ argument_spec=argument_spec,
+ required_together=cs_required_together(),
+ supports_check_mode=True
+ )
+
+ try:
+ acs_configuration = AnsibleCloudStackConfiguration(module)
+ configuration = acs_configuration.present_configuration()
+ result = acs_configuration.get_result(configuration)
+
+ except CloudStackException as e:
+ module.fail_json(msg='CloudStackException: %s' % str(e))
+
+ module.exit_json(**result)
+
+# import module snippets
+from ansible.module_utils.basic import *
+if __name__ == '__main__':
+ main()
diff --git a/lib/ansible/modules/extras/cloud/cloudstack/cs_domain.py b/lib/ansible/modules/extras/cloud/cloudstack/cs_domain.py
new file mode 100644
index 0000000000..17c93a8461
--- /dev/null
+++ b/lib/ansible/modules/extras/cloud/cloudstack/cs_domain.py
@@ -0,0 +1,270 @@
+#!/usr/bin/python
+# -*- coding: utf-8 -*-
+#
+# (c) 2015, René Moser <mail@renemoser.net>
+#
+# This file is part of Ansible
+#
+# Ansible is free software: you can redistribute it and/or modify
+# it under the terms of the GNU General Public License as published by
+# the Free Software Foundation, either version 3 of the License, or
+# (at your option) any later version.
+#
+# Ansible is distributed in the hope that it will be useful,
+# but WITHOUT ANY WARRANTY; without even the implied warranty of
+# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+# GNU General Public License for more details.
+#
+# You should have received a copy of the GNU General Public License
+# along with Ansible. If not, see <http://www.gnu.org/licenses/>.
+
+DOCUMENTATION = '''
+---
+module: cs_domain
+short_description: Manages domains on Apache CloudStack based clouds.
+description:
+ - Create, update and remove domains.
+version_added: '2.0'
+author: "René Moser (@resmo)"
+options:
+ path:
+ description:
+ - Path of the domain.
+ - Prefix C(ROOT/) or C(/ROOT/) in path is optional.
+ required: true
+ network_domain:
+ description:
+ - Network domain for networks in the domain.
+ required: false
+ default: null
+ clean_up:
+ description:
+ - Clean up all domain resources like child domains and accounts.
+ - Considered on C(state=absent).
+ required: false
+ default: false
+ state:
+ description:
+ - State of the domain.
+ required: false
+ default: 'present'
+ choices: [ 'present', 'absent' ]
+ poll_async:
+ description:
+ - Poll async jobs until job has finished.
+ required: false
+ default: true
+extends_documentation_fragment: cloudstack
+'''
+
+EXAMPLES = '''
+# Create a domain
+local_action:
+ module: cs_domain
+ path: ROOT/customers
+ network_domain: customers.example.com
+
+# Create another subdomain
+local_action:
+ module: cs_domain
+ path: ROOT/customers/xy
+ network_domain: xy.customers.example.com
+
+# Remove a domain
+local_action:
+ module: cs_domain
+ path: ROOT/customers/xy
+ state: absent
+'''
+
+RETURN = '''
+---
+id:
+ description: UUID of the domain.
+ returned: success
+ type: string
+ sample: 87b1e0ce-4e01-11e4-bb66-0050569e64b8
+name:
+ description: Name of the domain.
+ returned: success
+ type: string
+ sample: customers
+path:
+ description: Domain path.
+ returned: success
+ type: string
+ sample: /ROOT/customers
+parent_domain:
+ description: Parent domain of the domain.
+ returned: success
+ type: string
+ sample: ROOT
+network_domain:
+ description: Network domain of the domain.
+ returned: success
+ type: string
+ sample: example.local
+'''
+
+# import cloudstack common
+from ansible.module_utils.cloudstack import *
+
+
+class AnsibleCloudStackDomain(AnsibleCloudStack):
+
+ def __init__(self, module):
+ super(AnsibleCloudStackDomain, self).__init__(module)
+ self.returns = {
+ 'path': 'path',
+ 'networkdomain': 'network_domain',
+ 'parentdomainname': 'parent_domain',
+ }
+ self.domain = None
+
+
+ def _get_domain_internal(self, path=None):
+ if not path:
+ path = self.module.params.get('path')
+
+ if path.endswith('/'):
+ self.module.fail_json(msg="Path '%s' must not end with /" % path)
+
+ path = path.lower()
+
+ if path.startswith('/') and not path.startswith('/root/'):
+ path = "root" + path
+ elif not path.startswith('root/'):
+ path = "root/" + path
+
+ args = {}
+ args['listall'] = True
+
+ domains = self.cs.listDomains(**args)
+ if domains:
+ for d in domains['domain']:
+ if path == d['path'].lower():
+ return d
+ return None
+
+
+ def get_name(self):
+ # last part of the path is the name
+        name = self.module.params.get('path').split('/')[-1]
+ return name
+
+
+ def get_domain(self, key=None):
+ if not self.domain:
+ self.domain = self._get_domain_internal()
+ return self._get_by_key(key, self.domain)
+
+
+ def get_parent_domain(self, key=None):
+ path = self.module.params.get('path')
+ # cut off last /*
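+        # e.g. 'ROOT/customers/xy' -> 'ROOT/customers'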
+ path = '/'.join(path.split('/')[:-1])
+ if not path:
+ return None
+ parent_domain = self._get_domain_internal(path=path)
+ if not parent_domain:
+ self.module.fail_json(msg="Parent domain path %s does not exist" % path)
+ return self._get_by_key(key, parent_domain)
+
+
+ def present_domain(self):
+ domain = self.get_domain()
+ if not domain:
+ domain = self.create_domain(domain)
+ else:
+ domain = self.update_domain(domain)
+ return domain
+
+
+ def create_domain(self, domain):
+ self.result['changed'] = True
+
+ args = {}
+ args['name'] = self.get_name()
+ args['parentdomainid'] = self.get_parent_domain(key='id')
+ args['networkdomain'] = self.module.params.get('network_domain')
+
+ if not self.module.check_mode:
+ res = self.cs.createDomain(**args)
+ if 'errortext' in res:
+ self.module.fail_json(msg="Failed: '%s'" % res['errortext'])
+ domain = res['domain']
+ return domain
+
+
+ def update_domain(self, domain):
+ args = {}
+ args['id'] = domain['id']
+ args['networkdomain'] = self.module.params.get('network_domain')
+
+ if self.has_changed(args, domain):
+ self.result['changed'] = True
+ if not self.module.check_mode:
+ res = self.cs.updateDomain(**args)
+ if 'errortext' in res:
+ self.module.fail_json(msg="Failed: '%s'" % res['errortext'])
+ domain = res['domain']
+ return domain
+
+
+ def absent_domain(self):
+ domain = self.get_domain()
+ if domain:
+ self.result['changed'] = True
+
+ if not self.module.check_mode:
+ args = {}
+ args['id'] = domain['id']
+ args['cleanup'] = self.module.params.get('clean_up')
+ res = self.cs.deleteDomain(**args)
+
+ if 'errortext' in res:
+ self.module.fail_json(msg="Failed: '%s'" % res['errortext'])
+
+ poll_async = self.module.params.get('poll_async')
+ if poll_async:
+ res = self.poll_job(res, 'domain')
+ return domain
+
+
+
+def main():
+ argument_spec = cs_argument_spec()
+ argument_spec.update(dict(
+ path = dict(required=True),
+ state = dict(choices=['present', 'absent'], default='present'),
+ network_domain = dict(default=None),
+ clean_up = dict(type='bool', default=False),
+ poll_async = dict(type='bool', default=True),
+ ))
+
+ module = AnsibleModule(
+ argument_spec=argument_spec,
+ required_together=cs_required_together(),
+ supports_check_mode=True
+ )
+
+ try:
+ acs_dom = AnsibleCloudStackDomain(module)
+
+ state = module.params.get('state')
+ if state in ['absent']:
+ domain = acs_dom.absent_domain()
+ else:
+ domain = acs_dom.present_domain()
+
+ result = acs_dom.get_result(domain)
+
+ except CloudStackException as e:
+ module.fail_json(msg='CloudStackException: %s' % str(e))
+
+ module.exit_json(**result)
+
+# import module snippets
+from ansible.module_utils.basic import *
+if __name__ == '__main__':
+ main()
diff --git a/lib/ansible/modules/extras/cloud/cloudstack/cs_facts.py b/lib/ansible/modules/extras/cloud/cloudstack/cs_facts.py
new file mode 100644
index 0000000000..4a77447953
--- /dev/null
+++ b/lib/ansible/modules/extras/cloud/cloudstack/cs_facts.py
@@ -0,0 +1,222 @@
+#!/usr/bin/python
+# -*- coding: utf-8 -*-
+#
+# (c) 2015, René Moser <mail@renemoser.net>
+#
+# This file is part of Ansible
+#
+# Ansible is free software: you can redistribute it and/or modify
+# it under the terms of the GNU General Public License as published by
+# the Free Software Foundation, either version 3 of the License, or
+# (at your option) any later version.
+#
+# Ansible is distributed in the hope that it will be useful,
+# but WITHOUT ANY WARRANTY; without even the implied warranty of
+# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+# GNU General Public License for more details.
+#
+# You should have received a copy of the GNU General Public License
+# along with Ansible. If not, see <http://www.gnu.org/licenses/>.
+
+DOCUMENTATION = '''
+---
+module: cs_facts
+short_description: Gather facts on instances of Apache CloudStack based clouds.
+description:
+ - This module fetches data from the metadata API in CloudStack. The module must be called from within the instance itself.
+version_added: '2.0'
+author: "René Moser (@resmo)"
+options:
+ filter:
+ description:
+ - Filter for a specific fact.
+ required: false
+ default: null
+ choices:
+ - cloudstack_service_offering
+ - cloudstack_availability_zone
+ - cloudstack_public_hostname
+ - cloudstack_public_ipv4
+ - cloudstack_local_hostname
+ - cloudstack_local_ipv4
+ - cloudstack_instance_id
+ - cloudstack_user_data
+requirements: [ 'yaml' ]
+'''
+
+EXAMPLES = '''
+# Gather all facts on instances
+- name: Gather cloudstack facts
+ cs_facts:
+
+# Gather specific fact on instances
+- name: Gather cloudstack facts
+ cs_facts: filter=cloudstack_instance_id
+'''
+
+RETURN = '''
+---
+cloudstack_availability_zone:
+ description: zone the instance is deployed in.
+ returned: success
+ type: string
+ sample: ch-gva-2
+cloudstack_instance_id:
+ description: UUID of the instance.
+ returned: success
+ type: string
+ sample: ab4e80b0-3e7e-4936-bdc5-e334ba5b0139
+cloudstack_local_hostname:
+ description: local hostname of the instance.
+ returned: success
+ type: string
+ sample: VM-ab4e80b0-3e7e-4936-bdc5-e334ba5b0139
+cloudstack_local_ipv4:
+ description: local IPv4 of the instance.
+ returned: success
+ type: string
+ sample: 185.19.28.35
+cloudstack_public_hostname:
+  description: public hostname of the instance.
+ returned: success
+ type: string
+ sample: VM-ab4e80b0-3e7e-4936-bdc5-e334ba5b0139
+cloudstack_public_ipv4:
+ description: public IPv4 of the router.
+ returned: success
+ type: string
+ sample: 185.19.28.35
+cloudstack_service_offering:
+ description: service offering of the instance.
+ returned: success
+ type: string
+ sample: Micro 512mb 1cpu
+cloudstack_user_data:
+ description: data of the instance provided by users.
+ returned: success
+ type: dict
+ sample: { "bla": "foo" }
+'''
+
+import os
+
+try:
+ import yaml
+ has_lib_yaml = True
+except ImportError:
+ has_lib_yaml = False
+
+CS_METADATA_BASE_URL = "http://%s/latest/meta-data"
+CS_USERDATA_BASE_URL = "http://%s/latest/user-data"
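+# e.g. with the virtual router (DHCP server) at 185.19.28.176, the instance id
+# is fetched from http://185.19.28.176/latest/meta-data/instance-id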
+
+class CloudStackFacts(object):
+
+ def __init__(self):
+ self.facts = ansible_facts(module)
+ self.api_ip = None
+ self.fact_paths = {
+ 'cloudstack_service_offering': 'service-offering',
+ 'cloudstack_availability_zone': 'availability-zone',
+ 'cloudstack_public_hostname': 'public-hostname',
+ 'cloudstack_public_ipv4': 'public-ipv4',
+ 'cloudstack_local_hostname': 'local-hostname',
+ 'cloudstack_local_ipv4': 'local-ipv4',
+ 'cloudstack_instance_id': 'instance-id'
+ }
+
+ def run(self):
+ result = {}
+ filter = module.params.get('filter')
+ if not filter:
+ for key,path in self.fact_paths.iteritems():
+ result[key] = self._fetch(CS_METADATA_BASE_URL + "/" + path)
+ result['cloudstack_user_data'] = self._get_user_data_json()
+ else:
+ if filter == 'cloudstack_user_data':
+ result['cloudstack_user_data'] = self._get_user_data_json()
+ elif filter in self.fact_paths:
+ result[filter] = self._fetch(CS_METADATA_BASE_URL + "/" + self.fact_paths[filter])
+ return result
+
+
+ def _get_user_data_json(self):
+ try:
+            # this data comes from users; we try what we can to parse it...
+ return yaml.load(self._fetch(CS_USERDATA_BASE_URL))
+        except Exception:
+ return None
+
+
+ def _fetch(self, path):
+ api_ip = self._get_api_ip()
+ if not api_ip:
+ return None
+ api_url = path % api_ip
+ (response, info) = fetch_url(module, api_url, force=True)
+ if response:
+ data = response.read()
+ else:
+ data = None
+ return data
+
+
+ def _get_dhcp_lease_file(self):
+ """Return the path of the lease file."""
+ default_iface = self.facts['default_ipv4']['interface']
+ dhcp_lease_file_locations = [
+ '/var/lib/dhcp/dhclient.%s.leases' % default_iface, # debian / ubuntu
+ '/var/lib/dhclient/dhclient-%s.leases' % default_iface, # centos 6
+ '/var/lib/dhclient/dhclient--%s.lease' % default_iface, # centos 7
+ '/var/db/dhclient.leases.%s' % default_iface, # openbsd
+ ]
+ for file_path in dhcp_lease_file_locations:
+ if os.path.exists(file_path):
+ return file_path
+ module.fail_json(msg="Could not find dhclient leases file.")
+
+
+ def _get_api_ip(self):
+ """Return the IP of the DHCP server."""
+ if not self.api_ip:
+ dhcp_lease_file = self._get_dhcp_lease_file()
+ for line in open(dhcp_lease_file):
+ if 'dhcp-server-identifier' in line:
+ # get IP of string "option dhcp-server-identifier 185.19.28.176;"
+ line = line.translate(None, ';')
+ self.api_ip = line.split()[2]
+ break
+ if not self.api_ip:
+ module.fail_json(msg="No dhcp-server-identifier found in leases file.")
+ return self.api_ip
+
+
+def main():
+ global module
+ module = AnsibleModule(
+ argument_spec = dict(
+ filter = dict(default=None, choices=[
+ 'cloudstack_service_offering',
+ 'cloudstack_availability_zone',
+ 'cloudstack_public_hostname',
+ 'cloudstack_public_ipv4',
+ 'cloudstack_local_hostname',
+ 'cloudstack_local_ipv4',
+ 'cloudstack_instance_id',
+ 'cloudstack_user_data',
+ ]),
+ ),
+ supports_check_mode=False
+ )
+
+ if not has_lib_yaml:
+ module.fail_json(msg="missing python library: yaml")
+
+ cs_facts = CloudStackFacts().run()
+ cs_facts_result = dict(changed=False, ansible_facts=cs_facts)
+ module.exit_json(**cs_facts_result)
+
+from ansible.module_utils.basic import *
+from ansible.module_utils.urls import *
+from ansible.module_utils.facts import *
+if __name__ == '__main__':
+ main()
diff --git a/lib/ansible/modules/extras/cloud/cloudstack/cs_firewall.py b/lib/ansible/modules/extras/cloud/cloudstack/cs_firewall.py
new file mode 100644
index 0000000000..1a677da4df
--- /dev/null
+++ b/lib/ansible/modules/extras/cloud/cloudstack/cs_firewall.py
@@ -0,0 +1,429 @@
+#!/usr/bin/python
+# -*- coding: utf-8 -*-
+#
+# (c) 2015, René Moser <mail@renemoser.net>
+#
+# This file is part of Ansible
+#
+# Ansible is free software: you can redistribute it and/or modify
+# it under the terms of the GNU General Public License as published by
+# the Free Software Foundation, either version 3 of the License, or
+# (at your option) any later version.
+#
+# Ansible is distributed in the hope that it will be useful,
+# but WITHOUT ANY WARRANTY; without even the implied warranty of
+# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+# GNU General Public License for more details.
+#
+# You should have received a copy of the GNU General Public License
+# along with Ansible. If not, see <http://www.gnu.org/licenses/>.
+
+DOCUMENTATION = '''
+---
+module: cs_firewall
+short_description: Manages firewall rules on Apache CloudStack based clouds.
+description:
+ - Creates and removes firewall rules.
+version_added: '2.0'
+author: "René Moser (@resmo)"
+options:
+ ip_address:
+ description:
+ - Public IP address the ingress rule is assigned to.
+ - Required if C(type=ingress).
+ required: false
+ default: null
+ network:
+ description:
+ - Network the egress rule is related to.
+ - Required if C(type=egress).
+ required: false
+ default: null
+ state:
+ description:
+ - State of the firewall rule.
+ required: false
+ default: 'present'
+ choices: [ 'present', 'absent' ]
+ type:
+ description:
+ - Type of the firewall rule.
+ required: false
+ default: 'ingress'
+ choices: [ 'ingress', 'egress' ]
+ protocol:
+ description:
+ - Protocol of the firewall rule.
+      - C(all) is only available if C(type=egress).
+ required: false
+ default: 'tcp'
+ choices: [ 'tcp', 'udp', 'icmp', 'all' ]
+ cidr:
+ description:
+ - CIDR (full notation) to be used for firewall rule.
+ required: false
+ default: '0.0.0.0/0'
+ start_port:
+ description:
+ - Start port for this rule. Considered if C(protocol=tcp) or C(protocol=udp).
+ required: false
+ default: null
+ aliases: [ 'port' ]
+ end_port:
+ description:
+      - End port for this rule. Considered if C(protocol=tcp) or C(protocol=udp). If not specified, equals C(start_port).
+ required: false
+ default: null
+ icmp_type:
+ description:
+ - Type of the icmp message being sent. Considered if C(protocol=icmp).
+ required: false
+ default: null
+ icmp_code:
+ description:
+ - Error code for this icmp message. Considered if C(protocol=icmp).
+ required: false
+ default: null
+ domain:
+ description:
+ - Domain the firewall rule is related to.
+ required: false
+ default: null
+ account:
+ description:
+ - Account the firewall rule is related to.
+ required: false
+ default: null
+ project:
+ description:
+ - Name of the project the firewall rule is related to.
+ required: false
+ default: null
+ zone:
+ description:
+ - Name of the zone in which the virtual machine is in.
+ - If not set, default zone is used.
+ required: false
+ default: null
+ poll_async:
+ description:
+ - Poll async jobs until job has finished.
+ required: false
+ default: true
+extends_documentation_fragment: cloudstack
+'''
+
+EXAMPLES = '''
+# Allow inbound port 80/tcp from 1.2.3.4 to 4.3.2.1
+- local_action:
+ module: cs_firewall
+ ip_address: 4.3.2.1
+ port: 80
+ cidr: 1.2.3.4/32
+
+# Allow inbound tcp/udp port 53 to 4.3.2.1
+- local_action:
+ module: cs_firewall
+ ip_address: 4.3.2.1
+ port: 53
+ protocol: '{{ item }}'
+ with_items:
+ - tcp
+ - udp
+
+# Ensure firewall rule is removed
+- local_action:
+ module: cs_firewall
+ ip_address: 4.3.2.1
+ start_port: 8000
+ end_port: 8888
+ cidr: 17.0.0.0/8
+ state: absent
+
+# Allow all outbound traffic
+- local_action:
+ module: cs_firewall
+ network: my_network
+ type: egress
+ protocol: all
+
+# Allow only HTTP outbound traffic for an IP
+- local_action:
+ module: cs_firewall
+ network: my_network
+ type: egress
+ port: 80
+    cidr: 10.101.1.20/32
+'''
+
+RETURN = '''
+---
+id:
+ description: UUID of the rule.
+ returned: success
+ type: string
+ sample: 04589590-ac63-4ffc-93f5-b698b8ac38b6
+ip_address:
+  description: IP address of the rule if C(type=ingress).
+ returned: success
+ type: string
+ sample: 10.100.212.10
+type:
+ description: Type of the rule.
+ returned: success
+ type: string
+ sample: ingress
+cidr:
+ description: CIDR of the rule.
+ returned: success
+ type: string
+ sample: 0.0.0.0/0
+protocol:
+ description: Protocol of the rule.
+ returned: success
+ type: string
+ sample: tcp
+start_port:
+ description: Start port of the rule.
+ returned: success
+ type: int
+ sample: 80
+end_port:
+ description: End port of the rule.
+ returned: success
+ type: int
+ sample: 80
+icmp_code:
+ description: ICMP code of the rule.
+ returned: success
+ type: int
+ sample: 1
+icmp_type:
+ description: ICMP type of the rule.
+ returned: success
+ type: int
+ sample: 1
+network:
+  description: Name of the network if C(type=egress).
+ returned: success
+ type: string
+ sample: my_network
+'''
+
+# import cloudstack common
+from ansible.module_utils.cloudstack import *
+
+
+class AnsibleCloudStackFirewall(AnsibleCloudStack):
+
+ def __init__(self, module):
+ super(AnsibleCloudStackFirewall, self).__init__(module)
+ self.returns = {
+ 'cidrlist': 'cidr',
+ 'startport': 'start_port',
+            'endport': 'end_port',
+ 'protocol': 'protocol',
+ 'ipaddress': 'ip_address',
+ 'icmpcode': 'icmp_code',
+ 'icmptype': 'icmp_type',
+ }
+ self.firewall_rule = None
+ self.network = None
+
+
+ def get_firewall_rule(self):
+ if not self.firewall_rule:
+ cidr = self.module.params.get('cidr')
+ protocol = self.module.params.get('protocol')
+ start_port = self.module.params.get('start_port')
+ end_port = self.get_or_fallback('end_port', 'start_port')
+ icmp_code = self.module.params.get('icmp_code')
+ icmp_type = self.module.params.get('icmp_type')
+ fw_type = self.module.params.get('type')
+
+ if protocol in ['tcp', 'udp'] and not (start_port and end_port):
+ self.module.fail_json(msg="missing required argument for protocol '%s': start_port or end_port" % protocol)
+
+ if protocol == 'icmp' and not icmp_type:
+ self.module.fail_json(msg="missing required argument for protocol 'icmp': icmp_type")
+
+ if protocol == 'all' and fw_type != 'egress':
+ self.module.fail_json(msg="protocol 'all' could only be used for type 'egress'" )
+
+ args = {}
+ args['account'] = self.get_account('name')
+ args['domainid'] = self.get_domain('id')
+ args['projectid'] = self.get_project('id')
+
+ if fw_type == 'egress':
+ args['networkid'] = self.get_network(key='id')
+ if not args['networkid']:
+ self.module.fail_json(msg="missing required argument for type egress: network")
+ firewall_rules = self.cs.listEgressFirewallRules(**args)
+ else:
+ args['ipaddressid'] = self.get_ip_address('id')
+ if not args['ipaddressid']:
+ self.module.fail_json(msg="missing required argument for type ingress: ip_address")
+ firewall_rules = self.cs.listFirewallRules(**args)
+
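+            # a rule only counts as existing if both the CIDR and the protocol
+            # details match (ports for tcp/udp, code/type for icmp, 'all' for egress)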
+ if firewall_rules and 'firewallrule' in firewall_rules:
+ for rule in firewall_rules['firewallrule']:
+ type_match = self._type_cidr_match(rule, cidr)
+
+ protocol_match = self._tcp_udp_match(rule, protocol, start_port, end_port) \
+ or self._icmp_match(rule, protocol, icmp_code, icmp_type) \
+ or self._egress_all_match(rule, protocol, fw_type)
+
+ if type_match and protocol_match:
+ self.firewall_rule = rule
+ break
+ return self.firewall_rule
+
+
+ def _tcp_udp_match(self, rule, protocol, start_port, end_port):
+ return protocol in ['tcp', 'udp'] \
+ and protocol == rule['protocol'] \
+ and start_port == int(rule['startport']) \
+ and end_port == int(rule['endport'])
+
+
+ def _egress_all_match(self, rule, protocol, fw_type):
+ return protocol in ['all'] \
+ and protocol == rule['protocol'] \
+ and fw_type == 'egress'
+
+
+ def _icmp_match(self, rule, protocol, icmp_code, icmp_type):
+ return protocol == 'icmp' \
+ and protocol == rule['protocol'] \
+ and icmp_code == rule['icmpcode'] \
+ and icmp_type == rule['icmptype']
+
+
+ def _type_cidr_match(self, rule, cidr):
+ return cidr == rule['cidrlist']
+
+
+ def create_firewall_rule(self):
+ firewall_rule = self.get_firewall_rule()
+ if not firewall_rule:
+ self.result['changed'] = True
+
+ args = {}
+ args['cidrlist'] = self.module.params.get('cidr')
+ args['protocol'] = self.module.params.get('protocol')
+ args['startport'] = self.module.params.get('start_port')
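+            # end_port falls back to start_port, resulting in a single-port rule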
+ args['endport'] = self.get_or_fallback('end_port', 'start_port')
+ args['icmptype'] = self.module.params.get('icmp_type')
+ args['icmpcode'] = self.module.params.get('icmp_code')
+
+ fw_type = self.module.params.get('type')
+ if not self.module.check_mode:
+ if fw_type == 'egress':
+ args['networkid'] = self.get_network(key='id')
+ res = self.cs.createEgressFirewallRule(**args)
+ else:
+ args['ipaddressid'] = self.get_ip_address('id')
+ res = self.cs.createFirewallRule(**args)
+
+ if 'errortext' in res:
+ self.module.fail_json(msg="Failed: '%s'" % res['errortext'])
+
+ poll_async = self.module.params.get('poll_async')
+ if poll_async:
+ firewall_rule = self.poll_job(res, 'firewallrule')
+ return firewall_rule
+
+
+ def remove_firewall_rule(self):
+ firewall_rule = self.get_firewall_rule()
+ if firewall_rule:
+ self.result['changed'] = True
+
+ args = {}
+ args['id'] = firewall_rule['id']
+
+ fw_type = self.module.params.get('type')
+ if not self.module.check_mode:
+ if fw_type == 'egress':
+ res = self.cs.deleteEgressFirewallRule(**args)
+ else:
+ res = self.cs.deleteFirewallRule(**args)
+
+ if 'errortext' in res:
+ self.module.fail_json(msg="Failed: '%s'" % res['errortext'])
+
+ poll_async = self.module.params.get('poll_async')
+ if poll_async:
+ res = self.poll_job(res, 'firewallrule')
+ return firewall_rule
+
+
+ def get_result(self, firewall_rule):
+ super(AnsibleCloudStackFirewall, self).get_result(firewall_rule)
+ if firewall_rule:
+ self.result['type'] = self.module.params.get('type')
+ if self.result['type'] == 'egress':
+ self.result['network'] = self.get_network(key='displaytext')
+ return self.result
+
+
+def main():
+ argument_spec = cs_argument_spec()
+ argument_spec.update(dict(
+ ip_address = dict(default=None),
+ network = dict(default=None),
+ cidr = dict(default='0.0.0.0/0'),
+ protocol = dict(choices=['tcp', 'udp', 'icmp', 'all'], default='tcp'),
+ type = dict(choices=['ingress', 'egress'], default='ingress'),
+ icmp_type = dict(type='int', default=None),
+ icmp_code = dict(type='int', default=None),
+ start_port = dict(type='int', aliases=['port'], default=None),
+ end_port = dict(type='int', default=None),
+ state = dict(choices=['present', 'absent'], default='present'),
+ zone = dict(default=None),
+ domain = dict(default=None),
+ account = dict(default=None),
+ project = dict(default=None),
+ poll_async = dict(type='bool', default=True),
+ ))
+
+ required_together = cs_required_together()
+ required_together.extend([
+ ['icmp_type', 'icmp_code'],
+ ])
+
+ module = AnsibleModule(
+ argument_spec=argument_spec,
+ required_together=required_together,
+ required_one_of = (
+ ['ip_address', 'network'],
+ ),
+ mutually_exclusive = (
+ ['icmp_type', 'start_port'],
+ ['icmp_type', 'end_port'],
+ ['ip_address', 'network'],
+ ),
+ supports_check_mode=True
+ )
+
+ try:
+ acs_fw = AnsibleCloudStackFirewall(module)
+
+ state = module.params.get('state')
+ if state in ['absent']:
+ fw_rule = acs_fw.remove_firewall_rule()
+ else:
+ fw_rule = acs_fw.create_firewall_rule()
+
+ result = acs_fw.get_result(fw_rule)
+
+ except CloudStackException as e:
+ module.fail_json(msg='CloudStackException: %s' % str(e))
+
+ module.exit_json(**result)
+
+# import module snippets
+from ansible.module_utils.basic import *
+if __name__ == '__main__':
+ main()
diff --git a/lib/ansible/modules/extras/cloud/cloudstack/cs_instance.py b/lib/ansible/modules/extras/cloud/cloudstack/cs_instance.py
new file mode 100644
index 0000000000..49fb21329c
--- /dev/null
+++ b/lib/ansible/modules/extras/cloud/cloudstack/cs_instance.py
@@ -0,0 +1,1002 @@
+#!/usr/bin/python
+# -*- coding: utf-8 -*-
+#
+# (c) 2015, René Moser <mail@renemoser.net>
+#
+# This file is part of Ansible
+#
+# Ansible is free software: you can redistribute it and/or modify
+# it under the terms of the GNU General Public License as published by
+# the Free Software Foundation, either version 3 of the License, or
+# (at your option) any later version.
+#
+# Ansible is distributed in the hope that it will be useful,
+# but WITHOUT ANY WARRANTY; without even the implied warranty of
+# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+# GNU General Public License for more details.
+#
+# You should have received a copy of the GNU General Public License
+# along with Ansible. If not, see <http://www.gnu.org/licenses/>.
+
+DOCUMENTATION = '''
+---
+module: cs_instance
+short_description: Manages instances and virtual machines on Apache CloudStack based clouds.
+description:
+ - Deploy, start, update, scale, restart, restore, stop and destroy instances.
+version_added: '2.0'
+author: "René Moser (@resmo)"
+options:
+ name:
+ description:
+ - Host name of the instance. C(name) can only contain ASCII letters.
+      - Name will be generated (UUID) by CloudStack if not specified and cannot be changed afterwards.
+ - Either C(name) or C(display_name) is required.
+ required: false
+ default: null
+ display_name:
+ description:
+      - Custom display name of the instance.
+ - Display name will be set to C(name) if not specified.
+ - Either C(name) or C(display_name) is required.
+ required: false
+ default: null
+ group:
+ description:
+      - Group the new instance should be in.
+ required: false
+ default: null
+ state:
+ description:
+ - State of the instance.
+ required: false
+ default: 'present'
+ choices: [ 'deployed', 'started', 'stopped', 'restarted', 'restored', 'destroyed', 'expunged', 'present', 'absent' ]
+ service_offering:
+ description:
+ - Name or id of the service offering of the new instance.
+ - If not set, first found service offering is used.
+ required: false
+ default: null
+ cpu:
+ description:
+      - The number of CPUs to allocate to the instance, used with custom service offerings.
+ required: false
+ default: null
+ cpu_speed:
+ description:
+      - The clock speed/shares allocated to the instance, used with custom service offerings.
+ required: false
+ default: null
+ memory:
+ description:
+      - The memory allocated to the instance, used with custom service offerings.
+ required: false
+ default: null
+ template:
+ description:
+ - Name or id of the template to be used for creating the new instance.
+ - Required when using C(state=present).
+      - Mutually exclusive with C(iso) option.
+ required: false
+ default: null
+ iso:
+ description:
+ - Name or id of the ISO to be used for creating the new instance.
+ - Required when using C(state=present).
+ - Mutually exclusive with C(template) option.
+ required: false
+ default: null
+ template_filter:
+ description:
+ - Name of the filter used to search for the template or iso.
+ - Used for params C(iso) or C(template) on C(state=present).
+ required: false
+ default: 'executable'
+ choices: [ 'featured', 'self', 'selfexecutable', 'sharedexecutable', 'executable', 'community' ]
+ aliases: [ 'iso_filter' ]
+ version_added: '2.1'
+ hypervisor:
+ description:
+      - Name of the hypervisor to be used for creating the new instance.
+ - Relevant when using C(state=present), but only considered if not set on ISO/template.
+ - If not set or found on ISO/template, first found hypervisor will be used.
+ required: false
+ default: null
+ choices: [ 'KVM', 'VMware', 'BareMetal', 'XenServer', 'LXC', 'HyperV', 'UCS', 'OVM' ]
+ keyboard:
+ description:
+ - Keyboard device type for the instance.
+ required: false
+ default: null
+ choices: [ 'de', 'de-ch', 'es', 'fi', 'fr', 'fr-be', 'fr-ch', 'is', 'it', 'jp', 'nl-be', 'no', 'pt', 'uk', 'us' ]
+ networks:
+ description:
+ - List of networks to use for the new instance.
+ required: false
+ default: []
+ aliases: [ 'network' ]
+ ip_address:
+ description:
+      - IPv4 address for the instance's default network during creation.
+ required: false
+ default: null
+ ip6_address:
+ description:
+      - IPv6 address for the instance's default network.
+ required: false
+ default: null
+ ip_to_networks:
+ description:
+ - "List of mappings in the form {'network': NetworkName, 'ip': 1.2.3.4}"
+ - Mutually exclusive with C(networks) option.
+ required: false
+ default: null
+ aliases: [ 'ip_to_network' ]
+ disk_offering:
+ description:
+ - Name of the disk offering to be used.
+ required: false
+ default: null
+ disk_size:
+ description:
+      - Disk size in GByte. Required if deploying the instance from an ISO.
+ required: false
+ default: null
+ root_disk_size:
+ description:
+      - Root disk size in GByte. Required if you deploy an instance with the KVM hypervisor and want to resize the root disk at startup (requires CloudStack >= 4.4, and cloud-initramfs-growroot installed and enabled in the template).
+ required: false
+ default: null
+ security_groups:
+ description:
+      - List of security groups to be applied to the instance.
+ required: false
+ default: null
+ aliases: [ 'security_group' ]
+ domain:
+ description:
+ - Domain the instance is related to.
+ required: false
+ default: null
+ account:
+ description:
+ - Account the instance is related to.
+ required: false
+ default: null
+ project:
+ description:
+      - Name of the project the instance is to be deployed in.
+ required: false
+ default: null
+ zone:
+ description:
+      - Name of the zone in which the instance should be deployed.
+ - If not set, default zone is used.
+ required: false
+ default: null
+ ssh_key:
+ description:
+ - Name of the SSH key to be deployed on the new instance.
+ required: false
+ default: null
+ affinity_groups:
+ description:
+      - Names of affinity groups to be applied to the new instance.
+ required: false
+ default: []
+ aliases: [ 'affinity_group' ]
+ user_data:
+ description:
+ - Optional data (ASCII) that can be sent to the instance upon a successful deployment.
+ - The data will be automatically base64 encoded.
+      - Consider switching to HTTP POST by using C(CLOUDSTACK_METHOD=post) to increase the size limit from 2 KB (HTTP GET) to 32 KB.
+ required: false
+ default: null
+ force:
+ description:
+ - Force stop/start the instance if required to apply changes, otherwise a running instance will not be changed.
+ required: false
+ default: false
+ tags:
+ description:
+ - List of tags. Tags are a list of dictionaries having keys C(key) and C(value).
+ - "If you want to delete all tags, set a empty list e.g. C(tags: [])."
+ required: false
+ default: null
+ poll_async:
+ description:
+ - Poll async jobs until job has finished.
+ required: false
+ default: true
+extends_documentation_fragment: cloudstack
+'''
+
+EXAMPLES = '''
+# Create an instance from an ISO
+# NOTE: Names of offerings and ISOs depend on the CloudStack configuration.
+- local_action:
+ module: cs_instance
+ name: web-vm-1
+ iso: Linux Debian 7 64-bit
+ hypervisor: VMware
+ project: Integration
+ zone: ch-zrh-ix-01
+ service_offering: 1cpu_1gb
+ disk_offering: PerfPlus Storage
+ disk_size: 20
+ networks:
+ - Server Integration
+ - Sync Integration
+ - Storage Integration
+
+# For changing a running instance, use the 'force' parameter
+- local_action:
+ module: cs_instance
+ name: web-vm-1
+ display_name: web-vm-01.example.com
+ iso: Linux Debian 7 64-bit
+ service_offering: 2cpu_2gb
+ force: yes
+
+# Create or update an instance on Exoscale's public cloud using display_name.
+# Note: user_data can be used to kickstart the instance using cloud-init yaml config.
+- local_action:
+ module: cs_instance
+ display_name: web-vm-1
+ template: Linux Debian 7 64-bit
+ service_offering: Tiny
+ ssh_key: john@example.com
+ tags:
+ - { key: admin, value: john }
+ - { key: foo, value: bar }
+ user_data: |
+ #cloud-config
+ packages:
+ - nginx
+
+# Create an instance with multiple interfaces specifying the IP addresses
+- local_action:
+ module: cs_instance
+ name: web-vm-1
+ template: Linux Debian 7 64-bit
+ service_offering: Tiny
+ ip_to_networks:
+ - {'network': NetworkA, 'ip': '10.1.1.1'}
+ - {'network': NetworkB, 'ip': '192.0.2.1'}
+
+# Ensure an instance is stopped
+- local_action: cs_instance name=web-vm-1 state=stopped
+
+# Ensure an instance is running
+- local_action: cs_instance name=web-vm-1 state=started
+
+# Remove an instance
+- local_action: cs_instance name=web-vm-1 state=absent
+'''
+
+RETURN = '''
+---
+id:
+ description: UUID of the instance.
+ returned: success
+ type: string
+ sample: 04589590-ac63-4ffc-93f5-b698b8ac38b6
+name:
+ description: Name of the instance.
+ returned: success
+ type: string
+ sample: web-01
+display_name:
+ description: Display name of the instance.
+ returned: success
+ type: string
+ sample: web-01
+group:
+  description: Group name the instance is related to.
+ returned: success
+ type: string
+ sample: web
+created:
+  description: Date the instance was created.
+ returned: success
+ type: string
+ sample: 2014-12-01T14:57:57+0100
+password_enabled:
+ description: True if password setting is enabled.
+ returned: success
+ type: boolean
+ sample: true
+password:
+  description: The password of the instance, if it exists.
+ returned: success
+ type: string
+ sample: Ge2oe7Do
+ssh_key:
+ description: Name of SSH key deployed to instance.
+ returned: success
+ type: string
+ sample: key@work
+domain:
+ description: Domain the instance is related to.
+ returned: success
+ type: string
+ sample: example domain
+account:
+ description: Account the instance is related to.
+ returned: success
+ type: string
+ sample: example account
+project:
+ description: Name of project the instance is related to.
+ returned: success
+ type: string
+ sample: Production
+default_ip:
+ description: Default IP address of the instance.
+ returned: success
+ type: string
+ sample: 10.23.37.42
+public_ip:
+  description: Public IP address associated with the instance via a static NAT rule.
+ returned: success
+ type: string
+ sample: 1.2.3.4
+iso:
+ description: Name of ISO the instance was deployed with.
+ returned: success
+ type: string
+ sample: Debian-8-64bit
+template:
+ description: Name of template the instance was deployed with.
+ returned: success
+ type: string
+ sample: Debian-8-64bit
+service_offering:
+ description: Name of the service offering the instance has.
+ returned: success
+ type: string
+ sample: 2cpu_2gb
+zone:
+ description: Name of zone the instance is in.
+ returned: success
+ type: string
+ sample: ch-gva-2
+state:
+ description: State of the instance.
+ returned: success
+ type: string
+ sample: Running
+security_groups:
+ description: Security groups the instance is in.
+ returned: success
+ type: list
+ sample: '[ "default" ]'
+affinity_groups:
+ description: Affinity groups the instance is in.
+ returned: success
+ type: list
+ sample: '[ "webservers" ]'
+tags:
+ description: List of resource tags associated with the instance.
+ returned: success
+ type: dict
+ sample: '[ { "key": "foo", "value": "bar" } ]'
+hypervisor:
+ description: Hypervisor related to this instance.
+ returned: success
+ type: string
+ sample: KVM
+instance_name:
+ description: Internal name of the instance (ROOT admin only).
+ returned: success
+ type: string
+ sample: i-44-3992-VM
+'''
+
+import base64
+
+# import cloudstack common
+from ansible.module_utils.cloudstack import *
+
+
+class AnsibleCloudStackInstance(AnsibleCloudStack):
+
+ def __init__(self, module):
+ super(AnsibleCloudStackInstance, self).__init__(module)
+ self.returns = {
+ 'group': 'group',
+ 'hypervisor': 'hypervisor',
+ 'instancename': 'instance_name',
+ 'publicip': 'public_ip',
+ 'passwordenabled': 'password_enabled',
+ 'password': 'password',
+ 'serviceofferingname': 'service_offering',
+ 'isoname': 'iso',
+ 'templatename': 'template',
+ 'keypair': 'ssh_key',
+ }
+ self.instance = None
+ self.template = None
+ self.iso = None
+
+
+ def get_service_offering_id(self):
+ service_offering = self.module.params.get('service_offering')
+
+ service_offerings = self.cs.listServiceOfferings()
+ if service_offerings:
+ if not service_offering:
+ return service_offerings['serviceoffering'][0]['id']
+
+ for s in service_offerings['serviceoffering']:
+ if service_offering in [ s['name'], s['id'] ]:
+ return s['id']
+ self.module.fail_json(msg="Service offering '%s' not found" % service_offering)
+
+
+ def get_template_or_iso(self, key=None):
+ template = self.module.params.get('template')
+ iso = self.module.params.get('iso')
+
+ if not template and not iso:
+ return None
+
+ args = {}
+ args['account'] = self.get_account(key='name')
+ args['domainid'] = self.get_domain(key='id')
+ args['projectid'] = self.get_project(key='id')
+ args['zoneid'] = self.get_zone(key='id')
+ args['isrecursive'] = True
+
+ if template:
+ if self.template:
+ return self._get_by_key(key, self.template)
+
+ args['templatefilter'] = self.module.params.get('template_filter')
+ templates = self.cs.listTemplates(**args)
+ if templates:
+ for t in templates['template']:
+ if template in [ t['displaytext'], t['name'], t['id'] ]:
+ self.template = t
+ return self._get_by_key(key, self.template)
+ self.module.fail_json(msg="Template '%s' not found" % template)
+
+ elif iso:
+ if self.iso:
+ return self._get_by_key(key, self.iso)
+ args['isofilter'] = self.module.params.get('template_filter')
+ isos = self.cs.listIsos(**args)
+ if isos:
+ for i in isos['iso']:
+ if iso in [ i['displaytext'], i['name'], i['id'] ]:
+ self.iso = i
+ return self._get_by_key(key, self.iso)
+ self.module.fail_json(msg="ISO '%s' not found" % iso)
+
+
+ def get_disk_offering_id(self):
+ disk_offering = self.module.params.get('disk_offering')
+
+ if not disk_offering:
+ return None
+
+ disk_offerings = self.cs.listDiskOfferings()
+ if disk_offerings:
+ for d in disk_offerings['diskoffering']:
+ if disk_offering in [ d['displaytext'], d['name'], d['id'] ]:
+ return d['id']
+ self.module.fail_json(msg="Disk offering '%s' not found" % disk_offering)
+
+
+ def get_instance(self):
+ instance = self.instance
+ if not instance:
+ instance_name = self.get_or_fallback('name', 'display_name')
+
+ args = {}
+ args['account'] = self.get_account(key='name')
+ args['domainid'] = self.get_domain(key='id')
+ args['projectid'] = self.get_project(key='id')
+ # Do not pass zoneid, as the instance name must be unique across zones.
+ instances = self.cs.listVirtualMachines(**args)
+ if instances:
+ for v in instances['virtualmachine']:
+ if instance_name.lower() in [ v['name'].lower(), v['displayname'].lower(), v['id'] ]:
+ self.instance = v
+ break
+ return self.instance
+
+
+ def get_iptonetwork_mappings(self):
+ network_mappings = self.module.params.get('ip_to_networks')
+ if network_mappings is None:
+ return
+
+ if network_mappings and self.module.params.get('networks'):
+ self.module.fail_json(msg="networks and ip_to_networks are mutually exclusive.")
+
+ network_names = [n['network'] for n in network_mappings]
+ ids = self.get_network_ids(network_names)
+ res = []
+ for i, data in enumerate(network_mappings):
+ res.append({'networkid': ids[i], 'ip': data['ip']})
+ return res
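+ # A sketch of the mapping (network names/IPs here are illustrative):
+ # ip_to_networks=[{'network': 'net-a', 'ip': '10.0.1.5'}] resolves to
+ # [{'networkid': '<uuid of net-a>', 'ip': '10.0.1.5'}]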
+
+
+ def security_groups_has_changed(self):
+ security_groups = self.module.params.get('security_groups')
+ if security_groups is None:
+ return False
+
+ security_groups = [s.lower() for s in security_groups]
+ instance_security_groups = self.instance.get('securitygroup',[])
+
+ instance_security_group_names = []
+ for instance_security_group in instance_security_groups:
+ if instance_security_group['name'].lower() not in security_groups:
+ return True
+ else:
+ instance_security_group_names.append(instance_security_group['name'].lower())
+
+ for security_group in security_groups:
+ if security_group not in instance_security_group_names:
+ return True
+ return False
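+ # The two loops above amount to a symmetric set difference: a change is
+ # reported if the instance has a group not in the requested list or vice versa.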
+
+
+ def get_network_ids(self, network_names=None):
+ if network_names is None:
+ network_names = self.module.params.get('networks')
+
+ if not network_names:
+ return None
+
+ args = {}
+ args['account'] = self.get_account(key='name')
+ args['domainid'] = self.get_domain(key='id')
+ args['projectid'] = self.get_project(key='id')
+ args['zoneid'] = self.get_zone(key='id')
+
+ networks = self.cs.listNetworks(**args)
+ if not networks:
+ self.module.fail_json(msg="No networks available")
+
+ network_ids = []
+ network_displaytexts = []
+ for network_name in network_names:
+ for n in networks['network']:
+ if network_name in [ n['displaytext'], n['name'], n['id'] ]:
+ network_ids.append(n['id'])
+ network_displaytexts.append(n['name'])
+ break
+
+ if len(network_ids) != len(network_names):
+ self.module.fail_json(msg="Could not find all networks, networks list found: %s" % network_displaytexts)
+
+ return network_ids
+
+
+ def present_instance(self, start_vm=True):
+ instance = self.get_instance()
+
+ if not instance:
+ instance = self.deploy_instance(start_vm=start_vm)
+ else:
+ instance = self.recover_instance(instance=instance)
+ instance = self.update_instance(instance=instance, start_vm=start_vm)
+
+ # In check mode, we do not necessarily have an instance
+ if instance:
+ instance = self.ensure_tags(resource=instance, resource_type='UserVm')
+ # refresh instance data
+ self.instance = instance
+
+ return instance
+
+
+ def get_user_data(self):
+ user_data = self.module.params.get('user_data')
+ if user_data is not None:
+ user_data = base64.b64encode(str(user_data))
+ return user_data
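+ # The CloudStack API expects userdata base64 encoded, e.g. 'key=value'
+ # is sent as 'a2V5PXZhbHVl'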
+
+
+ def get_details(self):
+ res = None
+ cpu = self.module.params.get('cpu')
+ cpu_speed = self.module.params.get('cpu_speed')
+ memory = self.module.params.get('memory')
+ if all([cpu, cpu_speed, memory]):
+ res = [{
+ 'cpuNumber': cpu,
+ 'cpuSpeed': cpu_speed,
+ 'memory': memory,
+ }]
+ return res
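+ # Resulting payload sketch, assuming cpu=2, cpu_speed=2198 and memory=4096:
+ # [{'cpuNumber': 2, 'cpuSpeed': 2198, 'memory': 4096}]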
+
+
+ def deploy_instance(self, start_vm=True):
+ self.result['changed'] = True
+ networkids = self.get_network_ids()
+ if networkids is not None:
+ networkids = ','.join(networkids)
+
+ args = {}
+ args['templateid'] = self.get_template_or_iso(key='id')
+ if not args['templateid']:
+ self.module.fail_json(msg="Template or ISO is required.")
+
+ args['zoneid'] = self.get_zone(key='id')
+ args['serviceofferingid'] = self.get_service_offering_id()
+ args['account'] = self.get_account(key='name')
+ args['domainid'] = self.get_domain(key='id')
+ args['projectid'] = self.get_project(key='id')
+ args['diskofferingid'] = self.get_disk_offering_id()
+ args['networkids'] = networkids
+ args['iptonetworklist'] = self.get_iptonetwork_mappings()
+ args['userdata'] = self.get_user_data()
+ args['keyboard'] = self.module.params.get('keyboard')
+ args['ipaddress'] = self.module.params.get('ip_address')
+ args['ip6address'] = self.module.params.get('ip6_address')
+ args['name'] = self.module.params.get('name')
+ args['displayname'] = self.get_or_fallback('display_name', 'name')
+ args['group'] = self.module.params.get('group')
+ args['keypair'] = self.module.params.get('ssh_key')
+ args['size'] = self.module.params.get('disk_size')
+ args['startvm'] = start_vm
+ args['rootdisksize'] = self.module.params.get('root_disk_size')
+ args['affinitygroupnames'] = ','.join(self.module.params.get('affinity_groups'))
+ args['details'] = self.get_details()
+
+ security_groups = self.module.params.get('security_groups')
+ if security_groups is not None:
+ args['securitygroupnames'] = ','.join(security_groups)
+
+ template_iso = self.get_template_or_iso()
+ if 'hypervisor' not in template_iso:
+ args['hypervisor'] = self.get_hypervisor()
+
+ instance = None
+ if not self.module.check_mode:
+ instance = self.cs.deployVirtualMachine(**args)
+
+ if 'errortext' in instance:
+ self.module.fail_json(msg="Failed: '%s'" % instance['errortext'])
+
+ poll_async = self.module.params.get('poll_async')
+ if poll_async:
+ instance = self.poll_job(instance, 'virtualmachine')
+ return instance
+
+
+ def update_instance(self, instance, start_vm=True):
+ # Service offering data
+ args_service_offering = {}
+ args_service_offering['id'] = instance['id']
+ if self.module.params.get('service_offering'):
+ args_service_offering['serviceofferingid'] = self.get_service_offering_id()
+ service_offering_changed = self.has_changed(args_service_offering, instance)
+
+ # Instance data
+ args_instance_update = {}
+ args_instance_update['id'] = instance['id']
+ args_instance_update['userdata'] = self.get_user_data()
+ args_instance_update['ostypeid'] = self.get_os_type(key='id')
+ if self.module.params.get('group'):
+ args_instance_update['group'] = self.module.params.get('group')
+ if self.module.params.get('display_name'):
+ args_instance_update['displayname'] = self.module.params.get('display_name')
+ instance_changed = self.has_changed(args_instance_update, instance)
+
+ # SSH key data
+ args_ssh_key = {}
+ args_ssh_key['id'] = instance['id']
+ args_ssh_key['projectid'] = self.get_project(key='id')
+ if self.module.params.get('ssh_key'):
+ args_ssh_key['keypair'] = self.module.params.get('ssh_key')
+ ssh_key_changed = self.has_changed(args_ssh_key, instance)
+
+ security_groups_changed = self.security_groups_has_changed()
+
+ changed = [
+ service_offering_changed,
+ instance_changed,
+ security_groups_changed,
+ ssh_key_changed,
+ ]
+
+ if any(changed):
+ force = self.module.params.get('force')
+ instance_state = instance['state'].lower()
+ if instance_state == 'stopped' or force:
+ self.result['changed'] = True
+ if not self.module.check_mode:
+
+ # Ensure VM has stopped
+ instance = self.stop_instance()
+ instance = self.poll_job(instance, 'virtualmachine')
+ self.instance = instance
+
+ # Change service offering
+ if service_offering_changed:
+ res = self.cs.changeServiceForVirtualMachine(**args_service_offering)
+ if 'errortext' in res:
+ self.module.fail_json(msg="Failed: '%s'" % res['errortext'])
+ instance = res['virtualmachine']
+ self.instance = instance
+
+ # Update VM
+ if instance_changed or security_groups_changed:
+ if security_groups_changed:
+ args_instance_update['securitygroupnames'] = ','.join(self.module.params.get('security_groups'))
+ res = self.cs.updateVirtualMachine(**args_instance_update)
+ if 'errortext' in res:
+ self.module.fail_json(msg="Failed: '%s'" % res['errortext'])
+ instance = res['virtualmachine']
+ self.instance = instance
+
+ # Reset SSH key
+ if ssh_key_changed:
+ instance = self.cs.resetSSHKeyForVirtualMachine(**args_ssh_key)
+ if 'errortext' in instance:
+ self.module.fail_json(msg="Failed: '%s'" % instance['errortext'])
+
+ instance = self.poll_job(instance, 'virtualmachine')
+ self.instance = instance
+
+ # Start VM again if it was running before
+ if instance_state == 'running' and start_vm:
+ instance = self.start_instance()
+ return instance
+
+
+ def recover_instance(self, instance):
+ if instance['state'].lower() in [ 'destroying', 'destroyed' ]:
+ self.result['changed'] = True
+ if not self.module.check_mode:
+ res = self.cs.recoverVirtualMachine(id=instance['id'])
+ if 'errortext' in res:
+ self.module.fail_json(msg="Failed: '%s'" % res['errortext'])
+ instance = res['virtualmachine']
+ return instance
+
+
+ def absent_instance(self):
+ instance = self.get_instance()
+ if instance:
+ if instance['state'].lower() not in ['expunging', 'destroying', 'destroyed']:
+ self.result['changed'] = True
+ if not self.module.check_mode:
+ res = self.cs.destroyVirtualMachine(id=instance['id'])
+
+ if 'errortext' in res:
+ self.module.fail_json(msg="Failed: '%s'" % res['errortext'])
+
+ poll_async = self.module.params.get('poll_async')
+ if poll_async:
+ instance = self.poll_job(res, 'virtualmachine')
+ return instance
+
+
+ def expunge_instance(self):
+ instance = self.get_instance()
+ if instance:
+ res = {}
+ if instance['state'].lower() in [ 'destroying', 'destroyed' ]:
+ self.result['changed'] = True
+ if not self.module.check_mode:
+ res = self.cs.destroyVirtualMachine(id=instance['id'], expunge=True)
+
+ elif instance['state'].lower() not in [ 'expunging' ]:
+ self.result['changed'] = True
+ if not self.module.check_mode:
+ res = self.cs.destroyVirtualMachine(id=instance['id'], expunge=True)
+
+ if res and 'errortext' in res:
+ self.module.fail_json(msg="Failed: '%s'" % res['errortext'])
+
+ poll_async = self.module.params.get('poll_async')
+ if poll_async:
+ res = self.poll_job(res, 'virtualmachine')
+ return instance
+
+
+ def stop_instance(self):
+ instance = self.get_instance()
+ # in check mode the instance may not be instantiated
+ if instance:
+ if instance['state'].lower() in ['stopping', 'stopped']:
+ return instance
+
+ if instance['state'].lower() in ['starting', 'running']:
+ self.result['changed'] = True
+ if not self.module.check_mode:
+ instance = self.cs.stopVirtualMachine(id=instance['id'])
+
+ if 'errortext' in instance:
+ self.module.fail_json(msg="Failed: '%s'" % instance['errortext'])
+
+ poll_async = self.module.params.get('poll_async')
+ if poll_async:
+ instance = self.poll_job(instance, 'virtualmachine')
+ return instance
+
+
+ def start_instance(self):
+ instance = self.get_instance()
+ # in check mode the instance may not be instantiated
+ if instance:
+ if instance['state'].lower() in ['starting', 'running']:
+ return instance
+
+ if instance['state'].lower() in ['stopped', 'stopping']:
+ self.result['changed'] = True
+ if not self.module.check_mode:
+ instance = self.cs.startVirtualMachine(id=instance['id'])
+
+ if 'errortext' in instance:
+ self.module.fail_json(msg="Failed: '%s'" % instance['errortext'])
+
+ poll_async = self.module.params.get('poll_async')
+ if poll_async:
+ instance = self.poll_job(instance, 'virtualmachine')
+ return instance
+
+
+ def restart_instance(self):
+ instance = self.get_instance()
+ # in check mode the instance may not be instantiated
+ if instance:
+ if instance['state'].lower() in [ 'running', 'starting' ]:
+ self.result['changed'] = True
+ if not self.module.check_mode:
+ instance = self.cs.rebootVirtualMachine(id=instance['id'])
+
+ if 'errortext' in instance:
+ self.module.fail_json(msg="Failed: '%s'" % instance['errortext'])
+
+ poll_async = self.module.params.get('poll_async')
+ if poll_async:
+ instance = self.poll_job(instance, 'virtualmachine')
+
+ elif instance['state'].lower() in [ 'stopping', 'stopped' ]:
+ instance = self.start_instance()
+ return instance
+
+
+ def restore_instance(self):
+ instance = self.get_instance()
+ self.result['changed'] = True
+ # in check mode the instance may not be instantiated
+ if instance:
+ args = {}
+ args['templateid'] = self.get_template_or_iso(key='id')
+ args['virtualmachineid'] = instance['id']
+ res = self.cs.restoreVirtualMachine(**args)
+ if 'errortext' in res:
+ self.module.fail_json(msg="Failed: '%s'" % res['errortext'])
+
+ poll_async = self.module.params.get('poll_async')
+ if poll_async:
+ instance = self.poll_job(res, 'virtualmachine')
+ return instance
+
+
+ def get_result(self, instance):
+ super(AnsibleCloudStackInstance, self).get_result(instance)
+ if instance:
+ if 'securitygroup' in instance:
+ security_groups = []
+ for securitygroup in instance['securitygroup']:
+ security_groups.append(securitygroup['name'])
+ self.result['security_groups'] = security_groups
+ if 'affinitygroup' in instance:
+ affinity_groups = []
+ for affinitygroup in instance['affinitygroup']:
+ affinity_groups.append(affinitygroup['name'])
+ self.result['affinity_groups'] = affinity_groups
+ if 'nic' in instance:
+ for nic in instance['nic']:
+ if nic['isdefault'] and 'ipaddress' in nic:
+ self.result['default_ip'] = nic['ipaddress']
+ return self.result
+
+
+def main():
+ argument_spec = cs_argument_spec()
+ argument_spec.update(dict(
+ name = dict(default=None),
+ display_name = dict(default=None),
+ group = dict(default=None),
+ state = dict(choices=['present', 'deployed', 'started', 'stopped', 'restarted', 'restored', 'absent', 'destroyed', 'expunged'], default='present'),
+ service_offering = dict(default=None),
+ cpu = dict(default=None, type='int'),
+ cpu_speed = dict(default=None, type='int'),
+ memory = dict(default=None, type='int'),
+ template = dict(default=None),
+ iso = dict(default=None),
+ template_filter = dict(default="executable", aliases=['iso_filter'], choices=['featured', 'self', 'selfexecutable', 'sharedexecutable', 'executable', 'community']),
+ networks = dict(type='list', aliases=[ 'network' ], default=None),
+ ip_to_networks = dict(type='list', aliases=['ip_to_network'], default=None),
+ ip_address = dict(default=None),
+ ip6_address = dict(default=None),
+ disk_offering = dict(default=None),
+ disk_size = dict(type='int', default=None),
+ root_disk_size = dict(type='int', default=None),
+ keyboard = dict(choices=['de', 'de-ch', 'es', 'fi', 'fr', 'fr-be', 'fr-ch', 'is', 'it', 'jp', 'nl-be', 'no', 'pt', 'uk', 'us'], default=None),
+ hypervisor = dict(choices=CS_HYPERVISORS, default=None),
+ security_groups = dict(type='list', aliases=[ 'security_group' ], default=None),
+ affinity_groups = dict(type='list', aliases=[ 'affinity_group' ], default=[]),
+ domain = dict(default=None),
+ account = dict(default=None),
+ project = dict(default=None),
+ user_data = dict(default=None),
+ zone = dict(default=None),
+ ssh_key = dict(default=None),
+ force = dict(type='bool', default=False),
+ tags = dict(type='list', aliases=[ 'tag' ], default=None),
+ poll_async = dict(type='bool', default=True),
+ ))
+
+ required_together = cs_required_together()
+ required_together.extend([
+ ['cpu', 'cpu_speed', 'memory'],
+ ])
+
+ module = AnsibleModule(
+ argument_spec=argument_spec,
+ required_together=required_together,
+ required_one_of = (
+ ['display_name', 'name'],
+ ),
+ mutually_exclusive = (
+ ['template', 'iso'],
+ ),
+ supports_check_mode=True
+ )
+
+ try:
+ acs_instance = AnsibleCloudStackInstance(module)
+
+ state = module.params.get('state')
+
+ if state in ['absent', 'destroyed']:
+ instance = acs_instance.absent_instance()
+
+ elif state in ['expunged']:
+ instance = acs_instance.expunge_instance()
+
+ elif state in ['restored']:
+ acs_instance.present_instance()
+ instance = acs_instance.restore_instance()
+
+ elif state in ['present', 'deployed']:
+ instance = acs_instance.present_instance()
+
+ elif state in ['stopped']:
+ acs_instance.present_instance(start_vm=False)
+ instance = acs_instance.stop_instance()
+
+ elif state in ['started']:
+ acs_instance.present_instance()
+ instance = acs_instance.start_instance()
+
+ elif state in ['restarted']:
+ acs_instance.present_instance()
+ instance = acs_instance.restart_instance()
+
+ if instance and 'state' in instance and instance['state'].lower() == 'error':
+ module.fail_json(msg="Instance named '%s' in error state." % module.params.get('name'))
+
+ result = acs_instance.get_result(instance)
+
+ except CloudStackException as e:
+ module.fail_json(msg='CloudStackException: %s' % str(e))
+
+ module.exit_json(**result)
+
+# import module snippets
+from ansible.module_utils.basic import *
+if __name__ == '__main__':
+ main()
diff --git a/lib/ansible/modules/extras/cloud/cloudstack/cs_instance_facts.py b/lib/ansible/modules/extras/cloud/cloudstack/cs_instance_facts.py
new file mode 100644
index 0000000000..f405debca3
--- /dev/null
+++ b/lib/ansible/modules/extras/cloud/cloudstack/cs_instance_facts.py
@@ -0,0 +1,273 @@
+#!/usr/bin/python
+# -*- coding: utf-8 -*-
+#
+# (c) 2016, René Moser <mail@renemoser.net>
+#
+# This file is part of Ansible
+#
+# Ansible is free software: you can redistribute it and/or modify
+# it under the terms of the GNU General Public License as published by
+# the Free Software Foundation, either version 3 of the License, or
+# (at your option) any later version.
+#
+# Ansible is distributed in the hope that it will be useful,
+# but WITHOUT ANY WARRANTY; without even the implied warranty of
+# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+# GNU General Public License for more details.
+#
+# You should have received a copy of the GNU General Public License
+# along with Ansible. If not, see <http://www.gnu.org/licenses/>.
+
+DOCUMENTATION = '''
+---
+module: cs_instance_facts
+short_description: Gathering facts about instances on Apache CloudStack based clouds.
+description:
+ - Gathering facts from the API of an instance.
+version_added: "2.1"
+author: "René Moser (@resmo)"
+options:
+ name:
+ description:
+ - Name or display name of the instance.
+ required: true
+ domain:
+ description:
+ - Domain the instance is related to.
+ required: false
+ default: null
+ account:
+ description:
+ - Account the instance is related to.
+ required: false
+ default: null
+ project:
+ description:
+ - Project the instance is related to.
+ required: false
+ default: null
+extends_documentation_fragment: cloudstack
+'''
+
+EXAMPLES = '''
+- local_action:
+ module: cs_instance_facts
+ name: web-vm-1
+
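+# A sketch scoping the lookup to a project (assumes a project named "Production"):
+- local_action:
+ module: cs_instance_facts
+ name: web-vm-1
+ project: Production
+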
+- debug: var=cloudstack_instance
+'''
+
+RETURN = '''
+---
+cloudstack_instance.id:
+ description: UUID of the instance.
+ returned: success
+ type: string
+ sample: 04589590-ac63-4ffc-93f5-b698b8ac38b6
+cloudstack_instance.name:
+ description: Name of the instance.
+ returned: success
+ type: string
+ sample: web-01
+cloudstack_instance.display_name:
+ description: Display name of the instance.
+ returned: success
+ type: string
+ sample: web-01
+cloudstack_instance.group:
+ description: Group name the instance is related to.
+ returned: success
+ type: string
+ sample: web
+cloudstack_instance.created:
+ description: Date the instance was created.
+ returned: success
+ type: string
+ sample: 2014-12-01T14:57:57+0100
+cloudstack_instance.password_enabled:
+ description: True if password setting is enabled.
+ returned: success
+ type: boolean
+ sample: true
+cloudstack_instance.password:
+ description: The password of the instance if it exists.
+ returned: success
+ type: string
+ sample: Ge2oe7Do
+cloudstack_instance.ssh_key:
+ description: Name of SSH key deployed to instance.
+ returned: success
+ type: string
+ sample: key@work
+cloudstack_instance.domain:
+ description: Domain the instance is related to.
+ returned: success
+ type: string
+ sample: example domain
+cloudstack_instance.account:
+ description: Account the instance is related to.
+ returned: success
+ type: string
+ sample: example account
+cloudstack_instance.project:
+ description: Name of project the instance is related to.
+ returned: success
+ type: string
+ sample: Production
+cloudstack_instance.default_ip:
+ description: Default IP address of the instance.
+ returned: success
+ type: string
+ sample: 10.23.37.42
+cloudstack_instance.public_ip:
+ description: Public IP address associated with the instance via a static NAT rule.
+ returned: success
+ type: string
+ sample: 1.2.3.4
+cloudstack_instance.iso:
+ description: Name of ISO the instance was deployed with.
+ returned: success
+ type: string
+ sample: Debian-8-64bit
+cloudstack_instance.template:
+ description: Name of template the instance was deployed with.
+ returned: success
+ type: string
+ sample: Debian-8-64bit
+cloudstack_instance.service_offering:
+ description: Name of the service offering the instance has.
+ returned: success
+ type: string
+ sample: 2cpu_2gb
+cloudstack_instance.zone:
+ description: Name of zone the instance is in.
+ returned: success
+ type: string
+ sample: ch-gva-2
+cloudstack_instance.state:
+ description: State of the instance.
+ returned: success
+ type: string
+ sample: Running
+cloudstack_instance.security_groups:
+ description: Security groups the instance is in.
+ returned: success
+ type: list
+ sample: '[ "default" ]'
+cloudstack_instance.affinity_groups:
+ description: Affinity groups the instance is in.
+ returned: success
+ type: list
+ sample: '[ "webservers" ]'
+cloudstack_instance.tags:
+ description: List of resource tags associated with the instance.
+ returned: success
+ type: list
+ sample: '[ { "key": "foo", "value": "bar" } ]'
+cloudstack_instance.hypervisor:
+ description: Hypervisor related to this instance.
+ returned: success
+ type: string
+ sample: KVM
+cloudstack_instance.instance_name:
+ description: Internal name of the instance (ROOT admin only).
+ returned: success
+ type: string
+ sample: i-44-3992-VM
+'''
+
+import base64
+
+# import cloudstack common
+from ansible.module_utils.cloudstack import *
+
+class AnsibleCloudStackInstanceFacts(AnsibleCloudStack):
+
+ def __init__(self, module):
+ super(AnsibleCloudStackInstanceFacts, self).__init__(module)
+ self.instance = None
+ self.returns = {
+ 'group': 'group',
+ 'hypervisor': 'hypervisor',
+ 'instancename': 'instance_name',
+ 'publicip': 'public_ip',
+ 'passwordenabled': 'password_enabled',
+ 'password': 'password',
+ 'serviceofferingname': 'service_offering',
+ 'isoname': 'iso',
+ 'templatename': 'template',
+ 'keypair': 'ssh_key',
+ }
+ self.facts = {
+ 'cloudstack_instance': None,
+ }
+
+
+ def get_instance(self):
+ instance = self.instance
+ if not instance:
+ instance_name = self.module.params.get('name')
+
+ args = {}
+ args['account'] = self.get_account(key='name')
+ args['domainid'] = self.get_domain(key='id')
+ args['projectid'] = self.get_project(key='id')
+ # Do not pass zoneid, as the instance name must be unique across zones.
+ instances = self.cs.listVirtualMachines(**args)
+ if instances:
+ for v in instances['virtualmachine']:
+ if instance_name.lower() in [ v['name'].lower(), v['displayname'].lower(), v['id'] ]:
+ self.instance = v
+ break
+ return self.instance
+
+
+ def run(self):
+ instance = self.get_instance()
+ if not instance:
+ self.module.fail_json(msg="Instance not found: %s" % self.module.params.get('name'))
+ self.facts['cloudstack_instance'] = self.get_result(instance)
+ return self.facts
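+ # main() exposes this dict via ansible_facts, so subsequent tasks can
+ # reference e.g. cloudstack_instance.default_ip directly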
+
+
+ def get_result(self, instance):
+ super(AnsibleCloudStackInstanceFacts, self).get_result(instance)
+ if instance:
+ if 'securitygroup' in instance:
+ security_groups = []
+ for securitygroup in instance['securitygroup']:
+ security_groups.append(securitygroup['name'])
+ self.result['security_groups'] = security_groups
+ if 'affinitygroup' in instance:
+ affinity_groups = []
+ for affinitygroup in instance['affinitygroup']:
+ affinity_groups.append(affinitygroup['name'])
+ self.result['affinity_groups'] = affinity_groups
+ if 'nic' in instance:
+ for nic in instance['nic']:
+ if nic['isdefault'] and 'ipaddress' in nic:
+ self.result['default_ip'] = nic['ipaddress']
+ return self.result
+
+
+def main():
+ argument_spec = cs_argument_spec()
+ argument_spec.update(dict(
+ name = dict(required=True),
+ domain = dict(default=None),
+ account = dict(default=None),
+ project = dict(default=None),
+ ))
+
+ module = AnsibleModule(
+ argument_spec=argument_spec,
+ supports_check_mode=False,
+ )
+
+ cs_instance_facts = AnsibleCloudStackInstanceFacts(module=module).run()
+ cs_facts_result = dict(changed=False, ansible_facts=cs_instance_facts)
+ module.exit_json(**cs_facts_result)
+
+from ansible.module_utils.basic import *
+if __name__ == '__main__':
+ main()
diff --git a/lib/ansible/modules/extras/cloud/cloudstack/cs_instancegroup.py b/lib/ansible/modules/extras/cloud/cloudstack/cs_instancegroup.py
new file mode 100644
index 0000000000..323e039121
--- /dev/null
+++ b/lib/ansible/modules/extras/cloud/cloudstack/cs_instancegroup.py
@@ -0,0 +1,201 @@
+#!/usr/bin/python
+# -*- coding: utf-8 -*-
+#
+# (c) 2015, René Moser <mail@renemoser.net>
+#
+# This file is part of Ansible
+#
+# Ansible is free software: you can redistribute it and/or modify
+# it under the terms of the GNU General Public License as published by
+# the Free Software Foundation, either version 3 of the License, or
+# (at your option) any later version.
+#
+# Ansible is distributed in the hope that it will be useful,
+# but WITHOUT ANY WARRANTY; without even the implied warranty of
+# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+# GNU General Public License for more details.
+#
+# You should have received a copy of the GNU General Public License
+# along with Ansible. If not, see <http://www.gnu.org/licenses/>.
+
+DOCUMENTATION = '''
+---
+module: cs_instancegroup
+short_description: Manages instance groups on Apache CloudStack based clouds.
+description:
+ - Create and remove instance groups.
+version_added: '2.0'
+author: "René Moser (@resmo)"
+options:
+ name:
+ description:
+ - Name of the instance group.
+ required: true
+ domain:
+ description:
+ - Domain the instance group is related to.
+ required: false
+ default: null
+ account:
+ description:
+ - Account the instance group is related to.
+ required: false
+ default: null
+ project:
+ description:
+ - Project the instance group is related to.
+ required: false
+ default: null
+ state:
+ description:
+ - State of the instance group.
+ required: false
+ default: 'present'
+ choices: [ 'present', 'absent' ]
+extends_documentation_fragment: cloudstack
+'''
+
+EXAMPLES = '''
+# Create an instance group
+- local_action:
+ module: cs_instancegroup
+ name: loadbalancers
+
+# Remove an instance group
+- local_action:
+ module: cs_instancegroup
+ name: loadbalancers
+ state: absent
+'''
+
+RETURN = '''
+---
+id:
+ description: UUID of the instance group.
+ returned: success
+ type: string
+ sample: 04589590-ac63-4ffc-93f5-b698b8ac38b6
+name:
+ description: Name of the instance group.
+ returned: success
+ type: string
+ sample: webservers
+created:
+ description: Date when the instance group was created.
+ returned: success
+ type: string
+ sample: 2015-05-03T15:05:51+0200
+domain:
+ description: Domain the instance group is related to.
+ returned: success
+ type: string
+ sample: example domain
+account:
+ description: Account the instance group is related to.
+ returned: success
+ type: string
+ sample: example account
+project:
+ description: Project the instance group is related to.
+ returned: success
+ type: string
+ sample: example project
+'''
+
+# import cloudstack common
+from ansible.module_utils.cloudstack import *
+
+
+class AnsibleCloudStackInstanceGroup(AnsibleCloudStack):
+
+ def __init__(self, module):
+ super(AnsibleCloudStackInstanceGroup, self).__init__(module)
+ self.instance_group = None
+
+
+ def get_instance_group(self):
+ if self.instance_group:
+ return self.instance_group
+
+ name = self.module.params.get('name')
+
+ args = {}
+ args['account'] = self.get_account('name')
+ args['domainid'] = self.get_domain('id')
+ args['projectid'] = self.get_project('id')
+
+ instance_groups = self.cs.listInstanceGroups(**args)
+ if instance_groups:
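+ # match either by group name or by UUID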
+ for g in instance_groups['instancegroup']:
+ if name in [ g['name'], g['id'] ]:
+ self.instance_group = g
+ break
+ return self.instance_group
+
+
+ def present_instance_group(self):
+ instance_group = self.get_instance_group()
+ if not instance_group:
+ self.result['changed'] = True
+
+ args = {}
+ args['name'] = self.module.params.get('name')
+ args['account'] = self.get_account('name')
+ args['domainid'] = self.get_domain('id')
+ args['projectid'] = self.get_project('id')
+
+ if not self.module.check_mode:
+ res = self.cs.createInstanceGroup(**args)
+ if 'errortext' in res:
+ self.module.fail_json(msg="Failed: '%s'" % res['errortext'])
+ instance_group = res['instancegroup']
+ return instance_group
+
+
+ def absent_instance_group(self):
+ instance_group = self.get_instance_group()
+ if instance_group:
+ self.result['changed'] = True
+ if not self.module.check_mode:
+ res = self.cs.deleteInstanceGroup(id=instance_group['id'])
+ if 'errortext' in res:
+ self.module.fail_json(msg="Failed: '%s'" % res['errortext'])
+ return instance_group
+
+
+def main():
+ argument_spec = cs_argument_spec()
+ argument_spec.update(dict(
+ name = dict(required=True),
+ state = dict(default='present', choices=['present', 'absent']),
+ domain = dict(default=None),
+ account = dict(default=None),
+ project = dict(default=None),
+ ))
+
+ module = AnsibleModule(
+ argument_spec=argument_spec,
+ required_together=cs_required_together(),
+ supports_check_mode=True
+ )
+
+ try:
+ acs_ig = AnsibleCloudStackInstanceGroup(module)
+
+ state = module.params.get('state')
+ if state in ['absent']:
+ instance_group = acs_ig.absent_instance_group()
+ else:
+ instance_group = acs_ig.present_instance_group()
+
+ result = acs_ig.get_result(instance_group)
+
+ except CloudStackException as e:
+ module.fail_json(msg='CloudStackException: %s' % str(e))
+
+ module.exit_json(**result)
+
+# import module snippets
+from ansible.module_utils.basic import *
+if __name__ == '__main__':
+ main()
diff --git a/lib/ansible/modules/extras/cloud/cloudstack/cs_ip_address.py b/lib/ansible/modules/extras/cloud/cloudstack/cs_ip_address.py
new file mode 100644
index 0000000000..4d4eae2f78
--- /dev/null
+++ b/lib/ansible/modules/extras/cloud/cloudstack/cs_ip_address.py
@@ -0,0 +1,267 @@
+#!/usr/bin/python
+# -*- coding: utf-8 -*-
+#
+# (c) 2015, Darren Worrall <darren@iweb.co.uk>
+# (c) 2015, René Moser <mail@renemoser.net>
+#
+# This file is part of Ansible
+#
+# Ansible is free software: you can redistribute it and/or modify
+# it under the terms of the GNU General Public License as published by
+# the Free Software Foundation, either version 3 of the License, or
+# (at your option) any later version.
+#
+# Ansible is distributed in the hope that it will be useful,
+# but WITHOUT ANY WARRANTY; without even the implied warranty of
+# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+# GNU General Public License for more details.
+#
+# You should have received a copy of the GNU General Public License
+# along with Ansible. If not, see <http://www.gnu.org/licenses/>.
+
+DOCUMENTATION = '''
+---
+module: cs_ip_address
+short_description: Manages public IP address associations on Apache CloudStack based clouds.
+description:
+ - Acquires and associates a public IP to an account or project. Due to API
+ limitations this is not an idempotent call, so be sure to call this only
+ conditionally when C(state=present).
+version_added: '2.0'
+author:
+ - "Darren Worrall (@dazworrall)"
+ - "René Moser (@resmo)"
+options:
+ ip_address:
+ description:
+ - Public IP address.
+ - Required if C(state=absent)
+ required: false
+ default: null
+ domain:
+ description:
+ - Domain the IP address is related to.
+ required: false
+ default: null
+ network:
+ description:
+ - Network the IP address is related to.
+ required: false
+ default: null
+ vpc:
+ description:
+ - VPC the IP address is related to.
+ required: false
+ default: null
+ version_added: "2.2"
+ account:
+ description:
+ - Account the IP address is related to.
+ required: false
+ default: null
+ project:
+ description:
+ - Name of the project the IP address is related to.
+ required: false
+ default: null
+ zone:
+ description:
+ - Name of the zone the IP address is in.
+ - If not set, default zone is used.
+ required: false
+ default: null
+ poll_async:
+ description:
+ - Poll async jobs until job has finished.
+ required: false
+ default: true
+extends_documentation_fragment: cloudstack
+'''
+
+EXAMPLES = '''
+# Associate an IP address conditionally
+- local_action:
+ module: cs_ip_address
+ network: My Network
+ register: ip_address
+ when: instance.public_ip is undefined
+
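+# A sketch: associate an address in a specific zone (the zone name is illustrative)
+- local_action:
+ module: cs_ip_address
+ network: My Network
+ zone: ch-gva-2
+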
+# Disassociate an IP address
+- local_action:
+ module: cs_ip_address
+ ip_address: 1.2.3.4
+ state: absent
+'''
+
+RETURN = '''
+---
+id:
+ description: UUID of the Public IP address.
+ returned: success
+ type: string
+ sample: a6f7a5fc-43f8-11e5-a151-feff819cdc9f
+ip_address:
+ description: Public IP address.
+ returned: success
+ type: string
+ sample: 1.2.3.4
+zone:
+ description: Name of zone the IP address is related to.
+ returned: success
+ type: string
+ sample: ch-gva-2
+project:
+ description: Name of project the IP address is related to.
+ returned: success
+ type: string
+ sample: Production
+account:
+ description: Account the IP address is related to.
+ returned: success
+ type: string
+ sample: example account
+domain:
+ description: Domain the IP address is related to.
+ returned: success
+ type: string
+ sample: example domain
+'''
+
+# import cloudstack common
+from ansible.module_utils.cloudstack import *
+
+
+class AnsibleCloudStackIPAddress(AnsibleCloudStack):
+
+ def __init__(self, module):
+ super(AnsibleCloudStackIPAddress, self).__init__(module)
+ self.returns = {
+ 'ipaddress': 'ip_address',
+ }
+
+
+ #TODO: Add to parent class, duplicated in cs_network
+ def get_network(self, key=None, network=None):
+ if not network:
+ network = self.module.params.get('network')
+
+ if not network:
+ return None
+
+ args = {}
+ args['account'] = self.get_account('name')
+ args['domainid'] = self.get_domain('id')
+ args['projectid'] = self.get_project('id')
+ args['zoneid'] = self.get_zone('id')
+
+ networks = self.cs.listNetworks(**args)
+ if not networks:
+ self.module.fail_json(msg="No networks available")
+
+ for n in networks['network']:
+ if network in [ n['displaytext'], n['name'], n['id'] ]:
+ return self._get_by_key(key, n)
+ self.module.fail_json(msg="Network '%s' not found" % network)
+
+
+ #TODO: Merge changes here with parent class
+ def get_ip_address(self, key=None):
+ if self.ip_address:
+ return self._get_by_key(key, self.ip_address)
+
+ ip_address = self.module.params.get('ip_address')
+ if not ip_address:
+ self.module.fail_json(msg="IP address param 'ip_address' is required")
+
+ args = {}
+ args['ipaddress'] = ip_address
+ args['account'] = self.get_account(key='name')
+ args['domainid'] = self.get_domain(key='id')
+ args['projectid'] = self.get_project(key='id')
+ args['vpcid'] = self.get_vpc(key='id')
+ ip_addresses = self.cs.listPublicIpAddresses(**args)
+
+ if ip_addresses:
+ self.ip_address = ip_addresses['publicipaddress'][0]
+ return self._get_by_key(key, self.ip_address)
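+ # Note: only the first matching public IP address is considered, so the
+ # given ip_address is assumed to be unique within the account/domain/project scope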
+
+
+ def associate_ip_address(self):
+ self.result['changed'] = True
+ args = {}
+ args['account'] = self.get_account(key='name')
+ args['domainid'] = self.get_domain(key='id')
+ args['projectid'] = self.get_project(key='id')
+ args['networkid'] = self.get_network(key='id')
+ args['zoneid'] = self.get_zone(key='id')
+ ip_address = {}
+ if not self.module.check_mode:
+ res = self.cs.associateIpAddress(**args)
+ if 'errortext' in res:
+ self.module.fail_json(msg="Failed: '%s'" % res['errortext'])
+
+ poll_async = self.module.params.get('poll_async')
+ if poll_async:
+ res = self.poll_job(res, 'ipaddress')
+ ip_address = res
+ return ip_address
+
+
+ def disassociate_ip_address(self):
+ ip_address = self.get_ip_address()
+ if ip_address is None:
+ return ip_address
+ if ip_address['isstaticnat']:
+ self.module.fail_json(msg="IP address is allocated via static nat")
+
+ self.result['changed'] = True
+ if not self.module.check_mode:
+ res = self.cs.disassociateIpAddress(id=ip_address['id'])
+ if 'errortext' in res:
+ self.module.fail_json(msg="Failed: '%s'" % res['errortext'])
+ poll_async = self.module.params.get('poll_async')
+ if poll_async:
+ self.poll_job(res, 'ipaddress')
+ return ip_address
+
+
+def main():
+ argument_spec = cs_argument_spec()
+ argument_spec.update(dict(
+ ip_address = dict(required=False),
+ state = dict(choices=['present', 'absent'], default='present'),
+ vpc = dict(default=None),
+ network = dict(default=None),
+ zone = dict(default=None),
+ domain = dict(default=None),
+ account = dict(default=None),
+ project = dict(default=None),
+ poll_async = dict(type='bool', default=True),
+ ))
+
+ module = AnsibleModule(
+ argument_spec=argument_spec,
+ required_together=cs_required_together(),
+ supports_check_mode=True
+ )
+
+ try:
+ acs_ip_address = AnsibleCloudStackIPAddress(module)
+
+ state = module.params.get('state')
+ if state in ['absent']:
+ ip_address = acs_ip_address.disassociate_ip_address()
+ else:
+ ip_address = acs_ip_address.associate_ip_address()
+
+ result = acs_ip_address.get_result(ip_address)
+
+ except CloudStackException as e:
+ module.fail_json(msg='CloudStackException: %s' % str(e))
+
+ module.exit_json(**result)
+
+# import module snippets
+from ansible.module_utils.basic import *
+if __name__ == '__main__':
+ main()
diff --git a/lib/ansible/modules/extras/cloud/cloudstack/cs_iso.py b/lib/ansible/modules/extras/cloud/cloudstack/cs_iso.py
new file mode 100644
index 0000000000..a61fb18078
--- /dev/null
+++ b/lib/ansible/modules/extras/cloud/cloudstack/cs_iso.py
@@ -0,0 +1,335 @@
+#!/usr/bin/python
+# -*- coding: utf-8 -*-
+#
+# (c) 2015, René Moser <mail@renemoser.net>
+#
+# This file is part of Ansible
+#
+# Ansible is free software: you can redistribute it and/or modify
+# it under the terms of the GNU General Public License as published by
+# the Free Software Foundation, either version 3 of the License, or
+# (at your option) any later version.
+#
+# Ansible is distributed in the hope that it will be useful,
+# but WITHOUT ANY WARRANTY; without even the implied warranty of
+# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+# GNU General Public License for more details.
+#
+# You should have received a copy of the GNU General Public License
+# along with Ansible. If not, see <http://www.gnu.org/licenses/>.
+
+DOCUMENTATION = '''
+---
+module: cs_iso
+short_description: Manages ISO images on Apache CloudStack based clouds.
+description:
+ - Register and remove ISO images.
+version_added: '2.0'
+author: "René Moser (@resmo)"
+options:
+ name:
+ description:
+ - Name of the ISO.
+ required: true
+ url:
+ description:
+ - URL where the ISO can be downloaded from. Required if C(state) is present.
+ required: false
+ default: null
+ os_type:
+ description:
+ - Name of the OS that best represents the OS of this ISO. If the ISO is bootable, this parameter needs to be passed. Required if C(state) is present.
+ required: false
+ default: null
+ is_ready:
+ description:
+ - This flag is used for searching existing ISOs. If set to C(true), it will only list ISOs ready for deployment, e.g. successfully downloaded and installed. Recommended to set it to C(false).
+ required: false
+ default: false
+ aliases: []
+ is_public:
+ description:
+ - Register the ISO to be publicly available to all users. Only used if C(state) is present.
+ required: false
+ default: false
+ is_featured:
+ description:
+ - Register the ISO to be featured. Only used if C(state) is present.
+ required: false
+ default: false
+ is_dynamically_scalable:
+ description:
+ - Register the ISO having XS/VMWare tools installed in order to support dynamic scaling of VM CPU/memory. Only used if C(state) is present.
+ required: false
+ default: false
+ aliases: []
+ checksum:
+ description:
+ - The MD5 checksum value of this ISO. If set, we search by checksum instead of name.
+ required: false
+ default: null
+ bootable:
+ description:
+ - Register the ISO to be bootable. Only used if C(state) is present.
+ required: false
+ default: true
+ domain:
+ description:
+ - Domain the ISO is related to.
+ required: false
+ default: null
+ account:
+ description:
+ - Account the ISO is related to.
+ required: false
+ default: null
+ project:
+ description:
+ - Name of the project the ISO is to be registered in.
+ required: false
+ default: null
+ zone:
+ description:
+ - Name of the zone you wish the ISO to be registered in or deleted from. If not specified, the first zone found will be used.
+ required: false
+ default: null
+ iso_filter:
+ description:
+ - Name of the filter used to search for the ISO.
+ required: false
+ default: 'self'
+ choices: [ 'featured', 'self', 'selfexecutable','sharedexecutable','executable', 'community' ]
+ state:
+ description:
+ - State of the ISO.
+ required: false
+ default: 'present'
+ choices: [ 'present', 'absent' ]
+extends_documentation_fragment: cloudstack
+'''
+
+EXAMPLES = '''
+# Register an ISO if ISO name does not already exist.
+- local_action:
+ module: cs_iso
+ name: Debian 7 64-bit
+ url: http://mirror.switch.ch/ftp/mirror/debian-cd/current/amd64/iso-cd/debian-7.7.0-amd64-netinst.iso
+ os_type: Debian GNU/Linux 7(64-bit)
+
+# Register an ISO with given name if ISO md5 checksum does not already exist.
+- local_action:
+ module: cs_iso
+ name: Debian 7 64-bit
+ url: http://mirror.switch.ch/ftp/mirror/debian-cd/current/amd64/iso-cd/debian-7.7.0-amd64-netinst.iso
+ os_type: Debian GNU/Linux 7(64-bit)
+ checksum: 0b31bccccb048d20b551f70830bb7ad0
+
+# Remove an ISO by name
+- local_action:
+ module: cs_iso
+ name: Debian 7 64-bit
+ state: absent
+
+# Remove an ISO by checksum
+- local_action:
+ module: cs_iso
+ name: Debian 7 64-bit
+ checksum: 0b31bccccb048d20b551f70830bb7ad0
+ state: absent
+'''
+
+RETURN = '''
+---
+id:
+ description: UUID of the ISO.
+ returned: success
+ type: string
+ sample: a6f7a5fc-43f8-11e5-a151-feff819cdc9f
+name:
+ description: Name of the ISO.
+ returned: success
+ type: string
+ sample: Debian 7 64-bit
+display_text:
+ description: Text to be displayed of the ISO.
+ returned: success
+ type: string
+ sample: Debian 7.7 64-bit minimal 2015-03-19
+zone:
+ description: Name of zone the ISO is registered in.
+ returned: success
+ type: string
+ sample: zuerich
+status:
+ description: Status of the ISO.
+ returned: success
+ type: string
+ sample: Successfully Installed
+is_ready:
+ description: True if the ISO is ready to be deployed from.
+ returned: success
+ type: boolean
+ sample: true
+checksum:
+ description: MD5 checksum of the ISO.
+ returned: success
+ type: string
+ sample: 0b31bccccb048d20b551f70830bb7ad0
+created:
+ description: Date of registering.
+ returned: success
+ type: string
+ sample: 2015-03-29T14:57:06+0200
+domain:
+ description: Domain the ISO is related to.
+ returned: success
+ type: string
+ sample: example domain
+account:
+ description: Account the ISO is related to.
+ returned: success
+ type: string
+ sample: example account
+project:
+ description: Project the ISO is related to.
+ returned: success
+ type: string
+ sample: example project
+'''
+
+# import cloudstack common
+from ansible.module_utils.cloudstack import *
+
+
+class AnsibleCloudStackIso(AnsibleCloudStack):
+
+ def __init__(self, module):
+ super(AnsibleCloudStackIso, self).__init__(module)
+ self.returns = {
+ 'checksum': 'checksum',
+ 'status': 'status',
+ 'isready': 'is_ready',
+ }
+ self.iso = None
+
+ def register_iso(self):
+ iso = self.get_iso()
+ if not iso:
+
+ args = {}
+ args['zoneid'] = self.get_zone('id')
+ args['domainid'] = self.get_domain('id')
+ args['account'] = self.get_account('name')
+ args['projectid'] = self.get_project('id')
+ args['bootable'] = self.module.params.get('bootable')
+ args['ostypeid'] = self.get_os_type('id')
+ args['name'] = self.module.params.get('name')
+ args['displaytext'] = self.module.params.get('name')
+ args['checksum'] = self.module.params.get('checksum')
+ args['isdynamicallyscalable'] = self.module.params.get('is_dynamically_scalable')
+ args['isfeatured'] = self.module.params.get('is_featured')
+ args['ispublic'] = self.module.params.get('is_public')
+
+ if args['bootable'] and not args['ostypeid']:
+ self.module.fail_json(msg="OS type 'os_type' is requried if 'bootable=true'.")
+
+ args['url'] = self.module.params.get('url')
+ if not args['url']:
+ self.module.fail_json(msg="URL is requried.")
+
+ self.result['changed'] = True
+ if not self.module.check_mode:
+ res = self.cs.registerIso(**args)
+ if 'errortext' in res:
+ self.module.fail_json(msg="Failed: '%s'" % res['errortext'])
+ iso = res['iso'][0]
+ return iso
+
+
+ def get_iso(self):
+ if not self.iso:
+
+ args = {}
+ args['isready'] = self.module.params.get('is_ready')
+ args['isofilter'] = self.module.params.get('iso_filter')
+ args['domainid'] = self.get_domain('id')
+ args['account'] = self.get_account('name')
+ args['projectid'] = self.get_project('id')
+ args['zoneid'] = self.get_zone('id')
+
+ # if checksum is set, we search by checksum only
+ checksum = self.module.params.get('checksum')
+ if not checksum:
+ args['name'] = self.module.params.get('name')
+
+ isos = self.cs.listIsos(**args)
+ if isos:
+ if not checksum:
+ self.iso = isos['iso'][0]
+ else:
+ for i in isos['iso']:
+ if i['checksum'] == checksum:
+ self.iso = i
+ break
+ return self.iso
+
+
+ def remove_iso(self):
+ iso = self.get_iso()
+ if iso:
+ self.result['changed'] = True
+
+ args = {}
+ args['id'] = iso['id']
+ args['projectid'] = self.get_project('id')
+ args['zoneid'] = self.get_zone('id')
+
+ if not self.module.check_mode:
+ res = self.cs.deleteIso(**args)
+ if 'errortext' in res:
+ self.module.fail_json(msg="Failed: '%s'" % res['errortext'])
+ return iso
+
+
+
+def main():
+ argument_spec = cs_argument_spec()
+ argument_spec.update(dict(
+ name = dict(required=True),
+ url = dict(default=None),
+ os_type = dict(default=None),
+ zone = dict(default=None),
+ iso_filter = dict(default='self', choices=[ 'featured', 'self', 'selfexecutable','sharedexecutable','executable', 'community' ]),
+ domain = dict(default=None),
+ account = dict(default=None),
+ project = dict(default=None),
+ checksum = dict(default=None),
+ is_ready = dict(type='bool', default=False),
+ bootable = dict(type='bool', default=True),
+ is_featured = dict(type='bool', default=False),
+ is_dynamically_scalable = dict(type='bool', default=False),
+ state = dict(choices=['present', 'absent'], default='present'),
+ ))
+
+ module = AnsibleModule(
+ argument_spec=argument_spec,
+ required_together=cs_required_together(),
+ supports_check_mode=True
+ )
+
+ try:
+ acs_iso = AnsibleCloudStackIso(module)
+
+ state = module.params.get('state')
+ if state in ['absent']:
+ iso = acs_iso.remove_iso()
+ else:
+ iso = acs_iso.register_iso()
+
+ result = acs_iso.get_result(iso)
+
+ except CloudStackException as e:
+ module.fail_json(msg='CloudStackException: %s' % str(e))
+
+ module.exit_json(**result)
+
+# import module snippets
+from ansible.module_utils.basic import *
+if __name__ == '__main__':
+ main()
diff --git a/lib/ansible/modules/extras/cloud/cloudstack/cs_loadbalancer_rule.py b/lib/ansible/modules/extras/cloud/cloudstack/cs_loadbalancer_rule.py
new file mode 100644
index 0000000000..83eb888360
--- /dev/null
+++ b/lib/ansible/modules/extras/cloud/cloudstack/cs_loadbalancer_rule.py
@@ -0,0 +1,380 @@
+#!/usr/bin/python
+# -*- coding: utf-8 -*-
+#
+# (c) 2015, Darren Worrall <darren@iweb.co.uk>
+# (c) 2015, René Moser <mail@renemoser.net>
+#
+# This file is part of Ansible
+#
+# Ansible is free software: you can redistribute it and/or modify
+# it under the terms of the GNU General Public License as published by
+# the Free Software Foundation, either version 3 of the License, or
+# (at your option) any later version.
+#
+# Ansible is distributed in the hope that it will be useful,
+# but WITHOUT ANY WARRANTY; without even the implied warranty of
+# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+# GNU General Public License for more details.
+#
+# You should have received a copy of the GNU General Public License
+# along with Ansible. If not, see <http://www.gnu.org/licenses/>.
+
+DOCUMENTATION = '''
+---
+module: cs_loadbalancer_rule
+short_description: Manages load balancer rules on Apache CloudStack based clouds.
+description:
+ - Add, update and remove load balancer rules.
+version_added: '2.0'
+author:
+ - "Darren Worrall (@dazworrall)"
+ - "René Moser (@resmo)"
+options:
+ name:
+ description:
+ - The name of the load balancer rule.
+ required: true
+ description:
+ description:
+ - The description of the load balancer rule.
+ required: false
+ default: null
+ algorithm:
+ description:
+ - Load balancer algorithm.
+ - Required when using C(state=present).
+ required: false
+ choices: [ 'source', 'roundrobin', 'leastconn' ]
+ default: 'source'
+ private_port:
+ description:
+ - The private port of the private ip address/virtual machine where the network traffic will be load balanced to.
+ - Required when using C(state=present).
+ - Cannot be changed once the rule exists due to an API limitation.
+ required: false
+ default: null
+ public_port:
+ description:
+ - The public port from where the network traffic will be load balanced from.
+ - Required when using C(state=present).
+ - Cannot be changed once the rule exists due to an API limitation.
+ required: false
+ default: null
+ ip_address:
+ description:
+ - Public IP address from where the network traffic will be load balanced from.
+ required: true
+ aliases: [ 'public_ip' ]
+ open_firewall:
+ description:
+ - Whether the firewall rule for the public port should be created while creating the new rule.
+ - Use M(cs_firewall) for managing firewall rules.
+ required: false
+ default: false
+ cidr:
+ description:
+ - CIDR (full notation) to be used for firewall rule if required.
+ required: false
+ default: null
+ protocol:
+ description:
+ - The protocol to be used on the load balancer.
+ required: false
+ default: null
+ project:
+ description:
+ - Name of the project the load balancer IP address is related to.
+ required: false
+ default: null
+ state:
+ description:
+ - State of the rule.
+ required: false
+ default: 'present'
+ choices: [ 'present', 'absent' ]
+ domain:
+ description:
+ - Domain the rule is related to.
+ required: false
+ default: null
+ account:
+ description:
+ - Account the rule is related to.
+ required: false
+ default: null
+ zone:
+ description:
+ - Name of the zone in which the rule should be created.
+ - If not set, default zone is used.
+ required: false
+ default: null
+extends_documentation_fragment: cloudstack
+'''
+
+EXAMPLES = '''
+# Create a load balancer rule
+- local_action:
+ module: cs_loadbalancer_rule
+ name: balance_http
+ public_ip: 1.2.3.4
+ algorithm: leastconn
+ public_port: 80
+ private_port: 8080
+
+# Update algorithm of an existing load balancer rule
+- local_action:
+ module: cs_loadbalancer_rule
+ name: balance_http
+ public_ip: 1.2.3.4
+ algorithm: roundrobin
+ public_port: 80
+ private_port: 8080
+
+# Delete a load balancer rule
+- local_action:
+ module: cs_loadbalancer_rule
+ name: balance_http
+ public_ip: 1.2.3.4
+ state: absent
+'''
+
+RETURN = '''
+---
+id:
+ description: UUID of the rule.
+ returned: success
+ type: string
+ sample: a6f7a5fc-43f8-11e5-a151-feff819cdc9f
+zone:
+ description: Name of zone the rule is related to.
+ returned: success
+ type: string
+ sample: ch-gva-2
+project:
+ description: Name of project the rule is related to.
+ returned: success
+ type: string
+ sample: Production
+account:
+ description: Account the rule is related to.
+ returned: success
+ type: string
+ sample: example account
+domain:
+ description: Domain the rule is related to.
+ returned: success
+ type: string
+ sample: example domain
+algorithm:
+ description: Load balancer algorithm used.
+ returned: success
+ type: string
+ sample: "source"
+cidr:
+ description: CIDR to forward traffic from.
+ returned: success
+ type: string
+ sample: ""
+name:
+ description: Name of the rule.
+ returned: success
+ type: string
+ sample: "http-lb"
+description:
+ description: Description of the rule.
+ returned: success
+ type: string
+ sample: "http load balancer rule"
+protocol:
+ description: Protocol of the rule.
+ returned: success
+ type: string
+ sample: "tcp"
+public_port:
+ description: Public port.
+ returned: success
+ type: string
+ sample: 80
+private_port:
+ description: Private port.
+ returned: success
+ type: string
+ sample: 80
+public_ip:
+ description: Public IP address.
+ returned: success
+ type: string
+ sample: "1.2.3.4"
+tags:
+ description: List of resource tags associated with the rule.
+ returned: success
+ type: list
+ sample: '[ { "key": "foo", "value": "bar" } ]'
+state:
+ description: State of the rule.
+ returned: success
+ type: string
+ sample: "Add"
+'''
+
+# import cloudstack common
+from ansible.module_utils.cloudstack import *
+
+class AnsibleCloudStackLBRule(AnsibleCloudStack):
+
+ def __init__(self, module):
+ super(AnsibleCloudStackLBRule, self).__init__(module)
+ self.returns = {
+ 'publicip': 'public_ip',
+ 'algorithm': 'algorithm',
+ 'cidrlist': 'cidr',
+ 'protocol': 'protocol',
+ }
+ # these values will be cast to int
+ self.returns_to_int = {
+ 'publicport': 'public_port',
+ 'privateport': 'private_port',
+ }
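+ # e.g. a port the API reports as the string "80" is returned as the integer 80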
+
+
+ def get_rule(self, **kwargs):
+ rules = self.cs.listLoadBalancerRules(**kwargs)
+ if rules:
+ return rules['loadbalancerrule'][0]
+
+
+ def _get_common_args(self):
+ return {
+ 'account': self.get_account(key='name'),
+ 'domainid': self.get_domain(key='id'),
+ 'projectid': self.get_project(key='id'),
+ 'zoneid': self.get_zone(key='id'),
+ 'publicipid': self.get_ip_address(key='id'),
+ 'name': self.module.params.get('name'),
+ }
+
+
+ def present_lb_rule(self):
+ missing_params = []
+ for required_params in [
+ 'algorithm',
+ 'private_port',
+ 'public_port',
+ ]:
+ if not self.module.params.get(required_params):
+ missing_params.append(required_params)
+ if missing_params:
+ self.module.fail_json(msg="missing required arguments: %s" % ','.join(missing_params))
+
+ args = self._get_common_args()
+ rule = self.get_rule(**args)
+ if rule:
+ rule = self._update_lb_rule(rule)
+ else:
+ rule = self._create_lb_rule(rule)
+
+ if rule:
+ rule = self.ensure_tags(resource=rule, resource_type='LoadBalancer')
+ return rule
+
+
+ def _create_lb_rule(self, rule):
+ self.result['changed'] = True
+ if not self.module.check_mode:
+ args = self._get_common_args()
+ args['algorithm'] = self.module.params.get('algorithm')
+ args['privateport'] = self.module.params.get('private_port')
+ args['publicport'] = self.module.params.get('public_port')
+ args['cidrlist'] = self.module.params.get('cidr')
+ args['description'] = self.module.params.get('description')
+ args['protocol'] = self.module.params.get('protocol')
+ res = self.cs.createLoadBalancerRule(**args)
+ if 'errortext' in res:
+ self.module.fail_json(msg="Failed: '%s'" % res['errortext'])
+
+ poll_async = self.module.params.get('poll_async')
+ if poll_async:
+ rule = self.poll_job(res, 'loadbalancer')
+ return rule
+
+
+ def _update_lb_rule(self, rule):
+ args = {}
+ args['id'] = rule['id']
+ args['algorithm'] = self.module.params.get('algorithm')
+ args['description'] = self.module.params.get('description')
+ if self.has_changed(args, rule):
+ self.result['changed'] = True
+ if not self.module.check_mode:
+ res = self.cs.updateLoadBalancerRule(**args)
+ if 'errortext' in res:
+ self.module.fail_json(msg="Failed: '%s'" % res['errortext'])
+
+ poll_async = self.module.params.get('poll_async')
+ if poll_async:
+ rule = self.poll_job(res, 'loadbalancer')
+ return rule
+
+
+ def absent_lb_rule(self):
+ args = self._get_common_args()
+ rule = self.get_rule(**args)
+ if rule:
+ self.result['changed'] = True
+ if rule and not self.module.check_mode:
+ res = self.cs.deleteLoadBalancerRule(id=rule['id'])
+ if 'errortext' in res:
+ self.module.fail_json(msg="Failed: '%s'" % res['errortext'])
+ poll_async = self.module.params.get('poll_async')
+ if poll_async:
+ res = self.poll_job(res, 'loadbalancer')
+ return rule
+
+
+def main():
+ argument_spec = cs_argument_spec()
+ argument_spec.update(dict(
+ name = dict(required=True),
+ description = dict(default=None),
+ algorithm = dict(choices=['source', 'roundrobin', 'leastconn'], default='source'),
+ private_port = dict(type='int', default=None),
+ public_port = dict(type='int', default=None),
+ protocol = dict(default=None),
+ state = dict(choices=['present', 'absent'], default='present'),
+ ip_address = dict(required=True, aliases=['public_ip']),
+ cidr = dict(default=None),
+ project = dict(default=None),
+ open_firewall = dict(type='bool', default=False),
+ tags = dict(type='list', aliases=['tag'], default=None),
+ zone = dict(default=None),
+ domain = dict(default=None),
+ account = dict(default=None),
+ poll_async = dict(type='bool', default=True),
+ ))
+
+ module = AnsibleModule(
+ argument_spec=argument_spec,
+ required_together=cs_required_together(),
+ supports_check_mode=True
+ )
+
+ try:
+ acs_lb_rule = AnsibleCloudStackLBRule(module)
+
+ state = module.params.get('state')
+ if state in ['absent']:
+ rule = acs_lb_rule.absent_lb_rule()
+ else:
+ rule = acs_lb_rule.present_lb_rule()
+
+ result = acs_lb_rule.get_result(rule)
+
+ except CloudStackException as e:
+ module.fail_json(msg='CloudStackException: %s' % str(e))
+
+ module.exit_json(**result)
+
+# import module snippets
+from ansible.module_utils.basic import *
+if __name__ == '__main__':
+ main()
diff --git a/lib/ansible/modules/extras/cloud/cloudstack/cs_loadbalancer_rule_member.py b/lib/ansible/modules/extras/cloud/cloudstack/cs_loadbalancer_rule_member.py
new file mode 100644
index 0000000000..c5410491a1
--- /dev/null
+++ b/lib/ansible/modules/extras/cloud/cloudstack/cs_loadbalancer_rule_member.py
@@ -0,0 +1,360 @@
+#!/usr/bin/python
+# -*- coding: utf-8 -*-
+#
+# (c) 2015, Darren Worrall <darren@iweb.co.uk>
+# (c) 2015, René Moser <mail@renemoser.net>
+#
+# This file is part of Ansible
+#
+# Ansible is free software: you can redistribute it and/or modify
+# it under the terms of the GNU General Public License as published by
+# the Free Software Foundation, either version 3 of the License, or
+# (at your option) any later version.
+#
+# Ansible is distributed in the hope that it will be useful,
+# but WITHOUT ANY WARRANTY; without even the implied warranty of
+# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+# GNU General Public License for more details.
+#
+# You should have received a copy of the GNU General Public License
+# along with Ansible. If not, see <http://www.gnu.org/licenses/>.
+
+DOCUMENTATION = '''
+---
+module: cs_loadbalancer_rule_member
+short_description: Manages load balancer rule members on Apache CloudStack based clouds.
+description:
+ - Add and remove load balancer rule members.
+version_added: '2.0'
+author:
+ - "Darren Worrall (@dazworrall)"
+ - "René Moser (@resmo)"
+options:
+ name:
+ description:
+ - The name of the load balancer rule.
+ required: true
+ ip_address:
+ description:
+ - Public IP address the network traffic will be load balanced from.
+ - Only needed to find the rule if C(name) is not unique.
+ required: false
+ default: null
+ aliases: [ 'public_ip' ]
+ vms:
+ description:
+ - List of VMs to assign to or remove from the rule.
+ required: true
+ type: list
+ aliases: [ 'vm' ]
+ state:
+ description:
+ - Whether the VMs should be present in or absent from the rule.
+ required: false
+ default: 'present'
+ choices: [ 'present', 'absent' ]
+ project:
+ description:
+ - Name of the project the firewall rule is related to.
+ required: false
+ default: null
+ domain:
+ description:
+ - Domain the rule is related to.
+ required: false
+ default: null
+ account:
+ description:
+ - Account the rule is related to.
+ required: false
+ default: null
+ zone:
+ description:
+ - Name of the zone in which the rule should be located.
+ - If not set, default zone is used.
+ required: false
+ default: null
+extends_documentation_fragment: cloudstack
+'''
+
+EXAMPLES = '''
+# Add VMs to an existing load balancer
+- local_action:
+ module: cs_loadbalancer_rule_member
+ name: balance_http
+ vms:
+ - web01
+ - web02
+
+# Remove a VM from an existing load balancer
+- local_action:
+ module: cs_loadbalancer_rule_member
+ name: balance_http
+ vms:
+ - web01
+ - web02
+ state: absent
+
+# Rolling upgrade of hosts
+- hosts: webservers
+ serial: 1
+ pre_tasks:
+ - name: Remove from load balancer
+ local_action:
+ module: cs_loadbalancer_rule_member
+ name: balance_http
+ vm: "{{ ansible_hostname }}"
+ state: absent
+ tasks:
+ # Perform update
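+ # Illustrative placeholder for the actual upgrade step; the package
+ # name below is an assumption, replace it with your real update tasks.
+ - name: Upgrade the web application
+ yum:
+ name: mywebapp
+ state: latest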
+ post_tasks:
+ - name: Add to load balancer
+ local_action:
+ module: cs_loadbalancer_rule_member
+ name: balance_http
+ vm: "{{ ansible_hostname }}"
+ state: present
+'''
+
+RETURN = '''
+---
+id:
+ description: UUID of the rule.
+ returned: success
+ type: string
+ sample: a6f7a5fc-43f8-11e5-a151-feff819cdc9f
+zone:
+ description: Name of zone the rule is related to.
+ returned: success
+ type: string
+ sample: ch-gva-2
+project:
+ description: Name of project the rule is related to.
+ returned: success
+ type: string
+ sample: Production
+account:
+ description: Account the rule is related to.
+ returned: success
+ type: string
+ sample: example account
+domain:
+ description: Domain the rule is related to.
+ returned: success
+ type: string
+ sample: example domain
+algorithm:
+ description: Load balancer algorithm used.
+ returned: success
+ type: string
+ sample: "source"
+cidr:
+ description: CIDR to forward traffic from.
+ returned: success
+ type: string
+ sample: ""
+name:
+ description: Name of the rule.
+ returned: success
+ type: string
+ sample: "http-lb"
+description:
+ description: Description of the rule.
+ returned: success
+ type: string
+ sample: "http load balancer rule"
+protocol:
+ description: Protocol of the rule.
+ returned: success
+ type: string
+ sample: "tcp"
+public_port:
+ description: Public port.
+ returned: success
+ type: int
+ sample: 80
+private_port:
+ description: Private port.
+ returned: success
+ type: int
+ sample: 80
+public_ip:
+ description: Public IP address.
+ returned: success
+ type: string
+ sample: "1.2.3.4"
+vms:
+ description: Rule members.
+ returned: success
+ type: list
+ sample: '[ "web01", "web02" ]'
+tags:
+ description: List of resource tags associated with the rule.
+ returned: success
+ type: list
+ sample: '[ { "key": "foo", "value": "bar" } ]'
+state:
+ description: State of the rule.
+ returned: success
+ type: string
+ sample: "Add"
+'''
+
+# import cloudstack common
+from ansible.module_utils.cloudstack import *
+
+class AnsibleCloudStackLBRuleMember(AnsibleCloudStack):
+
+ def __init__(self, module):
+ super(AnsibleCloudStackLBRuleMember, self).__init__(module)
+ self.returns = {
+ 'publicip': 'public_ip',
+ 'algorithm': 'algorithm',
+ 'cidrlist': 'cidr',
+ 'protocol': 'protocol',
+ }
+ # these values will be cast to int
+ self.returns_to_int = {
+ 'publicport': 'public_port',
+ 'privateport': 'private_port',
+ }
+
+
+ def get_rule(self):
+ args = self._get_common_args()
+ args['name'] = self.module.params.get('name')
+ args['zoneid'] = self.get_zone(key='id')
+ if self.module.params.get('ip_address'):
+ args['publicipid'] = self.get_ip_address(key='id')
+ rules = self.cs.listLoadBalancerRules(**args)
+ if rules:
+ if len(rules['loadbalancerrule']) > 1:
+ self.module.fail_json(msg="More than one rule having name %s. Please pass 'ip_address' as well." % args['name'])
+ return rules['loadbalancerrule'][0]
+ return None
+
+
+ def _get_common_args(self):
+ return {
+ 'account': self.get_account(key='name'),
+ 'domainid': self.get_domain(key='id'),
+ 'projectid': self.get_project(key='id'),
+ }
+
+
+ def _get_members_of_rule(self, rule):
+ res = self.cs.listLoadBalancerRuleInstances(id=rule['id'])
+ if 'errortext' in res:
+ self.module.fail_json(msg="Failed: '%s'" % res['errortext'])
+ return res.get('loadbalancerruleinstance', [])
+
+
+ def _ensure_members(self, operation):
+ if operation not in ['add', 'remove']:
+ self.module.fail_json(msg="Bad operation: %s" % operation)
+
+ rule = self.get_rule()
+ if not rule:
+ self.module.fail_json(msg="Unknown rule: %s" % self.module.params.get('name'))
+
+ existing = {}
+ for vm in self._get_members_of_rule(rule=rule):
+ existing[vm['name']] = vm['id']
+
+ wanted_names = self.module.params.get('vms')
+
+ if operation == 'add':
+ cs_func = self.cs.assignToLoadBalancerRule
+ to_change = set(wanted_names) - set(existing.keys())
+ else:
+ cs_func = self.cs.removeFromLoadBalancerRule
+ to_change = set(wanted_names) & set(existing.keys())
+
+ if not to_change:
+ return rule
+
+ args = self._get_common_args()
+ vms = self.cs.listVirtualMachines(**args)
+ to_change_ids = []
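+ # The loop below uses Python's for/else semantics: the else branch runs
+ # only if the inner loop completes without a 'break', i.e. when no VM
+ # with the wanted name exists in this account/domain/project scope.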
+ for name in to_change:
+ for vm in vms.get('virtualmachine', []):
+ if vm['name'] == name:
+ to_change_ids.append(vm['id'])
+ break
+ else:
+ self.module.fail_json(msg="Unknown VM: %s" % name)
+
+ if to_change_ids:
+ self.result['changed'] = True
+
+ if to_change_ids and not self.module.check_mode:
+ res = cs_func(
+ id = rule['id'],
+ virtualmachineids = to_change_ids,
+ )
+ if 'errortext' in res:
+ self.module.fail_json(msg="Failed: '%s'" % res['errortext'])
+ poll_async = self.module.params.get('poll_async')
+ if poll_async:
+ self.poll_job(res)
+ rule = self.get_rule()
+ return rule
+
+
+ def add_members(self):
+ return self._ensure_members('add')
+
+
+ def remove_members(self):
+ return self._ensure_members('remove')
+
+
+ def get_result(self, rule):
+ super(AnsibleCloudStackLBRuleMember, self).get_result(rule)
+ if rule:
+ self.result['vms'] = []
+ for vm in self._get_members_of_rule(rule=rule):
+ self.result['vms'].append(vm['name'])
+ return self.result
+
+
+def main():
+ argument_spec = cs_argument_spec()
+ argument_spec.update(dict(
+ name = dict(required=True),
+ ip_address = dict(default=None, aliases=['public_ip']),
+ vms = dict(required=True, aliases=['vm'], type='list'),
+ state = dict(choices=['present', 'absent'], default='present'),
+ zone = dict(default=None),
+ domain = dict(default=None),
+ project = dict(default=None),
+ account = dict(default=None),
+ poll_async = dict(type='bool', default=True),
+ ))
+
+ module = AnsibleModule(
+ argument_spec=argument_spec,
+ required_together=cs_required_together(),
+ supports_check_mode=True
+ )
+
+ try:
+ acs_lb_rule_member = AnsibleCloudStackLBRuleMember(module)
+
+ state = module.params.get('state')
+ if state in ['absent']:
+ rule = acs_lb_rule_member.remove_members()
+ else:
+ rule = acs_lb_rule_member.add_members()
+
+ result = acs_lb_rule_member.get_result(rule)
+
+ except CloudStackException as e:
+ module.fail_json(msg='CloudStackException: %s' % str(e))
+
+ module.exit_json(**result)
+
+# import module snippets
+from ansible.module_utils.basic import *
+if __name__ == '__main__':
+ main()
diff --git a/lib/ansible/modules/extras/cloud/cloudstack/cs_network.py b/lib/ansible/modules/extras/cloud/cloudstack/cs_network.py
new file mode 100644
index 0000000000..69206d8105
--- /dev/null
+++ b/lib/ansible/modules/extras/cloud/cloudstack/cs_network.py
@@ -0,0 +1,580 @@
+#!/usr/bin/python
+# -*- coding: utf-8 -*-
+#
+# (c) 2015, René Moser <mail@renemoser.net>
+#
+# This file is part of Ansible
+#
+# Ansible is free software: you can redistribute it and/or modify
+# it under the terms of the GNU General Public License as published by
+# the Free Software Foundation, either version 3 of the License, or
+# (at your option) any later version.
+#
+# Ansible is distributed in the hope that it will be useful,
+# but WITHOUT ANY WARRANTY; without even the implied warranty of
+# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+# GNU General Public License for more details.
+#
+# You should have received a copy of the GNU General Public License
+# along with Ansible. If not, see <http://www.gnu.org/licenses/>.
+
+DOCUMENTATION = '''
+---
+module: cs_network
+short_description: Manages networks on Apache CloudStack based clouds.
+description:
+ - Create, update, restart and delete networks.
+version_added: '2.0'
+author: "René Moser (@resmo)"
+options:
+ name:
+ description:
+ - Name (case sensitive) of the network.
+ required: true
+ display_text:
+ description:
+ - Display text of the network.
+ - If not specified, C(name) will be used as C(display_text).
+ required: false
+ default: null
+ network_offering:
+ description:
+ - Name of the offering for the network.
+ - Required if C(state=present).
+ required: false
+ default: null
+ start_ip:
+ description:
+ - The beginning IPv4 address of the network's IP range.
+ - Only considered on create.
+ required: false
+ default: null
+ end_ip:
+ description:
+ - The ending IPv4 address of the network's IP range.
+ - If not specified, value of C(start_ip) is used.
+ - Only considered on create.
+ required: false
+ default: null
+ gateway:
+ description:
+ - The gateway of the network.
+ - Required for shared networks and isolated networks when it belongs to VPC.
+ - Only considered on create.
+ required: false
+ default: null
+ netmask:
+ description:
+ - The netmask of the network.
+ - Required for shared networks and isolated networks when it belongs to VPC.
+ - Only considered on create.
+ required: false
+ default: null
+ start_ipv6:
+ description:
+ - The beginning IPv6 address of the network's IP range.
+ - Only considered on create.
+ required: false
+ default: null
+ end_ipv6:
+ description:
+ - The ending IPv6 address of the network's IP range.
+ - If not specified, value of C(start_ipv6) is used.
+ - Only considered on create.
+ required: false
+ default: null
+ cidr_ipv6:
+ description:
+ - CIDR of IPv6 network, must be at least /64.
+ - Only considered on create.
+ required: false
+ default: null
+ gateway_ipv6:
+ description:
+ - The gateway of the IPv6 network.
+ - Required for shared networks.
+ - Only considered on create.
+ required: false
+ default: null
+ vlan:
+ description:
+ - The ID or VID of the network.
+ required: false
+ default: null
+ vpc:
+ description:
+ - Name of the VPC the network is related to.
+ required: false
+ default: null
+ isolated_pvlan:
+ description:
+ - The isolated private vlan for this network.
+ required: false
+ default: null
+ clean_up:
+ description:
+ - Clean up old network elements.
+ - Only considered on C(state=restarted).
+ required: false
+ default: false
+ acl_type:
+ description:
+ - Access control type.
+ - Only considered on create.
+ required: false
+ default: account
+ choices: [ 'account', 'domain' ]
+ network_domain:
+ description:
+ - The network domain.
+ required: false
+ default: null
+ state:
+ description:
+ - State of the network.
+ required: false
+ default: present
+ choices: [ 'present', 'absent', 'restarted' ]
+ zone:
+ description:
+ - Name of the zone in which the network should be deployed.
+ - If not set, default zone is used.
+ required: false
+ default: null
+ project:
+ description:
+ - Name of the project the network is deployed in.
+ required: false
+ default: null
+ domain:
+ description:
+ - Domain the network is related to.
+ required: false
+ default: null
+ account:
+ description:
+ - Account the network is related to.
+ required: false
+ default: null
+ poll_async:
+ description:
+ - Poll async jobs until job has finished.
+ required: false
+ default: true
+extends_documentation_fragment: cloudstack
+'''
+
+EXAMPLES = '''
+# create a network
+- local_action:
+ module: cs_network
+ name: my network
+ zone: gva-01
+ network_offering: DefaultIsolatedNetworkOfferingWithSourceNatService
+ network_domain: example.com
+
+# update a network
+- local_action:
+ module: cs_network
+ name: my network
+ display_text: network of domain example.local
+ network_domain: example.local
+
+# restart a network with clean up
+- local_action:
+ module: cs_network
+ name: my network
+ clean_up: yes
+ state: restarted
+
+# remove a network
+- local_action:
+ module: cs_network
+ name: my network
+ state: absent
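+
+# create a shared network with an IP range (illustrative values; this
+# module requires start_ip, netmask and gateway together)
+- local_action:
+ module: cs_network
+ name: my shared network
+ zone: gva-01
+ network_offering: DefaultSharedNetworkOffering
+ start_ip: 10.100.10.10
+ end_ip: 10.100.10.50
+ netmask: 255.255.255.0
+ gateway: 10.100.10.1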
+'''
+
+RETURN = '''
+---
+id:
+ description: UUID of the network.
+ returned: success
+ type: string
+ sample: 04589590-ac63-4ffc-93f5-b698b8ac38b6
+name:
+ description: Name of the network.
+ returned: success
+ type: string
+ sample: web project
+display_text:
+ description: Display text of the network.
+ returned: success
+ type: string
+ sample: web project
+dns1:
+ description: IP address of the 1st nameserver.
+ returned: success
+ type: string
+ sample: 1.2.3.4
+dns2:
+ description: IP address of the 2nd nameserver.
+ returned: success
+ type: string
+ sample: 1.2.3.4
+cidr:
+ description: IPv4 network CIDR.
+ returned: success
+ type: string
+ sample: 10.101.64.0/24
+gateway:
+ description: IPv4 gateway.
+ returned: success
+ type: string
+ sample: 10.101.64.1
+netmask:
+ description: IPv4 netmask.
+ returned: success
+ type: string
+ sample: 255.255.255.0
+cidr_ipv6:
+ description: IPv6 network CIDR.
+ returned: success
+ type: string
+ sample: 2001:db8::/64
+gateway_ipv6:
+ description: IPv6 gateway.
+ returned: success
+ type: string
+ sample: 2001:db8::1
+zone:
+ description: Name of zone.
+ returned: success
+ type: string
+ sample: ch-gva-2
+domain:
+ description: Domain the network is related to.
+ returned: success
+ type: string
+ sample: ROOT
+account:
+ description: Account the network is related to.
+ returned: success
+ type: string
+ sample: example account
+project:
+ description: Name of project.
+ returned: success
+ type: string
+ sample: Production
+tags:
+ description: List of resource tags associated with the network.
+ returned: success
+ type: list
+ sample: '[ { "key": "foo", "value": "bar" } ]'
+acl_type:
+ description: Access type of the network (Domain, Account).
+ returned: success
+ type: string
+ sample: Account
+broadcast_domain_type:
+ description: Broadcast domain type of the network.
+ returned: success
+ type: string
+ sample: Vlan
+type:
+ description: Type of the network.
+ returned: success
+ type: string
+ sample: Isolated
+traffic_type:
+ description: Traffic type of the network.
+ returned: success
+ type: string
+ sample: Guest
+state:
+ description: State of the network (Allocated, Implemented, Setup).
+ returned: success
+ type: string
+ sample: Allocated
+is_persistent:
+ description: Whether the network is persistent or not.
+ returned: success
+ type: boolean
+ sample: false
+network_domain:
+ description: The network domain.
+ returned: success
+ type: string
+ sample: example.local
+network_offering:
+ description: The network offering name.
+ returned: success
+ type: string
+ sample: DefaultIsolatedNetworkOfferingWithSourceNatService
+'''
+
+# import cloudstack common
+from ansible.module_utils.cloudstack import *
+
+
+class AnsibleCloudStackNetwork(AnsibleCloudStack):
+
+ def __init__(self, module):
+ super(AnsibleCloudStackNetwork, self).__init__(module)
+ self.returns = {
+ 'networkdomain': 'network_domain',
+ 'networkofferingname': 'network_offering',
+ 'ispersistent': 'is_persistent',
+ 'acltype': 'acl_type',
+ 'type': 'type',
+ 'traffictype': 'traffic_type',
+ 'ip6gateway': 'gateway_ipv6',
+ 'ip6cidr': 'cidr_ipv6',
+ 'gateway': 'gateway',
+ 'cidr': 'cidr',
+ 'netmask': 'netmask',
+ 'broadcastdomaintype': 'broadcast_domain_type',
+ 'dns1': 'dns1',
+ 'dns2': 'dns2',
+ }
+
+ self.network = None
+
+
+ def get_vpc(self, key=None):
+ vpc = self.module.params.get('vpc')
+ if not vpc:
+ return None
+
+ args = {}
+ args['account'] = self.get_account(key='name')
+ args['domainid'] = self.get_domain(key='id')
+ args['projectid'] = self.get_project(key='id')
+ args['zoneid'] = self.get_zone(key='id')
+
+ vpcs = self.cs.listVPCs(**args)
+ if vpcs:
+ for v in vpcs['vpc']:
+ if vpc in [ v['name'], v['displaytext'], v['id'] ]:
+ return self._get_by_key(key, v)
+ self.module.fail_json(msg="VPC '%s' not found" % vpc)
+
+
+ def get_network_offering(self, key=None):
+ network_offering = self.module.params.get('network_offering')
+ if not network_offering:
+ self.module.fail_json(msg="missing required arguments: network_offering")
+
+ args = {}
+ args['zoneid'] = self.get_zone(key='id')
+
+ network_offerings = self.cs.listNetworkOfferings(**args)
+ if network_offerings:
+ for no in network_offerings['networkoffering']:
+ if network_offering in [ no['name'], no['displaytext'], no['id'] ]:
+ return self._get_by_key(key, no)
+ self.module.fail_json(msg="Network offering '%s' not found" % network_offering)
+
+
+ def _get_args(self):
+ args = {}
+ args['name'] = self.module.params.get('name')
+ args['displaytext'] = self.get_or_fallback('display_text', 'name')
+ args['networkdomain'] = self.module.params.get('network_domain')
+ args['networkofferingid'] = self.get_network_offering(key='id')
+ return args
+
+
+ def get_network(self):
+ if not self.network:
+ network = self.module.params.get('name')
+
+ args = {}
+ args['zoneid'] = self.get_zone(key='id')
+ args['projectid'] = self.get_project(key='id')
+ args['account'] = self.get_account(key='name')
+ args['domainid'] = self.get_domain(key='id')
+
+ networks = self.cs.listNetworks(**args)
+ if networks:
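+ # a network matches on its name, display text or id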
+ for n in networks['network']:
+ if network in [ n['name'], n['displaytext'], n['id']]:
+ self.network = n
+ break
+ return self.network
+
+
+ def present_network(self):
+ network = self.get_network()
+ if not network:
+ network = self.create_network(network)
+ else:
+ network = self.update_network(network)
+ return network
+
+
+ def update_network(self, network):
+ args = self._get_args()
+ args['id'] = network['id']
+
+ if self.has_changed(args, network):
+ self.result['changed'] = True
+ if not self.module.check_mode:
+ network = self.cs.updateNetwork(**args)
+
+ if 'errortext' in network:
+ self.module.fail_json(msg="Failed: '%s'" % network['errortext'])
+
+ poll_async = self.module.params.get('poll_async')
+ if network and poll_async:
+ network = self.poll_job(network, 'network')
+ return network
+
+
+ def create_network(self, network):
+ self.result['changed'] = True
+
+ args = self._get_args()
+ args['acltype'] = self.module.params.get('acl_type')
+ args['zoneid'] = self.get_zone(key='id')
+ args['projectid'] = self.get_project(key='id')
+ args['account'] = self.get_account(key='name')
+ args['domainid'] = self.get_domain(key='id')
+ args['startip'] = self.module.params.get('start_ip')
+ args['endip'] = self.get_or_fallback('end_ip', 'start_ip')
+ args['netmask'] = self.module.params.get('netmask')
+ args['gateway'] = self.module.params.get('gateway')
+ args['startipv6'] = self.module.params.get('start_ipv6')
+ args['endipv6'] = self.get_or_fallback('end_ipv6', 'start_ipv6')
+ args['ip6cidr'] = self.module.params.get('cidr_ipv6')
+ args['ip6gateway'] = self.module.params.get('gateway_ipv6')
+ args['vlan'] = self.module.params.get('vlan')
+ args['isolatedpvlan'] = self.module.params.get('isolated_pvlan')
+ args['subdomainaccess'] = self.module.params.get('subdomain_access')
+ args['vpcid'] = self.get_vpc(key='id')
+
+ if not self.module.check_mode:
+ res = self.cs.createNetwork(**args)
+
+ if 'errortext' in res:
+ self.module.fail_json(msg="Failed: '%s'" % res['errortext'])
+
+ network = res['network']
+ return network
+
+
+ def restart_network(self):
+ network = self.get_network()
+
+ if not network:
+ self.module.fail_json(msg="No network named '%s' found." % self.module.params('name'))
+
+ # Restarting only available for these states
+ if network['state'].lower() in [ 'implemented', 'setup' ]:
+ self.result['changed'] = True
+
+ args = {}
+ args['id'] = network['id']
+ args['cleanup'] = self.module.params.get('clean_up')
+
+ if not self.module.check_mode:
+ network = self.cs.restartNetwork(**args)
+
+ if 'errortext' in network:
+ self.module.fail_json(msg="Failed: '%s'" % network['errortext'])
+
+ poll_async = self.module.params.get('poll_async')
+ if network and poll_async:
+ network = self.poll_job(network, 'network')
+ return network
+
+
+ def absent_network(self):
+ network = self.get_network()
+ if network:
+ self.result['changed'] = True
+
+ args = {}
+ args['id'] = network['id']
+
+ if not self.module.check_mode:
+ res = self.cs.deleteNetwork(**args)
+
+ if 'errortext' in res:
+ self.module.fail_json(msg="Failed: '%s'" % res['errortext'])
+
+ poll_async = self.module.params.get('poll_async')
+ if res and poll_async:
+ res = self.poll_job(res, 'network')
+ return network
+
+
+
+def main():
+ argument_spec = cs_argument_spec()
+ argument_spec.update(dict(
+ name = dict(required=True),
+ display_text = dict(default=None),
+ network_offering = dict(default=None),
+ zone = dict(default=None),
+ start_ip = dict(default=None),
+ end_ip = dict(default=None),
+ gateway = dict(default=None),
+ netmask = dict(default=None),
+ start_ipv6 = dict(default=None),
+ end_ipv6 = dict(default=None),
+ cidr_ipv6 = dict(default=None),
+ gateway_ipv6 = dict(default=None),
+ vlan = dict(default=None),
+ vpc = dict(default=None),
+ isolated_pvlan = dict(default=None),
+ clean_up = dict(type='bool', default=False),
+ network_domain = dict(default=None),
+ state = dict(choices=['present', 'absent', 'restarted' ], default='present'),
+ acl_type = dict(choices=['account', 'domain'], default='account'),
+ project = dict(default=None),
+ domain = dict(default=None),
+ account = dict(default=None),
+ poll_async = dict(type='bool', default=True),
+ ))
+ required_together = cs_required_together()
+ required_together.extend([
+ ['start_ip', 'netmask', 'gateway'],
+ ['start_ipv6', 'cidr_ipv6', 'gateway_ipv6'],
+ ])
+
+ module = AnsibleModule(
+ argument_spec=argument_spec,
+ required_together=required_together,
+ supports_check_mode=True
+ )
+
+ try:
+ acs_network = AnsibleCloudStackNetwork(module)
+
+ state = module.params.get('state')
+ if state in ['absent']:
+ network = acs_network.absent_network()
+
+ elif state in ['restarted']:
+ network = acs_network.restart_network()
+
+ else:
+ network = acs_network.present_network()
+
+ result = acs_network.get_result(network)
+
+ except CloudStackException as e:
+ module.fail_json(msg='CloudStackException: %s' % str(e))
+
+ module.exit_json(**result)
+
+# import module snippets
+from ansible.module_utils.basic import *
+if __name__ == '__main__':
+ main()
diff --git a/lib/ansible/modules/extras/cloud/cloudstack/cs_pod.py b/lib/ansible/modules/extras/cloud/cloudstack/cs_pod.py
new file mode 100644
index 0000000000..e78eb2844c
--- /dev/null
+++ b/lib/ansible/modules/extras/cloud/cloudstack/cs_pod.py
@@ -0,0 +1,301 @@
+#!/usr/bin/python
+# -*- coding: utf-8 -*-
+#
+# (c) 2016, René Moser <mail@renemoser.net>
+#
+# This file is part of Ansible
+#
+# Ansible is free software: you can redistribute it and/or modify
+# it under the terms of the GNU General Public License as published by
+# the Free Software Foundation, either version 3 of the License, or
+# (at your option) any later version.
+#
+# Ansible is distributed in the hope that it will be useful,
+# but WITHOUT ANY WARRANTY; without even the implied warranty of
+# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+# GNU General Public License for more details.
+#
+# You should have received a copy of the GNU General Public License
+# along with Ansible. If not, see <http://www.gnu.org/licenses/>.
+
+DOCUMENTATION = '''
+---
+module: cs_pod
+short_description: Manages pods on Apache CloudStack based clouds.
+description:
+ - Create, update and delete pods.
+version_added: "2.1"
+author: "René Moser (@resmo)"
+options:
+ name:
+ description:
+ - Name of the pod.
+ required: true
+ id:
+ description:
+ - UUID of the existing pod.
+ default: null
+ required: false
+ start_ip:
+ description:
+ - Starting IP address for the Pod.
+ - Required on C(state=present).
+ default: null
+ required: false
+ end_ip:
+ description:
+ - Ending IP address for the Pod.
+ default: null
+ required: false
+ netmask:
+ description:
+ - Netmask for the Pod.
+ - Required on C(state=present).
+ default: null
+ required: false
+ gateway:
+ description:
+ - Gateway for the Pod.
+ - Required on C(state=present).
+ default: null
+ required: false
+ zone:
+ description:
+ - Name of the zone the pod belongs to.
+ - If not set, default zone is used.
+ required: false
+ default: null
+ state:
+ description:
+ - State of the pod.
+ required: false
+ default: 'present'
+ choices: [ 'present', 'enabled', 'disabled', 'absent' ]
+extends_documentation_fragment: cloudstack
+'''
+
+EXAMPLES = '''
+# Ensure a pod is present
+- local_action:
+ module: cs_pod
+ name: pod1
+ zone: ch-zrh-ix-01
+ start_ip: 10.100.10.101
+ gateway: 10.100.10.1
+ netmask: 255.255.255.0
+
+# Ensure a pod is disabled
+- local_action:
+ module: cs_pod
+ name: pod1
+ zone: ch-zrh-ix-01
+ state: disabled
+
+# Ensure a pod is enabled
+- local_action:
+ module: cs_pod
+ name: pod1
+ zone: ch-zrh-ix-01
+ state: enabled
+
+# Ensure a pod is absent
+- local_action:
+ module: cs_pod
+ name: pod1
+ zone: ch-zrh-ix-01
+ state: absent
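+
+# Ensure a pod covers a given IP range (illustrative values)
+- local_action:
+ module: cs_pod
+ name: pod1
+ zone: ch-zrh-ix-01
+ start_ip: 10.100.10.101
+ end_ip: 10.100.10.200
+ gateway: 10.100.10.1
+ netmask: 255.255.255.0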
+'''
+
+RETURN = '''
+---
+id:
+ description: UUID of the pod.
+ returned: success
+ type: string
+ sample: 04589590-ac63-4ffc-93f5-b698b8ac38b6
+name:
+ description: Name of the pod.
+ returned: success
+ type: string
+ sample: pod01
+start_ip:
+ description: Starting IP of the pod.
+ returned: success
+ type: string
+ sample: 10.100.1.101
+end_ip:
+ description: Ending IP of the pod.
+ returned: success
+ type: string
+ sample: 10.100.1.254
+netmask:
+ description: Netmask of the pod.
+ returned: success
+ type: string
+ sample: 255.255.255.0
+gateway:
+ description: Gateway of the pod.
+ returned: success
+ type: string
+ sample: 10.100.1.1
+allocation_state:
+ description: State of the pod.
+ returned: success
+ type: string
+ sample: Enabled
+zone:
+ description: Name of zone the pod is in.
+ returned: success
+ type: string
+ sample: ch-gva-2
+'''
+
+# import cloudstack common
+from ansible.module_utils.cloudstack import *
+
+class AnsibleCloudStackPod(AnsibleCloudStack):
+
+ def __init__(self, module):
+ super(AnsibleCloudStackPod, self).__init__(module)
+ self.returns = {
+ 'endip': 'end_ip',
+ 'startip': 'start_ip',
+ 'gateway': 'gateway',
+ 'netmask': 'netmask',
+ 'allocationstate': 'allocation_state',
+ }
+ self.pod = None
+
+
+ def _get_common_pod_args(self):
+ args = {}
+ args['name'] = self.module.params.get('name')
+ args['zoneid'] = self.get_zone(key='id')
+ args['startip'] = self.module.params.get('start_ip')
+ args['endip'] = self.module.params.get('end_ip')
+ args['netmask'] = self.module.params.get('netmask')
+ args['gateway'] = self.module.params.get('gateway')
+ state = self.module.params.get('state')
+ if state in [ 'enabled', 'disabled']:
+ args['allocationstate'] = state.capitalize()
+ return args
+
+
+ def get_pod(self):
+ if not self.pod:
+ args = {}
+
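+ # Prefer a lookup by explicit pod id; otherwise fall back to the name within the zone.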
+ uuid = self.module.params.get('id')
+ if uuid:
+ args['id'] = uuid
+ args['zoneid'] = self.get_zone(key='id')
+ pods = self.cs.listPods(**args)
+ if pods:
+ self.pod = pods['pod'][0]
+ return self.pod
+
+ args['name'] = self.module.params.get('name')
+ args['zoneid'] = self.get_zone(key='id')
+ pods = self.cs.listPods(**args)
+ if pods:
+ self.pod = pods['pod'][0]
+ return self.pod
+
+
+ def present_pod(self):
+ pod = self.get_pod()
+ if pod:
+ pod = self._update_pod()
+ else:
+ pod = self._create_pod()
+ return pod
+
+
+ def _create_pod(self):
+ required_params = [
+ 'start_ip',
+ 'netmask',
+ 'gateway',
+ ]
+ self.module.fail_on_missing_params(required_params=required_params)
+
+ pod = None
+ self.result['changed'] = True
+ args = self._get_common_pod_args()
+ if not self.module.check_mode:
+ res = self.cs.createPod(**args)
+ if 'errortext' in res:
+ self.module.fail_json(msg="Failed: '%s'" % res['errortext'])
+ pod = res['pod']
+ return pod
+
+
+ def _update_pod(self):
+ pod = self.get_pod()
+ args = self._get_common_pod_args()
+ args['id'] = pod['id']
+
+ if self.has_changed(args, pod):
+ self.result['changed'] = True
+
+ if not self.module.check_mode:
+ res = self.cs.updatePod(**args)
+ if 'errortext' in res:
+ self.module.fail_json(msg="Failed: '%s'" % res['errortext'])
+ pod = res['pod']
+ return pod
+
+
+ def absent_pod(self):
+ pod = self.get_pod()
+ if pod:
+ self.result['changed'] = True
+
+ args = {}
+ args['id'] = pod['id']
+
+ if not self.module.check_mode:
+ res = self.cs.deletePod(**args)
+ if 'errortext' in res:
+ self.module.fail_json(msg="Failed: '%s'" % res['errortext'])
+ return pod
+
+
+def main():
+ argument_spec = cs_argument_spec()
+ argument_spec.update(dict(
+ id = dict(default=None),
+ name = dict(required=True),
+ gateway = dict(default=None),
+ netmask = dict(default=None),
+ start_ip = dict(default=None),
+ end_ip = dict(default=None),
+ zone = dict(default=None),
+ state = dict(choices=['present', 'enabled', 'disabled', 'absent'], default='present'),
+ ))
+
+ module = AnsibleModule(
+ argument_spec=argument_spec,
+ required_together=cs_required_together(),
+ supports_check_mode=True
+ )
+
+ try:
+ acs_pod = AnsibleCloudStackPod(module)
+ state = module.params.get('state')
+ if state in ['absent']:
+ pod = acs_pod.absent_pod()
+ else:
+ pod = acs_pod.present_pod()
+
+ result = acs_pod.get_result(pod)
+
+ except CloudStackException as e:
+ module.fail_json(msg='CloudStackException: %s' % str(e))
+
+ module.exit_json(**result)
+
+# import module snippets
+from ansible.module_utils.basic import *
+if __name__ == '__main__':
+ main()
diff --git a/lib/ansible/modules/extras/cloud/cloudstack/cs_portforward.py b/lib/ansible/modules/extras/cloud/cloudstack/cs_portforward.py
new file mode 100644
index 0000000000..3c492c5461
--- /dev/null
+++ b/lib/ansible/modules/extras/cloud/cloudstack/cs_portforward.py
@@ -0,0 +1,379 @@
+#!/usr/bin/python
+# -*- coding: utf-8 -*-
+#
+# (c) 2015, René Moser <mail@renemoser.net>
+#
+# This file is part of Ansible
+#
+# Ansible is free software: you can redistribute it and/or modify
+# it under the terms of the GNU General Public License as published by
+# the Free Software Foundation, either version 3 of the License, or
+# (at your option) any later version.
+#
+# Ansible is distributed in the hope that it will be useful,
+# but WITHOUT ANY WARRANTY; without even the implied warranty of
+# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+# GNU General Public License for more details.
+#
+# You should have received a copy of the GNU General Public License
+# along with Ansible. If not, see <http://www.gnu.org/licenses/>.
+
+DOCUMENTATION = '''
+---
+module: cs_portforward
+short_description: Manages port forwarding rules on Apache CloudStack based clouds.
+description:
+ - Create, update and remove port forwarding rules.
+version_added: '2.0'
+author: "René Moser (@resmo)"
+options:
+ ip_address:
+ description:
+ - Public IP address the rule is assigned to.
+ required: true
+ vm:
+ description:
+ - Name of the virtual machine the port forwarding rule is created for.
+ - Required if C(state=present).
+ required: false
+ default: null
+ state:
+ description:
+ - State of the port forwarding rule.
+ required: false
+ default: 'present'
+ choices: [ 'present', 'absent' ]
+ protocol:
+ description:
+ - Protocol of the port forwarding rule.
+ required: false
+ default: 'tcp'
+ choices: [ 'tcp', 'udp' ]
+ public_port:
+ description:
+ - Start public port for this rule.
+ required: true
+ public_end_port:
+ description:
+ - End public port for this rule.
+ - If not specified, equal to C(public_port).
+ required: false
+ default: null
+ private_port:
+ description:
+ - Start private port for this rule.
+ required: true
+ private_end_port:
+ description:
+ - End private port for this rule.
+ - If not specified, equal to C(private_port).
+ required: false
+ default: null
+ open_firewall:
+ description:
+ - Whether the firewall rule for the public port should be created while creating the new rule.
+ - Use M(cs_firewall) for managing firewall rules.
+ required: false
+ default: false
+ vm_guest_ip:
+ description:
+ - VM guest NIC secondary IP address for the port forwarding rule.
+ required: false
+ default: null
+ domain:
+ description:
+ - Domain the C(vm) is related to.
+ required: false
+ default: null
+ account:
+ description:
+ - Account the C(vm) is related to.
+ required: false
+ default: null
+ project:
+ description:
+ - Name of the project the C(vm) is located in.
+ required: false
+ default: null
+ zone:
+ description:
+ - Name of the zone the virtual machine is in.
+ - If not set, default zone is used.
+ required: false
+ default: null
+ poll_async:
+ description:
+ - Poll async jobs until job has finished.
+ required: false
+ default: true
+extends_documentation_fragment: cloudstack
+'''
+
+EXAMPLES = '''
+# 1.2.3.4:80 -> web01:8080
+- local_action:
+ module: cs_portforward
+ ip_address: 1.2.3.4
+ vm: web01
+ public_port: 80
+ private_port: 8080
+
+# forward SSH and open firewall
+- local_action:
+ module: cs_portforward
+ ip_address: '{{ public_ip }}'
+ vm: '{{ inventory_hostname }}'
+ public_port: '{{ ansible_ssh_port }}'
+ private_port: 22
+ open_firewall: true
+
+# forward DNS traffic, but do not open firewall
+- local_action:
+ module: cs_portforward
+ ip_address: 1.2.3.4
+ vm: '{{ inventory_hostname }}'
+ public_port: 53
+ private_port: 53
+ protocol: udp
+
+# remove ssh port forwarding
+- local_action:
+ module: cs_portforward
+ ip_address: 1.2.3.4
+ public_port: 22
+ private_port: 22
+ state: absent
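+
+# forward a whole port range, e.g. for passive FTP data connections
+# (illustrative port numbers)
+- local_action:
+ module: cs_portforward
+ ip_address: 1.2.3.4
+ vm: web01
+ public_port: 60000
+ public_end_port: 60100
+ private_port: 60000
+ private_end_port: 60100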
+'''
+
+RETURN = '''
+---
+id:
+ description: UUID of the port forwarding rule.
+ returned: success
+ type: string
+ sample: a6f7a5fc-43f8-11e5-a151-feff819cdc9f
+ip_address:
+ description: Public IP address.
+ returned: success
+ type: string
+ sample: 1.2.3.4
+protocol:
+ description: Protocol.
+ returned: success
+ type: string
+ sample: tcp
+private_port:
+ description: Start port on the virtual machine's IP address.
+ returned: success
+ type: int
+ sample: 80
+private_end_port:
+ description: End port on the virtual machine's IP address.
+ returned: success
+ type: int
+ sample: 80
+public_port:
+ description: Start port on the public IP address.
+ returned: success
+ type: int
+ sample: 80
+public_end_port:
+ description: End port on the public IP address.
+ returned: success
+ type: int
+ sample: 80
+tags:
+ description: Tags related to the port forwarding.
+ returned: success
+ type: list
+ sample: []
+vm_name:
+ description: Name of the virtual machine.
+ returned: success
+ type: string
+ sample: web-01
+vm_display_name:
+ description: Display name of the virtual machine.
+ returned: success
+ type: string
+ sample: web-01
+vm_guest_ip:
+ description: IP of the virtual machine.
+ returned: success
+ type: string
+ sample: 10.101.65.152
+'''
+
+# import cloudstack common
+from ansible.module_utils.cloudstack import *
+
+
+class AnsibleCloudStackPortforwarding(AnsibleCloudStack):
+
+ def __init__(self, module):
+ super(AnsibleCloudStackPortforwarding, self).__init__(module)
+ self.returns = {
+ 'virtualmachinedisplayname': 'vm_display_name',
+ 'virtualmachinename': 'vm_name',
+ 'ipaddress': 'ip_address',
+ 'vmguestip': 'vm_guest_ip',
+ 'publicip': 'public_ip',
+ 'protocol': 'protocol',
+ }
+ # these values will be cast to int
+ self.returns_to_int = {
+ 'publicport': 'public_port',
+ 'publicendport': 'public_end_port',
+ 'privateport': 'private_port',
+ 'privateendport': 'private_end_port',
+ }
+ self.portforwarding_rule = None
+
+
+ def get_portforwarding_rule(self):
+ if not self.portforwarding_rule:
+ protocol = self.module.params.get('protocol')
+ public_port = self.module.params.get('public_port')
+ public_end_port = self.get_or_fallback('public_end_port', 'public_port')
+ private_port = self.module.params.get('private_port')
+ private_end_port = self.get_or_fallback('private_end_port', 'private_port')
+
+ args = {}
+ args['ipaddressid'] = self.get_ip_address(key='id')
+ args['projectid'] = self.get_project(key='id')
+ portforwarding_rules = self.cs.listPortForwardingRules(**args)
+
+ if portforwarding_rules and 'portforwardingrule' in portforwarding_rules:
+ for rule in portforwarding_rules['portforwardingrule']:
+ if protocol == rule['protocol'] \
+ and public_port == int(rule['publicport']):
+ self.portforwarding_rule = rule
+ break
+ return self.portforwarding_rule
+
+
+ def present_portforwarding_rule(self):
+ portforwarding_rule = self.get_portforwarding_rule()
+ if portforwarding_rule:
+ portforwarding_rule = self.update_portforwarding_rule(portforwarding_rule)
+ else:
+ portforwarding_rule = self.create_portforwarding_rule()
+ return portforwarding_rule
+
+
+ def create_portforwarding_rule(self):
+ args = {}
+ args['protocol'] = self.module.params.get('protocol')
+ args['publicport'] = self.module.params.get('public_port')
+ args['publicendport'] = self.get_or_fallback('public_end_port', 'public_port')
+ args['privateport'] = self.module.params.get('private_port')
+ args['privateendport'] = self.get_or_fallback('private_end_port', 'private_port')
+ args['openfirewall'] = self.module.params.get('open_firewall')
+ args['vmguestip'] = self.get_vm_guest_ip()
+ args['ipaddressid'] = self.get_ip_address(key='id')
+ args['virtualmachineid'] = self.get_vm(key='id')
+
+ portforwarding_rule = None
+ self.result['changed'] = True
+ if not self.module.check_mode:
+ portforwarding_rule = self.cs.createPortForwardingRule(**args)
+ poll_async = self.module.params.get('poll_async')
+ if poll_async:
+ portforwarding_rule = self.poll_job(portforwarding_rule, 'portforwardingrule')
+ return portforwarding_rule
+
+
+ def update_portforwarding_rule(self, portforwarding_rule):
+ args = {}
+ args['protocol'] = self.module.params.get('protocol')
+ args['publicport'] = self.module.params.get('public_port')
+ args['publicendport'] = self.get_or_fallback('public_end_port', 'public_port')
+ args['privateport'] = self.module.params.get('private_port')
+ args['privateendport'] = self.get_or_fallback('private_end_port', 'private_port')
+ args['vmguestip'] = self.get_vm_guest_ip()
+ args['ipaddressid'] = self.get_ip_address(key='id')
+ args['virtualmachineid'] = self.get_vm(key='id')
+
+ if self.has_changed(args, portforwarding_rule):
+ self.result['changed'] = True
+ if not self.module.check_mode:
+ # API broken in 4.2.1?, workaround using remove/create instead of update
+ # portforwarding_rule = self.cs.updatePortForwardingRule(**args)
+ self.absent_portforwarding_rule()
+ portforwarding_rule = self.cs.createPortForwardingRule(**args)
+ poll_async = self.module.params.get('poll_async')
+ if poll_async:
+ portforwarding_rule = self.poll_job(portforwarding_rule, 'portforwardingrule')
+ return portforwarding_rule
+
+
+ def absent_portforwarding_rule(self):
+ portforwarding_rule = self.get_portforwarding_rule()
+
+ if portforwarding_rule:
+ self.result['changed'] = True
+ args = {}
+ args['id'] = portforwarding_rule['id']
+
+ if not self.module.check_mode:
+ res = self.cs.deletePortForwardingRule(**args)
+ poll_async = self.module.params.get('poll_async')
+ if poll_async:
+ self.poll_job(res, 'portforwardingrule')
+ return portforwarding_rule
+
+
+ def get_result(self, portforwarding_rule):
+ super(AnsibleCloudStackPortforwarding, self).get_result(portforwarding_rule)
+ if portforwarding_rule:
+ # Bad bad API does not always return int when it should.
+ for search_key, return_key in self.returns_to_int.iteritems():
+ if search_key in portforwarding_rule:
+ self.result[return_key] = int(portforwarding_rule[search_key])
+ return self.result
+
+
+def main():
+ argument_spec = cs_argument_spec()
+ argument_spec.update(dict(
+ ip_address = dict(required=True),
+ protocol = dict(choices=['tcp', 'udp'], default='tcp'),
+ public_port = dict(type='int', required=True),
+ public_end_port = dict(type='int', default=None),
+ private_port = dict(type='int', required=True),
+ private_end_port = dict(type='int', default=None),
+ state = dict(choices=['present', 'absent'], default='present'),
+ open_firewall = dict(type='bool', default=False),
+ vm_guest_ip = dict(default=None),
+ vm = dict(default=None),
+ zone = dict(default=None),
+ domain = dict(default=None),
+ account = dict(default=None),
+ project = dict(default=None),
+ poll_async = dict(type='bool', default=True),
+ ))
+
+ module = AnsibleModule(
+ argument_spec=argument_spec,
+ required_together=cs_required_together(),
+ supports_check_mode=True
+ )
+
+ try:
+ acs_pf = AnsibleCloudStackPortforwarding(module)
+ state = module.params.get('state')
+ if state in ['absent']:
+ pf_rule = acs_pf.absent_portforwarding_rule()
+ else:
+ pf_rule = acs_pf.present_portforwarding_rule()
+
+ result = acs_pf.get_result(pf_rule)
+
+ except CloudStackException as e:
+ module.fail_json(msg='CloudStackException: %s' % str(e))
+
+ module.exit_json(**result)
+
+# import module snippets
+from ansible.module_utils.basic import *
+if __name__ == '__main__':
+ main()
diff --git a/lib/ansible/modules/extras/cloud/cloudstack/cs_project.py b/lib/ansible/modules/extras/cloud/cloudstack/cs_project.py
new file mode 100644
index 0000000000..6f3d41b391
--- /dev/null
+++ b/lib/ansible/modules/extras/cloud/cloudstack/cs_project.py
@@ -0,0 +1,307 @@
+#!/usr/bin/python
+# -*- coding: utf-8 -*-
+#
+# (c) 2015, René Moser <mail@renemoser.net>
+#
+# This file is part of Ansible
+#
+# Ansible is free software: you can redistribute it and/or modify
+# it under the terms of the GNU General Public License as published by
+# the Free Software Foundation, either version 3 of the License, or
+# (at your option) any later version.
+#
+# Ansible is distributed in the hope that it will be useful,
+# but WITHOUT ANY WARRANTY; without even the implied warranty of
+# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+# GNU General Public License for more details.
+#
+# You should have received a copy of the GNU General Public License
+# along with Ansible. If not, see <http://www.gnu.org/licenses/>.
+
+DOCUMENTATION = '''
+---
+module: cs_project
+short_description: Manages projects on Apache CloudStack based clouds.
+description:
+ - Create, update, suspend, activate and remove projects.
+version_added: '2.0'
+author: "René Moser (@resmo)"
+options:
+ name:
+ description:
+ - Name of the project.
+ required: true
+ display_text:
+ description:
+ - Display text of the project.
+ - If not specified, C(name) will be used as C(display_text).
+ required: false
+ default: null
+ state:
+ description:
+ - State of the project.
+ required: false
+ default: 'present'
+ choices: [ 'present', 'absent', 'active', 'suspended' ]
+ domain:
+ description:
+ - Domain the project is related to.
+ required: false
+ default: null
+ account:
+ description:
+ - Account the project is related to.
+ required: false
+ default: null
+ tags:
+ description:
+ - List of tags. Tags are a list of dictionaries with keys C(key) and C(value).
+ - "If you want to delete all tags, set an empty list, e.g. C(tags: [])."
+ required: false
+ default: null
+ version_added: "2.2"
+ poll_async:
+ description:
+ - Poll async jobs until job has finished.
+ required: false
+ default: true
+extends_documentation_fragment: cloudstack
+'''
+
+EXAMPLES = '''
+# Create a project
+- local_action:
+ module: cs_project
+ name: web
+ tags:
+ - { key: admin, value: john }
+ - { key: foo, value: bar }
+
+# Rename a project
+- local_action:
+ module: cs_project
+ name: web
+ display_text: my web project
+
+# Suspend an existing project
+- local_action:
+ module: cs_project
+ name: web
+ state: suspended
+
+# Activate an existing project
+- local_action:
+ module: cs_project
+ name: web
+ state: active
+
+# Remove a project
+- local_action:
+ module: cs_project
+ name: web
+ state: absent
+'''
+
+RETURN = '''
+---
+id:
+ description: UUID of the project.
+ returned: success
+ type: string
+ sample: 04589590-ac63-4ffc-93f5-b698b8ac38b6
+name:
+ description: Name of the project.
+ returned: success
+ type: string
+ sample: web project
+display_text:
+ description: Display text of the project.
+ returned: success
+ type: string
+ sample: web project
+state:
+ description: State of the project.
+ returned: success
+ type: string
+ sample: Active
+domain:
+ description: Domain the project is related to.
+ returned: success
+ type: string
+ sample: example domain
+account:
+ description: Account the project is related to.
+ returned: success
+ type: string
+ sample: example account
+tags:
+ description: List of resource tags associated with the project.
+ returned: success
+ type: list
+ sample: '[ { "key": "foo", "value": "bar" } ]'
+'''
+
+# import cloudstack common
+from ansible.module_utils.cloudstack import *
+
+
+class AnsibleCloudStackProject(AnsibleCloudStack):
+
+
+ def get_project(self):
+ if not self.project:
+ project = self.module.params.get('name')
+
+ args = {}
+ args['account'] = self.get_account(key='name')
+ args['domainid'] = self.get_domain(key='id')
+
+ projects = self.cs.listProjects(**args)
+ if projects:
+ for p in projects['project']:
+ if project.lower() in [ p['name'].lower(), p['id']]:
+ self.project = p
+ break
+ return self.project
+
+
+ def present_project(self):
+ project = self.get_project()
+ if not project:
+ project = self.create_project(project)
+ else:
+ project = self.update_project(project)
+ if project:
+ project = self.ensure_tags(resource=project, resource_type='project')
+ # refresh resource
+ self.project = project
+ return project
+
+
+ def update_project(self, project):
+ args = {}
+ args['id'] = project['id']
+ args['displaytext'] = self.get_or_fallback('display_text', 'name')
+
+ if self.has_changed(args, project):
+ self.result['changed'] = True
+ if not self.module.check_mode:
+ project = self.cs.updateProject(**args)
+
+ if 'errortext' in project:
+ self.module.fail_json(msg="Failed: '%s'" % project['errortext'])
+
+ poll_async = self.module.params.get('poll_async')
+ if project and poll_async:
+ project = self.poll_job(project, 'project')
+ return project
+
+
+ def create_project(self, project):
+ self.result['changed'] = True
+
+ args = {}
+ args['name'] = self.module.params.get('name')
+ args['displaytext'] = self.get_or_fallback('display_text', 'name')
+ args['account'] = self.get_account('name')
+ args['domainid'] = self.get_domain('id')
+
+ if not self.module.check_mode:
+ project = self.cs.createProject(**args)
+
+ if 'errortext' in project:
+ self.module.fail_json(msg="Failed: '%s'" % project['errortext'])
+
+ poll_async = self.module.params.get('poll_async')
+ if project and poll_async:
+ project = self.poll_job(project, 'project')
+ return project
+
+
+ def state_project(self, state='active'):
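+ # Make sure the project exists and is up to date before toggling its state.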
+ project = self.present_project()
+
+ if project['state'].lower() != state:
+ self.result['changed'] = True
+
+ args = {}
+ args['id'] = project['id']
+
+ if not self.module.check_mode:
+ if state == 'suspended':
+ project = self.cs.suspendProject(**args)
+ else:
+ project = self.cs.activateProject(**args)
+
+ if 'errortext' in project:
+ self.module.fail_json(msg="Failed: '%s'" % project['errortext'])
+
+ poll_async = self.module.params.get('poll_async')
+ if project and poll_async:
+ project = self.poll_job(project, 'project')
+ return project
+
+
+ def absent_project(self):
+ project = self.get_project()
+ if project:
+ self.result['changed'] = True
+
+ args = {}
+ args['id'] = project['id']
+
+ if not self.module.check_mode:
+ res = self.cs.deleteProject(**args)
+
+ if 'errortext' in res:
+ self.module.fail_json(msg="Failed: '%s'" % res['errortext'])
+
+ poll_async = self.module.params.get('poll_async')
+ if res and poll_async:
+ res = self.poll_job(res, 'project')
+ return project
+
+
+
+def main():
+ argument_spec = cs_argument_spec()
+ argument_spec.update(dict(
+ name = dict(required=True),
+ display_text = dict(default=None),
+ state = dict(choices=['present', 'absent', 'active', 'suspended' ], default='present'),
+ domain = dict(default=None),
+ account = dict(default=None),
+ poll_async = dict(type='bool', default=True),
+ tags=dict(type='list', aliases=['tag'], default=None),
+ ))
+
+ module = AnsibleModule(
+ argument_spec=argument_spec,
+ required_together=cs_required_together(),
+ supports_check_mode=True
+ )
+
+ try:
+ acs_project = AnsibleCloudStackProject(module)
+
+ state = module.params.get('state')
+ if state in ['absent']:
+ project = acs_project.absent_project()
+
+ elif state in ['active', 'suspended']:
+ project = acs_project.state_project(state=state)
+
+ else:
+ project = acs_project.present_project()
+
+ result = acs_project.get_result(project)
+
+ except CloudStackException as e:
+ module.fail_json(msg='CloudStackException: %s' % str(e))
+
+ module.exit_json(**result)
+
+# import module snippets
+from ansible.module_utils.basic import *
+if __name__ == '__main__':
+ main()
diff --git a/lib/ansible/modules/extras/cloud/cloudstack/cs_resourcelimit.py b/lib/ansible/modules/extras/cloud/cloudstack/cs_resourcelimit.py
new file mode 100644
index 0000000000..40567165c5
--- /dev/null
+++ b/lib/ansible/modules/extras/cloud/cloudstack/cs_resourcelimit.py
@@ -0,0 +1,216 @@
+#!/usr/bin/python
+# -*- coding: utf-8 -*-
+#
+# (c) 2016, René Moser <mail@renemoser.net>
+#
+# This file is part of Ansible
+#
+# Ansible is free software: you can redistribute it and/or modify
+# it under the terms of the GNU General Public License as published by
+# the Free Software Foundation, either version 3 of the License, or
+# (at your option) any later version.
+#
+# Ansible is distributed in the hope that it will be useful,
+# but WITHOUT ANY WARRANTY; without even the implied warranty of
+# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+# GNU General Public License for more details.
+#
+# You should have received a copy of the GNU General Public License
+# along with Ansible. If not, see <http://www.gnu.org/licenses/>.
+
+DOCUMENTATION = '''
+---
+module: cs_resourcelimit
+short_description: Manages resource limits on Apache CloudStack based clouds.
+description:
+ - Manage limits of resources for domains, accounts and projects.
+version_added: "2.1"
+author: "René Moser (@resmo)"
+options:
+ resource_type:
+ description:
+ - Type of the resource.
+ required: true
+ choices:
+ - instance
+ - ip_address
+ - volume
+ - snapshot
+ - template
+ - network
+ - vpc
+ - cpu
+ - memory
+ - primary_storage
+ - secondary_storage
+ aliases: [ 'type' ]
+ limit:
+ description:
+ - Maximum number of the resource.
+ - Default is unlimited C(-1).
+ required: false
+ default: -1
+ aliases: [ 'max' ]
+ domain:
+ description:
+ - Domain the resource is related to.
+ required: false
+ default: null
+ account:
+ description:
+ - Account the resource is related to.
+ required: false
+ default: null
+ project:
+ description:
+ - Name of the project the resource is related to.
+ required: false
+ default: null
+extends_documentation_fragment: cloudstack
+'''
+
+EXAMPLES = '''
+# Update a resource limit for instances of a domain
+local_action:
+ module: cs_resourcelimit
+ type: instance
+ limit: 10
+ domain: customers
+
+# Update a resource limit for instances of an account
+local_action:
+ module: cs_resourcelimit
+ type: instance
+ limit: 12
+ account: moserre
+ domain: customers
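+
+# Update a resource limit for CPUs of a project (illustrative values)
+local_action:
+ module: cs_resourcelimit
+ type: cpu
+ limit: 20
+ project: web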
+'''
+
+RETURN = '''
+---
+resource_type:
+ description: Type of the resource.
+ returned: success
+ type: string
+ sample: instance
+limit:
+ description: Maximum number of the resource.
+ returned: success
+ type: int
+ sample: -1
+domain:
+ description: Domain the resource is related to.
+ returned: success
+ type: string
+ sample: example domain
+account:
+ description: Account the resource is related to.
+ returned: success
+ type: string
+ sample: example account
+project:
+ description: Project the resource is related to.
+ returned: success
+ type: string
+ sample: example project
+'''
+
+# import cloudstack common
+from ansible.module_utils.cloudstack import *
+
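+# Human readable resource type names mapped to the numeric type ids the
+# CloudStack API expects; id 5 (projects) is deliberately not exposed here.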
+RESOURCE_TYPES = {
+ 'instance': 0,
+ 'ip_address': 1,
+ 'volume': 2,
+ 'snapshot': 3,
+ 'template': 4,
+ 'network': 6,
+ 'vpc': 7,
+ 'cpu': 8,
+ 'memory': 9,
+ 'primary_storage': 10,
+ 'secondary_storage': 11,
+}
+
+class AnsibleCloudStackResourceLimit(AnsibleCloudStack):
+
+ def __init__(self, module):
+ super(AnsibleCloudStackResourceLimit, self).__init__(module)
+ self.returns = {
+ 'max': 'limit',
+ }
+
+
+ def get_resource_type(self):
+ resource_type = self.module.params.get('resource_type')
+ return RESOURCE_TYPES.get(resource_type)
+
+
+ def get_resource_limit(self):
+ args = {}
+ args['account'] = self.get_account(key='name')
+ args['domainid'] = self.get_domain(key='id')
+ args['projectid'] = self.get_project(key='id')
+ args['resourcetype'] = self.get_resource_type()
+ resource_limit = self.cs.listResourceLimits(**args)
+ if resource_limit:
+ return resource_limit['resourcelimit'][0]
+ self.module.fail_json(msg="Resource limit type '%s' not found." % self.module.params.get('resource_type'))
+
+
+ def update_resource_limit(self):
+ resource_limit = self.get_resource_limit()
+
+ args = {}
+ args['account'] = self.get_account(key='name')
+ args['domainid'] = self.get_domain(key='id')
+ args['projectid'] = self.get_project(key='id')
+ args['resourcetype'] = self.get_resource_type()
+ args['max'] = self.module.params.get('limit', -1)
+
+ if self.has_changed(args, resource_limit):
+ self.result['changed'] = True
+ if not self.module.check_mode:
+ res = self.cs.updateResourceLimit(**args)
+ if 'errortext' in res:
+ self.module.fail_json(msg="Failed: '%s'" % res['errortext'])
+ resource_limit = res['resourcelimit']
+ return resource_limit
+
+
+ def get_result(self, resource_limit):
+ self.result = super(AnsibleCloudStackResourceLimit, self).get_result(resource_limit)
+ self.result['resource_type'] = self.module.params.get('resource_type')
+ return self.result
+
+
+def main():
+ argument_spec = cs_argument_spec()
+ argument_spec.update(dict(
+ resource_type = dict(required=True, choices=RESOURCE_TYPES.keys(), aliases=['type']),
+ limit = dict(default=-1, aliases=['max']),
+ domain = dict(default=None),
+ account = dict(default=None),
+ project = dict(default=None),
+ ))
+
+ module = AnsibleModule(
+ argument_spec=argument_spec,
+ required_together=cs_required_together(),
+ supports_check_mode=True
+ )
+
+ try:
+ acs_resource_limit = AnsibleCloudStackResourceLimit(module)
+ resource_limit = acs_resource_limit.update_resource_limit()
+ result = acs_resource_limit.get_result(resource_limit)
+
+ except CloudStackException as e:
+ module.fail_json(msg='CloudStackException: %s' % str(e))
+
+ module.exit_json(**result)
+
+# import module snippets
+from ansible.module_utils.basic import *
+if __name__ == '__main__':
+ main()
diff --git a/lib/ansible/modules/extras/cloud/cloudstack/cs_router.py b/lib/ansible/modules/extras/cloud/cloudstack/cs_router.py
new file mode 100644
index 0000000000..73575c8001
--- /dev/null
+++ b/lib/ansible/modules/extras/cloud/cloudstack/cs_router.py
@@ -0,0 +1,374 @@
+#!/usr/bin/python
+# -*- coding: utf-8 -*-
+#
+# (c) 2016, René Moser <mail@renemoser.net>
+#
+# This file is part of Ansible
+#
+# Ansible is free software: you can redistribute it and/or modify
+# it under the terms of the GNU General Public License as published by
+# the Free Software Foundation, either version 3 of the License, or
+# (at your option) any later version.
+#
+# Ansible is distributed in the hope that it will be useful,
+# but WITHOUT ANY WARRANTY; without even the implied warranty of
+# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+# GNU General Public License for more details.
+#
+# You should have received a copy of the GNU General Public License
+# along with Ansible. If not, see <http://www.gnu.org/licenses/>.
+
+DOCUMENTATION = '''
+---
+module: cs_router
+short_description: Manages routers on Apache CloudStack based clouds.
+description:
+ - Start, restart, stop and destroy routers.
+ - C(state=present) is not able to create routers, use M(cs_network) instead.
+version_added: "2.2"
+author: "René Moser (@resmo)"
+options:
+ name:
+ description:
+ - Name of the router.
+ required: true
+ service_offering:
+ description:
+ - Name or id of the service offering of the router.
+ required: false
+ default: null
+ domain:
+ description:
+ - Domain the router is related to.
+ required: false
+ default: null
+ account:
+ description:
+ - Account the router is related to.
+ required: false
+ default: null
+ project:
+ description:
+ - Name of the project the router is related to.
+ required: false
+ default: null
+ state:
+ description:
+ - State of the router.
+ required: false
+ default: 'present'
+ choices: [ 'present', 'absent', 'started', 'stopped', 'restarted' ]
+extends_documentation_fragment: cloudstack
+'''
+
+EXAMPLES = '''
+# Ensure the router has the desired service offering, no matter if
+# the router is running or not.
+- local_action:
+ module: cs_router
+ name: r-40-VM
+ service_offering: System Offering for Software Router
+
+# Ensure started
+- local_action:
+ module: cs_router
+ name: r-40-VM
+ state: started
+
+# Ensure started with the desired service offering.
+# If the service offering changes, the router will be rebooted.
+- local_action:
+ module: cs_router
+ name: r-40-VM
+ service_offering: System Offering for Software Router
+ state: started
+
+# Ensure stopped
+- local_action:
+ module: cs_router
+ name: r-40-VM
+ state: stopped
+
+# Remove a router
+- local_action:
+ module: cs_router
+ name: r-40-VM
+ state: absent
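+
+# Ensure a project's router is started (illustrative; the project
+# name is hypothetical)
+- local_action:
+ module: cs_router
+ name: r-40-VM
+ project: web-app
+ state: started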
+'''
+
+RETURN = '''
+---
+id:
+ description: UUID of the router.
+ returned: success
+ type: string
+ sample: 04589590-ac63-4ffc-93f5-b698b8ac38b6
+name:
+ description: Name of the router.
+ returned: success
+ type: string
+ sample: r-40-VM
+created:
+ description: Date the router was created.
+ returned: success
+ type: string
+ sample: 2014-12-01T14:57:57+0100
+template_version:
+ description: Version of the system VM template.
+ returned: success
+ type: string
+ sample: 4.5.1
+requires_upgrade:
+ description: Whether the router needs to be upgraded to the new template.
+ returned: success
+ type: bool
+ sample: false
+redundant_state:
+ description: Redundant state of the router.
+ returned: success
+ type: string
+ sample: UNKNOWN
+role:
+ description: Role of the router.
+ returned: success
+ type: string
+ sample: VIRTUAL_ROUTER
+zone:
+ description: Name of zone the router is in.
+ returned: success
+ type: string
+ sample: ch-gva-2
+service_offering:
+ description: Name of the service offering the router has.
+ returned: success
+ type: string
+ sample: System Offering For Software Router
+state:
+ description: State of the router.
+ returned: success
+ type: string
+ sample: Active
+domain:
+ description: Domain the router is related to.
+ returned: success
+ type: string
+ sample: ROOT
+account:
+ description: Account the router is related to.
+ returned: success
+ type: string
+ sample: admin
+'''
+
+# import cloudstack common
+from ansible.module_utils.cloudstack import *
+
+class AnsibleCloudStackRouter(AnsibleCloudStack):
+
+ def __init__(self, module):
+ super(AnsibleCloudStackRouter, self).__init__(module)
+ self.returns = {
+ 'serviceofferingname': 'service_offering',
+ 'version': 'template_version',
+ 'requiresupgrade': 'requires_upgrade',
+ 'redundantstate': 'redundant_state',
+ 'role': 'role'
+ }
+ self.router = None
+
+
+ def get_service_offering_id(self):
+ service_offering = self.module.params.get('service_offering')
+ if not service_offering:
+ return None
+
+ args = {}
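+ # routers are system VMs, so only system service offerings are relevant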
+ args['issystem'] = True
+
+ service_offerings = self.cs.listServiceOfferings(**args)
+ if service_offerings:
+ for s in service_offerings['serviceoffering']:
+ if service_offering in [ s['name'], s['id'] ]:
+ return s['id']
+ self.module.fail_json(msg="Service offering '%s' not found" % service_offering)
+
+ def get_router(self):
+ if not self.router:
+ router = self.module.params.get('name')
+
+ args = {}
+ args['projectid'] = self.get_project(key='id')
+ args['account'] = self.get_account(key='name')
+ args['domainid'] = self.get_domain(key='id')
+
+ routers = self.cs.listRouters(**args)
+ if routers:
+ for r in routers['router']:
+ if router.lower() in [ r['name'].lower(), r['id']]:
+ self.router = r
+ break
+ return self.router
+
+ def start_router(self):
+ router = self.get_router()
+ if not router:
+ self.module.fail_json(msg="Router not found")
+
+ if router['state'].lower() != "running":
+ self.result['changed'] = True
+
+ args = {}
+ args['id'] = router['id']
+
+ if not self.module.check_mode:
+ res = self.cs.startRouter(**args)
+ if 'errortext' in res:
+ self.module.fail_json(msg="Failed: '%s'" % res['errortext'])
+
+ poll_async = self.module.params.get('poll_async')
+ if poll_async:
+ router = self.poll_job(res, 'router')
+ return router
+
+ def stop_router(self):
+ router = self.get_router()
+ if not router:
+ self.module.fail_json(msg="Router not found")
+
+ if router['state'].lower() != "stopped":
+ self.result['changed'] = True
+
+ args = {}
+ args['id'] = router['id']
+
+ if not self.module.check_mode:
+ res = self.cs.stopRouter(**args)
+ if 'errortext' in res:
+ self.module.fail_json(msg="Failed: '%s'" % res['errortext'])
+
+ poll_async = self.module.params.get('poll_async')
+ if poll_async:
+ router = self.poll_job(res, 'router')
+ return router
+
+ def reboot_router(self):
+ router = self.get_router()
+ if not router:
+ self.module.fail_json(msg="Router not found")
+
+ self.result['changed'] = True
+
+ args = {}
+ args['id'] = router['id']
+
+ if not self.module.check_mode:
+ res = self.cs.rebootRouter(**args)
+ if 'errortext' in res:
+ self.module.fail_json(msg="Failed: '%s'" % res['errortext'])
+
+ poll_async = self.module.params.get('poll_async')
+ if poll_async:
+ router = self.poll_job(res, 'router')
+ return router
+
+ def absent_router(self):
+ router = self.get_router()
+ if router:
+ self.result['changed'] = True
+
+ args = {}
+ args['id'] = router['id']
+
+ if not self.module.check_mode:
+ res = self.cs.destroyRouter(**args)
+
+ if 'errortext' in res:
+ self.module.fail_json(msg="Failed: '%s'" % res['errortext'])
+
+ poll_async = self.module.params.get('poll_async')
+ if poll_async:
+ self.poll_job(res, 'router')
+ return router
+
+
+ def present_router(self):
+ router = self.get_router()
+ if not router:
+ self.module.fail_json(msg="Router can not be created using the API, see cs_network.")
+
+ args = {}
+ args['id'] = router['id']
+ args['serviceofferingid'] = self.get_service_offering_id()
+
+ state = self.module.params.get('state')
+
+ if self.has_changed(args, router):
+ self.result['changed'] = True
+
+ if not self.module.check_mode:
+ current_state = router['state'].lower()
+
+ self.stop_router()
+ router = self.cs.changeServiceForRouter(**args)
+
+ if 'errortext' in router:
+ self.module.fail_json(msg="Failed: '%s'" % res['errortext'])
+
+ if state in [ 'restarted', 'started' ]:
+ router = self.start_router()
+
+ # with state=present, restore the state the router had before the
+ # service offering change.
+ elif state == "present" and current_state == "running":
+ router = self.start_router()
+
+ elif state == "started":
+ router = self.start_router()
+
+ elif state == "stopped":
+ router = self.stop_router()
+
+ elif state == "restarted":
+ router = self.reboot_router()
+
+ return router
+
+
+def main():
+ argument_spec = cs_argument_spec()
+ argument_spec.update(dict(
+ name = dict(required=True),
+ service_offering = dict(default=None),
+ state = dict(choices=['present', 'started', 'stopped', 'restarted', 'absent'], default="present"),
+ domain = dict(default=None),
+ account = dict(default=None),
+ project = dict(default=None),
+ poll_async = dict(type='bool', default=True),
+ ))
+
+ module = AnsibleModule(
+ argument_spec=argument_spec,
+ required_together=cs_required_together(),
+ supports_check_mode=True
+ )
+
+ try:
+ acs_router = AnsibleCloudStackRouter(module)
+
+ state = module.params.get('state')
+ if state in ['absent']:
+ router = acs_router.absent_router()
+ else:
+ router = acs_router.present_router()
+
+ result = acs_router.get_result(router)
+
+ except CloudStackException as e:
+ module.fail_json(msg='CloudStackException: %s' % str(e))
+
+ module.exit_json(**result)
+
+# import module snippets
+from ansible.module_utils.basic import *
+if __name__ == '__main__':
+ main()
diff --git a/lib/ansible/modules/extras/cloud/cloudstack/cs_securitygroup.py b/lib/ansible/modules/extras/cloud/cloudstack/cs_securitygroup.py
new file mode 100644
index 0000000000..edf4d533f4
--- /dev/null
+++ b/lib/ansible/modules/extras/cloud/cloudstack/cs_securitygroup.py
@@ -0,0 +1,219 @@
+#!/usr/bin/python
+# -*- coding: utf-8 -*-
+#
+# (c) 2015, René Moser <mail@renemoser.net>
+#
+# This file is part of Ansible
+#
+# Ansible is free software: you can redistribute it and/or modify
+# it under the terms of the GNU General Public License as published by
+# the Free Software Foundation, either version 3 of the License, or
+# (at your option) any later version.
+#
+# Ansible is distributed in the hope that it will be useful,
+# but WITHOUT ANY WARRANTY; without even the implied warranty of
+# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+# GNU General Public License for more details.
+#
+# You should have received a copy of the GNU General Public License
+# along with Ansible. If not, see <http://www.gnu.org/licenses/>.
+
+DOCUMENTATION = '''
+---
+module: cs_securitygroup
+short_description: Manages security groups on Apache CloudStack based clouds.
+description:
+ - Create and remove security groups.
+version_added: '2.0'
+author: "René Moser (@resmo)"
+options:
+ name:
+ description:
+ - Name of the security group.
+ required: true
+ description:
+ description:
+ - Description of the security group.
+ required: false
+ default: null
+ state:
+ description:
+ - State of the security group.
+ required: false
+ default: 'present'
+ choices: [ 'present', 'absent' ]
+ domain:
+ description:
+ - Domain the security group is related to.
+ required: false
+ default: null
+ account:
+ description:
+ - Account the security group is related to.
+ required: false
+ default: null
+ project:
+ description:
+ - Name of the project the security group is to be created in.
+ required: false
+ default: null
+extends_documentation_fragment: cloudstack
+'''
+
+EXAMPLES = '''
+# Create a security group
+- local_action:
+ module: cs_securitygroup
+ name: default
+ description: default security group
+
+# Remove a security group
+- local_action:
+ module: cs_securitygroup
+ name: default
+ state: absent
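+
+# Create a security group in a project (illustrative; names are hypothetical)
+- local_action:
+ module: cs_securitygroup
+ name: web
+ description: web servers
+ project: web-app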
+'''
+
+RETURN = '''
+---
+id:
+ description: UUID of the security group.
+ returned: success
+ type: string
+ sample: a6f7a5fc-43f8-11e5-a151-feff819cdc9f
+name:
+ description: Name of security group.
+ returned: success
+ type: string
+ sample: app
+description:
+ description: Description of security group.
+ returned: success
+ type: string
+ sample: application security group
+tags:
+ description: List of resource tags associated with the security group.
+ returned: success
+ type: list
+ sample: '[ { "key": "foo", "value": "bar" } ]'
+project:
+ description: Name of project the security group is related to.
+ returned: success
+ type: string
+ sample: Production
+domain:
+ description: Domain the security group is related to.
+ returned: success
+ type: string
+ sample: example domain
+account:
+ description: Account the security group is related to.
+ returned: success
+ type: string
+ sample: example account
+'''
+
+# import cloudstack common
+from ansible.module_utils.cloudstack import *
+
+
+class AnsibleCloudStackSecurityGroup(AnsibleCloudStack):
+
+ def __init__(self, module):
+ super(AnsibleCloudStackSecurityGroup, self).__init__(module)
+ self.security_group = None
+
+
+ def get_security_group(self):
+ if not self.security_group:
+
+ args = {}
+ args['projectid'] = self.get_project(key='id')
+ args['account'] = self.get_account(key='name')
+ args['domainid'] = self.get_domain(key='id')
+ args['securitygroupname'] = self.module.params.get('name')
+
+ sgs = self.cs.listSecurityGroups(**args)
+ if sgs:
+ self.security_group = sgs['securitygroup'][0]
+ return self.security_group
+
+
+ def create_security_group(self):
+ security_group = self.get_security_group()
+ if not security_group:
+ self.result['changed'] = True
+
+ args = {}
+ args['name'] = self.module.params.get('name')
+ args['projectid'] = self.get_project(key='id')
+ args['account'] = self.get_account(key='name')
+ args['domainid'] = self.get_domain(key='id')
+ args['description'] = self.module.params.get('description')
+
+ if not self.module.check_mode:
+ res = self.cs.createSecurityGroup(**args)
+ if 'errortext' in res:
+ self.module.fail_json(msg="Failed: '%s'" % res['errortext'])
+ security_group = res['securitygroup']
+
+ return security_group
+
+
+ def remove_security_group(self):
+ security_group = self.get_security_group()
+ if security_group:
+ self.result['changed'] = True
+
+ args = {}
+ args['name'] = self.module.params.get('name')
+ args['projectid'] = self.get_project(key='id')
+ args['account'] = self.get_account(key='name')
+ args['domainid'] = self.get_domain(key='id')
+
+ if not self.module.check_mode:
+ res = self.cs.deleteSecurityGroup(**args)
+ if 'errortext' in res:
+ self.module.fail_json(msg="Failed: '%s'" % res['errortext'])
+
+ return security_group
+
+
+
+def main():
+ argument_spec = cs_argument_spec()
+ argument_spec.update(dict(
+ name = dict(required=True),
+ description = dict(default=None),
+ state = dict(choices=['present', 'absent'], default='present'),
+ project = dict(default=None),
+ account = dict(default=None),
+ domain = dict(default=None),
+ ))
+
+ module = AnsibleModule(
+ argument_spec=argument_spec,
+ required_together=cs_required_together(),
+ supports_check_mode=True
+ )
+
+ try:
+ acs_sg = AnsibleCloudStackSecurityGroup(module)
+
+ state = module.params.get('state')
+ if state in ['absent']:
+ sg = acs_sg.remove_security_group()
+ else:
+ sg = acs_sg.create_security_group()
+
+ result = acs_sg.get_result(sg)
+
+ except CloudStackException as e:
+ module.fail_json(msg='CloudStackException: %s' % str(e))
+
+ module.exit_json(**result)
+
+# import module snippets
+from ansible.module_utils.basic import *
+if __name__ == '__main__':
+ main()
diff --git a/lib/ansible/modules/extras/cloud/cloudstack/cs_securitygroup_rule.py b/lib/ansible/modules/extras/cloud/cloudstack/cs_securitygroup_rule.py
new file mode 100644
index 0000000000..5ac22960b5
--- /dev/null
+++ b/lib/ansible/modules/extras/cloud/cloudstack/cs_securitygroup_rule.py
@@ -0,0 +1,421 @@
+#!/usr/bin/python
+# -*- coding: utf-8 -*-
+#
+# (c) 2015, René Moser <mail@renemoser.net>
+#
+# This file is part of Ansible
+#
+# Ansible is free software: you can redistribute it and/or modify
+# it under the terms of the GNU General Public License as published by
+# the Free Software Foundation, either version 3 of the License, or
+# (at your option) any later version.
+#
+# Ansible is distributed in the hope that it will be useful,
+# but WITHOUT ANY WARRANTY; without even the implied warranty of
+# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+# GNU General Public License for more details.
+#
+# You should have received a copy of the GNU General Public License
+# along with Ansible. If not, see <http://www.gnu.org/licenses/>.
+
+DOCUMENTATION = '''
+---
+module: cs_securitygroup_rule
+short_description: Manages security group rules on Apache CloudStack based clouds.
+description:
+ - Add and remove security group rules.
+version_added: '2.0'
+author: "René Moser (@resmo)"
+options:
+ security_group:
+ description:
+ - Name of the security group the rule is related to. The security group must already exist.
+ required: true
+ state:
+ description:
+ - State of the security group rule.
+ required: false
+ default: 'present'
+ choices: [ 'present', 'absent' ]
+ protocol:
+ description:
+ - Protocol of the security group rule.
+ required: false
+ default: 'tcp'
+ choices: [ 'tcp', 'udp', 'icmp', 'ah', 'esp', 'gre' ]
+ type:
+ description:
+ - Ingress or egress security group rule.
+ required: false
+ default: 'ingress'
+ choices: [ 'ingress', 'egress' ]
+ cidr:
+ description:
+ - CIDR (full notation) to be used for security group rule.
+ required: false
+ default: '0.0.0.0/0'
+ user_security_group:
+ description:
+ - Security group this rule is based on.
+ required: false
+ default: null
+ start_port:
+ description:
+ - Start port for this rule. Required if C(protocol=tcp) or C(protocol=udp).
+ required: false
+ default: null
+ aliases: [ 'port' ]
+ end_port:
+ description:
+ - End port for this rule. Required if C(protocol=tcp) or C(protocol=udp), but C(start_port) will be used if not set.
+ required: false
+ default: null
+ icmp_type:
+ description:
+ - Type of the icmp message being sent. Required if C(protocol=icmp).
+ required: false
+ default: null
+ icmp_code:
+ description:
+ - Error code for this icmp message. Required if C(protocol=icmp).
+ required: false
+ default: null
+ project:
+ description:
+ - Name of the project the security group is to be created in.
+ required: false
+ default: null
+ poll_async:
+ description:
+ - Poll async jobs until job has finished.
+ required: false
+ default: true
+extends_documentation_fragment: cloudstack
+'''
+
+EXAMPLES = '''
+---
+# Allow inbound port 80/tcp from 1.2.3.4 added to security group 'default'
+- local_action:
+ module: cs_securitygroup_rule
+ security_group: default
+ port: 80
+ cidr: 1.2.3.4/32
+
+# Allow tcp/udp outbound added to security group 'default'
+- local_action:
+ module: cs_securitygroup_rule
+ security_group: default
+ type: egress
+ start_port: 1
+ end_port: 65535
+ protocol: '{{ item }}'
+ with_items:
+ - tcp
+ - udp
+
+# Allow inbound icmp from 0.0.0.0/0 added to security group 'default'
+- local_action:
+ module: cs_securitygroup_rule
+ security_group: default
+ protocol: icmp
+ icmp_code: -1
+ icmp_type: -1
+
+# Remove rule inbound port 80/tcp from 0.0.0.0/0 from security group 'default'
+- local_action:
+ module: cs_securitygroup_rule
+ security_group: default
+ port: 80
+ state: absent
+
+# Allow inbound port 80/tcp from security group web added to security group 'default'
+- local_action:
+ module: cs_securitygroup_rule
+ security_group: default
+ port: 80
+ user_security_group: web
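+
+# Allow inbound tcp on a port range from a private network (illustrative values)
+- local_action:
+ module: cs_securitygroup_rule
+ security_group: default
+ start_port: 8000
+ end_port: 8999
+ cidr: 10.0.0.0/8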
+'''
+
+RETURN = '''
+---
+id:
+ description: UUID of the rule.
+ returned: success
+ type: string
+ sample: a6f7a5fc-43f8-11e5-a151-feff819cdc9f
+security_group:
+ description: security group of the rule.
+ returned: success
+ type: string
+ sample: default
+type:
+ description: type of the rule.
+ returned: success
+ type: string
+ sample: ingress
+cidr:
+ description: CIDR of the rule.
+ returned: success and cidr is defined
+ type: string
+ sample: 0.0.0.0/0
+user_security_group:
+ description: user security group of the rule.
+ returned: success and user_security_group is defined
+ type: string
+ sample: default
+protocol:
+ description: protocol of the rule.
+ returned: success
+ type: string
+ sample: tcp
+start_port:
+ description: start port of the rule.
+ returned: success
+ type: int
+ sample: 80
+end_port:
+ description: end port of the rule.
+ returned: success
+ type: int
+ sample: 80
+'''
+
+# import cloudstack common
+from ansible.module_utils.cloudstack import *
+
+
+class AnsibleCloudStackSecurityGroupRule(AnsibleCloudStack):
+
+ def __init__(self, module):
+ super(AnsibleCloudStackSecurityGroupRule, self).__init__(module)
+ self.returns = {
+ 'icmptype': 'icmp_type',
+ 'icmpcode': 'icmp_code',
+ 'endport': 'end_port',
+ 'startport': 'start_port',
+ 'protocol': 'protocol',
+ 'cidr': 'cidr',
+ 'securitygroupname': 'user_security_group',
+ }
+
+
+ def _tcp_udp_match(self, rule, protocol, start_port, end_port):
+ return protocol in ['tcp', 'udp'] \
+ and protocol == rule['protocol'] \
+ and start_port == int(rule['startport']) \
+ and end_port == int(rule['endport'])
+
+
+ def _icmp_match(self, rule, protocol, icmp_code, icmp_type):
+ return protocol == 'icmp' \
+ and protocol == rule['protocol'] \
+ and icmp_code == int(rule['icmpcode']) \
+ and icmp_type == int(rule['icmptype'])
+
+
+ def _ah_esp_gre_match(self, rule, protocol):
+ return protocol in ['ah', 'esp', 'gre'] \
+ and protocol == rule['protocol']
+
+
+ def _type_security_group_match(self, rule, security_group_name):
+ return security_group_name \
+ and 'securitygroupname' in rule \
+ and security_group_name == rule['securitygroupname']
+
+
+ def _type_cidr_match(self, rule, cidr):
+ return 'cidr' in rule \
+ and cidr == rule['cidr']
+
+
+ def _get_rule(self, rules):
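+ # a rule matches when both its type selector (cidr or source security
+ # group) and its protocol-specific fields equal the requested values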
+ user_security_group_name = self.module.params.get('user_security_group')
+ cidr = self.module.params.get('cidr')
+ protocol = self.module.params.get('protocol')
+ start_port = self.module.params.get('start_port')
+ end_port = self.get_or_fallback('end_port', 'start_port')
+ icmp_code = self.module.params.get('icmp_code')
+ icmp_type = self.module.params.get('icmp_type')
+
+ if protocol in ['tcp', 'udp'] and not (start_port and end_port):
+ self.module.fail_json(msg="no start_port or end_port set for protocol '%s'" % protocol)
+
+ if protocol == 'icmp' and (icmp_type is None or icmp_code is None):
+ self.module.fail_json(msg="no icmp_type or icmp_code set for protocol '%s'" % protocol)
+
+ for rule in rules:
+ if user_security_group_name:
+ type_match = self._type_security_group_match(rule, user_security_group_name)
+ else:
+ type_match = self._type_cidr_match(rule, cidr)
+
+ protocol_match = ( self._tcp_udp_match(rule, protocol, start_port, end_port) \
+ or self._icmp_match(rule, protocol, icmp_code, icmp_type) \
+ or self._ah_esp_gre_match(rule, protocol)
+ )
+
+ if type_match and protocol_match:
+ return rule
+ return None
+
+
+ def get_security_group(self, security_group_name=None):
+ if not security_group_name:
+ security_group_name = self.module.params.get('security_group')
+ args = {}
+ args['securitygroupname'] = security_group_name
+ args['projectid'] = self.get_project('id')
+ sgs = self.cs.listSecurityGroups(**args)
+ if not sgs or 'securitygroup' not in sgs:
+ self.module.fail_json(msg="security group '%s' not found" % security_group_name)
+ return sgs['securitygroup'][0]
+
+
+ def add_rule(self):
+ security_group = self.get_security_group()
+
+ args = {}
+ user_security_group_name = self.module.params.get('user_security_group')
+
+ # the user_security_group and cidr are mutually_exclusive, but cidr is defaulted to 0.0.0.0/0.
+ # that is why we ignore if we have a user_security_group.
+ if user_security_group_name:
+ args['usersecuritygrouplist'] = []
+ user_security_group = self.get_security_group(user_security_group_name)
+ args['usersecuritygrouplist'].append({
+ 'group': user_security_group['name'],
+ 'account': user_security_group['account'],
+ })
+ else:
+ args['cidrlist'] = self.module.params.get('cidr')
+
+ args['protocol'] = self.module.params.get('protocol')
+ args['startport'] = self.module.params.get('start_port')
+ args['endport'] = self.get_or_fallback('end_port', 'start_port')
+ args['icmptype'] = self.module.params.get('icmp_type')
+ args['icmpcode'] = self.module.params.get('icmp_code')
+ args['projectid'] = self.get_project('id')
+ args['securitygroupid'] = security_group['id']
+
+ rule = None
+ res = None
+ sg_type = self.module.params.get('type')
+ if sg_type == 'ingress':
+ if 'ingressrule' in security_group:
+ rule = self._get_rule(security_group['ingressrule'])
+ if not rule:
+ self.result['changed'] = True
+ if not self.module.check_mode:
+ res = self.cs.authorizeSecurityGroupIngress(**args)
+
+ elif sg_type == 'egress':
+ if 'egressrule' in security_group:
+ rule = self._get_rule(security_group['egressrule'])
+ if not rule:
+ self.result['changed'] = True
+ if not self.module.check_mode:
+ res = self.cs.authorizeSecurityGroupEgress(**args)
+
+ if res and 'errortext' in res:
+ self.module.fail_json(msg="Failed: '%s'" % res['errortext'])
+
+ poll_async = self.module.params.get('poll_async')
+ if res and poll_async:
+ security_group = self.poll_job(res, 'securitygroup')
+ key = sg_type + "rule" # ingressrule / egressrule
+ if key in security_group:
+ rule = security_group[key][0]
+ return rule
+
+
+ def remove_rule(self):
+ security_group = self.get_security_group()
+ rule = None
+ res = None
+ sg_type = self.module.params.get('type')
+ if sg_type == 'ingress':
+ rule = self._get_rule(security_group['ingressrule'])
+ if rule:
+ self.result['changed'] = True
+ if not self.module.check_mode:
+ res = self.cs.revokeSecurityGroupIngress(id=rule['ruleid'])
+
+ elif sg_type == 'egress':
+ rule = self._get_rule(security_group['egressrule'])
+ if rule:
+ self.result['changed'] = True
+ if not self.module.check_mode:
+ res = self.cs.revokeSecurityGroupEgress(id=rule['ruleid'])
+
+ if res and 'errortext' in res:
+ self.module.fail_json(msg="Failed: '%s'" % res['errortext'])
+
+ poll_async = self.module.params.get('poll_async')
+ if res and poll_async:
+ res = self.poll_job(res, 'securitygroup')
+ return rule
+
+
+ def get_result(self, security_group_rule):
+ super(AnsibleCloudStackSecurityGroupRule, self).get_result(security_group_rule)
+ self.result['type'] = self.module.params.get('type')
+ self.result['security_group'] = self.module.params.get('security_group')
+ return self.result
+
+
+
+def main():
+ argument_spec = cs_argument_spec()
+ argument_spec.update(dict(
+ security_group = dict(required=True),
+ type = dict(choices=['ingress', 'egress'], default='ingress'),
+ cidr = dict(default='0.0.0.0/0'),
+ user_security_group = dict(default=None),
+ protocol = dict(choices=['tcp', 'udp', 'icmp', 'ah', 'esp', 'gre'], default='tcp'),
+ icmp_type = dict(type='int', default=None),
+ icmp_code = dict(type='int', default=None),
+ start_port = dict(type='int', default=None, aliases=['port']),
+ end_port = dict(type='int', default=None),
+ state = dict(choices=['present', 'absent'], default='present'),
+ project = dict(default=None),
+ poll_async = dict(type='bool', default=True),
+ ))
+ required_together = cs_required_together()
+ required_together.extend([
+ ['icmp_type', 'icmp_code'],
+ ])
+
+ module = AnsibleModule(
+ argument_spec=argument_spec,
+ required_together=required_together,
+ mutually_exclusive = (
+ ['icmp_type', 'start_port'],
+ ['icmp_type', 'end_port'],
+ ['icmp_code', 'start_port'],
+ ['icmp_code', 'end_port'],
+ ),
+ supports_check_mode=True
+ )
+
+ try:
+ acs_sg_rule = AnsibleCloudStackSecurityGroupRule(module)
+
+ state = module.params.get('state')
+ if state in ['absent']:
+ sg_rule = acs_sg_rule.remove_rule()
+ else:
+ sg_rule = acs_sg_rule.add_rule()
+
+ result = acs_sg_rule.get_result(sg_rule)
+
+ except CloudStackException as e:
+ module.fail_json(msg='CloudStackException: %s' % str(e))
+
+ module.exit_json(**result)
+
+# import module snippets
+from ansible.module_utils.basic import *
+if __name__ == '__main__':
+ main()
diff --git a/lib/ansible/modules/extras/cloud/cloudstack/cs_snapshot_policy.py b/lib/ansible/modules/extras/cloud/cloudstack/cs_snapshot_policy.py
new file mode 100644
index 0000000000..ce8b2344f3
--- /dev/null
+++ b/lib/ansible/modules/extras/cloud/cloudstack/cs_snapshot_policy.py
@@ -0,0 +1,321 @@
+#!/usr/bin/python
+# -*- coding: utf-8 -*-
+#
+# (c) 2016, René Moser <mail@renemoser.net>
+#
+# This file is part of Ansible
+#
+# Ansible is free software: you can redistribute it and/or modify
+# it under the terms of the GNU General Public License as published by
+# the Free Software Foundation, either version 3 of the License, or
+# (at your option) any later version.
+#
+# Ansible is distributed in the hope that it will be useful,
+# but WITHOUT ANY WARRANTY; without even the implied warranty of
+# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+# GNU General Public License for more details.
+#
+# You should have received a copy of the GNU General Public License
+# along with Ansible. If not, see <http://www.gnu.org/licenses/>.
+
+DOCUMENTATION = '''
+---
+module: cs_snapshot_policy
+short_description: Manages volume snapshot policies on Apache CloudStack based clouds.
+description:
+ - Create, update and delete volume snapshot policies.
+version_added: '2.2'
+author: "René Moser (@resmo)"
+options:
+ volume:
+ description:
+ - Name of the volume.
+ required: true
+ interval_type:
+ description:
+ - Interval of the snapshot.
+ required: false
+ default: 'daily'
+ choices: [ 'hourly', 'daily', 'weekly', 'monthly' ]
+ aliases: [ 'interval' ]
+ max_snaps:
+ description:
+ - Max number of snapshots.
+ required: false
+ default: 8
+ aliases: [ 'max' ]
+ schedule:
+ description:
+ - Time the snapshot is scheduled. Required if C(state=present).
+ - 'Format for C(interval_type=HOURLY): C(MM)'
+ - 'Format for C(interval_type=DAILY): C(MM:HH)'
+ - 'Format for C(interval_type=WEEKLY): C(MM:HH:DD (1-7))'
+ - 'Format for C(interval_type=MONTHLY): C(MM:HH:DD (1-28))'
+ required: false
+ default: null
+ time_zone:
+ description:
+ - Specifies a timezone for this command.
+ required: false
+ default: 'UTC'
+ aliases: [ 'timezone' ]
+ state:
+ description:
+ - State of the snapshot policy.
+ required: false
+ default: 'present'
+ choices: [ 'present', 'absent' ]
+ domain:
+ description:
+ - Domain the volume is related to.
+ required: false
+ default: null
+ account:
+ description:
+ - Account the volume is related to.
+ required: false
+ default: null
+ project:
+ description:
+ - Name of the project the volume is related to.
+ required: false
+ default: null
+extends_documentation_fragment: cloudstack
+'''
+
+EXAMPLES = '''
+# Ensure a snapshot policy daily at 1h00 UTC
+- local_action:
+ module: cs_snapshot_policy
+ volume: ROOT-478
+ schedule: '00:1'
+ max_snaps: 3
+
+# Ensure a snapshot policy hourly at minute 5 UTC
+- local_action:
+ module: cs_snapshot_policy
+ volume: ROOT-478
+ schedule: '5'
+ interval_type: hourly
+ max_snaps: 1
+
+# Ensure a snapshot policy weekly on Sunday at 05h00, TZ Europe/Zurich
+- local_action:
+ module: cs_snapshot_policy
+ volume: ROOT-478
+ schedule: '00:5:1'
+ interval_type: weekly
+ max_snaps: 1
+ time_zone: 'Europe/Zurich'
+
+# Ensure a snapshot policy is absent
+- local_action:
+ module: cs_snapshot_policy
+ volume: ROOT-478
+ interval_type: hourly
+ state: absent
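+
+# Ensure a monthly snapshot policy on day 1 at 02h00 UTC (illustrative values)
+- local_action:
+ module: cs_snapshot_policy
+ volume: ROOT-478
+ schedule: '00:2:1'
+ interval_type: monthly
+ max_snaps: 6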
+'''
+
+RETURN = '''
+---
+id:
+ description: UUID of the snapshot policy.
+ returned: success
+ type: string
+ sample: a6f7a5fc-43f8-11e5-a151-feff819cdc9f
+interval_type:
+ description: interval type of the snapshot policy.
+ returned: success
+ type: string
+ sample: daily
+schedule:
+ description: schedule of the snapshot policy.
+ returned: success
+ type: string
+ sample: '00:1'
+max_snaps:
+ description: maximum number of snapshots retained.
+ returned: success
+ type: int
+ sample: 10
+time_zone:
+ description: the time zone of the snapshot policy.
+ returned: success
+ type: string
+ sample: Etc/UTC
+volume:
+ description: the volume of the snapshot policy.
+ returned: success
+ type: string
+ sample: ROOT-478
+zone:
+ description: Name of zone the volume is related to.
+ returned: success
+ type: string
+ sample: ch-gva-2
+project:
+ description: Name of project the volume is related to.
+ returned: success
+ type: string
+ sample: Production
+account:
+ description: Account the volume is related to.
+ returned: success
+ type: string
+ sample: example account
+domain:
+ description: Domain the volume is related to.
+ returned: success
+ type: string
+ sample: example domain
+'''
+
+# import cloudstack common
+from ansible.module_utils.cloudstack import *
+
+
+class AnsibleCloudStackSnapshotPolicy(AnsibleCloudStack):
+
+ def __init__(self, module):
+ super(AnsibleCloudStackSnapshotPolicy, self).__init__(module)
+ self.returns = {
+ 'schedule': 'schedule',
+ 'timezone': 'time_zone',
+ 'maxsnaps': 'max_snaps',
+ }
+ self.interval_types = {
+ 'hourly': 0,
+ 'daily': 1,
+ 'weekly': 2,
+ 'monthly': 3,
+ }
+ self.volume = None
+
+ def get_interval_type(self):
+ interval_type = self.module.params.get('interval_type')
+ return self.interval_types[interval_type]
+
+ def get_volume(self, key=None):
+ if self.volume:
+ return self._get_by_key(key, self.volume)
+
+ args = {
+ 'name': self.module.params.get('volume'),
+ 'account': self.get_account(key='name'),
+ 'domainid': self.get_domain(key='id'),
+ 'projectid': self.get_project(key='id'),
+ }
+ volumes = self.cs.listVolumes(**args)
+ if volumes:
+ self.volume = volumes['volume'][0]
+ return self._get_by_key(key, self.volume)
+ return None
+
+ def get_snapshot_policy(self):
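+ # CloudStack keeps at most one snapshot policy per interval type and
+ # volume, so the interval type is enough to identify the policy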
+ args = {
+ 'volumeid': self.get_volume(key='id')
+ }
+ policies = self.cs.listSnapshotPolicies(**args)
+ if policies:
+ for policy in policies['snapshotpolicy']:
+ if policy['intervaltype'] == self.get_interval_type():
+ return policy
+ return None
+
+ def present_snapshot_policy(self):
+ required_params = [
+ 'schedule',
+ ]
+ self.module.fail_on_missing_params(required_params=required_params)
+
+ policy = self.get_snapshot_policy()
+ args = {
+ 'intervaltype': self.module.params.get('interval_type'),
+ 'schedule': self.module.params.get('schedule'),
+ 'maxsnaps': self.module.params.get('max_snaps'),
+ 'timezone': self.module.params.get('time_zone'),
+ 'volumeid': self.get_volume(key='id')
+ }
+ if not policy or (policy and self.has_changed(policy, args)):
+ self.result['changed'] = True
+ if not self.module.check_mode:
+ res = self.cs.createSnapshotPolicy(**args)
+ if 'errortext' in res:
+ self.module.fail_json(msg="Failed: '%s'" % res['errortext'])
+ policy = res['snapshotpolicy']
+ return policy
+
+ def absent_snapshot_policy(self):
+ policy = self.get_snapshot_policy()
+ if policy:
+ self.result['changed'] = True
+ args = {
+ 'id': policy['id']
+ }
+ if not self.module.check_mode:
+ res = self.cs.deleteSnapshotPolicies(**args)
+ if 'errortext' in res:
+ self.module.fail_json(msg="Failed: '%s'" % policy['errortext'])
+ return policy
+
+ def get_result(self, policy):
+ super(AnsibleCloudStackSnapshotPolicy, self).get_result(policy)
+ if policy and 'intervaltype' in policy:
+ for key, value in self.interval_types.items():
+ if value == policy['intervaltype']:
+ self.result['interval_type'] = key
+ break
+ volume = self.get_volume()
+ if volume:
+ volume_results = {
+ 'volume': volume.get('name'),
+ 'zone': volume.get('zonename'),
+ 'project': volume.get('project'),
+ 'account': volume.get('account'),
+ 'domain': volume.get('domain'),
+ }
+ self.result.update(volume_results)
+ return self.result
+
+
+def main():
+ argument_spec = cs_argument_spec()
+ argument_spec.update(dict(
+ volume=dict(required=True),
+ interval_type=dict(default='daily', choices=['hourly', 'daily', 'weekly', 'monthly'], aliases=['interval']),
+ schedule=dict(default=None),
+ time_zone=dict(default='UTC', aliases=['timezone']),
+ max_snaps=dict(type='int', default=8, aliases=['max']),
+ state=dict(choices=['present', 'absent'], default='present'),
+ domain=dict(default=None),
+ account=dict(default=None),
+ project=dict(default=None),
+ ))
+
+ module = AnsibleModule(
+ argument_spec=argument_spec,
+ required_together=cs_required_together(),
+ supports_check_mode=True
+ )
+
+ try:
+ acs_snapshot_policy = AnsibleCloudStackSnapshotPolicy(module)
+
+ state = module.params.get('state')
+ if state in ['absent']:
+ policy = acs_snapshot_policy.absent_snapshot_policy()
+ else:
+ policy = acs_snapshot_policy.present_snapshot_policy()
+
+ result = acs_snapshot_policy.get_result(policy)
+
+ except CloudStackException as e:
+ module.fail_json(msg='CloudStackException: %s' % str(e))
+
+ module.exit_json(**result)
+
+# import module snippets
+from ansible.module_utils.basic import *
+
+if __name__ == '__main__':
+ main()
diff --git a/lib/ansible/modules/extras/cloud/cloudstack/cs_sshkeypair.py b/lib/ansible/modules/extras/cloud/cloudstack/cs_sshkeypair.py
new file mode 100644
index 0000000000..c0c73d9f3b
--- /dev/null
+++ b/lib/ansible/modules/extras/cloud/cloudstack/cs_sshkeypair.py
@@ -0,0 +1,242 @@
+#!/usr/bin/python
+# -*- coding: utf-8 -*-
+#
+# (c) 2015, René Moser <mail@renemoser.net>
+#
+# This file is part of Ansible
+#
+# Ansible is free software: you can redistribute it and/or modify
+# it under the terms of the GNU General Public License as published by
+# the Free Software Foundation, either version 3 of the License, or
+# (at your option) any later version.
+#
+# Ansible is distributed in the hope that it will be useful,
+# but WITHOUT ANY WARRANTY; without even the implied warranty of
+# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+# GNU General Public License for more details.
+#
+# You should have received a copy of the GNU General Public License
+# along with Ansible. If not, see <http://www.gnu.org/licenses/>.
+
+DOCUMENTATION = '''
+---
+module: cs_sshkeypair
+short_description: Manages SSH keys on Apache CloudStack based clouds.
+description:
+ - Create, register and remove SSH keys.
+ - If no key was found and no public key was provided, a new SSH
+ private/public key pair will be created and the private key will be returned.
+version_added: '2.0'
+author: "René Moser (@resmo)"
+options:
+ name:
+ description:
+ - Name of public key.
+ required: true
+ domain:
+ description:
+ - Domain the public key is related to.
+ required: false
+ default: null
+ account:
+ description:
+ - Account the public key is related to.
+ required: false
+ default: null
+ project:
+ description:
+ - Name of the project the public key is to be registered in.
+ required: false
+ default: null
+ state:
+ description:
+ - State of the public key.
+ required: false
+ default: 'present'
+ choices: [ 'present', 'absent' ]
+ public_key:
+ description:
+ - String of the public key.
+ required: false
+ default: null
+extends_documentation_fragment: cloudstack
+'''
+
+EXAMPLES = '''
+# create a new private / public key pair:
+- local_action: cs_sshkeypair name=linus@example.com
+ register: key
+- debug: msg='private key is {{ key.private_key }}'
+
+# remove a public key by its name:
+- local_action: cs_sshkeypair name=linus@example.com state=absent
+
+# register your existing local public key:
+- local_action: cs_sshkeypair name=linus@example.com public_key='{{ lookup('file', '~/.ssh/id_rsa.pub') }}'
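+
+# create a new key pair within a project (illustrative; the project name is hypothetical):
+- local_action: cs_sshkeypair name=deploy@example.com project=web-app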
+'''
+
+RETURN = '''
+---
+id:
+ description: UUID of the SSH public key.
+ returned: success
+ type: string
+ sample: a6f7a5fc-43f8-11e5-a151-feff819cdc9f
+name:
+ description: Name of the SSH public key.
+ returned: success
+ type: string
+ sample: linus@example.com
+fingerprint:
+ description: Fingerprint of the SSH public key.
+ returned: success
+ type: string
+ sample: "86:5e:a3:e8:bd:95:7b:07:7c:c2:5c:f7:ad:8b:09:28"
+private_key:
+ description: Private key of generated SSH keypair.
+ returned: changed
+ type: string
+ sample: "-----BEGIN RSA PRIVATE KEY-----\nMIICXQIBAAKBgQCkeFYjI+4k8bWfIRMzp4pCzhlopNydbbwRu824P5ilD4ATWMUG\nvEtuCQ2Mp5k5Bma30CdYHgh2/SbxC5RxXSUKTUJtTKpoJUy8PAhb1nn9dnfkC2oU\naRVi9NRUgypTIZxMpgooHOxvAzWxbZCyh1W+91Ld3FNaGxTLqTgeevY84wIDAQAB\nAoGAcwQwgLyUwsNB1vmjWwE0QEmvHS4FlhZyahhi4hGfZvbzAxSWHIK7YUT1c8KU\n9XsThEIN8aJ3GvcoL3OAqNKRnoNb14neejVHkYRadhxqc0GVN6AUIyCqoEMpvhFI\nQrinM572ORzv5ffRjCTbvZcYlW+sqFKNo5e8pYIB8TigpFECQQDu7bg9vkvg8xPs\nkP1K+EH0vsR6vUfy+m3euXjnbJtiP7RoTkZk0JQMOmexgy1qQhISWT0e451wd62v\nJ7M0trl5AkEAsDivJnMIlCCCypwPN4tdNUYpe9dtidR1zLmb3SA7wXk5xMUgLZI9\ncWPjBCMt0KKShdDhQ+hjXAyKQLF7iAPuOwJABjdHCMwvmy2XwhrPjCjDRoPEBtFv\n0sFzJE08+QBZVogDwIbwy+SlRWArnHGmN9J6N+H8dhZD3U4vxZPJ1MBAOQJBAJxO\nCv1dt1Q76gbwmYa49LnWO+F+2cgRTVODpr5iYt5fOmBQQRRqzFkRMkFvOqn+KVzM\nQ6LKM6dn8BEl295vLhUCQQCVDWzoSk3GjL3sOjfAUTyAj8VAXM69llaptxWWySPM\nE9pA+8rYmHfohYFx7FD5/KWCO+sfmxTNB48X0uwyE8tO\n-----END RSA PRIVATE KEY-----\n"
+'''
+
+try:
+ import sshpubkeys
+ has_lib_sshpubkeys = True
+except ImportError:
+ has_lib_sshpubkeys = False
+
+from ansible.module_utils.cloudstack import *
+
+class AnsibleCloudStackSshKey(AnsibleCloudStack):
+
+ def __init__(self, module):
+ super(AnsibleCloudStackSshKey, self).__init__(module)
+ self.returns = {
+ 'privatekey': 'private_key',
+ 'fingerprint': 'fingerprint',
+ }
+ self.ssh_key = None
+
+
+ def register_ssh_key(self, public_key):
+ ssh_key = self.get_ssh_key()
+ args = {}
+ args['domainid'] = self.get_domain('id')
+ args['account'] = self.get_account('name')
+ args['projectid'] = self.get_project('id')
+ args['name'] = self.module.params.get('name')
+
+ res = None
+ if not ssh_key:
+ self.result['changed'] = True
+ args['publickey'] = public_key
+ if not self.module.check_mode:
+ res = self.cs.registerSSHKeyPair(**args)
+
+ else:
+ fingerprint = self._get_ssh_fingerprint(public_key)
+ if ssh_key['fingerprint'] != fingerprint:
+ self.result['changed'] = True
+ if not self.module.check_mode:
+ self.cs.deleteSSHKeyPair(**args)
+ args['publickey'] = public_key
+ res = self.cs.registerSSHKeyPair(**args)
+
+ if res and 'keypair' in res:
+ ssh_key = res['keypair']
+
+ return ssh_key
+
+
+ def create_ssh_key(self):
+ ssh_key = self.get_ssh_key()
+ if not ssh_key:
+ self.result['changed'] = True
+ args = {}
+ args['domainid'] = self.get_domain('id')
+ args['account'] = self.get_account('name')
+ args['projectid'] = self.get_project('id')
+ args['name'] = self.module.params.get('name')
+ if not self.module.check_mode:
+ res = self.cs.createSSHKeyPair(**args)
+ ssh_key = res['keypair']
+ return ssh_key
+
+
+ def remove_ssh_key(self):
+ ssh_key = self.get_ssh_key()
+ if ssh_key:
+ self.result['changed'] = True
+ args = {}
+ args['domainid'] = self.get_domain('id')
+ args['account'] = self.get_account('name')
+ args['projectid'] = self.get_project('id')
+ args['name'] = self.module.params.get('name')
+ if not self.module.check_mode:
+ res = self.cs.deleteSSHKeyPair(**args)
+ return ssh_key
+
+
+ def get_ssh_key(self):
+ if not self.ssh_key:
+ args = {}
+ args['domainid'] = self.get_domain('id')
+ args['account'] = self.get_account('name')
+ args['projectid'] = self.get_project('id')
+ args['name'] = self.module.params.get('name')
+
+ ssh_keys = self.cs.listSSHKeyPairs(**args)
+ if ssh_keys and 'sshkeypair' in ssh_keys:
+ self.ssh_key = ssh_keys['sshkeypair'][0]
+ return self.ssh_key
+
+
+
+ def _get_ssh_fingerprint(self, public_key):
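+ # sshpubkeys yields the MD5 fingerprint in the same colon-separated
+ # form CloudStack reports, so the two can be compared directly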
+ key = sshpubkeys.SSHKey(public_key)
+ return key.hash()
+
+
+def main():
+ argument_spec = cs_argument_spec()
+ argument_spec.update(dict(
+ name = dict(required=True),
+ public_key = dict(default=None),
+ domain = dict(default=None),
+ account = dict(default=None),
+ project = dict(default=None),
+ state = dict(choices=['present', 'absent'], default='present'),
+ ))
+
+ module = AnsibleModule(
+ argument_spec=argument_spec,
+ required_together=cs_required_together(),
+ supports_check_mode=True
+ )
+
+ if not has_lib_sshpubkeys:
+ module.fail_json(msg="python library sshpubkeys required: pip install sshpubkeys")
+
+ try:
+ acs_sshkey = AnsibleCloudStackSshKey(module)
+ state = module.params.get('state')
+ if state in ['absent']:
+ ssh_key = acs_sshkey.remove_ssh_key()
+ else:
+ public_key = module.params.get('public_key')
+ if public_key:
+ ssh_key = acs_sshkey.register_ssh_key(public_key)
+ else:
+ ssh_key = acs_sshkey.create_ssh_key()
+
+ result = acs_sshkey.get_result(ssh_key)
+
+ except CloudStackException as e:
+ module.fail_json(msg='CloudStackException: %s' % str(e))
+
+ module.exit_json(**result)
+
+# import module snippets
+from ansible.module_utils.basic import *
+if __name__ == '__main__':
+ main()
diff --git a/lib/ansible/modules/extras/cloud/cloudstack/cs_staticnat.py b/lib/ansible/modules/extras/cloud/cloudstack/cs_staticnat.py
new file mode 100644
index 0000000000..1d721612b2
--- /dev/null
+++ b/lib/ansible/modules/extras/cloud/cloudstack/cs_staticnat.py
@@ -0,0 +1,271 @@
+#!/usr/bin/python
+# -*- coding: utf-8 -*-
+#
+# (c) 2015, René Moser <mail@renemoser.net>
+#
+# This file is part of Ansible
+#
+# Ansible is free software: you can redistribute it and/or modify
+# it under the terms of the GNU General Public License as published by
+# the Free Software Foundation, either version 3 of the License, or
+# (at your option) any later version.
+#
+# Ansible is distributed in the hope that it will be useful,
+# but WITHOUT ANY WARRANTY; without even the implied warranty of
+# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+# GNU General Public License for more details.
+#
+# You should have received a copy of the GNU General Public License
+# along with Ansible. If not, see <http://www.gnu.org/licenses/>.
+
+DOCUMENTATION = '''
+---
+module: cs_staticnat
+short_description: Manages static NATs on Apache CloudStack based clouds.
+description:
+ - Create, update and remove static NATs.
+version_added: '2.0'
+author: "René Moser (@resmo)"
+options:
+ ip_address:
+ description:
+ - Public IP address the static NAT is assigned to.
+ required: true
+ vm:
+ description:
+ - Name of the virtual machine the static NAT is created for.
+ - Required if C(state=present).
+ required: false
+ default: null
+ vm_guest_ip:
+ description:
+ - VM guest NIC secondary IP address for the static NAT.
+ required: false
+ default: null
+ network:
+ description:
+ - Network the IP address is related to.
+ required: false
+ default: null
+ version_added: "2.2"
+ state:
+ description:
+ - State of the static NAT.
+ required: false
+ default: 'present'
+ choices: [ 'present', 'absent' ]
+ domain:
+ description:
+ - Domain the static NAT is related to.
+ required: false
+ default: null
+ account:
+ description:
+ - Account the static NAT is related to.
+ required: false
+ default: null
+ project:
+ description:
+ - Name of the project the static NAT is related to.
+ required: false
+ default: null
+ zone:
+ description:
+ - Name of the zone the virtual machine is in.
+ - If not set, default zone is used.
+ required: false
+ default: null
+ poll_async:
+ description:
+ - Poll async jobs until job has finished.
+ required: false
+ default: true
+extends_documentation_fragment: cloudstack
+'''
+
+EXAMPLES = '''
+# create a static NAT: 1.2.3.4 -> web01
+- local_action:
+ module: cs_staticnat
+ ip_address: 1.2.3.4
+ vm: web01
+
+# remove a static NAT
+- local_action:
+ module: cs_staticnat
+ ip_address: 1.2.3.4
+ state: absent
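+
+# create a static NAT to a secondary guest IP of the VM (illustrative values)
+- local_action:
+ module: cs_staticnat
+ ip_address: 1.2.3.4
+ vm: web01
+ vm_guest_ip: 10.101.65.152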
+'''
+
+RETURN = '''
+---
+id:
+ description: UUID of the ip_address.
+ returned: success
+ type: string
+ sample: a6f7a5fc-43f8-11e5-a151-feff819cdc9f
+ip_address:
+ description: Public IP address.
+ returned: success
+ type: string
+ sample: 1.2.3.4
+vm_name:
+ description: Name of the virtual machine.
+ returned: success
+ type: string
+ sample: web-01
+vm_display_name:
+ description: Display name of the virtual machine.
+ returned: success
+ type: string
+ sample: web-01
+vm_guest_ip:
+ description: IP of the virtual machine.
+ returned: success
+ type: string
+ sample: 10.101.65.152
+zone:
+ description: Name of zone the static NAT is related to.
+ returned: success
+ type: string
+ sample: ch-gva-2
+project:
+ description: Name of project the static NAT is related to.
+ returned: success
+ type: string
+ sample: Production
+account:
+ description: Account the static NAT is related to.
+ returned: success
+ type: string
+ sample: example account
+domain:
+ description: Domain the static NAT is related to.
+ returned: success
+ type: string
+ sample: example domain
+'''
+
+# import cloudstack common
+from ansible.module_utils.cloudstack import *
+
+
+class AnsibleCloudStackStaticNat(AnsibleCloudStack):
+
+ def __init__(self, module):
+ super(AnsibleCloudStackStaticNat, self).__init__(module)
+ self.returns = {
+ 'virtualmachinedisplayname': 'vm_display_name',
+ 'virtualmachinename': 'vm_name',
+ 'ipaddress': 'ip_address',
+ 'vmipaddress': 'vm_guest_ip',
+ }
+
+
+ def create_static_nat(self, ip_address):
+ self.result['changed'] = True
+ args = {}
+ args['virtualmachineid'] = self.get_vm(key='id')
+ args['ipaddressid'] = ip_address['id']
+ args['vmguestip'] = self.get_vm_guest_ip()
+ args['networkid'] = self.get_network(key='id')
+ if not self.module.check_mode:
+ res = self.cs.enableStaticNat(**args)
+ if 'errortext' in res:
+ self.module.fail_json(msg="Failed: '%s'" % res['errortext'])
+
+ # reset ip address and query new values
+ self.ip_address = None
+ ip_address = self.get_ip_address()
+ return ip_address
+
+
+ def update_static_nat(self, ip_address):
+ args = {}
+ args['virtualmachineid'] = self.get_vm(key='id')
+ args['ipaddressid'] = ip_address['id']
+ args['vmguestip'] = self.get_vm_guest_ip()
+
+ # make an alias, so we can use _has_changed()
+ ip_address['vmguestip'] = ip_address['vmipaddress']
+ if self.has_changed(args, ip_address, ['vmguestip', 'virtualmachineid']):
+ self.result['changed'] = True
+ if not self.module.check_mode:
+ res = self.cs.disableStaticNat(ipaddressid=ip_address['id'])
+ if 'errortext' in res:
+ self.module.fail_json(msg="Failed: '%s'" % res['errortext'])
+ self.poll_job(res, 'staticnat')
+ res = self.cs.enableStaticNat(**args)
+ if 'errortext' in res:
+ self.module.fail_json(msg="Failed: '%s'" % res['errortext'])
+
+ # reset ip address and query new values
+ self.ip_address = None
+ ip_address = self.get_ip_address()
+ return ip_address
+
+
+ def present_static_nat(self):
+ ip_address = self.get_ip_address()
+ if not ip_address['isstaticnat']:
+ ip_address = self.create_static_nat(ip_address)
+ else:
+ ip_address = self.update_static_nat(ip_address)
+ return ip_address
+
+
+ def absent_static_nat(self):
+ ip_address = self.get_ip_address()
+ if ip_address['isstaticnat']:
+ self.result['changed'] = True
+ if not self.module.check_mode:
+ res = self.cs.disableStaticNat(ipaddressid=ip_address['id'])
+ if 'errortext' in res:
+ self.module.fail_json(msg="Failed: '%s'" % res['errortext'])
+ poll_async = self.module.params.get('poll_async')
+ if poll_async:
+ self.poll_job(res, 'staticnat')
+ return ip_address
+
+
+def main():
+ argument_spec = cs_argument_spec()
+ argument_spec.update(dict(
+ ip_address = dict(required=True),
+ vm = dict(default=None),
+ vm_guest_ip = dict(default=None),
+ network = dict(default=None),
+ state = dict(choices=['present', 'absent'], default='present'),
+ zone = dict(default=None),
+ domain = dict(default=None),
+ account = dict(default=None),
+ project = dict(default=None),
+ poll_async = dict(type='bool', default=True),
+ ))
+
+ module = AnsibleModule(
+ argument_spec=argument_spec,
+ required_together=cs_required_together(),
+ supports_check_mode=True
+ )
+
+ try:
+ acs_static_nat = AnsibleCloudStackStaticNat(module)
+
+ state = module.params.get('state')
+ if state in ['absent']:
+ ip_address = acs_static_nat.absent_static_nat()
+ else:
+ ip_address = acs_static_nat.present_static_nat()
+
+ result = acs_static_nat.get_result(ip_address)
+
+ except CloudStackException as e:
+ module.fail_json(msg='CloudStackException: %s' % str(e))
+
+ module.exit_json(**result)
+
+# import module snippets
+from ansible.module_utils.basic import *
+if __name__ == '__main__':
+ main()
diff --git a/lib/ansible/modules/extras/cloud/cloudstack/cs_template.py b/lib/ansible/modules/extras/cloud/cloudstack/cs_template.py
new file mode 100644
index 0000000000..3db1175518
--- /dev/null
+++ b/lib/ansible/modules/extras/cloud/cloudstack/cs_template.py
@@ -0,0 +1,668 @@
+#!/usr/bin/python
+# -*- coding: utf-8 -*-
+#
+# (c) 2015, René Moser <mail@renemoser.net>
+#
+# This file is part of Ansible
+#
+# Ansible is free software: you can redistribute it and/or modify
+# it under the terms of the GNU General Public License as published by
+# the Free Software Foundation, either version 3 of the License, or
+# (at your option) any later version.
+#
+# Ansible is distributed in the hope that it will be useful,
+# but WITHOUT ANY WARRANTY; without even the implied warranty of
+# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+# GNU General Public License for more details.
+#
+# You should have received a copy of the GNU General Public License
+# along with Ansible. If not, see <http://www.gnu.org/licenses/>.
+
+DOCUMENTATION = '''
+---
+module: cs_template
+short_description: Manages templates on Apache CloudStack based clouds.
+description:
+ - Register a template from URL, create a template from a ROOT volume of a stopped VM or its snapshot, extract and delete templates.
+version_added: '2.0'
+author: "René Moser (@resmo)"
+options:
+ name:
+ description:
+ - Name of the template.
+ required: true
+ url:
+ description:
+ - URL of where the template is hosted on C(state=present).
+ - URL to which the template would be extracted on C(state=extracted).
+ - Mutually exclusive with C(vm).
+ required: false
+ default: null
+ vm:
+ description:
+ - Name of the VM the template will be created from, using its volume or alternatively a snapshot.
+ - VM must be in stopped state if created from its volume.
+ - Mutually exclusive with C(url).
+ required: false
+ default: null
+ snapshot:
+ description:
+ - Name of the snapshot, created from the VM ROOT volume, that the template will be created from.
+ - C(vm) is required together with this argument.
+ required: false
+ default: null
+ os_type:
+ description:
+ - OS type that best represents the OS of this template.
+ required: false
+ default: null
+ checksum:
+ description:
+ - The MD5 checksum value of this template.
+ - If set, we search by checksum instead of name.
+ required: false
+ default: null
+ is_ready:
+ description:
+ - This flag is used for searching existing templates.
+ - If set to C(true), it will only list templates ready for deployment, e.g. successfully downloaded and installed.
+ - Recommended to be set to C(false).
+ required: false
+ default: false
+ is_public:
+ description:
+ - Register the template to be publicly available to all users.
+ - Only used if C(state) is present.
+ required: false
+ default: false
+ is_featured:
+ description:
+ - Register the template to be featured.
+ - Only used if C(state) is present.
+ required: false
+ default: false
+ is_dynamically_scalable:
+ description:
+ - Register the template as having XenServer/VMware tools installed, in order to support dynamic scaling of VM CPU/memory.
+ - Only used if C(state) is present.
+ required: false
+ default: false
+ cross_zones:
+ description:
+ - Whether the template should be synced or removed across zones.
+ - Only used if C(state) is present or absent.
+ required: false
+ default: false
+ mode:
+ description:
+ - Mode for the template extraction.
+ - Only used if C(state=extracted).
+ required: false
+ default: 'http_download'
+ choices: [ 'http_download', 'ftp_upload' ]
+ domain:
+ description:
+ - Domain the template, snapshot or VM is related to.
+ required: false
+ default: null
+ account:
+ description:
+ - Account the template, snapshot or VM is related to.
+ required: false
+ default: null
+ project:
+ description:
+ - Name of the project the template is to be registered in.
+ required: false
+ default: null
+ zone:
+ description:
+ - Name of the zone you wish the template to be registered in or deleted from.
+ - If not specified, the first zone found will be used.
+ required: false
+ default: null
+ template_filter:
+ description:
+ - Name of the filter used to search for the template.
+ required: false
+ default: 'self'
+ choices: [ 'featured', 'self', 'selfexecutable', 'sharedexecutable', 'executable', 'community' ]
+ hypervisor:
+ description:
+ - Name of the hypervisor to be used for creating the new template.
+ - Relevant when using C(state=present).
+ required: false
+ default: null
+ choices: [ 'KVM', 'VMware', 'BareMetal', 'XenServer', 'LXC', 'HyperV', 'UCS', 'OVM' ]
+ requires_hvm:
+ description:
+ - True if this template requires HVM.
+ required: false
+ default: false
+ password_enabled:
+ description:
+ - True if the template supports the password reset feature.
+ required: false
+ default: false
+ template_tag:
+ description:
+ - The tag for this template.
+ required: false
+ default: null
+ sshkey_enabled:
+ description:
+ - True if the template supports the sshkey upload feature.
+ required: false
+ default: false
+ is_routing:
+ description:
+ - True if the template type is routing, i.e. the template is used to deploy a router.
+ - Only considered if C(url) is used.
+ required: false
+ default: false
+ format:
+ description:
+ - The format for the template.
+ - Relevant when using C(state=present).
+ required: false
+ default: null
+ choices: [ 'QCOW2', 'RAW', 'VHD', 'OVA' ]
+ is_extractable:
+ description:
+ - True if the template or its derivatives are extractable.
+ required: false
+ default: false
+ details:
+ description:
+ - Template details in key/value pairs.
+ required: false
+ default: null
+ bits:
+ description:
+ - 32 or 64 bits support.
+ required: false
+ default: '64'
+ display_text:
+ description:
+ - Display text of the template.
+ required: false
+ default: null
+ state:
+ description:
+ - State of the template.
+ required: false
+ default: 'present'
+ choices: [ 'present', 'absent', 'extracted' ]
+ poll_async:
+ description:
+ - Poll async jobs until job has finished.
+ required: false
+ default: true
+extends_documentation_fragment: cloudstack
+'''
+
+EXAMPLES = '''
+# Register a systemvm template
+- local_action:
+ module: cs_template
+ name: systemvm-vmware-4.5
+ url: "http://packages.shapeblue.com/systemvmtemplate/4.5/systemvm64template-4.5-vmware.ova"
+ hypervisor: VMware
+ format: OVA
+ cross_zones: yes
+ os_type: Debian GNU/Linux 7(64-bit)
+
+# Create a template from a stopped virtual machine's volume
+- local_action:
+ module: cs_template
+ name: debian-base-template
+ vm: debian-base-vm
+ os_type: Debian GNU/Linux 7(64-bit)
+ zone: tokio-ix
+ password_enabled: yes
+ is_public: yes
+
+# Create a template from a virtual machine's root volume snapshot
+- local_action:
+ module: cs_template
+ name: debian-base-template
+ vm: debian-base-vm
+ snapshot: ROOT-233_2015061509114
+ os_type: Debian GNU/Linux 7(64-bit)
+ zone: tokio-ix
+ password_enabled: yes
+ is_public: yes
+
+# Remove a template
+- local_action:
+ module: cs_template
+ name: systemvm-4.2
+ cross_zones: yes
+ state: absent
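+
+# Extract a registered template for download (a minimal sketch; the download
+# URL in the result depends on the cloud, mode defaults to http_download)
+- local_action:
+ module: cs_template
+ name: systemvm-vmware-4.5
+ state: extracted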
+'''
+
+RETURN = '''
+---
+id:
+ description: UUID of the template.
+ returned: success
+ type: string
+ sample: a6f7a5fc-43f8-11e5-a151-feff819cdc9f
+name:
+ description: Name of the template.
+ returned: success
+ type: string
+ sample: Debian 7 64-bit
+display_text:
+ description: Display text of the template.
+ returned: success
+ type: string
+ sample: Debian 7.7 64-bit minimal 2015-03-19
+checksum:
+ description: MD5 checksum of the template.
+ returned: success
+ type: string
+ sample: 0b31bccccb048d20b551f70830bb7ad0
+status:
+ description: Status of the template.
+ returned: success
+ type: string
+ sample: Download Complete
+is_ready:
+ description: True if the template is ready to be deployed from.
+ returned: success
+ type: boolean
+ sample: true
+is_public:
+ description: True if the template is public.
+ returned: success
+ type: boolean
+ sample: true
+is_featured:
+ description: True if the template is featured.
+ returned: success
+ type: boolean
+ sample: true
+is_extractable:
+ description: True if the template is extractable.
+ returned: success
+ type: boolean
+ sample: true
+format:
+ description: Format of the template.
+ returned: success
+ type: string
+ sample: OVA
+os_type:
+ description: Type of the OS.
+ returned: success
+ type: string
+ sample: CentOS 6.5 (64-bit)
+password_enabled:
+ description: True if the reset password feature is enabled, false otherwise.
+ returned: success
+ type: boolean
+ sample: false
+sshkey_enabled:
+ description: True if the template is SSH key enabled, false otherwise.
+ returned: success
+ type: boolean
+ sample: false
+cross_zones:
+ description: True if the template is managed across all zones, false otherwise.
+ returned: success
+ type: boolean
+ sample: false
+template_type:
+ description: Type of the template.
+ returned: success
+ type: string
+ sample: USER
+created:
+ description: Date of registering.
+ returned: success
+ type: string
+ sample: 2015-03-29T14:57:06+0200
+template_tag:
+ description: Template tag related to this template.
+ returned: success
+ type: string
+ sample: special
+hypervisor:
+ description: Hypervisor related to this template.
+ returned: success
+ type: string
+ sample: VMware
+mode:
+ description: Mode of extraction.
+ returned: success
+ type: string
+ sample: http_download
+state:
+ description: State of the extracted template.
+ returned: success
+ type: string
+ sample: DOWNLOAD_URL_CREATED
+url:
+ description: URL the template is extracted to.
+ returned: success
+ type: string
+ sample: "http://1.2.3.4/userdata/eb307f13-4aca-45e8-b157-a414a14e6b04.ova"
+tags:
+ description: List of resource tags associated with the template.
+ returned: success
+ type: dict
+ sample: '[ { "key": "foo", "value": "bar" } ]'
+zone:
+ description: Name of zone the template is registered in.
+ returned: success
+ type: string
+ sample: zuerich
+domain:
+ description: Domain the template is related to.
+ returned: success
+ type: string
+ sample: example domain
+account:
+ description: Account the template is related to.
+ returned: success
+ type: string
+ sample: example account
+project:
+ description: Name of project the template is related to.
+ returned: success
+ type: string
+ sample: Production
+'''
+
+# import cloudstack common
+from ansible.module_utils.cloudstack import *
+
+
+class AnsibleCloudStackTemplate(AnsibleCloudStack):
+
+ def __init__(self, module):
+ super(AnsibleCloudStackTemplate, self).__init__(module)
+ self.returns = {
+ 'checksum': 'checksum',
+ 'status': 'status',
+ 'isready': 'is_ready',
+ 'templatetag': 'template_tag',
+ 'sshkeyenabled': 'sshkey_enabled',
+ 'passwordenabled': 'password_enabled',
+ 'templatetype': 'template_type',
+ 'ostypename': 'os_type',
+ 'crossZones': 'cross_zones',
+ 'isextractable': 'is_extractable',
+ 'isfeatured': 'is_featured',
+ 'ispublic': 'is_public',
+ 'format': 'format',
+ 'hypervisor': 'hypervisor',
+ 'url': 'url',
+ 'extractMode': 'mode',
+ 'state': 'state',
+ }
+
+
+ def _get_args(self):
+ args = {}
+ args['name'] = self.module.params.get('name')
+ args['displaytext'] = self.get_or_fallback('display_text', 'name')
+ args['bits'] = self.module.params.get('bits')
+ args['isdynamicallyscalable'] = self.module.params.get('is_dynamically_scalable')
+ args['isextractable'] = self.module.params.get('is_extractable')
+ args['isfeatured'] = self.module.params.get('is_featured')
+ args['ispublic'] = self.module.params.get('is_public')
+ args['passwordenabled'] = self.module.params.get('password_enabled')
+ args['requireshvm'] = self.module.params.get('requires_hvm')
+ args['templatetag'] = self.module.params.get('template_tag')
+ args['ostypeid'] = self.get_os_type(key='id')
+
+ if not args['ostypeid']:
+ self.module.fail_json(msg="Missing required arguments: os_type")
+
+ return args
+
+
+ def get_root_volume(self, key=None):
+ args = {}
+ args['account'] = self.get_account(key='name')
+ args['domainid'] = self.get_domain(key='id')
+ args['projectid'] = self.get_project(key='id')
+ args['virtualmachineid'] = self.get_vm(key='id')
+ args['type'] = "ROOT"
+
+ volumes = self.cs.listVolumes(**args)
+ if volumes:
+ return self._get_by_key(key, volumes['volume'][0])
+ self.module.fail_json(msg="Root volume for '%s' not found" % self.get_vm('name'))
+
+
+ def get_snapshot(self, key=None):
+ snapshot = self.module.params.get('snapshot')
+ if not snapshot:
+ return None
+
+ args = {}
+ args['account'] = self.get_account(key='name')
+ args['domainid'] = self.get_domain(key='id')
+ args['projectid'] = self.get_project(key='id')
+ args['volumeid'] = self.get_root_volume('id')
+ snapshots = self.cs.listSnapshots(**args)
+ if snapshots:
+ for s in snapshots['snapshot']:
+ if snapshot in [ s['name'], s['id'] ]:
+ return self._get_by_key(key, s)
+ self.module.fail_json(msg="Snapshot '%s' not found" % snapshot)
+
+
+ def create_template(self):
+ template = self.get_template()
+ if not template:
+ self.result['changed'] = True
+
+ args = self._get_args()
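+ # Prefer a snapshot as the template source if one is given; otherwise fall back to the VM's ROOT volume.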
+ snapshot_id = self.get_snapshot(key='id')
+ if snapshot_id:
+ args['snapshotid'] = snapshot_id
+ else:
+ args['volumeid'] = self.get_root_volume('id')
+
+ if not self.module.check_mode:
+ template = self.cs.createTemplate(**args)
+
+ if 'errortext' in template:
+ self.module.fail_json(msg="Failed: '%s'" % template['errortext'])
+
+ poll_async = self.module.params.get('poll_async')
+ if poll_async:
+ template = self.poll_job(template, 'template')
+ return template
+
+
+ def register_template(self):
+ required_params = [
+ 'format',
+ 'url',
+ 'hypervisor',
+ ]
+ self.module.fail_on_missing_params(required_params=required_params)
+ template = self.get_template()
+ if not template:
+ self.result['changed'] = True
+ args = self._get_args()
+ args['url'] = self.module.params.get('url')
+ args['format'] = self.module.params.get('format')
+ args['checksum'] = self.module.params.get('checksum')
+ args['isextractable'] = self.module.params.get('is_extractable')
+ args['isrouting'] = self.module.params.get('is_routing')
+ args['sshkeyenabled'] = self.module.params.get('sshkey_enabled')
+ args['hypervisor'] = self.get_hypervisor()
+ args['domainid'] = self.get_domain(key='id')
+ args['account'] = self.get_account(key='name')
+ args['projectid'] = self.get_project(key='id')
+
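+ # A zoneid of -1 tells CloudStack to register the template across all zones.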
+ if not self.module.params.get('cross_zones'):
+ args['zoneid'] = self.get_zone(key='id')
+ else:
+ args['zoneid'] = -1
+
+ if not self.module.check_mode:
+ res = self.cs.registerTemplate(**args)
+ if 'errortext' in res:
+ self.module.fail_json(msg="Failed: '%s'" % res['errortext'])
+ template = res['template']
+ return template
+
+
+ def get_template(self):
+ args = {}
+ args['isready'] = self.module.params.get('is_ready')
+ args['templatefilter'] = self.module.params.get('template_filter')
+ args['domainid'] = self.get_domain(key='id')
+ args['account'] = self.get_account(key='name')
+ args['projectid'] = self.get_project(key='id')
+
+ if not self.module.params.get('cross_zones'):
+ args['zoneid'] = self.get_zone(key='id')
+
+ # if checksum is set, we only look on that.
+ checksum = self.module.params.get('checksum')
+ if not checksum:
+ args['name'] = self.module.params.get('name')
+
+ templates = self.cs.listTemplates(**args)
+ if templates:
+ # if checksum is set, we only look on that.
+ if not checksum:
+ return templates['template'][0]
+ else:
+ for i in templates['template']:
+ if 'checksum' in i and i['checksum'] == checksum:
+ return i
+ return None
+
+
+ def extract_template(self):
+ template = self.get_template()
+ if not template:
+ self.module.fail_json(msg="Failed: template not found")
+
+ args = {}
+ args['id'] = template['id']
+ args['url'] = self.module.params.get('url')
+ args['mode'] = self.module.params.get('mode')
+ args['zoneid'] = self.get_zone(key='id')
+
+ self.result['changed'] = True
+
+ if not self.module.check_mode:
+ template = self.cs.extractTemplate(**args)
+
+ if 'errortext' in template:
+ self.module.fail_json(msg="Failed: '%s'" % template['errortext'])
+
+ poll_async = self.module.params.get('poll_async')
+ if poll_async:
+ template = self.poll_job(template, 'template')
+ return template
+
+
+ def remove_template(self):
+ template = self.get_template()
+ if template:
+ self.result['changed'] = True
+
+ args = {}
+ args['id'] = template['id']
+
+ if not self.module.params.get('cross_zones'):
+ args['zoneid'] = self.get_zone(key='id')
+
+ if not self.module.check_mode:
+ res = self.cs.deleteTemplate(**args)
+
+ if 'errortext' in res:
+ self.module.fail_json(msg="Failed: '%s'" % res['errortext'])
+
+ poll_async = self.module.params.get('poll_async')
+ if poll_async:
+ res = self.poll_job(res, 'template')
+ return template
+
+
+
+def main():
+ argument_spec = cs_argument_spec()
+ argument_spec.update(dict(
+ name = dict(required=True),
+ display_text = dict(default=None),
+ url = dict(default=None),
+ vm = dict(default=None),
+ snapshot = dict(default=None),
+ os_type = dict(default=None),
+ is_ready = dict(type='bool', default=False),
+ is_public = dict(type='bool', default=True),
+ is_featured = dict(type='bool', default=False),
+ is_dynamically_scalable = dict(type='bool', default=False),
+ is_extractable = dict(type='bool', default=False),
+ is_routing = dict(type='bool', default=False),
+ checksum = dict(default=None),
+ template_filter = dict(default='self', choices=['featured', 'self', 'selfexecutable', 'sharedexecutable', 'executable', 'community']),
+ hypervisor = dict(choices=CS_HYPERVISORS, default=None),
+ requires_hvm = dict(type='bool', default=False),
+ password_enabled = dict(type='bool', default=False),
+ template_tag = dict(default=None),
+ sshkey_enabled = dict(type='bool', default=False),
+ format = dict(choices=['QCOW2', 'RAW', 'VHD', 'OVA'], default=None),
+ details = dict(default=None),
+ bits = dict(type='int', choices=[ 32, 64 ], default=64),
+ state = dict(choices=['present', 'absent', 'extracted'], default='present'),
+ cross_zones = dict(type='bool', default=False),
+ mode = dict(choices=['http_download', 'ftp_upload'], default='http_download'),
+ zone = dict(default=None),
+ domain = dict(default=None),
+ account = dict(default=None),
+ project = dict(default=None),
+ poll_async = dict(type='bool', default=True),
+ ))
+
+ module = AnsibleModule(
+ argument_spec=argument_spec,
+ required_together=cs_required_together(),
+ mutually_exclusive = (
+ ['url', 'vm'],
+ ['zone', 'cross_zones'],
+ ),
+ supports_check_mode=True
+ )
+
+ try:
+ acs_tpl = AnsibleCloudStackTemplate(module)
+
+ state = module.params.get('state')
+ if state in ['absent']:
+ tpl = acs_tpl.remove_template()
+
+ elif state in ['extracted']:
+ tpl = acs_tpl.extract_template()
+
+ else:
+ if module.params.get('url'):
+ tpl = acs_tpl.register_template()
+ elif module.params.get('vm'):
+ tpl = acs_tpl.create_template()
+ else:
+ module.fail_json(msg="one of the following is required on state=present: url, vm")
+
+ result = acs_tpl.get_result(tpl)
+
+ except CloudStackException as e:
+ module.fail_json(msg='CloudStackException: %s' % str(e))
+
+ module.exit_json(**result)
+
+# import module snippets
+from ansible.module_utils.basic import *
+if __name__ == '__main__':
+ main()
diff --git a/lib/ansible/modules/extras/cloud/cloudstack/cs_user.py b/lib/ansible/modules/extras/cloud/cloudstack/cs_user.py
new file mode 100644
index 0000000000..bee4653d16
--- /dev/null
+++ b/lib/ansible/modules/extras/cloud/cloudstack/cs_user.py
@@ -0,0 +1,451 @@
+#!/usr/bin/python
+# -*- coding: utf-8 -*-
+#
+# (c) 2015, René Moser <mail@renemoser.net>
+#
+# This file is part of Ansible
+#
+# Ansible is free software: you can redistribute it and/or modify
+# it under the terms of the GNU General Public License as published by
+# the Free Software Foundation, either version 3 of the License, or
+# (at your option) any later version.
+#
+# Ansible is distributed in the hope that it will be useful,
+# but WITHOUT ANY WARRANTY; without even the implied warranty of
+# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+# GNU General Public License for more details.
+#
+# You should have received a copy of the GNU General Public License
+# along with Ansible. If not, see <http://www.gnu.org/licenses/>.
+
+DOCUMENTATION = '''
+---
+module: cs_user
+short_description: Manages users on Apache CloudStack based clouds.
+description:
+ - Create, update, disable, lock, enable and remove users.
+version_added: '2.0'
+author: "René Moser (@resmo)"
+options:
+ username:
+ description:
+ - Username of the user.
+ required: true
+ account:
+ description:
+ - Account the user will be created under.
+ - Required on C(state=present).
+ required: false
+ default: null
+ password:
+ description:
+ - Password of the user to be created.
+ - Required on C(state=present).
+ - Only considered on creation and will not be updated if user exists.
+ required: false
+ default: null
+ first_name:
+ description:
+ - First name of the user.
+ - Required on C(state=present).
+ required: false
+ default: null
+ last_name:
+ description:
+ - Last name of the user.
+ - Required on C(state=present).
+ required: false
+ default: null
+ email:
+ description:
+ - Email of the user.
+ - Required on C(state=present).
+ required: false
+ default: null
+ timezone:
+ description:
+ - Timezone of the user.
+ required: false
+ default: null
+ domain:
+ description:
+ - Domain the user is related to.
+ required: false
+ default: 'ROOT'
+ state:
+ description:
+ - State of the user.
+ - C(unlocked) is an alias for C(enabled).
+ required: false
+ default: 'present'
+ choices: [ 'present', 'absent', 'enabled', 'disabled', 'locked', 'unlocked' ]
+ poll_async:
+ description:
+ - Poll async jobs until job has finished.
+ required: false
+ default: true
+extends_documentation_fragment: cloudstack
+'''
+
+EXAMPLES = '''
+# Create a user in domain 'CUSTOMERS'
+local_action:
+ module: cs_user
+ account: developers
+ username: johndoe
+ password: S3Cur3
+ last_name: Doe
+ first_name: John
+ email: john.doe@example.com
+ domain: CUSTOMERS
+
+# Lock an existing user in domain 'CUSTOMERS'
+local_action:
+ module: cs_user
+ username: johndoe
+ domain: CUSTOMERS
+ state: locked
+
+# Disable an existing user in domain 'CUSTOMERS'
+local_action:
+ module: cs_user
+ username: johndoe
+ domain: CUSTOMERS
+ state: disabled
+
+# Enable/unlock an existing user in domain 'CUSTOMERS'
+local_action:
+ module: cs_user
+ username: johndoe
+ domain: CUSTOMERS
+ state: enabled
+
+# Remove a user in domain 'CUSTOMERS'
+local_action:
+ module: cs_user
+ username: customer_xy
+ domain: CUSTOMERS
+ state: absent
+'''
+
+RETURN = '''
+---
+id:
+ description: UUID of the user.
+ returned: success
+ type: string
+ sample: 87b1e0ce-4e01-11e4-bb66-0050569e64b8
+username:
+ description: Username of the user.
+ returned: success
+ type: string
+ sample: johndoe
+first_name:
+ description: First name of the user.
+ returned: success
+ type: string
+ sample: John
+last_name:
+ description: Last name of the user.
+ returned: success
+ type: string
+ sample: Doe
+email:
+ description: Email of the user.
+ returned: success
+ type: string
+ sample: john.doe@example.com
+api_key:
+ description: API key of the user.
+ returned: success
+ type: string
+ sample: JLhcg8VWi8DoFqL2sSLZMXmGojcLnFrOBTipvBHJjySODcV4mCOo29W2duzPv5cALaZnXj5QxDx3xQfaQt3DKg
+api_secret:
+ description: API secret of the user.
+ returned: success
+ type: string
+ sample: FUELo3LB9fa1UopjTLPdqLv_6OXQMJZv9g9N4B_Ao3HFz8d6IGFCV9MbPFNM8mwz00wbMevja1DoUNDvI8C9-g
+account:
+ description: Account name of the user.
+ returned: success
+ type: string
+ sample: developers
+account_type:
+ description: Type of the account.
+ returned: success
+ type: string
+ sample: user
+timezone:
+ description: Timezone of the user.
+ returned: success
+ type: string
+ sample: America/New_York
+created:
+ description: Date the user was created.
+ returned: success
+ type: string
+ sample: 2015-05-03T15:55:08+0200
+state:
+ description: State of the user.
+ returned: success
+ type: string
+ sample: enabled
+domain:
+ description: Domain the user is related to.
+ returned: success
+ type: string
+ sample: ROOT
+'''
+
+# import cloudstack common
+from ansible.module_utils.cloudstack import *
+
+
+class AnsibleCloudStackUser(AnsibleCloudStack):
+
+ def __init__(self, module):
+ super(AnsibleCloudStackUser, self).__init__(module)
+ self.returns = {
+ 'username': 'username',
+ 'firstname': 'first_name',
+ 'lastname': 'last_name',
+ 'email': 'email',
+ 'secretkey': 'api_secret',
+ 'apikey': 'api_key',
+ 'timezone': 'timezone',
+ }
+ self.account_types = {
+ 'user': 0,
+ 'root_admin': 1,
+ 'domain_admin': 2,
+ }
+ self.user = None
+
+
+ def get_account_type(self):
+ account_type = self.module.params.get('account_type')
+ return self.account_types[account_type]
+
+
+ def get_user(self):
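+ # Look the user up by username (case-insensitive) within the domain; the result is cached on the instance.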
+ if not self.user:
+ args = {}
+ args['domainid'] = self.get_domain('id')
+ users = self.cs.listUsers(**args)
+ if users:
+ user_name = self.module.params.get('username')
+ for u in users['user']:
+ if user_name.lower() == u['username'].lower():
+ self.user = u
+ break
+ return self.user
+
+
+ def enable_user(self):
+ user = self.get_user()
+ if not user:
+ user = self.present_user()
+
+ if user['state'].lower() != 'enabled':
+ self.result['changed'] = True
+ args = {}
+ args['id'] = user['id']
+ if not self.module.check_mode:
+ res = self.cs.enableUser(**args)
+ if 'errortext' in res:
+ self.module.fail_json(msg="Failed: '%s'" % res['errortext'])
+ user = res['user']
+ return user
+
+
+ def lock_user(self):
+ user = self.get_user()
+ if not user:
+ user = self.present_user()
+
+ # we need to enable the user to lock it.
+ if user['state'].lower() == 'disabled':
+ user = self.enable_user()
+
+ if user['state'].lower() != 'locked':
+ self.result['changed'] = True
+ args = {}
+ args['id'] = user['id']
+ if not self.module.check_mode:
+ res = self.cs.lockUser(**args)
+
+ if 'errortext' in res:
+ self.module.fail_json(msg="Failed: '%s'" % res['errortext'])
+
+ user = res['user']
+ return user
+
+
+ def disable_user(self):
+ user = self.get_user()
+ if not user:
+ user = self.present_user()
+
+ if user['state'].lower() != 'disabled':
+ self.result['changed'] = True
+ args = {}
+ args['id'] = user['id']
+ if not self.module.check_mode:
+ user = self.cs.disableUser(**args)
+ if 'errortext' in user:
+ self.module.fail_json(msg="Failed: '%s'" % user['errortext'])
+
+ poll_async = self.module.params.get('poll_async')
+ if poll_async:
+ user = self.poll_job(user, 'user')
+ return user
+
+
+ def present_user(self):
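+ # These arguments are documented as required on state=present only, so they are enforced here rather than in the argument spec.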
+ missing_params = []
+ for required_params in [
+ 'account',
+ 'email',
+ 'password',
+ 'first_name',
+ 'last_name',
+ ]:
+ if not self.module.params.get(required_params):
+ missing_params.append(required_params)
+ if missing_params:
+ self.module.fail_json(msg="missing required arguments: %s" % ','.join(missing_params))
+
+ user = self.get_user()
+ if user:
+ user = self._update_user(user)
+ else:
+ user = self._create_user(user)
+ return user
+
+
+ def _create_user(self, user):
+ self.result['changed'] = True
+
+ args = {}
+ args['account'] = self.get_account(key='name')
+ args['domainid'] = self.get_domain('id')
+ args['username'] = self.module.params.get('username')
+ args['password'] = self.module.params.get('password')
+ args['firstname'] = self.module.params.get('first_name')
+ args['lastname'] = self.module.params.get('last_name')
+ args['email'] = self.module.params.get('email')
+ args['timezone'] = self.module.params.get('timezone')
+ if not self.module.check_mode:
+ res = self.cs.createUser(**args)
+ if 'errortext' in res:
+ self.module.fail_json(msg="Failed: '%s'" % res['errortext'])
+ user = res['user']
+ # register user api keys
+ res = self.cs.registerUserKeys(id=user['id'])
+ if 'errortext' in res:
+ self.module.fail_json(msg="Failed: '%s'" % res['errortext'])
+ user.update(res['userkeys'])
+ return user
+
+
+ def _update_user(self, user):
+ args = {}
+ args['id'] = user['id']
+ args['firstname'] = self.module.params.get('first_name')
+ args['lastname'] = self.module.params.get('last_name')
+ args['email'] = self.module.params.get('email')
+ args['timezone'] = self.module.params.get('timezone')
+ if self.has_changed(args, user):
+ self.result['changed'] = True
+ if not self.module.check_mode:
+ res = self.cs.updateUser(**args)
+ if 'errortext' in res:
+ self.module.fail_json(msg="Failed: '%s'" % res['errortext'])
+ user = res['user']
+ # register user api keys
+ if 'apikey' not in user:
+ self.result['changed'] = True
+ if not self.module.check_mode:
+ res = self.cs.registerUserKeys(id=user['id'])
+ if 'errortext' in res:
+ self.module.fail_json(msg="Failed: '%s'" % res['errortext'])
+ user.update(res['userkeys'])
+ return user
+
+
+ def absent_user(self):
+ user = self.get_user()
+ if user:
+ self.result['changed'] = True
+
+ if not self.module.check_mode:
+ res = self.cs.deleteUser(id=user['id'])
+
+ if 'errortext' in res:
+ self.module.fail_json(msg="Failed: '%s'" % res['errortext'])
+ return user
+
+
+ def get_result(self, user):
+ super(AnsibleCloudStackUser, self).get_result(user)
+ if user:
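+ # Map the numeric accounttype returned by the API back to its symbolic name.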
+ if 'accounttype' in user:
+ for key, value in self.account_types.items():
+ if value == user['accounttype']:
+ self.result['account_type'] = key
+ break
+ return self.result
+
+
+def main():
+ argument_spec = cs_argument_spec()
+ argument_spec.update(dict(
+ username = dict(required=True),
+ account = dict(default=None),
+ state = dict(choices=['present', 'absent', 'enabled', 'disabled', 'locked', 'unlocked'], default='present'),
+ domain = dict(default='ROOT'),
+ email = dict(default=None),
+ first_name = dict(default=None),
+ last_name = dict(default=None),
+ password = dict(default=None, no_log=True),
+ timezone = dict(default=None),
+ poll_async = dict(type='bool', default=True),
+ ))
+
+ module = AnsibleModule(
+ argument_spec=argument_spec,
+ required_together=cs_required_together(),
+ supports_check_mode=True
+ )
+
+ try:
+ acs_acc = AnsibleCloudStackUser(module)
+
+ state = module.params.get('state')
+
+ if state in ['absent']:
+ user = acs_acc.absent_user()
+
+ elif state in ['enabled', 'unlocked']:
+ user = acs_acc.enable_user()
+
+ elif state in ['disabled']:
+ user = acs_acc.disable_user()
+
+ elif state in ['locked']:
+ user = acs_acc.lock_user()
+
+ else:
+ user = acs_acc.present_user()
+
+ result = acs_acc.get_result(user)
+
+ except CloudStackException as e:
+ module.fail_json(msg='CloudStackException: %s' % str(e))
+
+ module.exit_json(**result)
+
+# import module snippets
+from ansible.module_utils.basic import *
+if __name__ == '__main__':
+ main()
diff --git a/lib/ansible/modules/extras/cloud/cloudstack/cs_vmsnapshot.py b/lib/ansible/modules/extras/cloud/cloudstack/cs_vmsnapshot.py
new file mode 100644
index 0000000000..29d1914993
--- /dev/null
+++ b/lib/ansible/modules/extras/cloud/cloudstack/cs_vmsnapshot.py
@@ -0,0 +1,300 @@
+#!/usr/bin/python
+# -*- coding: utf-8 -*-
+#
+# (c) 2015, René Moser <mail@renemoser.net>
+#
+# This file is part of Ansible
+#
+# Ansible is free software: you can redistribute it and/or modify
+# it under the terms of the GNU General Public License as published by
+# the Free Software Foundation, either version 3 of the License, or
+# (at your option) any later version.
+#
+# Ansible is distributed in the hope that it will be useful,
+# but WITHOUT ANY WARRANTY; without even the implied warranty of
+# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+# GNU General Public License for more details.
+#
+# You should have received a copy of the GNU General Public License
+# along with Ansible. If not, see <http://www.gnu.org/licenses/>.
+
+DOCUMENTATION = '''
+---
+module: cs_vmsnapshot
+short_description: Manages VM snapshots on Apache CloudStack based clouds.
+description:
+ - Create and remove VM snapshots, and revert a VM to a snapshot.
+version_added: '2.0'
+author: "René Moser (@resmo)"
+options:
+ name:
+ description:
+ - Unique name of the snapshot. In CloudStack terms, the display name.
+ required: true
+ aliases: ['display_name']
+ vm:
+ description:
+ - Name of the virtual machine.
+ required: true
+ description:
+ description:
+ - Description of the snapshot.
+ required: false
+ default: null
+ snapshot_memory:
+ description:
+ - Snapshot the memory of the VM as well if set to C(true).
+ required: false
+ default: false
+ zone:
+ description:
+ - Name of the zone the VM is in. If not set, the default zone is used.
+ required: false
+ default: null
+ project:
+ description:
+ - Name of the project the VM is assigned to.
+ required: false
+ default: null
+ state:
+ description:
+ - State of the snapshot.
+ required: false
+ default: 'present'
+ choices: [ 'present', 'absent', 'revert' ]
+ domain:
+ description:
+ - Domain the VM snapshot is related to.
+ required: false
+ default: null
+ account:
+ description:
+ - Account the VM snapshot is related to.
+ required: false
+ default: null
+ poll_async:
+ description:
+ - Poll async jobs until job has finished.
+ required: false
+ default: true
+extends_documentation_fragment: cloudstack
+'''
+
+EXAMPLES = '''
+# Create a VM snapshot of disk and memory before an upgrade
+- local_action:
+ module: cs_vmsnapshot
+ name: Snapshot before upgrade
+ vm: web-01
+ snapshot_memory: yes
+
+# Revert a VM to a snapshot after a failed upgrade
+- local_action:
+ module: cs_vmsnapshot
+ name: Snapshot before upgrade
+ vm: web-01
+ state: revert
+
+# Remove a VM snapshot after successful upgrade
+- local_action:
+ module: cs_vmsnapshot
+ name: Snapshot before upgrade
+ vm: web-01
+ state: absent
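+
+# Create a disk-only VM snapshot with a description (an illustrative sketch;
+# all values are placeholders)
+- local_action:
+ module: cs_vmsnapshot
+ name: Nightly snapshot
+ vm: web-01
+ description: Created by Ansible before nightly maintenance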
+'''
+
+RETURN = '''
+---
+id:
+ description: UUID of the snapshot.
+ returned: success
+ type: string
+ sample: a6f7a5fc-43f8-11e5-a151-feff819cdc9f
+name:
+ description: Name of the snapshot.
+ returned: success
+ type: string
+ sample: snapshot before update
+display_name:
+ description: Display name of the snapshot.
+ returned: success
+ type: string
+ sample: snapshot before update
+created:
+ description: Date of the snapshot.
+ returned: success
+ type: string
+ sample: 2015-03-29T14:57:06+0200
+current:
+ description: True if the snapshot is current.
+ returned: success
+ type: boolean
+ sample: True
+state:
+ description: State of the VM snapshot.
+ returned: success
+ type: string
+ sample: Allocated
+type:
+ description: Type of the VM snapshot.
+ returned: success
+ type: string
+ sample: DiskAndMemory
+description:
+ description: Description of the VM snapshot.
+ returned: success
+ type: string
+ sample: snapshot brought to you by Ansible
+domain:
+ description: Domain the VM snapshot is related to.
+ returned: success
+ type: string
+ sample: example domain
+account:
+ description: Account the VM snapshot is related to.
+ returned: success
+ type: string
+ sample: example account
+project:
+ description: Name of the project the VM snapshot is related to.
+ returned: success
+ type: string
+ sample: Production
+'''
+
+# import cloudstack common
+from ansible.module_utils.cloudstack import *
+
+
+class AnsibleCloudStackVmSnapshot(AnsibleCloudStack):
+
+ def __init__(self, module):
+ super(AnsibleCloudStackVmSnapshot, self).__init__(module)
+ self.returns = {
+ 'type': 'type',
+ 'current': 'current',
+ }
+
+
+ def get_snapshot(self):
+ args = {}
+ args['virtualmachineid'] = self.get_vm('id')
+ args['account'] = self.get_account('name')
+ args['domainid'] = self.get_domain('id')
+ args['projectid'] = self.get_project('id')
+ args['name'] = self.module.params.get('name')
+
+ snapshots = self.cs.listVMSnapshot(**args)
+ if snapshots:
+ return snapshots['vmSnapshot'][0]
+ return None
+
+
+ def create_snapshot(self):
+ snapshot = self.get_snapshot()
+ if not snapshot:
+ self.result['changed'] = True
+
+ args = {}
+ args['virtualmachineid'] = self.get_vm('id')
+ args['name'] = self.module.params.get('name')
+ args['description'] = self.module.params.get('description')
+ args['snapshotmemory'] = self.module.params.get('snapshot_memory')
+
+ if not self.module.check_mode:
+ res = self.cs.createVMSnapshot(**args)
+
+ if 'errortext' in res:
+ self.module.fail_json(msg="Failed: '%s'" % res['errortext'])
+
+ poll_async = self.module.params.get('poll_async')
+ if res and poll_async:
+ snapshot = self.poll_job(res, 'vmsnapshot')
+
+ return snapshot
+
+
+ def remove_snapshot(self):
+ snapshot = self.get_snapshot()
+ if snapshot:
+ self.result['changed'] = True
+ if not self.module.check_mode:
+ res = self.cs.deleteVMSnapshot(vmsnapshotid=snapshot['id'])
+
+ if 'errortext' in res:
+ self.module.fail_json(msg="Failed: '%s'" % res['errortext'])
+
+ poll_async = self.module.params.get('poll_async')
+ if res and poll_async:
+ res = self.poll_job(res, 'vmsnapshot')
+ return snapshot
+
+
+ def revert_vm_to_snapshot(self):
+ snapshot = self.get_snapshot()
+ if snapshot:
+ self.result['changed'] = True
+
+ if snapshot['state'] != "Ready":
+ self.module.fail_json(msg="snapshot state is '%s', not ready, could not revert VM" % snapshot['state'])
+
+ if not self.module.check_mode:
+ res = self.cs.revertToVMSnapshot(vmsnapshotid=snapshot['id'])
+
+ poll_async = self.module.params.get('poll_async')
+ if res and poll_async:
+ res = self.poll_job(res, 'vmsnapshot')
+ return snapshot
+
+ self.module.fail_json(msg="snapshot not found, could not revert VM")
+
+
+
+def main():
+ argument_spec = cs_argument_spec()
+ argument_spec.update(dict(
+ name = dict(required=True, aliases=['display_name']),
+ vm = dict(required=True),
+ description = dict(default=None),
+ zone = dict(default=None),
+ snapshot_memory = dict(type='bool', default=False),
+ state = dict(choices=['present', 'absent', 'revert'], default='present'),
+ domain = dict(default=None),
+ account = dict(default=None),
+ project = dict(default=None),
+ poll_async = dict(type='bool', default=True),
+ ))
+
+ required_together = cs_required_together()
+
+ module = AnsibleModule(
+ argument_spec=argument_spec,
+ required_together=required_together,
+ supports_check_mode=True
+ )
+
+ try:
+ acs_vmsnapshot = AnsibleCloudStackVmSnapshot(module)
+
+ state = module.params.get('state')
+ if state in ['revert']:
+ snapshot = acs_vmsnapshot.revert_vm_to_snapshot()
+ elif state in ['absent']:
+ snapshot = acs_vmsnapshot.remove_snapshot()
+ else:
+ snapshot = acs_vmsnapshot.create_snapshot()
+
+ result = acs_vmsnapshot.get_result(snapshot)
+
+ except CloudStackException as e:
+ module.fail_json(msg='CloudStackException: %s' % str(e))
+
+ module.exit_json(**result)
+
+# import module snippets
+from ansible.module_utils.basic import *
+if __name__ == '__main__':
+ main()
diff --git a/lib/ansible/modules/extras/cloud/cloudstack/cs_volume.py b/lib/ansible/modules/extras/cloud/cloudstack/cs_volume.py
new file mode 100644
index 0000000000..c2a542741d
--- /dev/null
+++ b/lib/ansible/modules/extras/cloud/cloudstack/cs_volume.py
@@ -0,0 +1,492 @@
+#!/usr/bin/python
+# -*- coding: utf-8 -*-
+#
+# (c) 2015, Jefferson Girão <jefferson@girao.net>
+# (c) 2015, René Moser <mail@renemoser.net>
+#
+# This file is part of Ansible
+#
+# Ansible is free software: you can redistribute it and/or modify
+# it under the terms of the GNU General Public License as published by
+# the Free Software Foundation, either version 3 of the License, or
+# (at your option) any later version.
+#
+# Ansible is distributed in the hope that it will be useful,
+# but WITHOUT ANY WARRANTY; without even the implied warranty of
+# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+# GNU General Public License for more details.
+#
+# You should have received a copy of the GNU General Public License
+# along with Ansible. If not, see <http://www.gnu.org/licenses/>.
+
+DOCUMENTATION = '''
+---
+module: cs_volume
+short_description: Manages volumes on Apache CloudStack based clouds.
+description:
+ - Create, destroy, attach and detach volumes.
+version_added: "2.1"
+author:
+ - "Jefferson Girão (@jeffersongirao)"
+ - "René Moser (@resmo)"
+options:
+ name:
+ description:
+ - Name of the volume.
+ - C(name) can only contain ASCII letters.
+ required: true
+ account:
+ description:
+ - Account the volume is related to.
+ required: false
+ default: null
+ custom_id:
+ description:
+ - Custom id to the resource.
+ - Allowed to Root Admins only.
+ required: false
+ default: null
+ disk_offering:
+ description:
+ - Name of the disk offering to be used.
+ - One of C(disk_offering) or C(snapshot) is required if the volume does not already exist on C(state=present).
+ required: false
+ default: null
+ display_volume:
+ description:
+ - Whether to display the volume to the end user or not.
+ - Allowed to Root Admins only.
+ required: false
+ default: true
+ domain:
+ description:
+ - Name of the domain the volume is to be deployed in.
+ required: false
+ default: null
+ max_iops:
+ description:
+ - Maximum IOPS of the volume.
+ required: false
+ default: null
+ min_iops:
+ description:
+ - Minimum IOPS of the volume.
+ required: false
+ default: null
+ project:
+ description:
+ - Name of the project the volume is to be deployed in.
+ required: false
+ default: null
+ size:
+ description:
+ - Size of the disk in GB.
+ required: false
+ default: null
+ snapshot:
+ description:
+ - The snapshot name for the disk volume.
+ - One of C(disk_offering) or C(snapshot) is required if the volume does not already exist on C(state=present).
+ required: false
+ default: null
+ force:
+ description:
+ - Force removal of the volume even if it is attached to a VM.
+ - Considered on C(state=absent) only.
+ required: false
+ default: false
+ shrink_ok:
+ description:
+ - Whether shrinking of the volume is allowed.
+ required: false
+ default: false
+ vm:
+ description:
+ - Name of the virtual machine to attach the volume to.
+ required: false
+ default: null
+ zone:
+ description:
+ - Name of the zone in which the volume should be deployed.
+ - If not set, default zone is used.
+ required: false
+ default: null
+ state:
+ description:
+ - State of the volume.
+ required: false
+ default: 'present'
+ choices: [ 'present', 'absent', 'attached', 'detached' ]
+ poll_async:
+ description:
+ - Poll async jobs until job has finished.
+ required: false
+ default: true
+extends_documentation_fragment: cloudstack
+'''
+
+EXAMPLES = '''
+# Create volume within project, zone with specified storage options
+- local_action:
+ module: cs_volume
+ name: web-vm-1-volume
+ project: Integration
+ zone: ch-zrh-ix-01
+ disk_offering: PerfPlus Storage
+ size: 20
+
+# Create/attach volume to instance
+- local_action:
+ module: cs_volume
+ name: web-vm-1-volume
+ disk_offering: PerfPlus Storage
+ size: 20
+ vm: web-vm-1
+ state: attached
+
+# Detach volume
+- local_action:
+ module: cs_volume
+ name: web-vm-1-volume
+ state: detached
+
+# Remove volume
+- local_action:
+ module: cs_volume
+ name: web-vm-1-volume
+ state: absent
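+
+# Resize an existing volume (a sketch; assumes the disk offering allows a
+# custom size, and shrinking additionally requires shrink_ok)
+- local_action:
+ module: cs_volume
+ name: web-vm-1-volume
+ disk_offering: PerfPlus Storage
+ size: 30
+ shrink_ok: no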
+'''
+
+RETURN = '''
+id:
+ description: ID of the volume.
+ returned: success
+ type: string
+ sample: a6f7a5fc-43f8-11e5-a151-feff819cdc9f
+name:
+ description: Name of the volume.
+ returned: success
+ type: string
+ sample: web-volume-01
+display_name:
+ description: Display name of the volume.
+ returned: success
+ type: string
+ sample: web-volume-01
+group:
+ description: Group the volume belongs to.
+ returned: success
+ type: string
+ sample: web
+domain:
+ description: Domain the volume belongs to.
+ returned: success
+ type: string
+ sample: example domain
+project:
+ description: Project the volume belongs to.
+ returned: success
+ type: string
+ sample: Production
+zone:
+ description: Name of zone the volume is in.
+ returned: success
+ type: string
+ sample: ch-gva-2
+created:
+ description: Date the volume was created.
+ returned: success
+ type: string
+ sample: 2014-12-01T14:57:57+0100
+attached:
+ description: Date the volume was attached.
+ returned: success
+ type: string
+ sample: 2014-12-01T14:57:57+0100
+type:
+ description: Disk volume type.
+ returned: success
+ type: string
+ sample: DATADISK
+size:
+ description: Size of disk volume.
+ returned: success
+ type: string
+ sample: 20
+vm:
+ description: Name of the VM the volume is attached to (not returned when detached).
+ returned: success
+ type: string
+ sample: web-01
+state:
+ description: State of the volume.
+ returned: success
+ type: string
+ sample: Attached
+device_id:
+ description: ID of the device on the user VM the volume is attached to (not returned when detached).
+ returned: success
+ type: string
+ sample: 1
+'''
+
+# import cloudstack common
+from ansible.module_utils.cloudstack import *
+
+
+class AnsibleCloudStackVolume(AnsibleCloudStack):
+
+ def __init__(self, module):
+ super(AnsibleCloudStackVolume, self).__init__(module)
+ self.returns = {
+ 'group': 'group',
+ 'attached': 'attached',
+ 'vmname': 'vm',
+ 'deviceid': 'device_id',
+ 'type': 'type',
+ 'size': 'size',
+ }
+ self.volume = None
+
+ #TODO implement in cloudstack utils
+ def get_disk_offering(self, key=None):
+ disk_offering = self.module.params.get('disk_offering')
+ if not disk_offering:
+ return None
+
+ # Do not add domain filter for disk offering listing.
+ disk_offerings = self.cs.listDiskOfferings()
+ if disk_offerings:
+ for d in disk_offerings['diskoffering']:
+ if disk_offering in [d['displaytext'], d['name'], d['id']]:
+ return self._get_by_key(key, d)
+ self.module.fail_json(msg="Disk offering '%s' not found" % disk_offering)
+
+
+ def get_volume(self):
+ if not self.volume:
+ args = {}
+ args['account'] = self.get_account(key='name')
+ args['domainid'] = self.get_domain(key='id')
+ args['projectid'] = self.get_project(key='id')
+ args['zoneid'] = self.get_zone(key='id')
+ args['displayvolume'] = self.module.params.get('display_volume')
+ args['type'] = 'DATADISK'
+
+ volumes = self.cs.listVolumes(**args)
+ if volumes:
+ volume_name = self.module.params.get('name')
+ for v in volumes['volume']:
+ if volume_name.lower() == v['name'].lower():
+ self.volume = v
+ break
+ return self.volume
+
+
+ def get_snapshot(self, key=None):
+ snapshot = self.module.params.get('snapshot')
+ if not snapshot:
+ return None
+
+ args = {}
+ args['name'] = snapshot
+ args['account'] = self.get_account('name')
+ args['domainid'] = self.get_domain('id')
+ args['projectid'] = self.get_project('id')
+
+ snapshots = self.cs.listSnapshots(**args)
+ if snapshots:
+ return self._get_by_key(key, snapshots['snapshot'][0])
+ self.module.fail_json(msg="Snapshot with name %s not found" % snapshot)
+
+
+ def present_volume(self):
+ volume = self.get_volume()
+ if volume:
+ volume = self.update_volume(volume)
+ else:
+ disk_offering_id = self.get_disk_offering(key='id')
+ snapshot_id = self.get_snapshot(key='id')
+
+ if not disk_offering_id and not snapshot_id:
+ self.module.fail_json(msg="Required one of: disk_offering, snapshot")
+
+ self.result['changed'] = True
+
+ args = {}
+ args['name'] = self.module.params.get('name')
+ args['account'] = self.get_account(key='name')
+ args['domainid'] = self.get_domain(key='id')
+ args['diskofferingid'] = disk_offering_id
+ args['displayvolume'] = self.module.params.get('display_volume')
+ args['maxiops'] = self.module.params.get('max_iops')
+ args['miniops'] = self.module.params.get('min_iops')
+ args['projectid'] = self.get_project(key='id')
+ args['size'] = self.module.params.get('size')
+ args['snapshotid'] = snapshot_id
+ args['zoneid'] = self.get_zone(key='id')
+
+ if not self.module.check_mode:
+ res = self.cs.createVolume(**args)
+ if 'errortext' in res:
+ self.module.fail_json(msg="Failed: '%s'" % res['errortext'])
+ poll_async = self.module.params.get('poll_async')
+ if poll_async:
+ volume = self.poll_job(res, 'volume')
+ return volume
+
+
+ def attached_volume(self):
+ volume = self.present_volume()
+
+ if volume:
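+ # If the volume is attached to a different VM, detach it first so it can be re-attached.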
+ if volume.get('virtualmachineid') != self.get_vm(key='id'):
+ self.result['changed'] = True
+
+ if not self.module.check_mode:
+ volume = self.detached_volume()
+
+ if 'attached' not in volume:
+ self.result['changed'] = True
+
+ args = {}
+ args['id'] = volume['id']
+ args['virtualmachineid'] = self.get_vm(key='id')
+ args['deviceid'] = self.module.params.get('device_id')
+
+ if not self.module.check_mode:
+ res = self.cs.attachVolume(**args)
+ if 'errortext' in res:
+ self.module.fail_json(msg="Failed: '%s'" % res['errortext'])
+ poll_async = self.module.params.get('poll_async')
+ if poll_async:
+ volume = self.poll_job(res, 'volume')
+ return volume
+
+
+ def detached_volume(self):
+ volume = self.present_volume()
+
+ if volume:
+ if 'attached' not in volume:
+ return volume
+
+ self.result['changed'] = True
+
+ if not self.module.check_mode:
+ res = self.cs.detachVolume(id=volume['id'])
+ if 'errortext' in res:
+ self.module.fail_json(msg="Failed: '%s'" % res['errortext'])
+ poll_async = self.module.params.get('poll_async')
+ if poll_async:
+ volume = self.poll_job(res, 'volume')
+ return volume
+
+
+ def absent_volume(self):
+ volume = self.get_volume()
+
+ if volume:
+ if 'attached' in volume and not self.module.params.get('force'):
+ self.module.fail_json(msg="Volume '%s' is attached, use force=true for detaching and removing the volume." % volume.get('name'))
+
+ self.result['changed'] = True
+ if not self.module.check_mode:
+ volume = self.detached_volume()
+
+ res = self.cs.deleteVolume(id=volume['id'])
+ if 'errortext' in res:
+ self.module.fail_json(msg="Failed: '%s'" % res['errortext'])
+ poll_async = self.module.params.get('poll_async')
+ if poll_async:
+ res = self.poll_job(res, 'volume')
+
+ return volume
+
+
+ def update_volume(self, volume):
+ args_resize = {}
+ args_resize['id'] = volume['id']
+ args_resize['diskofferingid'] = self.get_disk_offering(key='id')
+ args_resize['maxiops'] = self.module.params.get('max_iops')
+ args_resize['miniops'] = self.module.params.get('min_iops')
+ args_resize['size'] = self.module.params.get('size')
+
+ # convert the size from bytes to gigabytes to compare with the module arguments
+ volume_copy = volume.copy()
+ volume_copy['size'] = volume_copy['size'] / (2**30)
+
+ if self.has_changed(args_resize, volume_copy):
+
+ self.result['changed'] = True
+ if not self.module.check_mode:
+ args_resize['shrinkok'] = self.module.params.get('shrink_ok')
+ res = self.cs.resizeVolume(**args_resize)
+ if 'errortext' in res:
+ self.module.fail_json(msg="Failed: '%s'" % res['errortext'])
+ poll_async = self.module.params.get('poll_async')
+ if poll_async:
+ volume = self.poll_job(res, 'volume')
+ self.volume = volume
+
+ return volume
+
+
+def main():
+ argument_spec = cs_argument_spec()
+ argument_spec.update(dict(
+ name = dict(required=True),
+ disk_offering = dict(default=None),
+ display_volume = dict(type='bool', default=None),
+ max_iops = dict(type='int', default=None),
+ min_iops = dict(type='int', default=None),
+ size = dict(type='int', default=None),
+ snapshot = dict(default=None),
+ vm = dict(default=None),
+ device_id = dict(type='int', default=None),
+ custom_id = dict(default=None),
+ force = dict(type='bool', default=False),
+ shrink_ok = dict(type='bool', default=False),
+ state = dict(choices=['present', 'absent', 'attached', 'detached'], default='present'),
+ zone = dict(default=None),
+ domain = dict(default=None),
+ account = dict(default=None),
+ project = dict(default=None),
+ poll_async = dict(type='bool', default=True),
+ ))
+
+ module = AnsibleModule(
+ argument_spec=argument_spec,
+ required_together=cs_required_together(),
+ mutually_exclusive = (
+ ['snapshot', 'disk_offering'],
+ ),
+ supports_check_mode=True
+ )
+
+ try:
+ acs_vol = AnsibleCloudStackVolume(module)
+
+ state = module.params.get('state')
+
+ if state in ['absent']:
+ volume = acs_vol.absent_volume()
+ elif state in ['attached']:
+ volume = acs_vol.attached_volume()
+ elif state in ['detached']:
+ volume = acs_vol.detached_volume()
+ else:
+ volume = acs_vol.present_volume()
+
+ result = acs_vol.get_result(volume)
+
+ except CloudStackException as e:
+ module.fail_json(msg='CloudStackException: %s' % str(e))
+
+ module.exit_json(**result)
+
+# import module snippets
+from ansible.module_utils.basic import *
+
+if __name__ == '__main__':
+ main()
diff --git a/lib/ansible/modules/extras/cloud/cloudstack/cs_zone.py b/lib/ansible/modules/extras/cloud/cloudstack/cs_zone.py
new file mode 100644
index 0000000000..2a343e0b97
--- /dev/null
+++ b/lib/ansible/modules/extras/cloud/cloudstack/cs_zone.py
@@ -0,0 +1,402 @@
+#!/usr/bin/python
+# -*- coding: utf-8 -*-
+#
+# (c) 2016, René Moser <mail@renemoser.net>
+#
+# This file is part of Ansible
+#
+# Ansible is free software: you can redistribute it and/or modify
+# it under the terms of the GNU General Public License as published by
+# the Free Software Foundation, either version 3 of the License, or
+# (at your option) any later version.
+#
+# Ansible is distributed in the hope that it will be useful,
+# but WITHOUT ANY WARRANTY; without even the implied warranty of
+# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+# GNU General Public License for more details.
+#
+# You should have received a copy of the GNU General Public License
+# along with Ansible. If not, see <http://www.gnu.org/licenses/>.
+
+DOCUMENTATION = '''
+---
+module: cs_zone
+short_description: Manages zones on Apache CloudStack based clouds.
+description:
+ - Create, update and remove zones.
+version_added: "2.1"
+author: "René Moser (@resmo)"
+options:
+ name:
+ description:
+ - Name of the zone.
+ required: true
+ id:
+ description:
+ - UUID of the existing zone.
+ default: null
+ required: false
+ state:
+ description:
+ - State of the zone.
+ required: false
+ default: 'present'
+ choices: [ 'present', 'enabled', 'disabled', 'absent' ]
+ domain:
+ description:
+ - Domain the zone is related to.
+ - Zone is a public zone if not set.
+ required: false
+ default: null
+ network_domain:
+ description:
+ - Network domain for the zone.
+ required: false
+ default: null
+ network_type:
+ description:
+ - Network type of the zone.
+ required: false
+ default: basic
+ choices: [ 'basic', 'advanced' ]
+ dns1:
+ description:
+ - First DNS for the zone.
+ - Required if C(state=present).
+ required: false
+ default: null
+ dns2:
+ description:
+ - Second DNS for the zone.
+ required: false
+ default: null
+ internal_dns1:
+ description:
+ - First internal DNS for the zone.
+ - If not set, C(dns1) will be used on C(state=present).
+ required: false
+ default: null
+ internal_dns2:
+ description:
+ - Second internal DNS for the zone.
+ required: false
+ default: null
+ dns1_ipv6:
+ description:
+ - First DNS for IPv6 for the zone.
+ required: false
+ default: null
+ dns2_ipv6:
+ description:
+ - Second DNS for IPv6 for the zone.
+ required: false
+ default: null
+ guest_cidr_address:
+ description:
+ - Guest CIDR address for the zone.
+ required: false
+ default: null
+ dhcp_provider:
+ description:
+ - DHCP provider for the Zone.
+ required: false
+ default: null
+extends_documentation_fragment: cloudstack
+'''
+
+EXAMPLES = '''
+# Ensure a zone is present
+- local_action:
+ module: cs_zone
+ name: ch-zrh-ix-01
+ dns1: 8.8.8.8
+ dns2: 8.8.4.4
+ network_type: basic
+
+# Ensure a zone is disabled
+- local_action:
+ module: cs_zone
+ name: ch-zrh-ix-01
+ state: disabled
+
+# Ensure a zone is enabled
+- local_action:
+ module: cs_zone
+ name: ch-zrh-ix-01
+ state: enabled
+
+# Ensure a zone is absent
+- local_action:
+ module: cs_zone
+ name: ch-zrh-ix-01
+ state: absent
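+
+# Ensure an advanced zone is present (an illustrative sketch; DNS and domain
+# values are placeholders)
+- local_action:
+ module: cs_zone
+ name: ch-zrh-ix-02
+ dns1: 8.8.8.8
+ internal_dns1: 10.10.10.10
+ network_type: advanced
+ network_domain: example.com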
+'''
+
+RETURN = '''
+---
+id:
+ description: UUID of the zone.
+ returned: success
+ type: string
+ sample: 04589590-ac63-4ffc-93f5-b698b8ac38b6
+name:
+ description: Name of the zone.
+ returned: success
+ type: string
+ sample: zone01
+dns1:
+ description: First DNS for the zone.
+ returned: success
+ type: string
+ sample: 8.8.8.8
+dns2:
+ description: Second DNS for the zone.
+ returned: success
+ type: string
+ sample: 8.8.4.4
+internal_dns1:
+ description: First internal DNS for the zone.
+ returned: success
+ type: string
+ sample: 8.8.8.8
+internal_dns2:
+ description: Second internal DNS for the zone.
+ returned: success
+ type: string
+ sample: 8.8.4.4
+dns1_ipv6:
+ description: First IPv6 DNS for the zone.
+ returned: success
+ type: string
+ sample: "2001:4860:4860::8888"
+dns2_ipv6:
+ description: Second IPv6 DNS for the zone.
+ returned: success
+ type: string
+ sample: "2001:4860:4860::8844"
+allocation_state:
+ description: State of the zone.
+ returned: success
+ type: string
+ sample: Enabled
+domain:
+ description: Domain the zone is related to.
+ returned: success
+ type: string
+ sample: ROOT
+network_domain:
+ description: Network domain for the zone.
+ returned: success
+ type: string
+ sample: example.com
+network_type:
+ description: Network type for the zone.
+ returned: success
+ type: string
+ sample: basic
+local_storage_enabled:
+ description: Local storage offering enabled.
+ returned: success
+ type: bool
+ sample: false
+securitygroups_enabled:
+ description: Security groups support is enabled.
+ returned: success
+ type: bool
+ sample: false
+guest_cidr_address:
+ description: Guest CIDR address for the zone.
+ returned: success
+ type: string
+ sample: 10.1.1.0/24
+dhcp_provider:
+ description: DHCP provider for the zone.
+ returned: success
+ type: string
+ sample: VirtualRouter
+zone_token:
+ description: Zone token.
+ returned: success
+ type: string
+ sample: ccb0a60c-79c8-3230-ab8b-8bdbe8c45bb7
+tags:
+ description: List of resource tags associated with the zone.
+ returned: success
+ type: dict
+ sample: [ { "key": "foo", "value": "bar" } ]
+'''
+
+# import cloudstack common
+from ansible.module_utils.cloudstack import *
+
+class AnsibleCloudStackZone(AnsibleCloudStack):
+
+ def __init__(self, module):
+ super(AnsibleCloudStackZone, self).__init__(module)
+ self.returns = {
+ 'dns1': 'dns1',
+ 'dns2': 'dns2',
+ 'internaldns1': 'internal_dns1',
+ 'internaldns2': 'internal_dns2',
+ 'ipv6dns1': 'dns1_ipv6',
+ 'ipv6dns2': 'dns2_ipv6',
+ 'domain': 'network_domain',
+ 'networktype': 'network_type',
+ 'securitygroupsenabled': 'securitygroups_enabled',
+ 'localstorageenabled': 'local_storage_enabled',
+ 'guestcidraddress': 'guest_cidr_address',
+ 'dhcpprovider': 'dhcp_provider',
+ 'allocationstate': 'allocation_state',
+ 'zonetoken': 'zone_token',
+ }
+ self.zone = None
+
+
+ def _get_common_zone_args(self):
+ args = {}
+ args['name'] = self.module.params.get('name')
+ args['dns1'] = self.module.params.get('dns1')
+ args['dns2'] = self.module.params.get('dns2')
+ args['internaldns1'] = self.get_or_fallback('internal_dns1', 'dns1')
+ args['internaldns2'] = self.get_or_fallback('internal_dns2', 'dns2')
+ args['ipv6dns1'] = self.module.params.get('dns1_ipv6')
+ args['ipv6dns2'] = self.module.params.get('dns2_ipv6')
+ args['networktype'] = self.module.params.get('network_type')
+ args['domain'] = self.module.params.get('network_domain')
+ args['localstorageenabled'] = self.module.params.get('local_storage_enabled')
+ args['guestcidraddress'] = self.module.params.get('guest_cidr_address')
+ args['dhcpprovider'] = self.module.params.get('dhcp_provider')
+ state = self.module.params.get('state')
+ if state in ['enabled', 'disabled']:
+ args['allocationstate'] = state.capitalize()
+ return args
+
+
+ def get_zone(self):
+ if not self.zone:
+ args = {}
+
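+ # Prefer a lookup by uuid when one is given; otherwise fall back to a lookup by name.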
+ uuid = self.module.params.get('id')
+ if uuid:
+ args['id'] = uuid
+ zones = self.cs.listZones(**args)
+ if zones:
+ self.zone = zones['zone'][0]
+ return self.zone
+
+ args['name'] = self.module.params.get('name')
+ zones = self.cs.listZones(**args)
+ if zones:
+ self.zone = zones['zone'][0]
+ return self.zone
+
+
+ def present_zone(self):
+ zone = self.get_zone()
+ if zone:
+ zone = self._update_zone()
+ else:
+ zone = self._create_zone()
+ return zone
+
+
+ def _create_zone(self):
+ required_params = [
+ 'dns1',
+ ]
+ self.module.fail_on_missing_params(required_params=required_params)
+
+ self.result['changed'] = True
+
+ args = self._get_common_zone_args()
+ args['domainid'] = self.get_domain(key='id')
+ args['securitygroupenabled'] = self.module.params.get('securitygroups_enabled')
+
+ zone = None
+ if not self.module.check_mode:
+ res = self.cs.createZone(**args)
+ if 'errortext' in res:
+ self.module.fail_json(msg="Failed: '%s'" % res['errortext'])
+ zone = res['zone']
+ return zone
+
+
+ def _update_zone(self):
+ zone = self.get_zone()
+
+ args = self._get_common_zone_args()
+ args['id'] = zone['id']
+
+ if self.has_changed(args, zone):
+ self.result['changed'] = True
+
+ if not self.module.check_mode:
+ res = self.cs.updateZone(**args)
+ if 'errortext' in res:
+ self.module.fail_json(msg="Failed: '%s'" % res['errortext'])
+ zone = res['zone']
+ return zone
+
+
+ def absent_zone(self):
+ zone = self.get_zone()
+ if zone:
+ self.result['changed'] = True
+
+ args = {}
+ args['id'] = zone['id']
+
+ if not self.module.check_mode:
+ res = self.cs.deleteZone(**args)
+ if 'errortext' in res:
+ self.module.fail_json(msg="Failed: '%s'" % res['errortext'])
+ return zone
+
+
+def main():
+ argument_spec = cs_argument_spec()
+ argument_spec.update(dict(
+ id = dict(default=None),
+ name = dict(required=True),
+ dns1 = dict(default=None),
+ dns2 = dict(default=None),
+ internal_dns1 = dict(default=None),
+ internal_dns2 = dict(default=None),
+ dns1_ipv6 = dict(default=None),
+ dns2_ipv6 = dict(default=None),
+ network_type = dict(default='basic', choices=['Basic', 'basic', 'Advanced', 'advanced']),
+ network_domain = dict(default=None),
+ guest_cidr_address = dict(default=None),
+ dhcp_provider = dict(default=None),
+ local_storage_enabled = dict(default=None),
+ securitygroups_enabled = dict(default=None),
+ state = dict(choices=['present', 'enabled', 'disabled', 'absent'], default='present'),
+ domain = dict(default=None),
+ ))
+
+ module = AnsibleModule(
+ argument_spec=argument_spec,
+ required_together=cs_required_together(),
+ supports_check_mode=True
+ )
+
+ try:
+ acs_zone = AnsibleCloudStackZone(module)
+
+ state = module.params.get('state')
+ if state in ['absent']:
+ zone = acs_zone.absent_zone()
+ else:
+ zone = acs_zone.present_zone()
+
+ result = acs_zone.get_result(zone)
+
+ except CloudStackException as e:
+ module.fail_json(msg='CloudStackException: %s' % str(e))
+
+ module.exit_json(**result)
+
+# import module snippets
+from ansible.module_utils.basic import *
+if __name__ == '__main__':
+ main()
diff --git a/lib/ansible/modules/extras/cloud/cloudstack/cs_zone_facts.py b/lib/ansible/modules/extras/cloud/cloudstack/cs_zone_facts.py
new file mode 100644
index 0000000000..7b5076659f
--- /dev/null
+++ b/lib/ansible/modules/extras/cloud/cloudstack/cs_zone_facts.py
@@ -0,0 +1,200 @@
+#!/usr/bin/python
+# -*- coding: utf-8 -*-
+#
+# (c) 2016, René Moser <mail@renemoser.net>
+#
+# This file is part of Ansible
+#
+# Ansible is free software: you can redistribute it and/or modify
+# it under the terms of the GNU General Public License as published by
+# the Free Software Foundation, either version 3 of the License, or
+# (at your option) any later version.
+#
+# Ansible is distributed in the hope that it will be useful,
+# but WITHOUT ANY WARRANTY; without even the implied warranty of
+# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+# GNU General Public License for more details.
+#
+# You should have received a copy of the GNU General Public License
+# along with Ansible. If not, see <http://www.gnu.org/licenses/>.
+
+DOCUMENTATION = '''
+---
+module: cs_zone_facts
+short_description: Gathering facts about zones from Apache CloudStack based clouds.
+description:
+  - Gathering facts about a zone from the API.
+version_added: "2.1"
+author: "René Moser (@resmo)"
+options:
+ name:
+ description:
+ - Name of the zone.
+ required: true
+extends_documentation_fragment: cloudstack
+'''
+
+EXAMPLES = '''
+- local_action:
+ module: cs_zone_facts
+ name: ch-gva-1
+
+- debug: var=cloudstack_zone
+'''
+
+RETURN = '''
+---
+cloudstack_zone.id:
+ description: UUID of the zone.
+ returned: success
+ type: string
+ sample: 04589590-ac63-4ffc-93f5-b698b8ac38b6
+cloudstack_zone.name:
+ description: Name of the zone.
+ returned: success
+ type: string
+ sample: zone01
+cloudstack_zone.dns1:
+ description: First DNS for the zone.
+ returned: success
+ type: string
+ sample: 8.8.8.8
+cloudstack_zone.dns2:
+ description: Second DNS for the zone.
+ returned: success
+ type: string
+ sample: 8.8.4.4
+cloudstack_zone.internal_dns1:
+ description: First internal DNS for the zone.
+ returned: success
+ type: string
+ sample: 8.8.8.8
+cloudstack_zone.internal_dns2:
+ description: Second internal DNS for the zone.
+ returned: success
+ type: string
+ sample: 8.8.4.4
+cloudstack_zone.dns1_ipv6:
+ description: First IPv6 DNS for the zone.
+ returned: success
+ type: string
+ sample: "2001:4860:4860::8888"
+cloudstack_zone.dns2_ipv6:
+ description: Second IPv6 DNS for the zone.
+ returned: success
+ type: string
+ sample: "2001:4860:4860::8844"
+cloudstack_zone.allocation_state:
+ description: State of the zone.
+ returned: success
+ type: string
+ sample: Enabled
+cloudstack_zone.domain:
+ description: Domain the zone is related to.
+ returned: success
+ type: string
+ sample: ROOT
+cloudstack_zone.network_domain:
+ description: Network domain for the zone.
+ returned: success
+ type: string
+ sample: example.com
+cloudstack_zone.network_type:
+ description: Network type for the zone.
+ returned: success
+ type: string
+ sample: basic
+cloudstack_zone.local_storage_enabled:
+ description: Local storage offering enabled.
+ returned: success
+ type: bool
+ sample: false
+cloudstack_zone.securitygroups_enabled:
+ description: Security groups support is enabled.
+ returned: success
+ type: bool
+ sample: false
+cloudstack_zone.guest_cidr_address:
+  description: Guest CIDR address for the zone.
+ returned: success
+ type: string
+ sample: 10.1.1.0/24
+cloudstack_zone.dhcp_provider:
+  description: DHCP provider for the zone.
+ returned: success
+ type: string
+ sample: VirtualRouter
+cloudstack_zone.zone_token:
+  description: Zone token.
+ returned: success
+ type: string
+ sample: ccb0a60c-79c8-3230-ab8b-8bdbe8c45bb7
+cloudstack_zone.tags:
+ description: List of resource tags associated with the zone.
+ returned: success
+  type: list
+ sample: [ { "key": "foo", "value": "bar" } ]
+'''
+
+# import cloudstack common
+from ansible.module_utils.cloudstack import *
+
+class AnsibleCloudStackZoneFacts(AnsibleCloudStack):
+
+ def __init__(self, module):
+ super(AnsibleCloudStackZoneFacts, self).__init__(module)
+ self.returns = {
+ 'dns1': 'dns1',
+ 'dns2': 'dns2',
+ 'internaldns1': 'internal_dns1',
+ 'internaldns2': 'internal_dns2',
+ 'ipv6dns1': 'dns1_ipv6',
+ 'ipv6dns2': 'dns2_ipv6',
+ 'domain': 'network_domain',
+ 'networktype': 'network_type',
+ 'securitygroupsenabled': 'securitygroups_enabled',
+ 'localstorageenabled': 'local_storage_enabled',
+ 'guestcidraddress': 'guest_cidr_address',
+ 'dhcpprovider': 'dhcp_provider',
+ 'allocationstate': 'allocation_state',
+ 'zonetoken': 'zone_token',
+ }
+ self.facts = {
+ 'cloudstack_zone': None,
+ }
+
+
+ def get_zone(self):
+ if not self.zone:
+ # TODO: add param key signature in get_zone()
+ self.module.params['zone'] = self.module.params.get('name')
+ super(AnsibleCloudStackZoneFacts, self).get_zone()
+ return self.zone
+
+
+ def run(self):
+ zone = self.get_zone()
+ self.facts['cloudstack_zone'] = self.get_result(zone)
+ return self.facts
+
+
+def main():
+ argument_spec = cs_argument_spec()
+ argument_spec.update(dict(
+ name = dict(required=True),
+ ))
+
+ module = AnsibleModule(
+ argument_spec=argument_spec,
+ supports_check_mode=False,
+ )
+
+ cs_zone_facts = AnsibleCloudStackZoneFacts(module=module).run()
+ cs_facts_result = dict(changed=False, ansible_facts=cs_zone_facts)
+ module.exit_json(**cs_facts_result)
+
+from ansible.module_utils.basic import *
+if __name__ == '__main__':
+ main()
diff --git a/lib/ansible/modules/extras/cloud/google/__init__.py b/lib/ansible/modules/extras/cloud/google/__init__.py
new file mode 100644
index 0000000000..e69de29bb2
--- /dev/null
+++ b/lib/ansible/modules/extras/cloud/google/__init__.py
diff --git a/lib/ansible/modules/extras/cloud/google/gcdns_record.py b/lib/ansible/modules/extras/cloud/google/gcdns_record.py
new file mode 100644
index 0000000000..19b70a8581
--- /dev/null
+++ b/lib/ansible/modules/extras/cloud/google/gcdns_record.py
@@ -0,0 +1,790 @@
+#!/usr/bin/python
+# -*- coding: utf-8 -*-
+#
+# Copyright (C) 2015 CallFire Inc.
+#
+# This file is part of Ansible.
+#
+# This program is free software: you can redistribute it and/or modify
+# it under the terms of the GNU General Public License as published by
+# the Free Software Foundation, either version 3 of the License, or
+# (at your option) any later version.
+#
+# This program is distributed in the hope that it will be useful,
+# but WITHOUT ANY WARRANTY; without even the implied warranty of
+# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+# GNU General Public License for more details.
+#
+# You should have received a copy of the GNU General Public License
+# along with this program. If not, see <http://www.gnu.org/licenses/>.
+
+
+################################################################################
+# Documentation
+################################################################################
+
+DOCUMENTATION = '''
+---
+module: gcdns_record
+short_description: Creates or removes resource records in Google Cloud DNS
+description:
+ - Creates or removes resource records in Google Cloud DNS.
+version_added: "2.2"
+author: "William Albert (@walbert947)"
+requirements:
+ - "python >= 2.6"
+ - "apache-libcloud >= 0.19.0"
+options:
+ state:
+ description:
+ - Whether the given resource record should or should not be present.
+ required: false
+ choices: ["present", "absent"]
+ default: "present"
+ record:
+ description:
+ - The fully-qualified domain name of the resource record.
+ required: true
+ aliases: ['name']
+ zone:
+ description:
+ - The DNS domain name of the zone (e.g., example.com).
+ - One of either I(zone) or I(zone_id) must be specified as an
+ option, or the module will fail.
+      - If both I(zone) and I(zone_id) are specified, I(zone_id) will be
+ used.
+ required: false
+ zone_id:
+ description:
+ - The Google Cloud ID of the zone (e.g., example-com).
+ - One of either I(zone) or I(zone_id) must be specified as an
+ option, or the module will fail.
+ - These usually take the form of domain names with the dots replaced
+ with dashes. A zone ID will never have any dots in it.
+ - I(zone_id) can be faster than I(zone) in projects with a large
+ number of zones.
+      - If both I(zone) and I(zone_id) are specified, I(zone_id) will be
+ used.
+ required: false
+ type:
+ description:
+ - The type of resource record to add.
+ required: true
+ choices: [ 'A', 'AAAA', 'CNAME', 'SRV', 'TXT', 'SOA', 'NS', 'MX', 'SPF', 'PTR' ]
+ record_data:
+ description:
+ - The record_data to use for the resource record.
+ - I(record_data) must be specified if I(state) is C(present) or
+ I(overwrite) is C(True), or the module will fail.
+ - Valid record_data vary based on the record's I(type). In addition,
+ resource records that contain a DNS domain name in the value
+        field (e.g., CNAME, PTR, SRV, etc.) MUST include a trailing dot
+ in the value.
+ - Individual string record_data for TXT records must be enclosed in
+ double quotes.
+ - For resource records that have the same name but different
+ record_data (e.g., multiple A records), they must be defined as
+ multiple list entries in a single record.
+ required: false
+ aliases: ['value']
+ ttl:
+ description:
+ - The amount of time in seconds that a resource record will remain
+ cached by a caching resolver.
+ required: false
+ default: 300
+ overwrite:
+ description:
+ - Whether an attempt to overwrite an existing record should succeed
+ or fail. The behavior of this option depends on I(state).
+ - If I(state) is C(present) and I(overwrite) is C(True), this
+ module will replace an existing resource record of the same name
+ with the provided I(record_data). If I(state) is C(present) and
+ I(overwrite) is C(False), this module will fail if there is an
+ existing resource record with the same name and type, but
+ different resource data.
+ - If I(state) is C(absent) and I(overwrite) is C(True), this
+ module will remove the given resource record unconditionally.
+ If I(state) is C(absent) and I(overwrite) is C(False), this
+ module will fail if the provided record_data do not match exactly
+ with the existing resource record's record_data.
+ required: false
+ choices: [True, False]
+ default: False
+ service_account_email:
+ description:
+ - The e-mail address for a service account with access to Google
+ Cloud DNS.
+ required: false
+ default: null
+ pem_file:
+ description:
+ - The path to the PEM file associated with the service account
+ email.
+ - This option is deprecated and may be removed in a future release.
+ Use I(credentials_file) instead.
+ required: false
+ default: null
+ credentials_file:
+ description:
+ - The path to the JSON file associated with the service account
+ email.
+ required: false
+ default: null
+ project_id:
+ description:
+ - The Google Cloud Platform project ID to use.
+ required: false
+ default: null
+notes:
+ - See also M(gcdns_zone).
+  - This module's underlying library does not support in-place updates for
+ DNS resource records. Instead, resource records are quickly deleted and
+ recreated.
+ - SOA records are technically supported, but their functionality is limited
+ to verifying that a zone's existing SOA record matches a pre-determined
+ value. The SOA record cannot be updated.
+ - Root NS records cannot be updated.
+ - NAPTR records are not supported.
+'''
+
+EXAMPLES = '''
+# Create an A record.
+- gcdns_record:
+ record: 'www1.example.com'
+ zone: 'example.com'
+ type: A
+ value: '1.2.3.4'
+
+# Update an existing record.
+- gcdns_record:
+ record: 'www1.example.com'
+ zone: 'example.com'
+ type: A
+ overwrite: true
+ value: '5.6.7.8'
+
+# Remove an A record.
+- gcdns_record:
+ record: 'www1.example.com'
+ zone_id: 'example-com'
+ state: absent
+ type: A
+ value: '5.6.7.8'
+
+# Create a CNAME record.
+- gcdns_record:
+ record: 'www.example.com'
+ zone_id: 'example-com'
+ type: CNAME
+ value: 'www.example.com.' # Note the trailing dot
+
+# Create an MX record with a custom TTL.
+- gcdns_record:
+ record: 'example.com'
+ zone: 'example.com'
+ type: MX
+ ttl: 3600
+ value: '10 mail.example.com.' # Note the trailing dot
+
+# Create multiple A records with the same name.
+- gcdns_record:
+ record: 'api.example.com'
+ zone_id: 'example-com'
+ type: A
+ record_data:
+ - '192.0.2.23'
+ - '10.4.5.6'
+ - '198.51.100.5'
+ - '203.0.113.10'
+
+# Change the value of an existing record with multiple record_data.
+- gcdns_record:
+ record: 'api.example.com'
+ zone: 'example.com'
+ type: A
+ overwrite: true
+ record_data: # WARNING: All values in a record will be replaced
+ - '192.0.2.23'
+ - '192.0.2.42' # The changed record
+ - '198.51.100.5'
+ - '203.0.113.10'
+
+# Safely remove a multi-line record.
+- gcdns_record:
+ record: 'api.example.com'
+ zone_id: 'example-com'
+ state: absent
+ type: A
+ record_data: # NOTE: All of the values must match exactly
+ - '192.0.2.23'
+ - '192.0.2.42'
+ - '198.51.100.5'
+ - '203.0.113.10'
+
+# Unconditionally remove a record.
+- gcdns_record:
+ record: 'api.example.com'
+ zone_id: 'example-com'
+ state: absent
+ overwrite: true # overwrite is true, so no values are needed
+ type: A
+
+# Create an AAAA record
+- gcdns_record:
+ record: 'www1.example.com'
+ zone: 'example.com'
+ type: AAAA
+ value: 'fd00:db8::1'
+
+# Create a PTR record
+- gcdns_record:
+ record: '10.5.168.192.in-addr.arpa'
+ zone: '5.168.192.in-addr.arpa'
+ type: PTR
+ value: 'api.example.com.' # Note the trailing dot.
+
+# Create an NS record
+- gcdns_record:
+ record: 'subdomain.example.com'
+ zone: 'example.com'
+ type: NS
+ ttl: 21600
+ record_data:
+ - 'ns-cloud-d1.googledomains.com.' # Note the trailing dots on values
+ - 'ns-cloud-d2.googledomains.com.'
+ - 'ns-cloud-d3.googledomains.com.'
+ - 'ns-cloud-d4.googledomains.com.'
+
+# Create a TXT record
+- gcdns_record:
+ record: 'example.com'
+ zone_id: 'example-com'
+ type: TXT
+ record_data:
+ - '"v=spf1 include:_spf.google.com -all"' # A single-string TXT value
+ - '"hello " "world"' # A multi-string TXT value
+'''
+
+RETURN = '''
+overwrite:
+  description: Whether the module was allowed to overwrite the record
+ returned: success
+ type: boolean
+ sample: True
+record:
+ description: Fully-qualified domain name of the resource record
+ returned: success
+ type: string
+ sample: mail.example.com.
+state:
+ description: Whether the record is present or absent
+ returned: success
+ type: string
+ sample: present
+ttl:
+ description: The time-to-live of the resource record
+ returned: success
+ type: int
+ sample: 300
+type:
+ description: The type of the resource record
+ returned: success
+ type: string
+ sample: A
+record_data:
+ description: The resource record values
+ returned: success
+ type: list
+ sample: ['5.6.7.8', '9.10.11.12']
+zone:
+  description: The DNS name of the zone
+ returned: success
+ type: string
+ sample: example.com.
+zone_id:
+ description: The Google Cloud DNS ID of the zone
+ returned: success
+ type: string
+ sample: example-com
+'''
+
+
+################################################################################
+# Imports
+################################################################################
+
+import socket
+from distutils.version import LooseVersion
+
+try:
+ from libcloud import __version__ as LIBCLOUD_VERSION
+ from libcloud.common.google import InvalidRequestError
+ from libcloud.common.types import LibcloudError
+ from libcloud.dns.types import Provider
+ from libcloud.dns.types import RecordDoesNotExistError
+ from libcloud.dns.types import ZoneDoesNotExistError
+ HAS_LIBCLOUD = True
+except ImportError:
+ HAS_LIBCLOUD = False
+
+
+################################################################################
+# Constants
+################################################################################
+
+# Apache libcloud 0.19.0 was the first to contain the non-beta Google Cloud DNS
+# v1 API. Earlier versions contained the beta v1 API, which has since been
+# deprecated and decommissioned.
+MINIMUM_LIBCLOUD_VERSION = '0.19.0'
+
+# The libcloud Google Cloud DNS provider.
+PROVIDER = Provider.GOOGLE
+
+# The records that libcloud's Google Cloud DNS provider supports.
+#
+# Libcloud has a RECORD_TYPE_MAP dictionary in the provider that also contains
+# this information and is the authoritative source on which records are
+# supported, but accessing the dictionary requires creating a Google Cloud DNS
+# driver object, which is done in a helper module.
+#
+# I'm hard-coding the supported record types here, because they (hopefully!)
+# shouldn't change much, and it allows me to use it as a "choices" parameter
+# in an AnsibleModule argument_spec.
+SUPPORTED_RECORD_TYPES = [ 'A', 'AAAA', 'CNAME', 'SRV', 'TXT', 'SOA', 'NS', 'MX', 'SPF', 'PTR' ]
+
+
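+# A minimal sketch (not used by this module) of how the authoritative list
+# could be read from libcloud instead, assuming RECORD_TYPE_MAP is exposed
+# as a class attribute on the DNS driver:
+def _example_supported_types_from_libcloud():
+    from libcloud.dns.providers import get_driver
+    driver_class = get_driver(Provider.GOOGLE)
+    return sorted(driver_class.RECORD_TYPE_MAP.values())
+
+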
+################################################################################
+# Functions
+################################################################################
+
+def create_record(module, gcdns, zone, record):
+ """Creates or overwrites a resource record."""
+
+ overwrite = module.boolean(module.params['overwrite'])
+ record_name = module.params['record']
+ record_type = module.params['type']
+ ttl = module.params['ttl']
+ record_data = module.params['record_data']
+ data = dict(ttl=ttl, rrdatas=record_data)
+
+ # Google Cloud DNS wants the trailing dot on all DNS names.
+ if record_name[-1] != '.':
+ record_name = record_name + '.'
+
+ # If we found a record, we need to check if the values match.
+ if record is not None:
+ # If the record matches, we obviously don't have to change anything.
+ if _records_match(record.data['ttl'], record.data['rrdatas'], ttl, record_data):
+ return False
+
+ # The record doesn't match, so we need to check if we can overwrite it.
+ if not overwrite:
+ module.fail_json(
+ msg = 'cannot overwrite existing record, overwrite protection enabled',
+ changed = False
+ )
+
+ # The record either doesn't exist, or it exists and we can overwrite it.
+ if record is None and not module.check_mode:
+ # There's no existing record, so we'll just create it.
+ try:
+ gcdns.create_record(record_name, zone, record_type, data)
+ except InvalidRequestError as error:
+ if error.code == 'invalid':
+ # The resource record name and type are valid by themselves, but
+ # not when combined (e.g., an 'A' record with "www.example.com"
+ # as its value).
+ module.fail_json(
+ msg = 'value is invalid for the given type: ' +
+ "%s, got value: %s" % (record_type, record_data),
+ changed = False
+ )
+
+ elif error.code == 'cnameResourceRecordSetConflict':
+ # We're attempting to create a CNAME resource record when we
+            # already have another type of resource record with the same
+            # domain name.
+ module.fail_json(
+ msg = "non-CNAME resource record already exists: %s" % record_name,
+ changed = False
+ )
+
+ else:
+ # The error is something else that we don't know how to handle,
+ # so we'll just re-raise the exception.
+ raise
+
+ elif record is not None and not module.check_mode:
+ # The Google provider in libcloud doesn't support updating a record in
+ # place, so if the record already exists, we need to delete it and
+ # recreate it using the new information.
+ gcdns.delete_record(record)
+
+ try:
+ gcdns.create_record(record_name, zone, record_type, data)
+ except InvalidRequestError:
+ # Something blew up when creating the record. This will usually be a
+ # result of invalid value data in the new record. Unfortunately, we
+ # already changed the state of the record by deleting the old one,
+ # so we'll try to roll back before failing out.
+ try:
+ gcdns.create_record(record.name, record.zone, record.type, record.data)
+ module.fail_json(
+ msg = 'error updating record, the original record was restored',
+ changed = False
+ )
+ except LibcloudError:
+ # We deleted the old record, couldn't create the new record, and
+ # couldn't roll back. That really sucks. We'll dump the original
+            # record to the failure output so the user can restore it if
+ # necessary.
+ module.fail_json(
+ msg = 'error updating record, and could not restore original record, ' +
+ "original name: %s " % record.name +
+ "original zone: %s " % record.zone +
+ "original type: %s " % record.type +
+ "original data: %s" % record.data,
+ changed = True)
+
+ return True
+
+
+def remove_record(module, gcdns, record):
+ """Remove a resource record."""
+
+ overwrite = module.boolean(module.params['overwrite'])
+ ttl = module.params['ttl']
+ record_data = module.params['record_data']
+
+ # If there is no record, we're obviously done.
+ if record is None:
+ return False
+
+ # If there is an existing record, do our values match the values of the
+ # existing record?
+ if not overwrite:
+ if not _records_match(record.data['ttl'], record.data['rrdatas'], ttl, record_data):
+ module.fail_json(
+ msg = 'cannot delete due to non-matching ttl or record_data: ' +
+ "ttl: %d, record_data: %s " % (ttl, record_data) +
+ "original ttl: %d, original record_data: %s" % (record.data['ttl'], record.data['rrdatas']),
+ changed = False
+ )
+
+ # If we got to this point, we're okay to delete the record.
+ if not module.check_mode:
+ gcdns.delete_record(record)
+
+ return True
+
+
+def _get_record(gcdns, zone, record_type, record_name):
+ """Gets the record object for a given FQDN."""
+
+ # The record ID is a combination of its type and FQDN. For example, the
+ # ID of an A record for www.example.com would be 'A:www.example.com.'
+ record_id = "%s:%s" % (record_type, record_name)
+
+ try:
+ return gcdns.get_record(zone.id, record_id)
+ except RecordDoesNotExistError:
+ return None
+
+
+def _get_zone(gcdns, zone_name, zone_id):
+ """Gets the zone object for a given domain name."""
+
+ if zone_id is not None:
+ try:
+ return gcdns.get_zone(zone_id)
+ except ZoneDoesNotExistError:
+ return None
+
+ # To create a zone, we need to supply a domain name. However, to delete a
+ # zone, we need to supply a zone ID. Zone ID's are often based on domain
+ # names, but that's not guaranteed, so we'll iterate through the list of
+ # zones to see if we can find a matching domain name.
+ available_zones = gcdns.iterate_zones()
+ found_zone = None
+
+ for zone in available_zones:
+ if zone.domain == zone_name:
+ found_zone = zone
+ break
+
+ return found_zone
+
+
+def _records_match(old_ttl, old_record_data, new_ttl, new_record_data):
+ """Checks to see if original and new TTL and values match."""
+
+ matches = True
+
+ if old_ttl != new_ttl:
+ matches = False
+ if old_record_data != new_record_data:
+ matches = False
+
+ return matches
+
+
+def _sanity_check(module):
+ """Run sanity checks that don't depend on info from the zone/record."""
+
+ overwrite = module.params['overwrite']
+ record_name = module.params['record']
+ record_type = module.params['type']
+ state = module.params['state']
+ ttl = module.params['ttl']
+ record_data = module.params['record_data']
+
+ # Apache libcloud needs to be installed and at least the minimum version.
+ if not HAS_LIBCLOUD:
+ module.fail_json(
+ msg = 'This module requires Apache libcloud %s or greater' % MINIMUM_LIBCLOUD_VERSION,
+ changed = False
+ )
+ elif LooseVersion(LIBCLOUD_VERSION) < MINIMUM_LIBCLOUD_VERSION:
+ module.fail_json(
+ msg = 'This module requires Apache libcloud %s or greater' % MINIMUM_LIBCLOUD_VERSION,
+ changed = False
+ )
+
+ # A negative TTL is not permitted (how would they even work?!).
+ if ttl < 0:
+ module.fail_json(
+ msg = 'TTL cannot be less than zero, got: %d' % ttl,
+ changed = False
+ )
+
+ # Deleting SOA records is not permitted.
+ if record_type == 'SOA' and state == 'absent':
+ module.fail_json(msg='cannot delete SOA records', changed=False)
+
+ # Updating SOA records is not permitted.
+ if record_type == 'SOA' and state == 'present' and overwrite:
+ module.fail_json(msg='cannot update SOA records', changed=False)
+
+ # Some sanity checks depend on what value was supplied.
+ if record_data is not None and (state == 'present' or not overwrite):
+ # A records must contain valid IPv4 addresses.
+ if record_type == 'A':
+ for value in record_data:
+ try:
+ socket.inet_aton(value)
+ except socket.error:
+ module.fail_json(
+ msg = 'invalid A record value, got: %s' % value,
+ changed = False
+ )
+
+ # AAAA records must contain valid IPv6 addresses.
+ if record_type == 'AAAA':
+ for value in record_data:
+ try:
+ socket.inet_pton(socket.AF_INET6, value)
+ except socket.error:
+ module.fail_json(
+ msg = 'invalid AAAA record value, got: %s' % value,
+ changed = False
+ )
+
+ # CNAME and SOA records can't have multiple values.
+ if record_type in ['CNAME', 'SOA'] and len(record_data) > 1:
+ module.fail_json(
+ msg = 'CNAME or SOA records cannot have more than one value, ' +
+ "got: %s" % record_data,
+ changed = False
+ )
+
+ # Google Cloud DNS does not support wildcard NS records.
+ if record_type == 'NS' and record_name[0] == '*':
+ module.fail_json(
+ msg = "wildcard NS records not allowed, got: %s" % record_name,
+ changed = False
+ )
+
+ # Values for txt records must begin and end with a double quote.
+ if record_type == 'TXT':
+ for value in record_data:
+ if value[0] != '"' and value[-1] != '"':
+ module.fail_json(
+ msg = 'TXT record_data must be enclosed in double quotes, ' +
+ 'got: %s' % value,
+ changed = False
+ )
+
+
+def _additional_sanity_checks(module, zone):
+ """Run input sanity checks that depend on info from the zone/record."""
+
+ overwrite = module.params['overwrite']
+ record_name = module.params['record']
+ record_type = module.params['type']
+ state = module.params['state']
+
+ # CNAME records are not allowed to have the same name as the root domain.
+ if record_type == 'CNAME' and record_name == zone.domain:
+ module.fail_json(
+ msg = 'CNAME records cannot match the zone name',
+ changed = False
+ )
+
+ # The root domain must always have an NS record.
+ if record_type == 'NS' and record_name == zone.domain and state == 'absent':
+ module.fail_json(
+ msg = 'cannot delete root NS records',
+ changed = False
+ )
+
+ # Updating NS records with the name as the root domain is not allowed
+ # because libcloud does not support in-place updates and root domain NS
+ # records cannot be removed.
+ if record_type == 'NS' and record_name == zone.domain and overwrite:
+ module.fail_json(
+ msg = 'cannot update existing root NS records',
+ changed = False
+ )
+
+ # SOA records with names that don't match the root domain are not permitted
+ # (and wouldn't make sense anyway).
+ if record_type == 'SOA' and record_name != zone.domain:
+ module.fail_json(
+ msg = 'non-root SOA records are not permitted, got: %s' % record_name,
+ changed = False
+ )
+
+
+################################################################################
+# Main
+################################################################################
+
+def main():
+ """Main function"""
+
+ module = AnsibleModule(
+ argument_spec = dict(
+ state = dict(default='present', choices=['present', 'absent'], type='str'),
+ record = dict(required=True, aliases=['name'], type='str'),
+ zone = dict(type='str'),
+ zone_id = dict(type='str'),
+ type = dict(required=True, choices=SUPPORTED_RECORD_TYPES, type='str'),
+ record_data = dict(aliases=['value'], type='list'),
+ ttl = dict(default=300, type='int'),
+ overwrite = dict(default=False, type='bool'),
+ service_account_email = dict(type='str'),
+ pem_file = dict(type='path'),
+ credentials_file = dict(type='path'),
+ project_id = dict(type='str')
+ ),
+ required_if = [
+ ('state', 'present', ['record_data']),
+ ('overwrite', False, ['record_data'])
+ ],
+ required_one_of = [['zone', 'zone_id']],
+ supports_check_mode = True
+ )
+
+ _sanity_check(module)
+
+ record_name = module.params['record']
+ record_type = module.params['type']
+ state = module.params['state']
+ ttl = module.params['ttl']
+ zone_name = module.params['zone']
+ zone_id = module.params['zone_id']
+
+ json_output = dict(
+ state = state,
+ record = record_name,
+ zone = zone_name,
+ zone_id = zone_id,
+ type = record_type,
+ record_data = module.params['record_data'],
+ ttl = ttl,
+ overwrite = module.boolean(module.params['overwrite'])
+ )
+
+ # Google Cloud DNS wants the trailing dot on all DNS names.
+ if zone_name is not None and zone_name[-1] != '.':
+ zone_name = zone_name + '.'
+ if record_name[-1] != '.':
+ record_name = record_name + '.'
+
+ # Build a connection object that we can use to connect with Google Cloud
+ # DNS.
+ gcdns = gcdns_connect(module, provider=PROVIDER)
+
+ # We need to check that the zone we're creating a record for actually
+ # exists.
+ zone = _get_zone(gcdns, zone_name, zone_id)
+ if zone is None and zone_name is not None:
+ module.fail_json(
+ msg = 'zone name was not found: %s' % zone_name,
+ changed = False
+ )
+ elif zone is None and zone_id is not None:
+ module.fail_json(
+ msg = 'zone id was not found: %s' % zone_id,
+ changed = False
+ )
+
+ # Populate the returns with the actual zone information.
+ json_output['zone'] = zone.domain
+ json_output['zone_id'] = zone.id
+
+ # We also need to check if the record we want to create or remove actually
+ # exists.
+ try:
+ record = _get_record(gcdns, zone, record_type, record_name)
+ except InvalidRequestError:
+ # We gave Google Cloud DNS an invalid DNS record name.
+ module.fail_json(
+ msg = 'record name is invalid: %s' % record_name,
+ changed = False
+ )
+
+ _additional_sanity_checks(module, zone)
+
+ diff = dict()
+
+ # Build the 'before' diff
+ if record is None:
+ diff['before'] = ''
+ diff['before_header'] = '<absent>'
+ else:
+ diff['before'] = dict(
+ record = record.data['name'],
+ type = record.data['type'],
+ record_data = record.data['rrdatas'],
+ ttl = record.data['ttl']
+ )
+ diff['before_header'] = "%s:%s" % (record_type, record_name)
+
+ # Create, remove, or modify the record.
+ if state == 'present':
+ diff['after'] = dict(
+ record = record_name,
+ type = record_type,
+ record_data = module.params['record_data'],
+ ttl = ttl
+ )
+ diff['after_header'] = "%s:%s" % (record_type, record_name)
+
+ changed = create_record(module, gcdns, zone, record)
+
+ elif state == 'absent':
+ diff['after'] = ''
+ diff['after_header'] = '<absent>'
+
+ changed = remove_record(module, gcdns, record)
+
+ module.exit_json(changed=changed, diff=diff, **json_output)
+
+
+from ansible.module_utils.basic import *
+from ansible.module_utils.gcdns import *
+
+if __name__ == '__main__':
+ main()
diff --git a/lib/ansible/modules/extras/cloud/google/gcdns_zone.py b/lib/ansible/modules/extras/cloud/google/gcdns_zone.py
new file mode 100644
index 0000000000..4b7bd16985
--- /dev/null
+++ b/lib/ansible/modules/extras/cloud/google/gcdns_zone.py
@@ -0,0 +1,381 @@
+#!/usr/bin/python
+# -*- coding: utf-8 -*-
+#
+# Copyright (C) 2015 CallFire Inc.
+#
+# This file is part of Ansible.
+#
+# This program is free software: you can redistribute it and/or modify
+# it under the terms of the GNU General Public License as published by
+# the Free Software Foundation, either version 3 of the License, or
+# (at your option) any later version.
+#
+# This program is distributed in the hope that it will be useful,
+# but WITHOUT ANY WARRANTY; without even the implied warranty of
+# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+# GNU General Public License for more details.
+#
+# You should have received a copy of the GNU General Public License
+# along with this program. If not, see <http://www.gnu.org/licenses/>.
+
+
+################################################################################
+# Documentation
+################################################################################
+
+DOCUMENTATION = '''
+---
+module: gcdns_zone
+short_description: Creates or removes zones in Google Cloud DNS
+description:
+ - Creates or removes managed zones in Google Cloud DNS.
+version_added: "2.2"
+author: "William Albert (@walbert947)"
+requirements:
+ - "python >= 2.6"
+ - "apache-libcloud >= 0.19.0"
+options:
+ state:
+ description:
+ - Whether the given zone should or should not be present.
+ required: false
+ choices: ["present", "absent"]
+ default: "present"
+ zone:
+ description:
+ - The DNS domain name of the zone.
+ - This is NOT the Google Cloud DNS zone ID (e.g., example-com). If
+ you attempt to specify a zone ID, this module will attempt to
+ create a TLD and will fail.
+ required: true
+ aliases: ['name']
+ description:
+ description:
+ - An arbitrary text string to use for the zone description.
+ required: false
+ default: ""
+ service_account_email:
+ description:
+ - The e-mail address for a service account with access to Google
+ Cloud DNS.
+ required: false
+ default: null
+ pem_file:
+ description:
+ - The path to the PEM file associated with the service account
+ email.
+ - This option is deprecated and may be removed in a future release.
+ Use I(credentials_file) instead.
+ required: false
+ default: null
+ credentials_file:
+ description:
+ - The path to the JSON file associated with the service account
+ email.
+ required: false
+ default: null
+ project_id:
+ description:
+ - The Google Cloud Platform project ID to use.
+ required: false
+ default: null
+notes:
+ - See also M(gcdns_record).
+ - Zones that are newly created must still be set up with a domain registrar
+ before they can be used.
+'''
+
+EXAMPLES = '''
+# Basic zone creation example.
+- name: Create a basic zone with the minimum number of parameters.
+ gcdns_zone: zone=example.com
+
+# Zone removal example.
+- name: Remove a zone.
+ gcdns_zone: zone=example.com state=absent
+
+# Zone creation with description
+- name: Creating a zone with a description
+ gcdns_zone: zone=example.com description="This is an awesome zone"
+'''
+
+RETURN = '''
+description:
+ description: The zone's description
+ returned: success
+ type: string
+ sample: This is an awesome zone
+state:
+ description: Whether the zone is present or absent
+ returned: success
+ type: string
+ sample: present
+zone:
+ description: The zone's DNS name
+ returned: success
+ type: string
+ sample: example.com.
+'''
+
+
+################################################################################
+# Imports
+################################################################################
+
+from distutils.version import LooseVersion
+
+try:
+ from libcloud import __version__ as LIBCLOUD_VERSION
+ from libcloud.common.google import InvalidRequestError
+ from libcloud.common.google import ResourceExistsError
+ from libcloud.common.google import ResourceNotFoundError
+ from libcloud.dns.types import Provider
+ HAS_LIBCLOUD = True
+except ImportError:
+ HAS_LIBCLOUD = False
+
+
+################################################################################
+# Constants
+################################################################################
+
+# Apache libcloud 0.19.0 was the first to contain the non-beta Google Cloud DNS
+# v1 API. Earlier versions contained the beta v1 API, which has since been
+# deprecated and decommissioned.
+MINIMUM_LIBCLOUD_VERSION = '0.19.0'
+
+# The libcloud Google Cloud DNS provider.
+PROVIDER = Provider.GOOGLE
+
+# The URL used to verify ownership of a zone in Google Cloud DNS.
+ZONE_VERIFICATION_URL = 'https://www.google.com/webmasters/verification/'
+
+################################################################################
+# Functions
+################################################################################
+
+def create_zone(module, gcdns, zone):
+ """Creates a new Google Cloud DNS zone."""
+
+ description = module.params['description']
+ extra = dict(description = description)
+ zone_name = module.params['zone']
+
+ # Google Cloud DNS wants the trailing dot on the domain name.
+ if zone_name[-1] != '.':
+ zone_name = zone_name + '.'
+
+ # If we got a zone back, then the domain exists.
+ if zone is not None:
+ return False
+
+ # The zone doesn't exist yet.
+ try:
+ if not module.check_mode:
+ gcdns.create_zone(domain=zone_name, extra=extra)
+ return True
+
+ except ResourceExistsError:
+ # The zone already exists. We checked for this already, so either
+ # Google is lying, or someone was a ninja and created the zone
+ # within milliseconds of us checking for its existence. In any case,
+ # the zone has already been created, so we have nothing more to do.
+ return False
+
+ except InvalidRequestError as error:
+ if error.code == 'invalid':
+ # The zone name or a parameter might be completely invalid. This is
+ # typically caused by an illegal DNS name (e.g. foo..com).
+ module.fail_json(
+ msg = "zone name is not a valid DNS name: %s" % zone_name,
+ changed = False
+ )
+
+ elif error.code == 'managedZoneDnsNameNotAvailable':
+ # Google Cloud DNS will refuse to create zones with certain domain
+ # names, such as TLDs, ccTLDs, or special domain names such as
+ # example.com.
+ module.fail_json(
+ msg = "zone name is reserved or already in use: %s" % zone_name,
+ changed = False
+ )
+
+ elif error.code == 'verifyManagedZoneDnsNameOwnership':
+ # This domain name needs to be verified before Google will create
+ # it. This occurs when a user attempts to create a zone which shares
+ # a domain name with a zone hosted elsewhere in Google Cloud DNS.
+ module.fail_json(
+ msg = "ownership of zone %s needs to be verified at %s" % (zone_name, ZONE_VERIFICATION_URL),
+ changed = False
+ )
+
+ else:
+ # The error is something else that we don't know how to handle,
+ # so we'll just re-raise the exception.
+ raise
+
+
+def remove_zone(module, gcdns, zone):
+ """Removes an existing Google Cloud DNS zone."""
+
+ # If there's no zone, then we're obviously done.
+ if zone is None:
+ return False
+
+ # An empty zone will have two resource records:
+ # 1. An NS record with a list of authoritative name servers
+ # 2. An SOA record
+ # If any additional resource records are present, Google Cloud DNS will
+ # refuse to remove the zone.
+ if len(zone.list_records()) > 2:
+ module.fail_json(
+ msg = "zone is not empty and cannot be removed: %s" % zone.domain,
+ changed = False
+ )
+
+ try:
+ if not module.check_mode:
+ gcdns.delete_zone(zone)
+ return True
+
+ except ResourceNotFoundError:
+ # When we performed our check, the zone existed. It may have been
+ # deleted by something else. It's gone, so whatever.
+ return False
+
+ except InvalidRequestError as error:
+ if error.code == 'containerNotEmpty':
+ # When we performed our check, the zone existed and was empty. In
+ # the milliseconds between the check and the removal command,
+ # records were added to the zone.
+ module.fail_json(
+ msg = "zone is not empty and cannot be removed: %s" % zone.domain,
+ changed = False
+ )
+
+ else:
+ # The error is something else that we don't know how to handle,
+ # so we'll just re-raise the exception.
+ raise
+
+
+def _get_zone(gcdns, zone_name):
+ """Gets the zone object for a given domain name."""
+
+ # To create a zone, we need to supply a zone name. However, to delete a
+ # zone, we need to supply a zone ID. Zone ID's are often based on zone
+ # names, but that's not guaranteed, so we'll iterate through the list of
+ # zones to see if we can find a matching name.
+ available_zones = gcdns.iterate_zones()
+ found_zone = None
+
+ for zone in available_zones:
+ if zone.domain == zone_name:
+ found_zone = zone
+ break
+
+ return found_zone
+
+def _sanity_check(module):
+ """Run module sanity checks."""
+
+ zone_name = module.params['zone']
+
+ # Apache libcloud needs to be installed and at least the minimum version.
+ if not HAS_LIBCLOUD:
+ module.fail_json(
+ msg = 'This module requires Apache libcloud %s or greater' % MINIMUM_LIBCLOUD_VERSION,
+ changed = False
+ )
+ elif LooseVersion(LIBCLOUD_VERSION) < MINIMUM_LIBCLOUD_VERSION:
+ module.fail_json(
+ msg = 'This module requires Apache libcloud %s or greater' % MINIMUM_LIBCLOUD_VERSION,
+ changed = False
+ )
+
+ # Google Cloud DNS does not support the creation of TLDs.
+ if '.' not in zone_name or len([label for label in zone_name.split('.') if label]) == 1:
+ module.fail_json(
+ msg = 'cannot create top-level domain: %s' % zone_name,
+ changed = False
+ )
+
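+# A minimal sketch (not called by the module) of the intent behind the TLD
+# guard above: bare TLDs are rejected, with or without the trailing dot.
+def _example_is_creatable_zone(zone_name):
+    labels = [label for label in zone_name.split('.') if label]
+    return '.' in zone_name and len(labels) > 1
+# _example_is_creatable_zone('com')         -> False
+# _example_is_creatable_zone('com.')        -> False
+# _example_is_creatable_zone('example.com') -> True
+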
+################################################################################
+# Main
+################################################################################
+
+def main():
+ """Main function"""
+
+ module = AnsibleModule(
+ argument_spec = dict(
+ state = dict(default='present', choices=['present', 'absent'], type='str'),
+ zone = dict(required=True, aliases=['name'], type='str'),
+ description = dict(default='', type='str'),
+ service_account_email = dict(type='str'),
+ pem_file = dict(type='path'),
+ credentials_file = dict(type='path'),
+ project_id = dict(type='str')
+ ),
+ supports_check_mode = True
+ )
+
+ _sanity_check(module)
+
+ zone_name = module.params['zone']
+ state = module.params['state']
+
+ # Google Cloud DNS wants the trailing dot on the domain name.
+ if zone_name[-1] != '.':
+ zone_name = zone_name + '.'
+
+ json_output = dict(
+ state = state,
+ zone = zone_name,
+ description = module.params['description']
+ )
+
+    # Build a connection object that we can use to connect with Google
+ # Cloud DNS.
+ gcdns = gcdns_connect(module, provider=PROVIDER)
+
+ # We need to check if the zone we're attempting to create already exists.
+ zone = _get_zone(gcdns, zone_name)
+
+ diff = dict()
+
+ # Build the 'before' diff
+ if zone is None:
+ diff['before'] = ''
+ diff['before_header'] = '<absent>'
+ else:
+ diff['before'] = dict(
+ zone = zone.domain,
+ description = zone.extra['description']
+ )
+ diff['before_header'] = zone_name
+
+ # Create or remove the zone.
+ if state == 'present':
+ diff['after'] = dict(
+ zone = zone_name,
+ description = module.params['description']
+ )
+ diff['after_header'] = zone_name
+
+ changed = create_zone(module, gcdns, zone)
+
+ elif state == 'absent':
+ diff['after'] = ''
+ diff['after_header'] = '<absent>'
+
+ changed = remove_zone(module, gcdns, zone)
+
+ module.exit_json(changed=changed, diff=diff, **json_output)
+
+
+from ansible.module_utils.basic import *
+from ansible.module_utils.gcdns import *
+
+if __name__ == '__main__':
+ main()
diff --git a/lib/ansible/modules/extras/cloud/google/gce_img.py b/lib/ansible/modules/extras/cloud/google/gce_img.py
new file mode 100644
index 0000000000..270ae827dd
--- /dev/null
+++ b/lib/ansible/modules/extras/cloud/google/gce_img.py
@@ -0,0 +1,230 @@
+#!/usr/bin/python
+# Copyright 2015 Google Inc. All Rights Reserved.
+#
+# This file is part of Ansible
+#
+# Ansible is free software: you can redistribute it and/or modify
+# it under the terms of the GNU General Public License as published by
+# the Free Software Foundation, either version 3 of the License, or
+# (at your option) any later version.
+#
+# Ansible is distributed in the hope that it will be useful,
+# but WITHOUT ANY WARRANTY; without even the implied warranty of
+# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+# GNU General Public License for more details.
+#
+# You should have received a copy of the GNU General Public License
+# along with Ansible. If not, see <http://www.gnu.org/licenses/>.
+
+"""An Ansible module to utilize GCE image resources."""
+
+DOCUMENTATION = '''
+---
+module: gce_img
+version_added: "1.9"
+short_description: utilize GCE image resources
+description:
+  - This module can create and delete GCE private images from a gzipped
+    compressed tarball containing raw disk data, or from existing detached
+    disks in any zone. U(https://cloud.google.com/compute/docs/images)
+options:
+ name:
+ description:
+ - the name of the image to create or delete
+ required: true
+ default: null
+ description:
+ description:
+ - an optional description
+ required: false
+ default: null
+ family:
+ description:
+ - an optional family name
+ required: false
+ default: null
+ version_added: "2.2"
+ source:
+ description:
+ - the source disk or the Google Cloud Storage URI to create the image from
+ required: false
+ default: null
+ state:
+ description:
+ - desired state of the image
+ required: false
+ default: "present"
+ choices: ["present", "absent"]
+ zone:
+ description:
+ - the zone of the disk specified by source
+ required: false
+ default: "us-central1-a"
+ timeout:
+ description:
+ - timeout for the operation
+ required: false
+ default: 180
+ version_added: "2.0"
+ service_account_email:
+ description:
+ - service account email
+ required: false
+ default: null
+ pem_file:
+ description:
+ - path to the pem file associated with the service account email
+ required: false
+ default: null
+ project_id:
+ description:
+ - your GCE project ID
+ required: false
+ default: null
+requirements:
+ - "python >= 2.6"
+ - "apache-libcloud"
+author: "Tom Melendez (supertom)"
+'''
+
+EXAMPLES = '''
+# Create an image named test-image from the disk 'test-disk' in zone us-central1-a.
+- gce_img:
+ name: test-image
+ source: test-disk
+ zone: us-central1-a
+ state: present
+
+# Create an image named test-image from a tarball in Google Cloud Storage.
+- gce_img:
+ name: test-image
+ source: https://storage.googleapis.com/bucket/path/to/image.tgz
+
+# Alternatively use the gs scheme
+- gce_img:
+ name: test-image
+ source: gs://bucket/path/to/image.tgz
+
+# Delete an image named test-image.
+- gce_img:
+ name: test-image
+ state: absent
+'''
+
+try:
+ import libcloud
+ from libcloud.compute.types import Provider
+ from libcloud.compute.providers import get_driver
+ from libcloud.common.google import GoogleBaseError
+ from libcloud.common.google import ResourceExistsError
+ from libcloud.common.google import ResourceNotFoundError
+ _ = Provider.GCE
+ has_libcloud = True
+except ImportError:
+ has_libcloud = False
+
+
+GCS_URI = 'https://storage.googleapis.com/'
+
+
+def create_image(gce, name, module):
+ """Create an image with the specified name."""
+ source = module.params.get('source')
+ zone = module.params.get('zone')
+ desc = module.params.get('description')
+ timeout = module.params.get('timeout')
+ family = module.params.get('family')
+
+ if not source:
+ module.fail_json(msg='Must supply a source', changed=False)
+
+ if source.startswith(GCS_URI):
+ # source is a Google Cloud Storage URI
+ volume = source
+ elif source.startswith('gs://'):
+ # libcloud only accepts https URI.
+ volume = source.replace('gs://', GCS_URI)
+ else:
+ try:
+ volume = gce.ex_get_volume(source, zone)
+ except ResourceNotFoundError:
+ module.fail_json(msg='Disk %s not found in zone %s' % (source, zone),
+ changed=False)
+        except GoogleBaseError as e:
+ module.fail_json(msg=str(e), changed=False)
+
+ gce_extra_args = {}
+ if family is not None:
+ gce_extra_args['family'] = family
+
+ old_timeout = gce.connection.timeout
+ try:
+ gce.connection.timeout = timeout
+ gce.ex_create_image(name, volume, desc, use_existing=False, **gce_extra_args)
+ return True
+ except ResourceExistsError:
+ return False
+    except GoogleBaseError as e:
+ module.fail_json(msg=str(e), changed=False)
+ finally:
+ gce.connection.timeout = old_timeout
+
+
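+# A minimal sketch (not called by the module) of the 'gs://' rewrite done in
+# create_image() above; libcloud only accepts the https form of the URI:
+def _example_gs_to_https(source):
+    # 'gs://bucket/path/to/image.tgz'
+    # -> 'https://storage.googleapis.com/bucket/path/to/image.tgz'
+    return source.replace('gs://', GCS_URI)
+
+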
+def delete_image(gce, name, module):
+ """Delete a specific image resource by name."""
+ try:
+ gce.ex_delete_image(name)
+ return True
+ except ResourceNotFoundError:
+ return False
+    except GoogleBaseError as e:
+ module.fail_json(msg=str(e), changed=False)
+
+
+def main():
+ module = AnsibleModule(
+ argument_spec=dict(
+ name=dict(required=True),
+ family=dict(),
+ description=dict(),
+ source=dict(),
+ state=dict(default='present', choices=['present', 'absent']),
+ zone=dict(default='us-central1-a'),
+ service_account_email=dict(),
+ pem_file=dict(type='path'),
+ project_id=dict(),
+ timeout=dict(type='int', default=180)
+ )
+ )
+
+ if not has_libcloud:
+ module.fail_json(msg='libcloud with GCE support is required.')
+
+ gce = gce_connect(module)
+
+ name = module.params.get('name')
+ state = module.params.get('state')
+ family = module.params.get('family')
+ changed = False
+
+ if family is not None and hasattr(libcloud, '__version__') and libcloud.__version__ <= '0.20.1':
+ module.fail_json(msg="Apache Libcloud 1.0.0+ is required to use 'family' option",
+ changed=False)
+
+ # user wants to create an image.
+ if state == 'present':
+ changed = create_image(gce, name, module)
+
+ # user wants to delete the image.
+ if state == 'absent':
+ changed = delete_image(gce, name, module)
+
+ module.exit_json(changed=changed, name=name)
+
+# import module snippets
+from ansible.module_utils.basic import *
+from ansible.module_utils.gce import *
+
+if __name__ == '__main__':
+    main()
diff --git a/lib/ansible/modules/extras/cloud/google/gce_tag.py b/lib/ansible/modules/extras/cloud/google/gce_tag.py
new file mode 100644
index 0000000000..cb1f2a2c3e
--- /dev/null
+++ b/lib/ansible/modules/extras/cloud/google/gce_tag.py
@@ -0,0 +1,229 @@
+#!/usr/bin/python
+# This file is part of Ansible
+#
+# Ansible is free software: you can redistribute it and/or modify
+# it under the terms of the GNU General Public License as published by
+# the Free Software Foundation, either version 3 of the License, or
+# (at your option) any later version.
+#
+# Ansible is distributed in the hope that it will be useful,
+# but WITHOUT ANY WARRANTY; without even the implied warranty of
+# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+# GNU General Public License for more details.
+#
+# You should have received a copy of the GNU General Public License
+# along with Ansible. If not, see <http://www.gnu.org/licenses/>
+
+DOCUMENTATION = '''
+---
+module: gce_tag
+version_added: "2.0"
+short_description: add or remove tag(s) to/from a GCE instance
+description:
+ - This module can add or remove tags U(https://cloud.google.com/compute/docs/instances/#tags)
+    to/from a GCE instance.
+options:
+ instance_name:
+ description:
+      - the name of the GCE instance to add or remove tags from
+ required: true
+ default: null
+ aliases: []
+ tags:
+ description:
+ - comma-separated list of tags to add or remove
+ required: true
+ default: null
+ aliases: []
+ state:
+ description:
+ - desired state of the tags
+ required: false
+ default: "present"
+ choices: ["present", "absent"]
+ aliases: []
+ zone:
+ description:
+      - the zone of the instance specified by instance_name
+ required: false
+ default: "us-central1-a"
+ aliases: []
+ service_account_email:
+ description:
+ - service account email
+ required: false
+ default: null
+ aliases: []
+ pem_file:
+ description:
+ - path to the pem file associated with the service account email
+ required: false
+ default: null
+ aliases: []
+ project_id:
+ description:
+ - your GCE project ID
+ required: false
+ default: null
+ aliases: []
+
+requirements:
+ - "python >= 2.6"
+ - "apache-libcloud"
+author: "Do Hoang Khiem (dohoangkhiem@gmail.com)"
+'''
+
+EXAMPLES = '''
+# Add tags 'http-server', 'https-server', 'staging' to instance name 'staging-server' in zone us-central1-a.
+- gce_tag:
+ instance_name: staging-server
+ tags: http-server,https-server,staging
+ zone: us-central1-a
+ state: present
+
+# Remove tags 'foo', 'bar' from instance 'test-server' in default zone (us-central1-a)
+- gce_tag:
+ instance_name: test-server
+ tags: foo,bar
+ state: absent
+
+'''
+
+try:
+ from libcloud.compute.types import Provider
+ from libcloud.compute.providers import get_driver
+ from libcloud.common.google import GoogleBaseError, QuotaExceededError, \
+ ResourceExistsError, ResourceNotFoundError, InvalidRequestError
+
+ _ = Provider.GCE
+ HAS_LIBCLOUD = True
+except ImportError:
+ HAS_LIBCLOUD = False
+
+
+def add_tags(gce, module, instance_name, tags):
+ """Add tags to instance."""
+ zone = module.params.get('zone')
+
+ if not instance_name:
+ module.fail_json(msg='Must supply instance_name', changed=False)
+
+ if not tags:
+ module.fail_json(msg='Must supply tags', changed=False)
+
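+    # GCE tag names are lowercase (RFC 1035 labels), so normalize the
+    # requested tags before comparing them with the node's existing tags.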
+ tags = [x.lower() for x in tags]
+
+ try:
+ node = gce.ex_get_node(instance_name, zone=zone)
+ except ResourceNotFoundError:
+ module.fail_json(msg='Instance %s not found in zone %s' % (instance_name, zone), changed=False)
+    except GoogleBaseError as e:
+ module.fail_json(msg=str(e), changed=False)
+
+ node_tags = node.extra['tags']
+ changed = False
+ tags_changed = []
+
+ for t in tags:
+ if t not in node_tags:
+ changed = True
+ node_tags.append(t)
+ tags_changed.append(t)
+
+ if not changed:
+ return False, None
+
+ try:
+ gce.ex_set_node_tags(node, node_tags)
+ return True, tags_changed
+ except (GoogleBaseError, InvalidRequestError) as e:
+ module.fail_json(msg=str(e), changed=False)
+
+
+def remove_tags(gce, module, instance_name, tags):
+ """Remove tags from instance."""
+ zone = module.params.get('zone')
+
+ if not instance_name:
+ module.fail_json(msg='Must supply instance_name', changed=False)
+
+ if not tags:
+ module.fail_json(msg='Must supply tags', changed=False)
+
+ tags = [x.lower() for x in tags]
+
+ try:
+ node = gce.ex_get_node(instance_name, zone=zone)
+ except ResourceNotFoundError:
+ module.fail_json(msg='Instance %s not found in zone %s' % (instance_name, zone), changed=False)
+    except GoogleBaseError as e:
+ module.fail_json(msg=str(e), changed=False)
+
+ node_tags = node.extra['tags']
+
+ changed = False
+ tags_changed = []
+
+ for t in tags:
+ if t in node_tags:
+ node_tags.remove(t)
+ changed = True
+ tags_changed.append(t)
+
+ if not changed:
+ return False, None
+
+ try:
+ gce.ex_set_node_tags(node, node_tags)
+ return True, tags_changed
+ except (GoogleBaseError, InvalidRequestError) as e:
+ module.fail_json(msg=str(e), changed=False)
+
+
+def main():
+ module = AnsibleModule(
+ argument_spec=dict(
+ instance_name=dict(required=True),
+ tags=dict(type='list'),
+ state=dict(default='present', choices=['present', 'absent']),
+ zone=dict(default='us-central1-a'),
+ service_account_email=dict(),
+ pem_file=dict(type='path'),
+ project_id=dict(),
+ )
+ )
+
+ if not HAS_LIBCLOUD:
+ module.fail_json(msg='libcloud with GCE support is required.')
+
+ instance_name = module.params.get('instance_name')
+ state = module.params.get('state')
+ tags = module.params.get('tags')
+ zone = module.params.get('zone')
+ changed = False
+
+ if not zone:
+ module.fail_json(msg='Must specify "zone"', changed=False)
+
+ if not tags:
+ module.fail_json(msg='Must specify "tags"', changed=False)
+
+ gce = gce_connect(module)
+
+ # add tags to instance.
+ if state == 'present':
+ changed, tags_changed = add_tags(gce, module, instance_name, tags)
+
+ # remove tags from instance
+ if state == 'absent':
+ changed, tags_changed = remove_tags(gce, module, instance_name, tags)
+
+ module.exit_json(changed=changed, instance_name=instance_name, tags=tags_changed, zone=zone)
+
+# import module snippets
+from ansible.module_utils.basic import *
+from ansible.module_utils.gce import *
+
+if __name__ == '__main__':
+ main()
+
diff --git a/lib/ansible/modules/extras/cloud/lxc/__init__.py b/lib/ansible/modules/extras/cloud/lxc/__init__.py
new file mode 100644
index 0000000000..e69de29bb2
--- /dev/null
+++ b/lib/ansible/modules/extras/cloud/lxc/__init__.py
diff --git a/lib/ansible/modules/extras/cloud/lxc/lxc_container.py b/lib/ansible/modules/extras/cloud/lxc/lxc_container.py
new file mode 100644
index 0000000000..22c72f4344
--- /dev/null
+++ b/lib/ansible/modules/extras/cloud/lxc/lxc_container.py
@@ -0,0 +1,1759 @@
+#!/usr/bin/python
+# -*- coding: utf-8 -*-
+
+# (c) 2014, Kevin Carter <kevin.carter@rackspace.com>
+#
+# This file is part of Ansible
+#
+# Ansible is free software: you can redistribute it and/or modify
+# it under the terms of the GNU General Public License as published by
+# the Free Software Foundation, either version 3 of the License, or
+# (at your option) any later version.
+#
+# Ansible is distributed in the hope that it will be useful,
+# but WITHOUT ANY WARRANTY; without even the implied warranty of
+# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+# GNU General Public License for more details.
+#
+# You should have received a copy of the GNU General Public License
+# along with Ansible. If not, see <http://www.gnu.org/licenses/>.
+
+
+DOCUMENTATION = """
+---
+module: lxc_container
+short_description: Manage LXC Containers
+version_added: 1.8.0
+description:
+ - Management of LXC containers
+author: "Kevin Carter (@cloudnull)"
+options:
+ name:
+ description:
+ - Name of a container.
+ required: true
+ backing_store:
+ choices:
+ - dir
+ - lvm
+ - loop
+ - btrfs
+ - overlayfs
+ - zfs
+ description:
+ - Backend storage type for the container.
+ required: false
+ default: dir
+ template:
+ description:
+ - Name of the template to use within an LXC create.
+ required: false
+ default: ubuntu
+ template_options:
+ description:
+ - Template options when building the container.
+ required: false
+ config:
+ description:
+ - Path to the LXC configuration file.
+ required: false
+ default: null
+ lv_name:
+ description:
+ - Name of the logical volume, defaults to the container name.
+ default: $CONTAINER_NAME
+ required: false
+ vg_name:
+ description:
+ - If backing store is lvm, specify the name of the volume group.
+ default: lxc
+ required: false
+ thinpool:
+ description:
+ - Name of the LVM thin pool to use.
+ required: false
+ fs_type:
+ description:
+ - Filesystem type to create.
+ default: ext4
+ required: false
+ fs_size:
+ description:
+ - File system size.
+ default: 5G
+ required: false
+ directory:
+ description:
+ - Place rootfs directory under DIR.
+ required: false
+ zfs_root:
+ description:
+ - Create zfs under given zfsroot.
+ required: false
+ container_command:
+ description:
+ - Run a command within a container.
+ required: false
+ lxc_path:
+ description:
+ - Place the container under the given path.
+ required: false
+ container_log:
+ choices:
+ - true
+ - false
+ description:
+ - Enable a container log for host actions to the container.
+ default: false
+ container_log_level:
+ choices:
+ - INFO
+ - ERROR
+ - DEBUG
+ description:
+ - Set the log level for a container where *container_log* was set.
+ required: false
+ default: INFO
+ clone_name:
+ version_added: "2.0"
+ description:
+ - Name of the new cloned server. This is only used when state is
+ clone.
+ required: false
+ default: false
+ clone_snapshot:
+ version_added: "2.0"
+ required: false
+ choices:
+ - true
+ - false
+ description:
+ - Create a snapshot of the container when cloning. This is not supported
+ by all container storage backends. Enabling this may fail if the
+ backing store does not support snapshots.
+ default: false
+ archive:
+ choices:
+ - true
+ - false
+ description:
+ - Create an archive of a container. This will create a tarball of the
+ running container.
+ default: false
+ archive_path:
+ description:
+ - Path to save the archived container. If the path does not exist
+ the archive method will attempt to create it.
+ default: null
+ archive_compression:
+ choices:
+ - gzip
+ - bzip2
+ - none
+ description:
+ - Type of compression to use when creating an archive of a running
+ container.
+ default: gzip
+ state:
+ choices:
+ - started
+ - stopped
+ - restarted
+ - absent
+ - frozen
+ description:
+ - Define the state of a container. If you clone a container using
+ `clone_name`, the newly cloned container is created in a stopped state.
+ The running container will be stopped while the clone operation is
+ happening, and upon completion of the clone the original container
+ state will be restored.
+ required: false
+ default: started
+ container_config:
+ description:
+ - list of 'key=value' options to use when configuring a container.
+ required: false
+requirements:
+ - 'lxc >= 1.0 # OS package'
+ - 'python >= 2.6 # OS Package'
+ - 'lxc-python2 >= 0.1 # PIP Package from https://github.com/lxc/python2-lxc'
+notes:
+ - Containers must have a unique name. If you attempt to create a container
+ with a name that already exists in the user's namespace the module will
+ simply return as "unchanged".
+ - The "container_command" can be used with any state except "absent". If
+ used with state "stopped" the container will be "started", the command
+ executed, and then the container "stopped" again. Likewise if the state
+ is "stopped" and the container does not exist it will be first created,
+ "started", the command executed, and then "stopped". If you use a "|"
+ in the variable you can use common script formatting within the variable
+ itself. The "container_command" option will always execute as BASH.
+ When using "container_command" a log file is created in the /tmp/ directory
+ which contains both stdout and stderr of any command executed.
+ - If "archive" is **true** the system will attempt to create a compressed
+ tarball of the running container. The "archive" option supports LVM backed
+ containers and will create a snapshot of the running container when
+ creating the archive.
+ - If your distro does not have a package for "python2-lxc", which is a
+ requirement for this module, it can be installed from source at
+ "https://github.com/lxc/python2-lxc" or installed via pip using the package
+ name lxc-python2.
+"""
+
+EXAMPLES = """
+- name: Create a started container
+ lxc_container:
+ name: test-container-started
+ container_log: true
+ template: ubuntu
+ state: started
+ template_options: --release trusty
+
+- name: Create a stopped container
+ lxc_container:
+ name: test-container-stopped
+ container_log: true
+ template: ubuntu
+ state: stopped
+ template_options: --release trusty
+
+- name: Create a frozen container
+ lxc_container:
+ name: test-container-frozen
+ container_log: true
+ template: ubuntu
+ state: frozen
+ template_options: --release trusty
+ container_command: |
+ echo 'hello world.' | tee /opt/started-frozen
+
+# Create a filesystem-backed container, configure it, archive it, and start it.
+- name: Create filesystem container
+ lxc_container:
+ name: test-container-config
+ backing_store: dir
+ container_log: true
+ template: ubuntu
+ state: started
+ archive: true
+ archive_compression: none
+ container_config:
+ - "lxc.aa_profile=unconfined"
+ - "lxc.cgroup.devices.allow=a *:* rmw"
+ template_options: --release trusty
+
+# Create an lvm container, run a complex command in it, add additional
+# configuration to it, create an archive of it, and finally leave the container
+# in a frozen state. The container archive will be compressed using bzip2
+- name: Create a frozen lvm container
+ lxc_container:
+ name: test-container-lvm
+ container_log: true
+ template: ubuntu
+ state: frozen
+ backing_store: lvm
+ template_options: --release trusty
+ container_command: |
+ apt-get update
+ apt-get install -y vim lxc-dev
+ echo 'hello world.' | tee /opt/started
+ if [[ -f "/opt/started" ]]; then
+ echo 'hello world.' | tee /opt/found-started
+ fi
+ container_config:
+ - "lxc.aa_profile=unconfined"
+ - "lxc.cgroup.devices.allow=a *:* rmw"
+ archive: true
+ archive_compression: bzip2
+ register: lvm_container_info
+
+- name: Debug info on container "test-container-lvm"
+ debug: var=lvm_container_info
+
+- name: Run a command in a container and ensure it is in a "stopped" state.
+ lxc_container:
+ name: test-container-started
+ state: stopped
+ container_command: |
+ echo 'hello world.' | tee /opt/stopped
+
+- name: Run a command in a container and ensure it is in a "frozen" state.
+ lxc_container:
+ name: test-container-stopped
+ state: frozen
+ container_command: |
+ echo 'hello world.' | tee /opt/frozen
+
+- name: Start a container
+ lxc_container:
+ name: test-container-stopped
+ state: started
+
+- name: Run a command in a container and then restart it
+ lxc_container:
+ name: test-container-started
+ state: restarted
+ container_command: |
+ echo 'hello world.' | tee /opt/restarted
+
+- name: Run a complex command within a "running" container
+ lxc_container:
+ name: test-container-started
+ container_command: |
+ apt-get update
+ apt-get install -y curl wget vim apache2
+ echo 'hello world.' | tee /opt/started
+ if [[ -f "/opt/started" ]]; then
+ echo 'hello world.' | tee /opt/found-started
+ fi
+
+# Create an archive of an existing container, save the archive to a defined
+# path and then destroy it.
+- name: Archive container
+ lxc_container:
+ name: test-container-started
+ state: absent
+ archive: true
+ archive_path: /opt/archives
+
+# Create a container using overlayfs, create an archive of it, create a
+# snapshot clone of the container and finally leave the container
+# in a frozen state. The container archive will be compressed using gzip.
+- name: Create an overlayfs container archive and clone it
+ lxc_container:
+ name: test-container-overlayfs
+ container_log: true
+ template: ubuntu
+ state: started
+ backing_store: overlayfs
+ template_options: --release trusty
+ clone_snapshot: true
+ clone_name: test-container-overlayfs-clone-snapshot
+ archive: true
+ archive_compression: gzip
+ register: clone_container_info
+
+- name: Debug info on container "test-container-overlayfs"
+ debug: var=clone_container_info
+
+- name: Clone a container using snapshot
+ lxc_container:
+ name: test-container-overlayfs-clone-snapshot
+ backing_store: overlayfs
+ clone_name: test-container-overlayfs-clone-snapshot2
+ clone_snapshot: true
+
+- name: Create a new container and clone it
+ lxc_container:
+ name: test-container-new-archive
+ backing_store: dir
+ clone_name: test-container-new-archive-clone
+
+- name: Archive and clone a container then destroy it
+ lxc_container:
+ name: test-container-new-archive
+ state: absent
+ clone_name: test-container-new-archive-destroyed-clone
+ archive: true
+ archive_compression: gzip
+
+- name: Start a cloned container.
+ lxc_container:
+ name: test-container-new-archive-destroyed-clone
+ state: started
+
+- name: Destroy a container
+ lxc_container:
+ name: "{{ item }}"
+ state: absent
+ with_items:
+ - test-container-stopped
+ - test-container-started
+ - test-container-frozen
+ - test-container-lvm
+ - test-container-config
+ - test-container-overlayfs
+ - test-container-overlayfs-clone
+ - test-container-overlayfs-clone-snapshot
+ - test-container-overlayfs-clone-snapshot2
+ - test-container-new-archive
+ - test-container-new-archive-clone
+ - test-container-new-archive-destroyed-clone
+"""
+
+RETURN="""
+lxc_container:
+ description: container information
+ returned: success
+ type: dict
+ contains:
+ name:
+ description: name of the lxc container
+ returned: success
+ type: string
+ sample: test_host
+ init_pid:
+ description: pid of the lxc init process
+ returned: success
+ type: int
+ sample: 19786
+ interfaces:
+ description: list of the container's network interfaces
+ returned: success
+ type: list
+ sample: [ "eth0", "lo" ]
+ ips:
+ description: list of ips
+ returned: success
+ type: list
+ sample: [ "10.0.3.3" ]
+ state:
+ description: resulting state of the container
+ returned: success
+ type: string
+ sample: "running"
+ archive:
+ description: path of the created archive
+ returned: success, when archive is true
+ type: string
+ sample: "/tmp/test-container-config.tar"
+ clone:
+ description: if the container was cloned
+ returned: success, when clone_name is specified
+ type: boolean
+ sample: True
+"""
+
+import os
+import re
+import shutil
+import tempfile
+import time
+
+try:
+ import lxc
+except ImportError:
+ HAS_LXC = False
+else:
+ HAS_LXC = True
+
+
+# LXC_COMPRESSION_MAP is a map of available compression types when creating
+# an archive of a container.
+LXC_COMPRESSION_MAP = {
+ 'gzip': {
+ 'extension': 'tar.gz',
+ 'argument': '-czf'
+ },
+ 'bzip2': {
+ 'extension': 'tar.bz2',
+ 'argument': '-cjf'
+ },
+ 'none': {
+ 'extension': 'tar',
+ 'argument': '-cf'
+ }
+}
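+
+# For illustration: with archive_compression=gzip and a hypothetical
+# archive_path of /opt/archives, _create_tar() below ends up running
+# roughly:
+#
+#   tar --directory=/tmp/tmpXXXXXX/mycontainer -czf \
+#       /opt/archives/mycontainer.tar.gz .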
+
+
+# LXC_COMMAND_MAP is a map of variables that are available to a method based
+# on the state the container is in.
+LXC_COMMAND_MAP = {
+ 'create': {
+ 'variables': {
+ 'config': '--config',
+ 'template': '--template',
+ 'backing_store': '--bdev',
+ 'lxc_path': '--lxcpath',
+ 'lv_name': '--lvname',
+ 'vg_name': '--vgname',
+ 'thinpool': '--thinpool',
+ 'fs_type': '--fstype',
+ 'fs_size': '--fssize',
+ 'directory': '--dir',
+ 'zfs_root': '--zfsroot'
+ }
+ },
+ 'clone': {
+ 'variables': {
+ 'backing_store': '--backingstore',
+ 'lxc_path': '--lxcpath',
+ 'fs_size': '--fssize',
+ 'name': '--orig',
+ 'clone_name': '--new'
+ }
+ }
+}
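+
+# For illustration: given hypothetical module params such as
+# {'template': 'ubuntu', 'backing_store': 'lvm', 'fs_size': '5G'},
+# _get_vars() resolves the 'create' map above to CLI flags, roughly
+# {'--template': 'ubuntu', '--bdev': 'lvm', '--fssize': '5G', ...}
+# (plus any defaulted options), and _add_variables() appends each pair
+# to the lxc-create command line as '--template ubuntu', '--bdev lvm',
+# '--fssize 5G'.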
+
+
+# LXC_BACKING_STORE is a map of available storage backends and options that
+# are incompatible with the given storage backend.
+LXC_BACKING_STORE = {
+ 'dir': [
+ 'lv_name', 'vg_name', 'fs_type', 'fs_size', 'thinpool'
+ ],
+ 'lvm': [
+ 'zfs_root'
+ ],
+ 'btrfs': [
+ 'lv_name', 'vg_name', 'thinpool', 'zfs_root', 'fs_type', 'fs_size'
+ ],
+ 'loop': [
+ 'lv_name', 'vg_name', 'thinpool', 'zfs_root'
+ ],
+ 'overlayfs': [
+ 'lv_name', 'vg_name', 'fs_type', 'fs_size', 'thinpool', 'zfs_root'
+ ],
+ 'zfs': [
+ 'lv_name', 'vg_name', 'fs_type', 'fs_size', 'thinpool'
+ ]
+}
+
+
+# LXC_LOGGING_LEVELS is a map of available log levels
+LXC_LOGGING_LEVELS = {
+ 'INFO': ['info', 'INFO', 'Info'],
+ 'ERROR': ['error', 'ERROR', 'Error'],
+ 'DEBUG': ['debug', 'DEBUG', 'Debug']
+}
+
+
+# LXC_ANSIBLE_STATES is a map of states that contain values of methods used
+# when a particular state is evoked.
+LXC_ANSIBLE_STATES = {
+ 'started': '_started',
+ 'stopped': '_stopped',
+ 'restarted': '_restarted',
+ 'absent': '_destroyed',
+ 'frozen': '_frozen'
+}
+
+
+# This is used to attach to a running container and execute commands from
+# within the container on the host. This will provide local access to a
+# container without using SSH. The template will attempt to work within the
+# home directory of the user that was attached to the container and source
+# that users environment variables by default.
+ATTACH_TEMPLATE = """#!/usr/bin/env bash
+pushd "$(getent passwd $(whoami)|cut -f6 -d':')"
+ if [[ -f ".bashrc" ]];then
+ source .bashrc
+ fi
+popd
+
+# User defined command
+%(container_command)s
+"""
+
+
+def create_script(command):
+ """Write out a script onto a target.
+
+ This method should be backward compatible with Python 2.4+ when executing
+ from within the container.
+
+ :param command: command to run, this can be a script and can use spacing
+ with newlines as separation.
+ :type command: ``str``
+ """
+
+ import os
+ import os.path as path
+ import subprocess
+ import tempfile
+
+ (fd, script_file) = tempfile.mkstemp(prefix='lxc-attach-script')
+ f = os.fdopen(fd, 'wb')
+ try:
+ f.write(ATTACH_TEMPLATE % {'container_command': command})
+ f.flush()
+ finally:
+ f.close()
+
+ # Ensure the script is executable.
+ os.chmod(script_file, int('0700',8))
+
+ # Output log file.
+ stdout_file = os.fdopen(tempfile.mkstemp(prefix='lxc-attach-script-log')[0], 'ab')
+
+ # Error log file.
+ stderr_file = os.fdopen(tempfile.mkstemp(prefix='lxc-attach-script-err')[0], 'ab')
+
+ # Execute the script command.
+ try:
+ subprocess.Popen(
+ [script_file],
+ stdout=stdout_file,
+ stderr=stderr_file
+ ).communicate()
+ finally:
+ # Close the log files.
+ stderr_file.close()
+ stdout_file.close()
+
+ # Remove the script file upon completion of execution.
+ os.remove(script_file)
+
+
+class LxcContainerManagement(object):
+ def __init__(self, module):
+ """Management of LXC containers via Ansible.
+
+ :param module: Processed Ansible Module.
+ :type module: ``object``
+ """
+ self.module = module
+ self.state = self.module.params.get('state', None)
+ self.state_change = False
+ self.lxc_vg = None
+ self.lxc_path = self.module.params.get('lxc_path', None)
+ self.container_name = self.module.params['name']
+ self.container = self.get_container_bind()
+ self.archive_info = None
+ self.clone_info = None
+
+ def get_container_bind(self):
+ return lxc.Container(name=self.container_name)
+
+ @staticmethod
+ def _roundup(num):
+ """Return a rounded floating point number.
+
+ :param num: Number to round up.
+ :type: ``float``
+ :returns: Rounded up number.
+ :rtype: ``int``
+ """
+ num, part = str(num).split('.')
+ num = int(num)
+ if int(part) != 0:
+ num += 1
+ return num
+
+ @staticmethod
+ def _container_exists(container_name, lxc_path=None):
+ """Check if a container exists.
+
+ :param container_name: Name of the container.
+ :type: ``str``
+ :returns: True or False if the container is found.
+ :rtype: ``bool``
+ """
+ return container_name in lxc.list_containers(config_path=lxc_path)
+
+ @staticmethod
+ def _add_variables(variables_dict, build_command):
+ """Return a command list with all found options.
+
+ :param variables_dict: Pre-parsed optional variables used from a
+ seed command.
+ :type variables_dict: ``dict``
+ :param build_command: Command to run.
+ :type build_command: ``list``
+ :returns: list of command options.
+ :rtype: ``list``
+ """
+
+ for key, value in variables_dict.items():
+ build_command.append(
+ '%s %s' % (key, value)
+ )
+ return build_command
+
+ def _get_vars(self, variables):
+ """Return a dict of all variables as found within the module.
+
+ :param variables: Hash of all variables to find.
+ :type variables: ``dict``
+ """
+
+ # Remove incompatible storage backend options.
+ variables = variables.copy()
+ for v in LXC_BACKING_STORE[self.module.params['backing_store']]:
+ variables.pop(v, None)
+
+ return_dict = dict()
+ false_values = [None, ''] + BOOLEANS_FALSE
+ for k, v in variables.items():
+ _var = self.module.params.get(k)
+ if _var not in false_values:
+ return_dict[v] = _var
+ return return_dict
+
+ def _run_command(self, build_command, unsafe_shell=False):
+ """Return information from running an Ansible Command.
+
+ This will squash the build command list into a string and then
+ execute the command via Ansible. The output is returned to the method.
+ This output is returned as `return_code`, `stdout`, `stderr`.
+
+ :param build_command: Used for the command and all options.
+ :type build_command: ``list``
+ :param unsafe_shell: Enable or Disable unsafe shell commands.
+ :type unsafe_shell: ``bool``
+ """
+
+ return self.module.run_command(
+ ' '.join(build_command),
+ use_unsafe_shell=unsafe_shell
+ )
+
+ def _config(self):
+ """Configure an LXC container.
+
+ Write new configuration values to the lxc config file. This will
+ stop the container if it is running, write the new options, and then
+ restart the container upon completion.
+ """
+
+ _container_config = self.module.params.get('container_config')
+ if not _container_config:
+ return False
+
+ container_config_file = self.container.config_file_name
+ with open(container_config_file, 'rb') as f:
+ container_config = f.readlines()
+
+ # Note: ast.literal_eval is used because AnsibleModule does not provide
+ # adequate dictionary parsing.
+ # Issue: https://github.com/ansible/ansible/issues/7679
+ # TODO(cloudnull) adjust import when issue has been resolved.
+ import ast
+ options_dict = ast.literal_eval(_container_config)
+ parsed_options = [i.split('=', 1) for i in options_dict]
+
+ config_change = False
+ for key, value in parsed_options:
+ key = key.strip()
+ value = value.strip()
+ new_entry = '%s = %s\n' % (key, value)
+ keyre = re.compile(r'%s(\s+)?=' % key)
+ for option_line in container_config:
+ # Look for key in config
+ if keyre.match(option_line):
+ _, _value = option_line.split('=', 1)
+ config_value = ' '.join(_value.split())
+ line_index = container_config.index(option_line)
+ # If the sanitized values don't match replace them
+ if value != config_value:
+ line_index += 1
+ if new_entry not in container_config:
+ config_change = True
+ container_config.insert(line_index, new_entry)
+ # Break the flow as values are written or not at this point
+ break
+ else:
+ config_change = True
+ container_config.append(new_entry)
+
+ # If the config changed restart the container.
+ if config_change:
+ container_state = self._get_state()
+ if container_state != 'stopped':
+ self.container.stop()
+
+ with open(container_config_file, 'wb') as f:
+ f.writelines(container_config)
+
+ self.state_change = True
+ if container_state == 'running':
+ self._container_startup()
+ elif container_state == 'frozen':
+ self._container_startup()
+ self.container.freeze()
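+
+ # For illustration: with a hypothetical container_config value of
+ # "['lxc.aa_profile=unconfined']", the method above parses each
+ # 'key=value' entry and ensures the container config file contains a
+ # matching 'lxc.aa_profile = unconfined' line, restarting the
+ # container only if the file actually changed.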
+
+ def _container_create_clone(self):
+ """Clone a new LXC container from an existing container.
+
+ This method will clone an existing container to a new container using
+ the `clone_name` variable as the new container name. The method will
+ create a container if the container `name` does not exist.
+
+ Note that cloning a container will ensure that the original container
+ is "stopped" before the clone can be done. Because this operation can
+ require a state change the method will return the original container
+ to its prior state upon completion of the clone.
+
+ Once the clone is complete the new container will be left in a stopped
+ state.
+ """
+
+ # Ensure that the state of the original container is stopped
+ container_state = self._get_state()
+ if container_state != 'stopped':
+ self.state_change = True
+ self.container.stop()
+
+ build_command = [
+ self.module.get_bin_path('lxc-clone', True),
+ ]
+
+ build_command = self._add_variables(
+ variables_dict=self._get_vars(
+ variables=LXC_COMMAND_MAP['clone']['variables']
+ ),
+ build_command=build_command
+ )
+
+ # Use a snapshot when cloning if requested.
+ if self.module.params.get('clone_snapshot') in BOOLEANS_TRUE:
+ build_command.append('--snapshot')
+ # If the backing store is overlayfs, force the use of a snapshot;
+ # if overlayfs is used and snapshot is unset the clone command will
+ # fail with an unsupported type.
+ elif self.module.params.get('backing_store') == 'overlayfs':
+ build_command.append('--snapshot')
+
+ rc, return_data, err = self._run_command(build_command)
+ if rc != 0:
+ message = "Failed executing lxc-clone."
+ self.failure(
+ err=err, rc=rc, msg=message, command=' '.join(
+ build_command
+ )
+ )
+ else:
+ self.state_change = True
+ # Restore the original state of the origin container if it was
+ # not in a stopped state.
+ if container_state == 'running':
+ self.container.start()
+ elif container_state == 'frozen':
+ self.container.start()
+ self.container.freeze()
+
+ return True
+
+ def _create(self):
+ """Create a new LXC container.
+
+ This method will build and execute a shell command to build the
+ container. It would have been nice to simply use the lxc python library;
+ however, at the time this was written the python library, in both py2
+ and py3, didn't support some of the more advanced container create
+ processes. These missing processes mainly revolve around backing
+ LXC containers with block devices.
+ """
+
+ build_command = [
+ self.module.get_bin_path('lxc-create', True),
+ '--name %s' % self.container_name,
+ '--quiet'
+ ]
+
+ build_command = self._add_variables(
+ variables_dict=self._get_vars(
+ variables=LXC_COMMAND_MAP['create']['variables']
+ ),
+ build_command=build_command
+ )
+
+ # Load logging for the instance when creating it.
+ if self.module.params.get('container_log') in BOOLEANS_TRUE:
+ # Set the log path to /var/log/lxc if running as root; otherwise
+ # use the home directory of the executing user.
+ try:
+ if os.getuid() != 0:
+ log_path = os.getenv('HOME')
+ else:
+ if not os.path.isdir('/var/log/lxc/'):
+ os.makedirs('/var/log/lxc/')
+ log_path = '/var/log/lxc/'
+ except OSError:
+ log_path = os.getenv('HOME')
+
+ build_command.extend([
+ '--logfile %s' % os.path.join(
+ log_path, 'lxc-%s.log' % self.container_name
+ ),
+ '--logpriority %s' % self.module.params.get(
+ 'container_log_level'
+ ).upper()
+ ])
+
+ # Add the template commands to the end of the command if there are any
+ template_options = self.module.params.get('template_options', None)
+ if template_options:
+ build_command.append('-- %s' % template_options)
+
+ rc, return_data, err = self._run_command(build_command)
+ if rc != 0:
+ message = "Failed executing lxc-create."
+ self.failure(
+ err=err, rc=rc, msg=message, command=' '.join(build_command)
+ )
+ else:
+ self.state_change = True
+
+ def _container_data(self):
+ """Returns a dict of container information.
+
+ :returns: container data
+ :rtype: ``dict``
+ """
+
+ return {
+ 'interfaces': self.container.get_interfaces(),
+ 'ips': self.container.get_ips(),
+ 'state': self._get_state(),
+ 'init_pid': int(self.container.init_pid),
+ 'name': self.container_name,
+ }
+
+ def _unfreeze(self):
+ """Unfreeze a container.
+
+ :returns: True or False based on if the container was unfrozen.
+ :rtype: ``bool``
+ """
+
+ unfreeze = self.container.unfreeze()
+ if unfreeze:
+ self.state_change = True
+ return unfreeze
+
+ def _get_state(self):
+ """Return the state of a container.
+
+ If the container is not found the state returned is "absent"
+
+ :returns: state of a container as a lower case string.
+ :rtype: ``str``
+ """
+
+ if self._container_exists(container_name=self.container_name, lxc_path=self.lxc_path):
+ return str(self.container.state).lower()
+ else:
+ return 'absent'
+
+ def _execute_command(self):
+ """Execute a shell command."""
+
+ container_command = self.module.params.get('container_command')
+ if container_command:
+ container_state = self._get_state()
+ if container_state == 'frozen':
+ self._unfreeze()
+ elif container_state == 'stopped':
+ self._container_startup()
+
+ self.container.attach_wait(create_script, container_command)
+ self.state_change = True
+
+ def _container_startup(self, timeout=60):
+ """Ensure a container is started.
+
+ :param timeout: Time before the start operation is abandoned.
+ :type timeout: ``int``
+ """
+
+ self.container = self.get_container_bind()
+ for _ in xrange(timeout):
+ if self._get_state() != 'running':
+ self.container.start()
+ self.state_change = True
+ # post startup sleep for 1 second.
+ time.sleep(1)
+ else:
+ return True
+ else:
+ self.failure(
+ lxc_container=self._container_data(),
+ error='Failed to start container'
+ ' [ %s ]' % self.container_name,
+ rc=1,
+ msg='The container [ %s ] failed to start. Check that lxc is'
+ ' available and that the container is in a functional'
+ ' state.' % self.container_name
+ )
+
+ def _check_archive(self):
+ """Create a compressed archive of a container.
+
+ This will store the archive information as self.archive_info.
+ """
+
+ if self.module.params.get('archive') in BOOLEANS_TRUE:
+ self.archive_info = {
+ 'archive': self._container_create_tar()
+ }
+
+ def _check_clone(self):
+ """Create a compressed archive of a container.
+
+ This will store archive_info in as self.archive_info
+ """
+
+ clone_name = self.module.params.get('clone_name')
+ if clone_name:
+ if not self._container_exists(container_name=clone_name, lxc_path=self.lxc_path):
+ self.clone_info = {
+ 'cloned': self._container_create_clone()
+ }
+ else:
+ self.clone_info = {
+ 'cloned': False
+ }
+
+ def _destroyed(self, timeout=60):
+ """Ensure a container is destroyed.
+
+ :param timeout: Time before the destroy operation is abandoned.
+ :type timeout: ``int``
+ """
+
+ for _ in xrange(timeout):
+ if not self._container_exists(container_name=self.container_name, lxc_path=self.lxc_path):
+ break
+
+ # Check if the container needs to have an archive created.
+ self._check_archive()
+
+ # Check if the container is to be cloned
+ self._check_clone()
+
+ if self._get_state() != 'stopped':
+ self.state_change = True
+ self.container.stop()
+
+ if self.container.destroy():
+ self.state_change = True
+
+ # post destroy attempt sleep for 1 second.
+ time.sleep(1)
+ else:
+ self.failure(
+ lxc_container=self._container_data(),
+ error='Failed to destroy container'
+ ' [ %s ]' % self.container_name,
+ rc=1,
+ msg='The container [ %s ] failed to be destroyed. Check'
+ ' that lxc is available and that the container is in a'
+ ' functional state.' % self.container_name
+ )
+
+ def _frozen(self, count=0):
+ """Ensure a container is frozen.
+
+ If the container does not exist the container will be created.
+
+ :param count: number of times this command has been called by itself.
+ :type count: ``int``
+ """
+
+ self.check_count(count=count, method='frozen')
+ if self._container_exists(container_name=self.container_name, lxc_path=self.lxc_path):
+ self._execute_command()
+
+ # Perform any configuration updates
+ self._config()
+
+ container_state = self._get_state()
+ if container_state == 'frozen':
+ pass
+ elif container_state == 'running':
+ self.container.freeze()
+ self.state_change = True
+ else:
+ self._container_startup()
+ self.container.freeze()
+ self.state_change = True
+
+ # Check if the container needs to have an archive created.
+ self._check_archive()
+
+ # Check if the container is to be cloned
+ self._check_clone()
+ else:
+ self._create()
+ count += 1
+ self._frozen(count)
+
+ def _restarted(self, count=0):
+ """Ensure a container is restarted.
+
+ If the container does not exist the container will be created.
+
+ :param count: number of times this command has been called by itself.
+ :type count: ``int``
+ """
+
+ self.check_count(count=count, method='restart')
+ if self._container_exists(container_name=self.container_name, lxc_path=self.lxc_path):
+ self._execute_command()
+
+ # Perform any configuration updates
+ self._config()
+
+ if self._get_state() != 'stopped':
+ self.container.stop()
+ self.state_change = True
+
+ # Run container startup
+ self._container_startup()
+
+ # Check if the container needs to have an archive created.
+ self._check_archive()
+
+ # Check if the container is to be cloned
+ self._check_clone()
+ else:
+ self._create()
+ count += 1
+ self._restarted(count)
+
+ def _stopped(self, count=0):
+ """Ensure a container is stopped.
+
+ If the container does not exist the container will be created.
+
+ :param count: number of times this command has been called by itself.
+ :type count: ``int``
+ """
+
+ self.check_count(count=count, method='stop')
+ if self._container_exists(container_name=self.container_name, lxc_path=self.lxc_path):
+ self._execute_command()
+
+ # Perform any configuration updates
+ self._config()
+
+ if self._get_state() != 'stopped':
+ self.container.stop()
+ self.state_change = True
+
+ # Check if the container needs to have an archive created.
+ self._check_archive()
+
+ # Check if the container is to be cloned
+ self._check_clone()
+ else:
+ self._create()
+ count += 1
+ self._stopped(count)
+
+ def _started(self, count=0):
+ """Ensure a container is started.
+
+ If the container does not exist the container will be created.
+
+ :param count: number of times this command has been called by itself.
+ :type count: ``int``
+ """
+
+ self.check_count(count=count, method='start')
+ if self._container_exists(container_name=self.container_name, lxc_path=self.lxc_path):
+ container_state = self._get_state()
+ if container_state == 'running':
+ pass
+ elif container_state == 'frozen':
+ self._unfreeze()
+ elif not self._container_startup():
+ self.failure(
+ lxc_container=self._container_data(),
+ error='Failed to start container'
+ ' [ %s ]' % self.container_name,
+ rc=1,
+ msg='The container [ %s ] failed to start. Check that lxc is'
+ ' available and that the container is in a functional'
+ ' state.' % self.container_name
+ )
+
+ # Return data
+ self._execute_command()
+
+ # Perform any configuration updates
+ self._config()
+
+ # Check if the container needs to have an archive created.
+ self._check_archive()
+
+ # Check if the container is to be cloned
+ self._check_clone()
+ else:
+ self._create()
+ count += 1
+ self._started(count)
+
+ def _get_lxc_vg(self):
+ """Return the name of the Volume Group used in LXC."""
+
+ build_command = [
+ self.module.get_bin_path('lxc-config', True),
+ "lxc.bdev.lvm.vg"
+ ]
+ rc, vg, err = self._run_command(build_command)
+ if rc != 0:
+ self.failure(
+ err=err,
+ rc=rc,
+ msg='Failed to read LVM VG from LXC config',
+ command=' '.join(build_command)
+ )
+ else:
+ return str(vg.strip())
+
+ def _lvm_lv_list(self):
+ """Return a list of all lv in a current vg."""
+
+ vg = self._get_lxc_vg()
+ build_command = [
+ self.module.get_bin_path('lvs', True)
+ ]
+ rc, stdout, err = self._run_command(build_command)
+ if rc != 0:
+ self.failure(
+ err=err,
+ rc=rc,
+ msg='Failed to get list of LVs',
+ command=' '.join(build_command)
+ )
+
+ all_lvms = [i.split() for i in stdout.splitlines()][1:]
+ return [lv_entry[0] for lv_entry in all_lvms if lv_entry[1] == vg]
+
+ def _get_vg_free_pe(self, vg_name):
+ """Return the available size of a given VG.
+
+ :param vg_name: Name of the volume group.
+ :type vg_name: ``str``
+ :returns: free size and unit of the VG
+ :type: ``tuple``
+ """
+
+ build_command = [
+ self.module.get_bin_path('vgdisplay', True),
+ vg_name,
+ '--units',
+ 'g'
+ ]
+ rc, stdout, err = self._run_command(build_command)
+ if rc != 0:
+ self.failure(
+ err=err,
+ rc=rc,
+ msg='failed to read vg %s' % vg_name,
+ command=' '.join(build_command)
+ )
+
+ vg_info = [i.strip() for i in stdout.splitlines()][1:]
+ free_pe = [i for i in vg_info if i.startswith('Free')]
+ _free_pe = free_pe[0].split()
+ return float(_free_pe[-2]), _free_pe[-1]
+
+ def _get_lv_size(self, lv_name):
+ """Return the available size of a given LV.
+
+ :param lv_name: Name of the logical volume.
+ :type lv_name: ``str``
+ :returns: size and measurement of an LV
+ :type: ``tuple``
+ """
+
+ vg = self._get_lxc_vg()
+ lv = os.path.join(vg, lv_name)
+ build_command = [
+ self.module.get_bin_path('lvdisplay', True),
+ lv,
+ '--units',
+ 'g'
+ ]
+ rc, stdout, err = self._run_command(build_command)
+ if rc != 0:
+ self.failure(
+ err=err,
+ rc=rc,
+ msg='failed to read lv %s' % lv,
+ command=' '.join(build_command)
+ )
+
+ lv_info = [i.strip() for i in stdout.splitlines()][1:]
+ _free_pe = [i for i in lv_info if i.startswith('LV Size')]
+ free_pe = _free_pe[0].split()
+ return self._roundup(float(free_pe[-2])), free_pe[-1]
+
+ def _lvm_snapshot_create(self, source_lv, snapshot_name,
+ snapshot_size_gb=5):
+ """Create an LVM snapshot.
+
+ :param source_lv: Name of lv to snapshot
+ :type source_lv: ``str``
+ :param snapshot_name: Name of lv snapshot
+ :type snapshot_name: ``str``
+ :param snapshot_size_gb: Size of snapshot to create
+ :type snapshot_size_gb: ``int``
+ """
+
+ vg = self._get_lxc_vg()
+ free_space, measurement = self._get_vg_free_pe(vg_name=vg)
+
+ if free_space < float(snapshot_size_gb):
+ message = (
+ 'Snapshot size [ %s ] is greater than the free space [ %s ] on volume group'
+ ' [ %s ]' % (snapshot_size_gb, free_space, vg)
+ )
+ self.failure(
+ error='Not enough space to create snapshot',
+ rc=2,
+ msg=message
+ )
+
+ # Create LVM Snapshot
+ build_command = [
+ self.module.get_bin_path('lvcreate', True),
+ "-n",
+ snapshot_name,
+ "-s",
+ os.path.join(vg, source_lv),
+ "-L%sg" % snapshot_size_gb
+ ]
+ rc, stdout, err = self._run_command(build_command)
+ if rc != 0:
+ self.failure(
+ err=err,
+ rc=rc,
+ msg='Failed to Create LVM snapshot %s/%s --> %s'
+ % (vg, source_lv, snapshot_name)
+ )
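+
+ # For illustration (hypothetical names): snapshotting the container LV
+ # 'mycontainer' in VG 'lxc' with a 5G snapshot runs roughly
+ #
+ # lvcreate -n mycontainer_lxc_snapshot -s lxc/mycontainer -L5g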
+
+ def _lvm_lv_mount(self, lv_name, mount_point):
+ """mount an lv.
+
+ :param lv_name: name of the logical volume to mount
+ :type lv_name: ``str``
+ :param mount_point: path on the file system that is mounted.
+ :type mount_point: ``str``
+ """
+
+ vg = self._get_lxc_vg()
+
+ build_command = [
+ self.module.get_bin_path('mount', True),
+ "/dev/%s/%s" % (vg, lv_name),
+ mount_point,
+ ]
+ rc, stdout, err = self._run_command(build_command)
+ if rc != 0:
+ self.failure(
+ err=err,
+ rc=rc,
+ msg='failed to mount LVM LV %s/%s to %s'
+ % (vg, lv_name, mount_point)
+ )
+
+ def _create_tar(self, source_dir):
+ """Create an archive of a given ``source_dir`` to ``output_path``.
+
+ :param source_dir: Path to the directory to be archived.
+ :type source_dir: ``str``
+ """
+
+ old_umask = os.umask(int('0077',8))
+
+ archive_path = self.module.params.get('archive_path')
+ if not os.path.isdir(archive_path):
+ os.makedirs(archive_path)
+
+ archive_compression = self.module.params.get('archive_compression')
+ compression_type = LXC_COMPRESSION_MAP[archive_compression]
+
+ # Build the archive file name: archive path + container name + extension.
+ archive_name = '%s.%s' % (
+ os.path.join(
+ archive_path,
+ self.container_name
+ ),
+ compression_type['extension']
+ )
+
+ build_command = [
+ self.module.get_bin_path('tar', True),
+ '--directory=%s' % os.path.realpath(
+ os.path.expanduser(source_dir)
+ ),
+ compression_type['argument'],
+ archive_name,
+ '.'
+ ]
+
+ rc, stdout, err = self._run_command(
+ build_command=build_command,
+ unsafe_shell=True
+ )
+
+ os.umask(old_umask)
+
+ if rc != 0:
+ self.failure(
+ err=err,
+ rc=rc,
+ msg='failed to create tar archive',
+ command=' '.join(build_command)
+ )
+
+ return archive_name
+
+ def _lvm_lv_remove(self, lv_name):
+ """Remove an LV.
+
+ :param lv_name: The name of the logical volume
+ :type lv_name: ``str``
+ """
+
+ vg = self._get_lxc_vg()
+ build_command = [
+ self.module.get_bin_path('lvremove', True),
+ "-f",
+ "%s/%s" % (vg, lv_name),
+ ]
+ rc, stdout, err = self._run_command(build_command)
+ if rc != 0:
+ self.failure(
+ err=err,
+ rc=rc,
+ msg='Failed to remove LVM LV %s/%s' % (vg, lv_name),
+ command=' '.join(build_command)
+ )
+
+ def _rsync_data(self, container_path, temp_dir):
+ """Sync the container directory to the temp directory.
+
+ :param container_path: path to the container rootfs
+ :type container_path: ``str``
+ :param temp_dir: path to the temporary local working directory
+ :type temp_dir: ``str``
+ """
+ # This loop is created to support overlayfs archives. This should
+ # squash all of the layers into a single archive.
+ fs_paths = container_path.split(':')
+ if 'overlayfs' in fs_paths:
+ fs_paths.pop(fs_paths.index('overlayfs'))
+
+ for fs_path in fs_paths:
+ # Set the path to the container data
+ fs_path = os.path.dirname(fs_path)
+
+ # Run the sync command
+ build_command = [
+ self.module.get_bin_path('rsync', True),
+ '-aHAX',
+ fs_path,
+ temp_dir
+ ]
+ rc, stdout, err = self._run_command(
+ build_command,
+ unsafe_shell=True
+ )
+ if rc != 0:
+ self.failure(
+ err=err,
+ rc=rc,
+ msg='failed to perform archive',
+ command=' '.join(build_command)
+ )
+
+ def _unmount(self, mount_point):
+ """Unmount a file system.
+
+ :param mount_point: path on the file system that is mounted.
+ :type mount_point: ``str``
+ """
+
+ build_command = [
+ self.module.get_bin_path('umount', True),
+ mount_point,
+ ]
+ rc, stdout, err = self._run_command(build_command)
+ if rc != 0:
+ self.failure(
+ err=err,
+ rc=rc,
+ msg='failed to unmount [ %s ]' % mount_point,
+ command=' '.join(build_command)
+ )
+
+ def _overlayfs_mount(self, lowerdir, upperdir, mount_point):
+ """mount an lv.
+
+ :param lowerdir: name/path of the lower directory
+ :type lowerdir: ``str``
+ :param upperdir: name/path of the upper directory
+ :type upperdir: ``str``
+ :param mount_point: path on the file system that is mounted.
+ :type mount_point: ``str``
+ """
+
+ build_command = [
+ self.module.get_bin_path('mount', True),
+ '-t overlayfs',
+ '-o lowerdir=%s,upperdir=%s' % (lowerdir, upperdir),
+ 'overlayfs',
+ mount_point,
+ ]
+ rc, stdout, err = self._run_command(build_command)
+ if rc != 0:
+ self.failure(
+ err=err,
+ rc=rc,
+ msg='failed to mount overlayfs:%s:%s to %s -- Command: %s'
+ % (lowerdir, upperdir, mount_point, build_command)
+ )
+
+ def _container_create_tar(self):
+ """Create a tar archive from an LXC container.
+
+ The process is as follows:
+ * Stop or Freeze the container
+ * Create temporary dir
+ * Copy container and config to temporary directory
+ * If LVM backed:
+ * Create LVM snapshot of LV backing the container
+ * Mount the snapshot to tmpdir/rootfs
+ * Restore the state of the container
+ * Create tar of tmpdir
+ * Clean up
+ """
+
+ # Create a temp dir
+ temp_dir = tempfile.mkdtemp()
+
+ # Set the name of the working dir, temp + container_name
+ work_dir = os.path.join(temp_dir, self.container_name)
+
+ # LXC container rootfs
+ lxc_rootfs = self.container.get_config_item('lxc.rootfs')
+
+ # Test if the containers rootfs is a block device
+ block_backed = lxc_rootfs.startswith(os.path.join(os.sep, 'dev'))
+
+ # Test if the container is using overlayfs
+ overlayfs_backed = lxc_rootfs.startswith('overlayfs')
+
+ mount_point = os.path.join(work_dir, 'rootfs')
+
+ # Set the snapshot name if needed
+ snapshot_name = '%s_lxc_snapshot' % self.container_name
+
+ container_state = self._get_state()
+ try:
+ # Ensure the original container is stopped or frozen
+ if container_state not in ['stopped', 'frozen']:
+ if container_state == 'running':
+ self.container.freeze()
+ else:
+ self.container.stop()
+
+ # Sync the container data from the container_path to work_dir
+ self._rsync_data(lxc_rootfs, temp_dir)
+
+ if block_backed:
+ if snapshot_name not in self._lvm_lv_list():
+ if not os.path.exists(mount_point):
+ os.makedirs(mount_point)
+
+ # Take snapshot
+ size, measurement = self._get_lv_size(
+ lv_name=self.container_name
+ )
+ self._lvm_snapshot_create(
+ source_lv=self.container_name,
+ snapshot_name=snapshot_name,
+ snapshot_size_gb=size
+ )
+
+ # Mount snapshot
+ self._lvm_lv_mount(
+ lv_name=snapshot_name,
+ mount_point=mount_point
+ )
+ else:
+ self.failure(
+ err='snapshot [ %s ] already exists' % snapshot_name,
+ rc=1,
+ msg='The snapshot [ %s ] already exists. Please clean'
+ ' up old snapshot of containers before continuing.'
+ % snapshot_name
+ )
+ elif overlayfs_backed:
+ lowerdir, upperdir = lxc_rootfs.split(':')[1:]
+ self._overlayfs_mount(
+ lowerdir=lowerdir,
+ upperdir=upperdir,
+ mount_point=mount_point
+ )
+
+ # Set the state as changed and set a new fact
+ self.state_change = True
+ return self._create_tar(source_dir=work_dir)
+ finally:
+ if block_backed or overlayfs_backed:
+ # unmount snapshot
+ self._unmount(mount_point)
+
+ if block_backed:
+ # Remove snapshot
+ self._lvm_lv_remove(snapshot_name)
+
+ # Restore original state of container
+ if container_state == 'running':
+ if self._get_state() == 'frozen':
+ self.container.unfreeze()
+ else:
+ self.container.start()
+
+ # Remove tmpdir
+ shutil.rmtree(temp_dir)
+
+ def check_count(self, count, method):
+ if count > 1:
+ self.failure(
+ error='Failed to %s container' % method,
+ rc=1,
+ msg='The container [ %s ] failed to %s. Check that lxc is'
+ ' available and that the container is in a functional'
+ ' state.' % (self.container_name, method)
+ )
+
+ def failure(self, **kwargs):
+ """Return a Failure when running an Ansible command.
+
+ :param error: ``str`` Error that occurred.
+ :param rc: ``int`` Return code while executing an Ansible command.
+ :param msg: ``str`` Message to report.
+ """
+
+ self.module.fail_json(**kwargs)
+
+ def run(self):
+ """Run the main method."""
+
+ action = getattr(self, LXC_ANSIBLE_STATES[self.state])
+ action()
+
+ outcome = self._container_data()
+ if self.archive_info:
+ outcome.update(self.archive_info)
+
+ if self.clone_info:
+ outcome.update(self.clone_info)
+
+ self.module.exit_json(
+ changed=self.state_change,
+ lxc_container=outcome
+ )
+
+
+def main():
+ """Ansible Main module."""
+
+ module = AnsibleModule(
+ argument_spec=dict(
+ name=dict(
+ type='str',
+ required=True
+ ),
+ template=dict(
+ type='str',
+ default='ubuntu'
+ ),
+ backing_store=dict(
+ type='str',
+ choices=LXC_BACKING_STORE.keys(),
+ default='dir'
+ ),
+ template_options=dict(
+ type='str'
+ ),
+ config=dict(
+ type='path',
+ ),
+ vg_name=dict(
+ type='str',
+ default='lxc'
+ ),
+ thinpool=dict(
+ type='str'
+ ),
+ fs_type=dict(
+ type='str',
+ default='ext4'
+ ),
+ fs_size=dict(
+ type='str',
+ default='5G'
+ ),
+ directory=dict(
+ type='path'
+ ),
+ zfs_root=dict(
+ type='str'
+ ),
+ lv_name=dict(
+ type='str'
+ ),
+ lxc_path=dict(
+ type='path'
+ ),
+ state=dict(
+ choices=LXC_ANSIBLE_STATES.keys(),
+ default='started'
+ ),
+ container_command=dict(
+ type='str'
+ ),
+ container_config=dict(
+ type='str'
+ ),
+ container_log=dict(
+ type='bool',
+ default=False
+ ),
+ container_log_level=dict(
+ choices=[n for i in LXC_LOGGING_LEVELS.values() for n in i],
+ default='INFO'
+ ),
+ clone_name=dict(
+ type='str',
+ required=False
+ ),
+ clone_snapshot=dict(
+ type='bool',
+ default=False
+ ),
+ archive=dict(
+ type='bool',
+ default=False
+ ),
+ archive_path=dict(
+ type='path',
+ ),
+ archive_compression=dict(
+ choices=LXC_COMPRESSION_MAP.keys(),
+ default='gzip'
+ )
+ ),
+ supports_check_mode=False,
+ required_if=[
+ ('archive', True, ['archive_path'])
+ ],
+ )
+
+ if not HAS_LXC:
+ module.fail_json(
+ msg='The `lxc` module is not importable. Check the requirements.'
+ )
+
+ lv_name = module.params.get('lv_name')
+ if not lv_name:
+ module.params['lv_name'] = module.params.get('name')
+
+ lxc_manage = LxcContainerManagement(module=module)
+ lxc_manage.run()
+
+
+# import module bits
+from ansible.module_utils.basic import *
+if __name__ == '__main__':
+ main()
diff --git a/lib/ansible/modules/extras/cloud/lxd/__init__.py b/lib/ansible/modules/extras/cloud/lxd/__init__.py
new file mode 100644
index 0000000000..e69de29bb2
--- /dev/null
+++ b/lib/ansible/modules/extras/cloud/lxd/__init__.py
diff --git a/lib/ansible/modules/extras/cloud/lxd/lxd_container.py b/lib/ansible/modules/extras/cloud/lxd/lxd_container.py
new file mode 100644
index 0000000000..a92cdd7ce7
--- /dev/null
+++ b/lib/ansible/modules/extras/cloud/lxd/lxd_container.py
@@ -0,0 +1,611 @@
+#!/usr/bin/python
+# -*- coding: utf-8 -*-
+
+# (c) 2016, Hiroaki Nakamura <hnakamur@gmail.com>
+#
+# This file is part of Ansible
+#
+# Ansible is free software: you can redistribute it and/or modify
+# it under the terms of the GNU General Public License as published by
+# the Free Software Foundation, either version 3 of the License, or
+# (at your option) any later version.
+#
+# Ansible is distributed in the hope that it will be useful,
+# but WITHOUT ANY WARRANTY; without even the implied warranty of
+# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+# GNU General Public License for more details.
+#
+# You should have received a copy of the GNU General Public License
+# along with Ansible. If not, see <http://www.gnu.org/licenses/>.
+
+
+DOCUMENTATION = '''
+---
+module: lxd_container
+short_description: Manage LXD Containers
+version_added: "2.2"
+description:
+ - Management of LXD containers
+author: "Hiroaki Nakamura (@hnakamur)"
+options:
+ name:
+ description:
+ - Name of a container.
+ required: true
+ architecture:
+ description:
+ - The architecture for the container (e.g. "x86_64" or "i686").
+ See U(https://github.com/lxc/lxd/blob/master/doc/rest-api.md#post-1)
+ required: false
+ config:
+ description:
+ - 'The config for the container (e.g. {"limits.cpu": "2"}).
+ See U(https://github.com/lxc/lxd/blob/master/doc/rest-api.md#post-1)'
+ - If the container already exists and its "config" value in the metadata
+ obtained from
+ GET /1.0/containers/<name>
+ U(https://github.com/lxc/lxd/blob/master/doc/rest-api.md#10containersname)
+ is different, then this module tries to apply the configurations.
+ - Keys starting with 'volatile.' are ignored for this comparison.
+ - Not all config values can be applied to an existing container;
+ you may need to delete and recreate the container.
+ required: false
+ devices:
+ description:
+ - 'The devices for the container
+ (e.g. { "rootfs": { "path": "/dev/kvm", "type": "unix-char" }).
+ See U(https://github.com/lxc/lxd/blob/master/doc/rest-api.md#post-1)'
+ required: false
+ ephemeral:
+ description:
+ - Whether or not the container is ephemeral (e.g. true or false).
+ See U(https://github.com/lxc/lxd/blob/master/doc/rest-api.md#post-1)
+ required: false
+ profiles:
+ description:
+ - 'The profiles to apply to the container (e.g. ["default"]).
+ See U(https://github.com/lxc/lxd/blob/master/doc/rest-api.md#post-1)'
+ required: false
+ source:
+ description:
+ - 'The source for the container
+ (e.g. { "type": "image",
+ "mode": "pull",
+ "server": "https://images.linuxcontainers.org",
+ "protocol": "lxd",
+ "alias": "ubuntu/xenial/amd64" }).
+ See U(https://github.com/lxc/lxd/blob/master/doc/rest-api.md#post-1)'
+ required: false
+ state:
+ choices:
+ - started
+ - stopped
+ - restarted
+ - absent
+ - frozen
+ description:
+ - Define the state of a container.
+ required: false
+ default: started
+ timeout:
+ description:
+ - A timeout for changing the state of the container.
+ - This is also used as a timeout for waiting until IPv4 addresses
+ are assigned to all network interfaces in the container after
+ starting or restarting.
+ required: false
+ default: 30
+ wait_for_ipv4_addresses:
+ description:
+ - If this is true, the M(lxd_container) module waits until IPv4 addresses
+ are assigned to all network interfaces in the container after
+ starting or restarting.
+ required: false
+ default: false
+ force_stop:
+ description:
+ - If this is true, the M(lxd_container) module forcibly stops the
+ container when stopping or restarting it.
+ required: false
+ default: false
+ url:
+ description:
+ - The unix domain socket path or the https URL for the LXD server.
+ required: false
+ default: unix:/var/lib/lxd/unix.socket
+ key_file:
+ description:
+ - The client certificate key file path.
+ required: false
+ default: '"{}/.config/lxc/client.key" .format(os.environ["HOME"])'
+ cert_file:
+ description:
+ - The client certificate file path.
+ required: false
+ default: '"{}/.config/lxc/client.crt" .format(os.environ["HOME"])'
+ trust_password:
+ description:
+ - The client trusted password.
+ - You need to set this password on the LXD server before
+ running this module using the following command.
+ lxc config set core.trust_password <some random password>
+ See U(https://www.stgraber.org/2016/04/18/lxd-api-direct-interaction/)
+ - If trust_password is set, this module sends a request for
+ authentication before sending any requests.
+ required: false
+notes:
+ - Containers must have a unique name. If you attempt to create a container
+ with a name that already exists in the user's namespace the module will
+ simply return as "unchanged".
+ - There are two ways you can run commands in containers: using the command
+ module or using the ansible lxd connection plugin bundled in Ansible >=
+ 2.1. The latter requires python to be installed in the container, which can
+ be done with the command module.
+ - You can copy a file from the host to the container
+ with the Ansible M(copy) and M(template) modules and the `lxd` connection plugin.
+ See the example below.
+ - You can copy a file from the created container to localhost
+ with `command=lxc file pull container_name/dir/filename filename`.
+ See the first example below.
+'''
+
+EXAMPLES = '''
+# An example of creating an Ubuntu container and installing python
+- hosts: localhost
+ connection: local
+ tasks:
+ - name: Create a started container
+ lxd_container:
+ name: mycontainer
+ state: started
+ source:
+ type: image
+ mode: pull
+ server: https://images.linuxcontainers.org
+ protocol: lxd
+ alias: ubuntu/xenial/amd64
+ profiles: ["default"]
+ wait_for_ipv4_addresses: true
+ timeout: 600
+
+ - name: check python is installed in container
+ delegate_to: mycontainer
+ raw: dpkg -s python
+ register: python_install_check
+ failed_when: python_install_check.rc not in [0, 1]
+ changed_when: false
+
+ - name: install python in container
+ delegate_to: mycontainer
+ raw: apt-get install -y python
+ when: python_install_check.rc == 1
+
+# An example for deleting a container
+- hosts: localhost
+ connection: local
+ tasks:
+ - name: Delete a container
+ lxd_container:
+ name: mycontainer
+ state: absent
+
+# An example for restarting a container
+- hosts: localhost
+ connection: local
+ tasks:
+ - name: Restart a container
+ lxd_container:
+ name: mycontainer
+ state: restarted
+
+# An example for restarting a container using https to connect to the LXD server
+- hosts: localhost
+ connection: local
+ tasks:
+ - name: Restart a container
+ lxd_container:
+ url: https://127.0.0.1:8443
+ # These cert_file and key_file values are equal to the default values.
+ #cert_file: "{{ lookup('env', 'HOME') }}/.config/lxc/client.crt"
+ #key_file: "{{ lookup('env', 'HOME') }}/.config/lxc/client.key"
+ trust_password: mypassword
+ name: mycontainer
+ state: restarted
+
+# Note your container must be in the inventory for the below example.
+#
+# [containers]
+# mycontainer ansible_connection=lxd
+#
+- hosts:
+ - mycontainer
+ tasks:
+ - name: copy /etc/hosts from the created container to localhost as "mycontainer-hosts"
+ fetch:
+ src: /etc/hosts
+ dest: /tmp/mycontainer-hosts
+ flat: true
+'''
+
+RETURN='''
+addresses:
+ description: Mapping from the network device name to a list of IPv4 addresses in the container
+ returned: when state is started or restarted
+ type: object
+ sample: {"eth0": ["10.155.92.191"]}
+old_state:
+ description: The old state of the container
+ returned: when state is started or restarted
+ type: string
+ sample: "stopped"
+logs:
+ description: The logs of requests and responses.
+ returned: when ansible-playbook is invoked with -vvvv.
+ type: list
+ sample: "(too long to be placed here)"
+actions:
+ description: List of actions performed for the container.
+ returned: success
+ type: list
+ sample: '["create", "start"]'
+'''
+
+import datetime
+import os
+import time
+from ansible.module_utils.lxd import LXDClient, LXDClientException
+
+# LXD_ANSIBLE_STATES is a map of states that contain values of methods used
+# when a particular state is evoked.
+LXD_ANSIBLE_STATES = {
+ 'started': '_started',
+ 'stopped': '_stopped',
+ 'restarted': '_restarted',
+ 'absent': '_destroyed',
+ 'frozen': '_frozen'
+}
+
+# ANSIBLE_LXD_STATES is a map of states of lxd containers to the Ansible
+# lxc_container module state parameter value.
+ANSIBLE_LXD_STATES = {
+ 'Running': 'started',
+ 'Stopped': 'stopped',
+ 'Frozen': 'frozen',
+}
+
+# CONFIG_PARAMS is a list of config attribute names.
+CONFIG_PARAMS = [
+ 'architecture', 'config', 'devices', 'ephemeral', 'profiles', 'source'
+]
+
+try:
+ callable(all)
+except NameError:
+ # For python <2.5
+ # This definition is copied from https://docs.python.org/2/library/functions.html#all
+ def all(iterable):
+ for element in iterable:
+ if not element:
+ return False
+ return True
+
+class LXDContainerManagement(object):
+ def __init__(self, module):
+ """Management of LXC containers via Ansible.
+
+ :param module: Processed Ansible Module.
+ :type module: ``object``
+ """
+ self.module = module
+ self.name = self.module.params['name']
+ self._build_config()
+
+ self.state = self.module.params['state']
+
+ self.timeout = self.module.params['timeout']
+ self.wait_for_ipv4_addresses = self.module.params['wait_for_ipv4_addresses']
+ self.force_stop = self.module.params['force_stop']
+ self.addresses = None
+
+ self.url = self.module.params['url']
+ self.key_file = self.module.params.get('key_file', None)
+ self.cert_file = self.module.params.get('cert_file', None)
+ self.debug = self.module._verbosity >= 4
+ try:
+ self.client = LXDClient(
+ self.url, key_file=self.key_file, cert_file=self.cert_file,
+ debug=self.debug
+ )
+ except LXDClientException as e:
+ self.module.fail_json(msg=e.msg)
+ self.trust_password = self.module.params.get('trust_password', None)
+ self.actions = []
+
+ def _build_config(self):
+ self.config = {}
+ for attr in CONFIG_PARAMS:
+ param_val = self.module.params.get(attr, None)
+ if param_val is not None:
+ self.config[attr] = param_val
+
+ def _get_container_json(self):
+ return self.client.do(
+ 'GET', '/1.0/containers/{0}'.format(self.name),
+ ok_error_codes=[404]
+ )
+
+ def _get_container_state_json(self):
+ return self.client.do(
+ 'GET', '/1.0/containers/{0}/state'.format(self.name),
+ ok_error_codes=[404]
+ )
+
+ @staticmethod
+ def _container_json_to_module_state(resp_json):
+ if resp_json['type'] == 'error':
+ return 'absent'
+ return ANSIBLE_LXD_STATES[resp_json['metadata']['status']]
+
+ def _change_state(self, action, force_stop=False):
+ body_json={'action': action, 'timeout': self.timeout}
+ if force_stop:
+ body_json['force'] = True
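+ # e.g. body_json == {'action': 'stop', 'timeout': 30, 'force': True} for a
+ # forced stop; LXD applies it via the PUT request below.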
+ return self.client.do('PUT', '/1.0/containers/{0}/state'.format(self.name), body_json=body_json)
+
+ def _create_container(self):
+ config = self.config.copy()
+ config['name'] = self.name
+ self.client.do('POST', '/1.0/containers', config)
+ self.actions.append('create')
+
+ def _start_container(self):
+ self._change_state('start')
+ self.actions.append('start')
+
+ def _stop_container(self):
+ self._change_state('stop', self.force_stop)
+ self.actions.append('stop')
+
+ def _restart_container(self):
+ self._change_state('restart', self.force_stop)
+ self.actions.append('restart')
+
+ def _delete_container(self):
+ self.client.do('DELETE', '/1.0/containers/{0}'.format(self.name))
+ self.actions.append('delete')
+
+ def _freeze_container(self):
+ self._change_state('freeze')
+ self.actions.append('freeze')
+
+ def _unfreeze_container(self):
+ self._change_state('unfreeze')
+ self.actions.append('unfreeze')
+
+ def _container_ipv4_addresses(self, ignore_devices=['lo']):
+ resp_json = self._get_container_state_json()
+ network = resp_json['metadata']['network'] or {}
+ network = dict((k, v) for k, v in network.items() if k not in ignore_devices) or {}
+ addresses = dict((k, [a['address'] for a in v['addresses'] if a['family'] == 'inet']) for k, v in network.items()) or {}
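+ # e.g. {'eth0': ['10.155.92.191']}: each remaining device mapped to its
+ # IPv4 ('inet') addresses, with ignored devices such as 'lo' dropped.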
+ return addresses
+
+ @staticmethod
+ def _has_all_ipv4_addresses(addresses):
+ return len(addresses) > 0 and all([len(v) > 0 for v in addresses.itervalues()])
+
+ def _get_addresses(self):
+ try:
+ due = datetime.datetime.now() + datetime.timedelta(seconds=self.timeout)
+ while datetime.datetime.now() < due:
+ time.sleep(1)
+ addresses = self._container_ipv4_addresses()
+ if self._has_all_ipv4_addresses(addresses):
+ self.addresses = addresses
+ return
+ except LXDClientException as e:
+ e.msg = 'timeout for getting IPv4 addresses'
+ raise
+
+ def _started(self):
+ if self.old_state == 'absent':
+ self._create_container()
+ self._start_container()
+ else:
+ if self.old_state == 'frozen':
+ self._unfreeze_container()
+ elif self.old_state == 'stopped':
+ self._start_container()
+ if self._needs_to_apply_container_configs():
+ self._apply_container_configs()
+ if self.wait_for_ipv4_addresses:
+ self._get_addresses()
+
+ def _stopped(self):
+ if self.old_state == 'absent':
+ self._create_container()
+ else:
+ if self.old_state == 'stopped':
+ if self._needs_to_apply_container_configs():
+ self._start_container()
+ self._apply_container_configs()
+ self._stop_container()
+ else:
+ if self.old_state == 'frozen':
+ self._unfreeze_container()
+ if self._needs_to_apply_container_configs():
+ self._apply_container_configs()
+ self._stop_container()
+
+ def _restarted(self):
+ if self.old_state == 'absent':
+ self._create_container()
+ self._start_container()
+ else:
+ if self.old_state == 'frozen':
+ self._unfreeze_container()
+ if self._needs_to_apply_container_configs():
+ self._apply_container_configs()
+ self._restart_container()
+ if self.wait_for_ipv4_addresses:
+ self._get_addresses()
+
+ def _destroyed(self):
+ if self.old_state != 'absent':
+ if self.old_state == 'frozen':
+ self._unfreeze_container()
+ if self.old_state != 'stopped':
+ self._stop_container()
+ self._delete_container()
+
+ def _frozen(self):
+ if self.old_state == 'absent':
+ self._create_container()
+ self._start_container()
+ self._freeze_container()
+ else:
+ if self.old_state == 'stopped':
+ self._start_container()
+ if self._needs_to_apply_container_configs():
+ self._apply_container_configs()
+ self._freeze_container()
+
+ def _needs_to_change_container_config(self, key):
+ if key not in self.config:
+ return False
+ if key == 'config':
+ old_configs = dict((k, v) for k, v in self.old_container_json['metadata'][key].items() if not k.startswith('volatile.'))
+ else:
+ old_configs = self.old_container_json['metadata'][key]
+ return self.config[key] != old_configs
+
+ def _needs_to_apply_container_configs(self):
+ return (
+ self._needs_to_change_container_config('architecture') or
+ self._needs_to_change_container_config('config') or
+ self._needs_to_change_container_config('ephemeral') or
+ self._needs_to_change_container_config('devices') or
+ self._needs_to_change_container_config('profiles')
+ )
+
+ def _apply_container_configs(self):
+ old_metadata = self.old_container_json['metadata']
+ body_json = {
+ 'architecture': old_metadata['architecture'],
+ 'config': old_metadata['config'],
+ 'devices': old_metadata['devices'],
+ 'profiles': old_metadata['profiles']
+ }
+ if self._needs_to_change_container_config('architecture'):
+ body_json['architecture'] = self.config['architecture']
+ if self._needs_to_change_container_config('config'):
+ for k, v in self.config['config'].items():
+ body_json['config'][k] = v
+ if self._needs_to_change_container_config('ephemeral'):
+ body_json['ephemeral'] = self.config['ephemeral']
+ if self._needs_to_change_container_config('devices'):
+ body_json['devices'] = self.config['devices']
+ if self._needs_to_change_container_config('profiles'):
+ body_json['profiles'] = self.config['profiles']
+ self.client.do('PUT', '/1.0/containers/{0}'.format(self.name), body_json=body_json)
+ self.actions.append('apply_container_configs')
+
+ def run(self):
+ """Run the main method."""
+
+ try:
+ if self.trust_password is not None:
+ self.client.authenticate(self.trust_password)
+
+ self.old_container_json = self._get_container_json()
+ self.old_state = self._container_json_to_module_state(self.old_container_json)
+ action = getattr(self, LXD_ANSIBLE_STATES[self.state])
+ action()
+
+ state_changed = len(self.actions) > 0
+ result_json = {
+ 'log_verbosity': self.module._verbosity,
+ 'changed': state_changed,
+ 'old_state': self.old_state,
+ 'actions': self.actions
+ }
+ if self.client.debug:
+ result_json['logs'] = self.client.logs
+ if self.addresses is not None:
+ result_json['addresses'] = self.addresses
+ self.module.exit_json(**result_json)
+ except LXDClientException as e:
+ state_changed = len(self.actions) > 0
+ fail_params = {
+ 'msg': e.msg,
+ 'changed': state_changed,
+ 'actions': self.actions
+ }
+ if self.client.debug:
+ fail_params['logs'] = e.kwargs['logs']
+ self.module.fail_json(**fail_params)
+
+def main():
+ """Ansible Main module."""
+
+ module = AnsibleModule(
+ argument_spec=dict(
+ name=dict(
+ type='str',
+ required=True
+ ),
+ architecture=dict(
+ type='str',
+ ),
+ config=dict(
+ type='dict',
+ ),
+ description=dict(
+ type='str',
+ ),
+ devices=dict(
+ type='dict',
+ ),
+ ephemeral=dict(
+ type='bool',
+ ),
+ profiles=dict(
+ type='list',
+ ),
+ source=dict(
+ type='dict',
+ ),
+ state=dict(
+ choices=LXD_ANSIBLE_STATES.keys(),
+ default='started'
+ ),
+ timeout=dict(
+ type='int',
+ default=30
+ ),
+ wait_for_ipv4_addresses=dict(
+ type='bool',
+ default=False
+ ),
+ force_stop=dict(
+ type='bool',
+ default=False
+ ),
+ url=dict(
+ type='str',
+ default='unix:/var/lib/lxd/unix.socket'
+ ),
+ key_file=dict(
+ type='str',
+ default='{}/.config/lxc/client.key'.format(os.environ['HOME'])
+ ),
+ cert_file=dict(
+ type='str',
+ default='{}/.config/lxc/client.crt'.format(os.environ['HOME'])
+ ),
+ trust_password=dict(
+ type='str',
+ no_log=True
+ )
+ ),
+ supports_check_mode=False,
+ )
+
+ lxd_manage = LXDContainerManagement(module=module)
+ lxd_manage.run()
+
+# import module bits
+from ansible.module_utils.basic import *
+if __name__ == '__main__':
+ main()
diff --git a/lib/ansible/modules/extras/cloud/lxd/lxd_profile.py b/lib/ansible/modules/extras/cloud/lxd/lxd_profile.py
new file mode 100644
index 0000000000..272a88b174
--- /dev/null
+++ b/lib/ansible/modules/extras/cloud/lxd/lxd_profile.py
@@ -0,0 +1,374 @@
+#!/usr/bin/python
+# -*- coding: utf-8 -*-
+
+# (c) 2016, Hiroaki Nakamura <hnakamur@gmail.com>
+#
+# This file is part of Ansible
+#
+# Ansible is free software: you can redistribute it and/or modify
+# it under the terms of the GNU General Public License as published by
+# the Free Software Foundation, either version 3 of the License, or
+# (at your option) any later version.
+#
+# Ansible is distributed in the hope that it will be useful,
+# but WITHOUT ANY WARRANTY; without even the implied warranty of
+# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+# GNU General Public License for more details.
+#
+# You should have received a copy of the GNU General Public License
+# along with Ansible. If not, see <http://www.gnu.org/licenses/>.
+
+
+DOCUMENTATION = '''
+---
+module: lxd_profile
+short_description: Manage LXD profiles
+version_added: "2.2"
+description:
+ - Management of LXD profiles
+author: "Hiroaki Nakamura (@hnakamur)"
+options:
+ name:
+ description:
+ - Name of a profile.
+ required: true
+ config:
+ description:
+ - 'The config for the profile (e.g. {"limits.memory": "4GB"}).
+ See U(https://github.com/lxc/lxd/blob/master/doc/rest-api.md#patch-3)'
+ - If the profile already exists and its "config" value in the metadata
+ obtained from
+ GET /1.0/profiles/<name>
+ U(https://github.com/lxc/lxd/blob/master/doc/rest-api.md#get-19)
+ is different, this module tries to apply the configurations.
+ - Not all config values can be applied to an existing profile;
+ you may need to delete and recreate the profile.
+ required: false
+ devices:
+ description:
+ - 'The devices for the profile
+ (e.g. {"rootfs": {"path": "/dev/kvm", "type": "unix-char"}).
+ See U(https://github.com/lxc/lxd/blob/master/doc/rest-api.md#patch-3)'
+ required: false
+ new_name:
+ description:
+ - A new name for the profile.
+ - If this parameter is specified, the profile will be renamed to this name.
+ See U(https://github.com/lxc/lxd/blob/master/doc/rest-api.md#post-11)
+ required: false
+ state:
+ choices:
+ - present
+ - absent
+ description:
+ - Define the state of a profile.
+ required: false
+ default: present
+ url:
+ description:
+ - The unix domain socket path or the https URL for the LXD server.
+ required: false
+ default: unix:/var/lib/lxd/unix.socket
+ key_file:
+ description:
+ - The client certificate key file path.
+ required: false
+ default: '"{}/.config/lxc/client.key" .format(os.environ["HOME"])'
+ cert_file:
+ description:
+ - The client certificate file path.
+ required: false
+ default: '"{}/.config/lxc/client.crt" .format(os.environ["HOME"])'
+ trust_password:
+ description:
+ - The client trust password.
+ - You need to set this password on the LXD server before
+ running this module using the following command.
+ lxc config set core.trust_password <some random password>
+ See U(https://www.stgraber.org/2016/04/18/lxd-api-direct-interaction/)
+ - If trust_password is set, this module sends a request for
+ authentication before sending any requests.
+ required: false
+notes:
+ - Profiles must have a unique name. If you attempt to create a profile
+ with a name that already exists in the user's namespace, the module will
+ simply return as "unchanged".
+'''
+
+EXAMPLES = '''
+# An example for creating a profile
+- hosts: localhost
+ connection: local
+ tasks:
+ - name: Create a profile
+ lxd_profile:
+ name: macvlan
+ state: present
+ config: {}
+ description: 'my macvlan profile'
+ devices:
+ eth0:
+ nictype: macvlan
+ parent: br0
+ type: nic
+
+# An example for creating a profile via http connection
+- hosts: localhost
+ connection: local
+ tasks:
+ - name: create macvlan profile
+ lxd_profile:
+ url: https://127.0.0.1:8443
+ # These cert_file and key_file values are equal to the default values.
+ #cert_file: "{{ lookup('env', 'HOME') }}/.config/lxc/client.crt"
+ #key_file: "{{ lookup('env', 'HOME') }}/.config/lxc/client.key"
+ trust_password: mypassword
+ name: macvlan
+ state: present
+ config: {}
+ description: 'my macvlan profile'
+ devices:
+ eth0:
+ nictype: macvlan
+ parent: br0
+ type: nic
+
+# An example for deleting a profile
+- hosts: localhost
+ connection: local
+ tasks:
+ - name: Delete a profile
+ lxd_profile:
+ name: macvlan
+ state: absent
+
+# An example for renaming a profile
+- hosts: localhost
+ connection: local
+ tasks:
+ - name: Rename a profile
+ lxd_profile:
+ name: macvlan
+ new_name: macvlan2
+ state: present
+'''
+
+RETURN='''
+old_state:
+ description: The old state of the profile
+ returned: success
+ type: string
+ sample: "absent"
+logs:
+ description: The logs of requests and responses.
+ returned: when ansible-playbook is invoked with -vvvv.
+ type: list
+ sample: "(too long to be placed here)"
+actions:
+ description: List of actions performed for the profile.
+ returned: success
+ type: list
+ sample: '["create"]'
+'''
+
+import os
+from ansible.module_utils.lxd import LXDClient, LXDClientException
+
+# PROFILES_STATES is a list of the supported states.
+PROFILES_STATES = [
+ 'present', 'absent'
+]
+
+# CONFIG_PARAMS is a list of config attribute names.
+CONFIG_PARAMS = [
+ 'config', 'description', 'devices'
+]
+
+class LXDProfileManagement(object):
+ def __init__(self, module):
+ """Management of LXC containers via Ansible.
+
+ :param module: Processed Ansible Module.
+ :type module: ``object``
+ """
+ self.module = module
+ self.name = self.module.params['name']
+ self._build_config()
+ self.state = self.module.params['state']
+ self.new_name = self.module.params.get('new_name', None)
+
+ self.url = self.module.params['url']
+ self.key_file = self.module.params.get('key_file', None)
+ self.cert_file = self.module.params.get('cert_file', None)
+ self.debug = self.module._verbosity >= 4
+ try:
+ self.client = LXDClient(
+ self.url, key_file=self.key_file, cert_file=self.cert_file,
+ debug=self.debug
+ )
+ except LXDClientException as e:
+ self.module.fail_json(msg=e.msg)
+ self.trust_password = self.module.params.get('trust_password', None)
+ self.actions = []
+
+ def _build_config(self):
+ self.config = {}
+ for attr in CONFIG_PARAMS:
+ param_val = self.module.params.get(attr, None)
+ if param_val is not None:
+ self.config[attr] = param_val
+
+ def _get_profile_json(self):
+ return self.client.do(
+ 'GET', '/1.0/profiles/{0}'.format(self.name),
+ ok_error_codes=[404]
+ )
+
+ @staticmethod
+ def _profile_json_to_module_state(resp_json):
+ if resp_json['type'] == 'error':
+ return 'absent'
+ return 'present'
+
+ def _update_profile(self):
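+ # Dispatch on the desired and old states: create or rename/apply for
+ # 'present', delete for 'absent'. new_name is only valid when the
+ # profile already exists and remains present.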
+ if self.state == 'present':
+ if self.old_state == 'absent':
+ if self.new_name is None:
+ self._create_profile()
+ else:
+ self.module.fail_json(
+ msg='new_name must not be set when the profile does not exist and the specified state is present',
+ changed=False)
+ else:
+ if self.new_name is not None and self.new_name != self.name:
+ self._rename_profile()
+ if self._needs_to_apply_profile_configs():
+ self._apply_profile_configs()
+ elif self.state == 'absent':
+ if self.old_state == 'present':
+ if self.new_name is None:
+ self._delete_profile()
+ else:
+ self.module.fail_json(
+ msg='new_name must not be set when the profile exists and the specified state is absent',
+ changed=False)
+
+ def _create_profile(self):
+ config = self.config.copy()
+ config['name'] = self.name
+ self.client.do('POST', '/1.0/profiles', config)
+ self.actions.append('create')
+
+ def _rename_profile(self):
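+ # A rename is a POST to /1.0/profiles/<old name> with the new name as
+ # the body; self.name is updated afterwards so that any subsequent
+ # requests use the new name.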
+ config = {'name': self.new_name}
+ self.client.do('POST', '/1.0/profiles/{0}'.format(self.name), config)
+ self.actions.append('rename')
+ self.name = self.new_name
+
+ def _needs_to_change_profile_config(self, key):
+ if key not in self.config:
+ return False
+ old_configs = self.old_profile_json['metadata'].get(key, None)
+ return self.config[key] != old_configs
+
+ def _needs_to_apply_profile_configs(self):
+ return (
+ self._needs_to_change_profile_config('config') or
+ self._needs_to_change_profile_config('description') or
+ self._needs_to_change_profile_config('devices')
+ )
+
+ def _apply_profile_configs(self):
+ config = self.old_profile_json['metadata'].copy()
+ for k, v in self.config.iteritems():
+ config[k] = v
+ self.client.do('PUT', '/1.0/profiles/{0}'.format(self.name), config)
+ self.actions.append('apply_profile_configs')
+
+ def _delete_profile(self):
+ self.client.do('DELETE', '/1.0/profiles/{0}'.format(self.name))
+ self.actions.append('delete')
+
+ def run(self):
+ """Run the main method."""
+
+ try:
+ if self.trust_password is not None:
+ self.client.authenticate(self.trust_password)
+
+ self.old_profile_json = self._get_profile_json()
+ self.old_state = self._profile_json_to_module_state(self.old_profile_json)
+ self._update_profile()
+
+ state_changed = len(self.actions) > 0
+ result_json = {
+ 'changed': state_changed,
+ 'old_state': self.old_state,
+ 'actions': self.actions
+ }
+ if self.client.debug:
+ result_json['logs'] = self.client.logs
+ self.module.exit_json(**result_json)
+ except LXDClientException as e:
+ state_changed = len(self.actions) > 0
+ fail_params = {
+ 'msg': e.msg,
+ 'changed': state_changed,
+ 'actions': self.actions
+ }
+ if self.client.debug:
+ fail_params['logs'] = e.kwargs['logs']
+ self.module.fail_json(**fail_params)
+
+
+def main():
+ """Ansible Main module."""
+
+ module = AnsibleModule(
+ argument_spec=dict(
+ name=dict(
+ type='str',
+ required=True
+ ),
+ new_name=dict(
+ type='str',
+ ),
+ config=dict(
+ type='dict',
+ ),
+ description=dict(
+ type='str',
+ ),
+ devices=dict(
+ type='dict',
+ ),
+ state=dict(
+ choices=PROFILES_STATES,
+ default='present'
+ ),
+ url=dict(
+ type='str',
+ default='unix:/var/lib/lxd/unix.socket'
+ ),
+ key_file=dict(
+ type='str',
+ default='{}/.config/lxc/client.key'.format(os.environ['HOME'])
+ ),
+ cert_file=dict(
+ type='str',
+ default='{}/.config/lxc/client.crt'.format(os.environ['HOME'])
+ ),
+ trust_password=dict(
+ type='str',
+ no_log=True
+ )
+ ),
+ supports_check_mode=False,
+ )
+
+ lxd_manage = LXDProfileManagement(module=module)
+ lxd_manage.run()
+
+# import module bits
+from ansible.module_utils.basic import *
+if __name__ == '__main__':
+ main()
diff --git a/lib/ansible/modules/extras/cloud/misc/__init__.py b/lib/ansible/modules/extras/cloud/misc/__init__.py
new file mode 100644
index 0000000000..e69de29bb2
--- /dev/null
+++ b/lib/ansible/modules/extras/cloud/misc/__init__.py
diff --git a/lib/ansible/modules/extras/cloud/misc/ovirt.py b/lib/ansible/modules/extras/cloud/misc/ovirt.py
new file mode 100644
index 0000000000..8585dfb6b8
--- /dev/null
+++ b/lib/ansible/modules/extras/cloud/misc/ovirt.py
@@ -0,0 +1,521 @@
+#!/usr/bin/python
+
+# (c) 2013, Vincent Van der Kussen <vincent at vanderkussen.org>
+#
+# This file is part of Ansible
+#
+# Ansible is free software: you can redistribute it and/or modify
+# it under the terms of the GNU General Public License as published by
+# the Free Software Foundation, either version 3 of the License, or
+# (at your option) any later version.
+#
+# Ansible is distributed in the hope that it will be useful,
+# but WITHOUT ANY WARRANTY; without even the implied warranty of
+# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+# GNU General Public License for more details.
+#
+# You should have received a copy of the GNU General Public License
+# along with Ansible. If not, see <http://www.gnu.org/licenses/>.
+
+DOCUMENTATION = '''
+---
+module: ovirt
+author: "Vincent Van der Kussen (@vincentvdk)"
+short_description: oVirt/RHEV platform management
+description:
+ - allows you to create new instances, either from scratch or from an image, in addition to deleting or stopping instances on the oVirt/RHEV platform
+version_added: "1.4"
+options:
+ user:
+ description:
+ - the user to authenticate with
+ default: null
+ required: true
+ aliases: []
+ url:
+ description:
+ - the url of the oVirt instance
+ default: null
+ required: true
+ aliases: []
+ instance_name:
+ description:
+ - the name of the instance to use
+ default: null
+ required: true
+ aliases: [ vmname ]
+ password:
+ description:
+ - password of the user to authenticate with
+ default: null
+ required: true
+ aliases: []
+ image:
+ description:
+ - template to use for the instance
+ default: null
+ required: false
+ aliases: []
+ resource_type:
+ description:
+ - whether you want to deploy an image or create an instance from scratch.
+ default: null
+ required: false
+ aliases: []
+ choices: [ 'new', 'template' ]
+ zone:
+ description:
+ - deploy the image to this oVirt cluster
+ default: null
+ required: false
+ aliases: []
+ instance_disksize:
+ description:
+ - size of the instance's disk in GB
+ default: null
+ required: false
+ aliases: [ vm_disksize]
+ instance_cpus:
+ description:
+ - the instance's number of CPUs
+ default: 1
+ required: false
+ aliases: [ vmcpus ]
+ instance_nic:
+ description:
+ - name of the network interface in oVirt/RHEV
+ default: null
+ required: false
+ aliases: [ vmnic ]
+ instance_network:
+ description:
+ - the logical network the machine should belong to
+ default: rhevm
+ required: false
+ aliases: [ vmnetwork ]
+ instance_mem:
+ description:
+ - the instance's amount of memory in MB
+ default: null
+ required: false
+ aliases: [ vmmem ]
+ instance_type:
+ description:
+ - define if the instance is a server or desktop
+ default: server
+ required: false
+ aliases: [ vmtype ]
+ choices: [ 'server', 'desktop' ]
+ disk_alloc:
+ description:
+ - define if disk is thin or preallocated
+ default: thin
+ required: false
+ aliases: []
+ choices: [ 'thin', 'preallocated' ]
+ disk_int:
+ description:
+ - interface type of the disk
+ default: virtio
+ required: false
+ aliases: []
+ choices: [ 'virtio', 'ide' ]
+ instance_os:
+ description:
+ - type of Operating System
+ default: null
+ required: false
+ aliases: [ vmos ]
+ instance_cores:
+ description:
+ - define the instance's number of cores
+ default: 1
+ required: false
+ aliases: [ vmcores ]
+ sdomain:
+ description:
+ - the storage domain where you want to create the instance's disk
+ default: null
+ required: false
+ aliases: []
+ region:
+ description:
+ - the oVirt/RHEV datacenter where you want to deploy to
+ default: null
+ required: false
+ aliases: []
+ instance_dns:
+ description:
+ - define the instance's Primary DNS server
+ required: false
+ aliases: [ dns ]
+ version_added: "2.1"
+ instance_domain:
+ description:
+ - define the instance's Domain
+ required: false
+ aliases: [ domain ]
+ version_added: "2.1"
+ instance_hostname:
+ description:
+ - define the instance's Hostname
+ required: false
+ aliases: [ hostname ]
+ version_added: "2.1"
+ instance_ip:
+ description:
+ - define the instance's IP
+ required: false
+ aliases: [ ip ]
+ version_added: "2.1"
+ instance_netmask:
+ description:
+ - define the instance's Netmask
+ required: false
+ aliases: [ netmask ]
+ version_added: "2.1"
+ instance_rootpw:
+ description:
+ - define the instance's Root password
+ required: false
+ aliases: [ rootpw ]
+ version_added: "2.1"
+ instance_key:
+ description:
+ - define the instance's Authorized key
+ required: false
+ aliases: [ key ]
+ version_added: "2.1"
+ state:
+ description:
+ - create, remove, start, shut down or restart instances
+ default: 'present'
+ required: false
+ aliases: []
+ choices: ['present', 'absent', 'shutdown', 'started', 'restart']
+
+requirements:
+ - "python >= 2.6"
+ - "ovirt-engine-sdk-python"
+'''
+EXAMPLES = '''
+# Basic example provisioning from image.
+
+ovirt:
+ user: admin@internal
+ url: https://ovirt.example.com
+ instance_name: ansiblevm04
+ password: secret
+ image: centos_64
+ zone: cluster01
+ resource_type: template"
+
+# Full example to create new instance from scratch
+ovirt:
+ instance_name: testansible
+ resource_type: new
+ instance_type: server
+ user: admin@internal
+ password: secret
+ url: https://ovirt.example.com
+ instance_disksize: 10
+ zone: cluster01
+ region: datacenter1
+ instance_cpus: 1
+ instance_nic: nic1
+ instance_network: rhevm
+ instance_mem: 1000
+ disk_alloc: thin
+ sdomain: FIBER01
+ instance_cores: 1
+ instance_os: rhel_6x64
+ disk_int: virtio"
+
+# shutting down an instance
+ovirt:
+ instance_name: testansible
+ state: shutdown
+ user: admin@internal
+ password: secret
+ url: https://ovirt.example.com
+
+# starting an instance
+ovirt:
+ instance_name: testansible
+ state: started
+ user: admin@internal
+ password: secret
+ url: https://ovirt.example.com
+
+# starting an instance with cloud init information
+ovirt:
+ instance_name: testansible
+ state: started
+ user: admin@internal
+ password: secret
+ url: https://ovirt.example.com
+ hostname: testansible
+ domain: ansible.local
+ ip: 192.0.2.100
+ netmask: 255.255.255.0
+ gateway: 192.0.2.1
+ rootpw: bigsecret
+
+'''
+
+try:
+ from ovirtsdk.api import API
+ from ovirtsdk.xml import params
+ HAS_OVIRTSDK = True
+except ImportError:
+ HAS_OVIRTSDK = False
+
+# ------------------------------------------------------------------- #
+# create connection with API
+#
+def conn(url, user, password):
+ api = API(url=url, username=user, password=password, insecure=True)
+ try:
+ value = api.test()
+ except:
+ raise Exception("error connecting to the oVirt API")
+ return api
+
+# ------------------------------------------------------------------- #
+# Create VM from scratch
+def create_vm(conn, vmtype, vmname, zone, vmdisk_size, vmcpus, vmnic, vmnetwork, vmmem, vmdisk_alloc, sdomain, vmcores, vmos, vmdisk_int):
+ if vmdisk_alloc == 'thin':
+ # define VM params
+ vmparams = params.VM(name=vmname,cluster=conn.clusters.get(name=zone),os=params.OperatingSystem(type_=vmos),template=conn.templates.get(name="Blank"),memory=1024 * 1024 * int(vmmem),cpu=params.CPU(topology=params.CpuTopology(cores=int(vmcores))), type_=vmtype)
+ # define disk params
+ vmdisk= params.Disk(size=1024 * 1024 * 1024 * int(vmdisk_size), wipe_after_delete=True, sparse=True, interface=vmdisk_int, type_="System", format='cow',
+ storage_domains=params.StorageDomains(storage_domain=[conn.storagedomains.get(name=sdomain)]))
+ # define network parameters
+ network_net = params.Network(name=vmnetwork)
+ nic_net1 = params.NIC(name='nic1', network=network_net, interface='virtio')
+ elif vmdisk_alloc == 'preallocated':
+ # define VM params
+ vmparams = params.VM(name=vmname,cluster=conn.clusters.get(name=zone),os=params.OperatingSystem(type_=vmos),template=conn.templates.get(name="Blank"),memory=1024 * 1024 * int(vmmem),cpu=params.CPU(topology=params.CpuTopology(cores=int(vmcores))) ,type_=vmtype)
+ # define disk params
+ vmdisk= params.Disk(size=1024 * 1024 * 1024 * int(vmdisk_size), wipe_after_delete=True, sparse=False, interface=vmdisk_int, type_="System", format='raw',
+ storage_domains=params.StorageDomains(storage_domain=[conn.storagedomains.get(name=sdomain)]))
+ # define network parameters
+ network_net = params.Network(name=vmnetwork)
+ nic_net1 = params.NIC(name=vmnic, network=network_net, interface='virtio')
+
+ try:
+ conn.vms.add(vmparams)
+ except:
+ raise Exception("Error creating VM with specified parameters")
+ vm = conn.vms.get(name=vmname)
+ try:
+ vm.disks.add(vmdisk)
+ except:
+ raise Exception("Error attaching disk")
+ try:
+ vm.nics.add(nic_net1)
+ except:
+ raise Exception("Error adding nic")
+
+
+# create an instance from a template
+def create_vm_template(conn, vmname, image, zone):
+ vmparams = params.VM(name=vmname, cluster=conn.clusters.get(name=zone), template=conn.templates.get(name=image),disks=params.Disks(clone=True))
+ try:
+ conn.vms.add(vmparams)
+ except:
+ raise Exception('error adding template %s' % image)
+
+
+# start instance
+def vm_start(conn, vmname, hostname=None, ip=None, netmask=None, gateway=None,
+ domain=None, dns=None, rootpw=None, key=None):
+ vm = conn.vms.get(name=vmname)
+ use_cloud_init = False
+ nics = None
+ nic = None
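+ # If any cloud-init style field is given, start the VM with a one-shot
+ # Initialization payload instead of a plain start.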
+ if hostname or ip or netmask or gateway or domain or dns or rootpw or key:
+ use_cloud_init = True
+ if ip and netmask and gateway:
+ ipinfo = params.IP(address=ip, netmask=netmask, gateway=gateway)
+ nic = params.GuestNicConfiguration(name='eth0', boot_protocol='STATIC', ip=ipinfo, on_boot=True)
+ nics = params.GuestNicsConfiguration(nic_configuration=[nic])
+ initialization=params.Initialization(regenerate_ssh_keys=True, host_name=hostname, domain=domain, user_name='root',
+ root_password=rootpw, nic_configurations=nics, dns_servers=dns,
+ authorized_ssh_keys=key)
+ action = params.Action(use_cloud_init=use_cloud_init, vm=params.VM(initialization=initialization))
+ vm.start(action=action)
+
+# Stop instance
+def vm_stop(conn, vmname):
+ vm = conn.vms.get(name=vmname)
+ vm.stop()
+
+# restart instance
+def vm_restart(conn, vmname):
+ vm = conn.vms.get(name=vmname)
+ vm.stop()
+ while conn.vms.get(vmname).get_status().get_state() != 'down':
+ time.sleep(5)
+ vm.start()
+
+# remove an instance
+def vm_remove(conn, vmname):
+ vm = conn.vms.get(name=vmname)
+ vm.delete()
+
+# ------------------------------------------------------------------- #
+# VM statuses
+#
+# Get the VMs status
+def vm_status(conn, vmname):
+ status = conn.vms.get(name=vmname).status.state
+ return status
+
+
+# Get the VM object and return its name if the object exists
+def get_vm(conn, vmname):
+ vm = conn.vms.get(name=vmname)
+ if vm is None:
+ name = "empty"
+ else:
+ name = vm.get_name()
+ return name
+
+# ------------------------------------------------------------------- #
+# Hypervisor operations
+#
+# not available yet
+# ------------------------------------------------------------------- #
+# Main
+
+def main():
+
+ module = AnsibleModule(
+ argument_spec = dict(
+ state = dict(default='present', choices=['present', 'absent', 'shutdown', 'started', 'restart']),
+ #name = dict(required=True),
+ user = dict(required=True),
+ url = dict(required=True),
+ instance_name = dict(required=True, aliases=['vmname']),
+ password = dict(required=True, no_log=True),
+ image = dict(),
+ resource_type = dict(choices=['new', 'template']),
+ zone = dict(),
+ instance_disksize = dict(aliases=['vm_disksize']),
+ instance_cpus = dict(default=1, aliases=['vmcpus']),
+ instance_nic = dict(aliases=['vmnic']),
+ instance_network = dict(default='rhevm', aliases=['vmnetwork']),
+ instance_mem = dict(aliases=['vmmem']),
+ instance_type = dict(default='server', aliases=['vmtype'], choices=['server', 'desktop']),
+ disk_alloc = dict(default='thin', choices=['thin', 'preallocated']),
+ disk_int = dict(default='virtio', choices=['virtio', 'ide']),
+ instance_os = dict(aliases=['vmos']),
+ instance_cores = dict(default=1, aliases=['vmcores']),
+ instance_hostname = dict(aliases=['hostname']),
+ instance_ip = dict(aliases=['ip']),
+ instance_netmask = dict(aliases=['netmask']),
+ instance_gateway = dict(aliases=['gateway']),
+ instance_domain = dict(aliases=['domain']),
+ instance_dns = dict(aliases=['dns']),
+ instance_rootpw = dict(aliases=['rootpw'], no_log=True),
+ instance_key = dict(aliases=['key']),
+ sdomain = dict(),
+ region = dict(),
+ )
+ )
+
+ if not HAS_OVIRTSDK:
+ module.fail_json(msg='ovirtsdk required for this module')
+
+ state = module.params['state']
+ user = module.params['user']
+ url = module.params['url']
+ vmname = module.params['instance_name']
+ password = module.params['password']
+ image = module.params['image'] # name of the image to deploy
+ resource_type = module.params['resource_type'] # template or from scratch
+ zone = module.params['zone'] # oVirt cluster
+ vmdisk_size = module.params['instance_disksize'] # disksize
+ vmcpus = module.params['instance_cpus'] # number of cpu
+ vmnic = module.params['instance_nic'] # network interface
+ vmnetwork = module.params['instance_network'] # logical network
+ vmmem = module.params['instance_mem'] # mem size
+ vmdisk_alloc = module.params['disk_alloc'] # thin, preallocated
+ vmdisk_int = module.params['disk_int'] # disk interface virtio or ide
+ vmos = module.params['instance_os'] # Operating System
+ vmtype = module.params['instance_type'] # server or desktop
+ vmcores = module.params['instance_cores'] # number of cores
+ sdomain = module.params['sdomain'] # storage domain to store disk on
+ region = module.params['region'] # oVirt Datacenter
+ hostname = module.params['instance_hostname']
+ ip = module.params['instance_ip']
+ netmask = module.params['instance_netmask']
+ gateway = module.params['instance_gateway']
+ domain = module.params['instance_domain']
+ dns = module.params['instance_dns']
+ rootpw = module.params['instance_rootpw']
+ key = module.params['instance_key']
+ #initialize connection
+ try:
+ c = conn(url+"/api", user, password)
+ except Exception, e:
+ module.fail_json(msg='%s' % e)
+
+ if state == 'present':
+ if get_vm(c, vmname) == "empty":
+ if resource_type == 'template':
+ try:
+ create_vm_template(c, vmname, image, zone)
+ except Exception, e:
+ module.fail_json(msg='%s' % e)
+ module.exit_json(changed=True, msg="deployed VM %s from template %s" % (vmname,image))
+ elif resource_type == 'new':
+ # FIXME: refactor, use keyword args.
+ try:
+ create_vm(c, vmtype, vmname, zone, vmdisk_size, vmcpus, vmnic, vmnetwork, vmmem, vmdisk_alloc, sdomain, vmcores, vmos, vmdisk_int)
+ except Exception, e:
+ module.fail_json(msg='%s' % e)
+ module.exit_json(changed=True, msg="deployed VM %s from scratch" % vmname)
+ else:
+ module.exit_json(changed=False, msg="You did not specify a resource type")
+ else:
+ module.exit_json(changed=False, msg="VM %s already exists" % vmname)
+
+ if state == 'started':
+ if vm_status(c, vmname) == 'up':
+ module.exit_json(changed=False, msg="VM %s is already running" % vmname)
+ else:
+ #vm_start(c, vmname)
+ vm_start(c, vmname, hostname, ip, netmask, gateway, domain, dns, rootpw, key)
+ module.exit_json(changed=True, msg="VM %s started" % vmname)
+
+ if state == 'shutdown':
+ if vm_status(c, vmname) == 'down':
+ module.exit_json(changed=False, msg="VM %s is already shutdown" % vmname)
+ else:
+ vm_stop(c, vmname)
+ module.exit_json(changed=True, msg="VM %s is shutting down" % vmname)
+
+ if state == 'restart':
+ if vm_status(c, vmname) == 'up':
+ vm_restart(c, vmname)
+ module.exit_json(changed=True, msg="VM %s is restarted" % vmname)
+ else:
+ module.exit_json(changed=False, msg="VM %s is not running" % vmname)
+
+ if state == 'absent':
+ if get_vm(c, vmname) == "empty":
+ module.exit_json(changed=False, msg="VM %s does not exist" % vmname)
+ else:
+ vm_remove(c, vmname)
+ module.exit_json(changed=True, msg="VM %s removed" % vmname)
+
+
+
+
+# import module snippets
+from ansible.module_utils.basic import *
+main()
diff --git a/lib/ansible/modules/extras/cloud/misc/proxmox.py b/lib/ansible/modules/extras/cloud/misc/proxmox.py
new file mode 100644
index 0000000000..d0df6b3f42
--- /dev/null
+++ b/lib/ansible/modules/extras/cloud/misc/proxmox.py
@@ -0,0 +1,468 @@
+#!/usr/bin/python
+# This file is part of Ansible
+#
+# Ansible is free software: you can redistribute it and/or modify
+# it under the terms of the GNU General Public License as published by
+# the Free Software Foundation, either version 3 of the License, or
+# (at your option) any later version.
+#
+# Ansible is distributed in the hope that it will be useful,
+# but WITHOUT ANY WARRANTY; without even the implied warranty of
+# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+# GNU General Public License for more details.
+#
+# You should have received a copy of the GNU General Public License
+# along with Ansible. If not, see <http://www.gnu.org/licenses/>.
+
+DOCUMENTATION = '''
+---
+module: proxmox
+short_description: management of instances in Proxmox VE cluster
+description:
+ - allows you to create/delete/stop instances in Proxmox VE cluster
+ - Starting in Ansible 2.1, it automatically detects containerization type (lxc for PVE 4, openvz for older)
+version_added: "2.0"
+options:
+ api_host:
+ description:
+ - the host of the Proxmox VE cluster
+ required: true
+ api_user:
+ description:
+ - the user to authenticate with
+ required: true
+ api_password:
+ description:
+ - the password to authenticate with
+ - you can use the PROXMOX_PASSWORD environment variable
+ default: null
+ required: false
+ vmid:
+ description:
+ - the instance id
+ default: null
+ required: true
+ validate_certs:
+ description:
+ - enable / disable https certificate verification
+ default: false
+ required: false
+ type: boolean
+ node:
+ description:
+ - Proxmox VE node on which the new VM will be created
+ - required only for C(state=present)
+ - for other states it will be autodiscovered
+ default: null
+ required: false
+ password:
+ description:
+ - the instance root password
+ - required only for C(state=present)
+ default: null
+ required: false
+ hostname:
+ description:
+ - the instance hostname
+ - required only for C(state=present)
+ default: null
+ required: false
+ ostemplate:
+ description:
+ - the template to use for creating the VM
+ - required only for C(state=present)
+ default: null
+ required: false
+ disk:
+ description:
+ - hard disk size in GB for instance
+ default: 3
+ required: false
+ cpus:
+ description:
+ - number of allocated CPUs for the instance
+ default: 1
+ required: false
+ memory:
+ description:
+ - memory size in MB for instance
+ default: 512
+ required: false
+ swap:
+ description:
+ - swap memory size in MB for instance
+ default: 0
+ required: false
+ netif:
+ description:
+ - specifies network interfaces for the container
+ default: null
+ required: false
+ type: A hash/dictionary defining interfaces
+ mounts:
+ description:
+ - specifies additional mounts (separate disks) for the container
+ default: null
+ required: false
+ type: A hash/dictionary defining mount points
+ version_added: "2.2"
+ ip_address:
+ description:
+ - specifies the address the container will be assigned
+ default: null
+ required: false
+ type: string
+ onboot:
+ description:
+ - specifies whether a VM will be started during system bootup
+ default: false
+ required: false
+ type: boolean
+ storage:
+ description:
+ - target storage
+ default: 'local'
+ required: false
+ type: string
+ cpuunits:
+ description:
+ - CPU weight for a VM
+ default: 1000
+ required: false
+ type: integer
+ nameserver:
+ description:
+ - sets DNS server IP address for a container
+ default: null
+ required: false
+ type: string
+ searchdomain:
+ description:
+ - sets DNS search domain for a container
+ default: null
+ required: false
+ type: string
+ timeout:
+ description:
+ - timeout for operations
+ default: 30
+ required: false
+ type: integer
+ force:
+ description:
+ - forcing operations
+ - can be used only with states C(present), C(stopped), C(restarted)
+ - with C(state=present) the force option allows you to overwrite an existing container
+ - with states C(stopped) and C(restarted) it allows you to force-stop the instance
+ default: false
+ required: false
+ type: boolean
+ state:
+ description:
+ - Indicate desired state of the instance
+ choices: ['present', 'started', 'absent', 'stopped', 'restarted']
+ default: present
+notes:
+ - Requires the proxmoxer and requests modules on the host. These modules can be installed with pip.
+requirements: [ "proxmoxer", "requests" ]
+author: "Sergei Antipov @UnderGreen"
+'''
+
+EXAMPLES = '''
+# Create new container with minimal options
+- proxmox: vmid=100 node='uk-mc02' api_user='root@pam' api_password='1q2w3e' api_host='node1' password='123456' hostname='example.org' ostemplate='local:vztmpl/ubuntu-14.04-x86_64.tar.gz'
+
+# Create new container with minimal options with force (it will overwrite the existing container)
+- proxmox: vmid=100 node='uk-mc02' api_user='root@pam' api_password='1q2w3e' api_host='node1' password='123456' hostname='example.org' ostemplate='local:vztmpl/ubuntu-14.04-x86_64.tar.gz' force=yes
+
+# Create new container with minimal options using the PROXMOX_PASSWORD environment variable (you should export it beforehand)
+- proxmox: vmid=100 node='uk-mc02' api_user='root@pam' api_host='node1' password='123456' hostname='example.org' ostemplate='local:vztmpl/ubuntu-14.04-x86_64.tar.gz'
+
+# Create new container with minimal options defining network interface with dhcp
+- proxmox: vmid=100 node='uk-mc02' api_user='root@pam' api_password='1q2w3e' api_host='node1' password='123456' hostname='example.org' ostemplate='local:vztmpl/ubuntu-14.04-x86_64.tar.gz' netif='{"net0":"name=eth0,ip=dhcp,ip6=dhcp,bridge=vmbr0"}'
+
+# Create new container with minimal options defining a mount
+- proxmox: vmid=100 node='uk-mc02' api_user='root@pam' api_password='1q2w3e' api_host='node1' password='123456' hostname='example.org' ostemplate='local:vztmpl/ubuntu-14.04-x86_64.tar.gz' mounts='{"mp0":"local:8,mp=/mnt/test/"}'
+
+# Start container
+- proxmox: vmid=100 api_user='root@pam' api_password='1q2w3e' api_host='node1' state=started
+
+# Stop container
+- proxmox: vmid=100 api_user='root@pam' api_password='1q2w3e' api_host='node1' state=stopped
+
+# Stop container with force
+- proxmox: vmid=100 api_user='root@pam' api_password='1q2w3e' api_host='node1' force=yes state=stopped
+
+# Restart container (a stopped or mounted container cannot be restarted)
+- proxmox: vmid=100 api_user='root@pam' api_password='1q2w3e' api_host='node1' state=restarted
+
+# Remove container
+- proxmox: vmid=100 api_user='root@pam' api_password='1q2w3e' api_host='node1' state=absent
+'''
+
+import os
+import time
+
+try:
+ from proxmoxer import ProxmoxAPI
+ HAS_PROXMOXER = True
+except ImportError:
+ HAS_PROXMOXER = False
+
+VZ_TYPE=None
+
+def get_instance(proxmox, vmid):
+ return [ vm for vm in proxmox.cluster.resources.get(type='vm') if vm['vmid'] == int(vmid) ]
+
+def content_check(proxmox, node, ostemplate, template_store):
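+ # Truthy (a non-empty list) when the template's volid, e.g.
+ # 'local:vztmpl/ubuntu-14.04-x86_64.tar.gz', is present on the storage.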
+ return [ True for cnt in proxmox.nodes(node).storage(template_store).content.get() if cnt['volid'] == ostemplate ]
+
+def node_check(proxmox, node):
+ return [ True for nd in proxmox.nodes.get() if nd['node'] == node ]
+
+def create_instance(module, proxmox, vmid, node, disk, storage, cpus, memory, swap, timeout, **kwargs):
+ proxmox_node = proxmox.nodes(node)
+ kwargs = dict((k,v) for k, v in kwargs.iteritems() if v is not None)
+ if VZ_TYPE =='lxc':
+ kwargs['cpulimit']=cpus
+ kwargs['rootfs']=disk
+ if 'netif' in kwargs:
+ kwargs.update(kwargs['netif'])
+ del kwargs['netif']
+ if 'mounts' in kwargs:
+ kwargs.update(kwargs['mounts'])
+ del kwargs['mounts']
+ else:
+ kwargs['cpus']=cpus
+ kwargs['disk']=disk
+ taskid = getattr(proxmox_node, VZ_TYPE).create(vmid=vmid, storage=storage, memory=memory, swap=swap, **kwargs)
+
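+ # Creation runs as an asynchronous Proxmox task: poll until the task
+ # reports status 'stopped' with exitstatus 'OK', or give up after `timeout`
+ # one-second ticks. The start/stop/umount helpers below use the same
+ # convention.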
+ while timeout:
+ if ( proxmox_node.tasks(taskid).status.get()['status'] == 'stopped'
+ and proxmox_node.tasks(taskid).status.get()['exitstatus'] == 'OK' ):
+ return True
+ timeout = timeout - 1
+ if timeout == 0:
+ module.fail_json(msg='Reached timeout while waiting for creating VM. Last line in task before timeout: %s'
+ % proxmox_node.tasks(taskid).log.get()[:1])
+
+ time.sleep(1)
+ return False
+
+def start_instance(module, proxmox, vm, vmid, timeout):
+ taskid = getattr(proxmox.nodes(vm[0]['node']), VZ_TYPE)(vmid).status.start.post()
+ while timeout:
+ if ( proxmox.nodes(vm[0]['node']).tasks(taskid).status.get()['status'] == 'stopped'
+ and proxmox.nodes(vm[0]['node']).tasks(taskid).status.get()['exitstatus'] == 'OK' ):
+ return True
+ timeout = timeout - 1
+ if timeout == 0:
+ module.fail_json(msg='Reached timeout while waiting for starting VM. Last line in task before timeout: %s'
+ % proxmox.nodes(vm[0]['node']).tasks(taskid).log.get()[:1])
+
+ time.sleep(1)
+ return False
+
+def stop_instance(module, proxmox, vm, vmid, timeout, force):
+ if force:
+ taskid = getattr(proxmox.nodes(vm[0]['node']), VZ_TYPE)(vmid).status.shutdown.post(forceStop=1)
+ else:
+ taskid = getattr(proxmox.nodes(vm[0]['node']), VZ_TYPE)(vmid).status.shutdown.post()
+ while timeout:
+ if ( proxmox.nodes(vm[0]['node']).tasks(taskid).status.get()['status'] == 'stopped'
+ and proxmox.nodes(vm[0]['node']).tasks(taskid).status.get()['exitstatus'] == 'OK' ):
+ return True
+ timeout = timeout - 1
+ if timeout == 0:
+ module.fail_json(msg='Reached timeout while waiting for stopping VM. Last line in task before timeout: %s'
+ % proxmox.nodes(vm[0]['node']).tasks(taskid).log.get()[:1])
+
+ time.sleep(1)
+ return False
+
+def umount_instance(module, proxmox, vm, vmid, timeout):
+ taskid = getattr(proxmox.nodes(vm[0]['node']), VZ_TYPE)(vmid).status.umount.post()
+ while timeout:
+ if ( proxmox.nodes(vm[0]['node']).tasks(taskid).status.get()['status'] == 'stopped'
+ and proxmox.nodes(vm[0]['node']).tasks(taskid).status.get()['exitstatus'] == 'OK' ):
+ return True
+ timeout = timeout - 1
+ if timeout == 0:
+ module.fail_json(msg='Reached timeout while waiting for unmounting VM. Last line in task before timeout: %s'
+ % proxmox.nodes(vm[0]['node']).tasks(taskid).log.get()[:1])
+
+ time.sleep(1)
+ return False
+
+def main():
+ module = AnsibleModule(
+ argument_spec = dict(
+ api_host = dict(required=True),
+ api_user = dict(required=True),
+ api_password = dict(no_log=True),
+ vmid = dict(required=True),
+ validate_certs = dict(type='bool', default='no'),
+ node = dict(),
+ password = dict(no_log=True),
+ hostname = dict(),
+ ostemplate = dict(),
+ disk = dict(type='str', default='3'),
+ cpus = dict(type='int', default=1),
+ memory = dict(type='int', default=512),
+ swap = dict(type='int', default=0),
+ netif = dict(type='dict'),
+ mounts = dict(type='dict'),
+ ip_address = dict(),
+ onboot = dict(type='bool', default='no'),
+ storage = dict(default='local'),
+ cpuunits = dict(type='int', default=1000),
+ nameserver = dict(),
+ searchdomain = dict(),
+ timeout = dict(type='int', default=30),
+ force = dict(type='bool', default='no'),
+ state = dict(default='present', choices=['present', 'absent', 'stopped', 'started', 'restarted']),
+ )
+ )
+
+ if not HAS_PROXMOXER:
+ module.fail_json(msg='proxmoxer required for this module')
+
+ state = module.params['state']
+ api_user = module.params['api_user']
+ api_host = module.params['api_host']
+ api_password = module.params['api_password']
+ vmid = module.params['vmid']
+ validate_certs = module.params['validate_certs']
+ node = module.params['node']
+ disk = module.params['disk']
+ cpus = module.params['cpus']
+ memory = module.params['memory']
+ swap = module.params['swap']
+ storage = module.params['storage']
+ if module.params['ostemplate'] is not None:
+ template_store = module.params['ostemplate'].split(":")[0]
+ timeout = module.params['timeout']
+
+ # If password not set get it from PROXMOX_PASSWORD env
+ if not api_password:
+ try:
+ api_password = os.environ['PROXMOX_PASSWORD']
+ except KeyError, e:
+ module.fail_json(msg='You should set api_password param or use PROXMOX_PASSWORD environment variable')
+
+ try:
+ proxmox = ProxmoxAPI(api_host, user=api_user, password=api_password, verify_ssl=validate_certs)
+ global VZ_TYPE
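+ # PVE 4.x replaced OpenVZ with LXC, so pick the matching API path.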
+ VZ_TYPE = 'openvz' if float(proxmox.version.get()['version']) < 4.0 else 'lxc'
+
+ except Exception, e:
+ module.fail_json(msg='authorization on proxmox cluster failed with exception: %s' % e)
+
+ if state == 'present':
+ try:
+ if get_instance(proxmox, vmid) and not module.params['force']:
+ module.exit_json(changed=False, msg="VM with vmid = %s is already exists" % vmid)
+ elif not (node, module.params['hostname'] and module.params['password'] and module.params['ostemplate']):
+ module.fail_json(msg='node, hostname, password and ostemplate are mandatory for creating vm')
+ elif not node_check(proxmox, node):
+ module.fail_json(msg="node '%s' not exists in cluster" % node)
+ elif not content_check(proxmox, node, module.params['ostemplate'], template_store):
+ module.fail_json(msg="ostemplate '%s' not exists on node %s and storage %s"
+ % (module.params['ostemplate'], node, template_store))
+
+ create_instance(module, proxmox, vmid, node, disk, storage, cpus, memory, swap, timeout,
+ password = module.params['password'],
+ hostname = module.params['hostname'],
+ ostemplate = module.params['ostemplate'],
+ netif = module.params['netif'],
+ mounts = module.params['mounts'],
+ ip_address = module.params['ip_address'],
+ onboot = int(module.params['onboot']),
+ cpuunits = module.params['cpuunits'],
+ nameserver = module.params['nameserver'],
+ searchdomain = module.params['searchdomain'],
+ force = int(module.params['force']))
+
+ module.exit_json(changed=True, msg="deployed VM %s from template %s" % (vmid, module.params['ostemplate']))
+ except Exception, e:
+ module.fail_json(msg="creation of %s VM %s failed with exception: %s" % ( VZ_TYPE, vmid, e ))
+
+ elif state == 'started':
+ try:
+ vm = get_instance(proxmox, vmid)
+ if not vm:
+ module.fail_json(msg='VM with vmid = %s does not exist in cluster' % vmid)
+ if getattr(proxmox.nodes(vm[0]['node']), VZ_TYPE)(vmid).status.current.get()['status'] == 'running':
+ module.exit_json(changed=False, msg="VM %s is already running" % vmid)
+
+ if start_instance(module, proxmox, vm, vmid, timeout):
+ module.exit_json(changed=True, msg="VM %s started" % vmid)
+ except Exception, e:
+ module.fail_json(msg="starting of VM %s failed with exception: %s" % ( vmid, e ))
+
+ elif state == 'stopped':
+ try:
+ vm = get_instance(proxmox, vmid)
+ if not vm:
+ module.fail_json(msg='VM with vmid = %s does not exist in cluster' % vmid)
+
+ if getattr(proxmox.nodes(vm[0]['node']), VZ_TYPE)(vmid).status.current.get()['status'] == 'mounted':
+ if module.params['force']:
+ if umount_instance(module, proxmox, vm, vmid, timeout):
+ module.exit_json(changed=True, msg="VM %s is shutting down" % vmid)
+ else:
+ module.exit_json(changed=False, msg=("VM %s is already shutdown, but mounted. "
+ "You can use force option to umount it.") % vmid)
+
+ if getattr(proxmox.nodes(vm[0]['node']), VZ_TYPE)(vmid).status.current.get()['status'] == 'stopped':
+ module.exit_json(changed=False, msg="VM %s is already shutdown" % vmid)
+
+ if stop_instance(module, proxmox, vm, vmid, timeout, force = module.params['force']):
+ module.exit_json(changed=True, msg="VM %s is shutting down" % vmid)
+ except Exception, e:
+ module.fail_json(msg="stopping of VM %s failed with exception: %s" % ( vmid, e ))
+
+ elif state == 'restarted':
+ try:
+ vm = get_instance(proxmox, vmid)
+ if not vm:
+ module.fail_json(msg='VM with vmid = %s does not exist in cluster' % vmid)
+ if ( getattr(proxmox.nodes(vm[0]['node']), VZ_TYPE)(vmid).status.current.get()['status'] == 'stopped'
+ or getattr(proxmox.nodes(vm[0]['node']), VZ_TYPE)(vmid).status.current.get()['status'] == 'mounted' ):
+ module.exit_json(changed=False, msg="VM %s is not running" % vmid)
+
+ if ( stop_instance(module, proxmox, vm, vmid, timeout, force = module.params['force']) and
+ start_instance(module, proxmox, vm, vmid, timeout) ):
+ module.exit_json(changed=True, msg="VM %s is restarted" % vmid)
+ except Exception, e:
+ module.fail_json(msg="restarting of VM %s failed with exception: %s" % ( vmid, e ))
+
+ elif state == 'absent':
+ try:
+ vm = get_instance(proxmox, vmid)
+ if not vm:
+ module.exit_json(changed=False, msg="VM %s does not exist" % vmid)
+
+ if getattr(proxmox.nodes(vm[0]['node']), VZ_TYPE)(vmid).status.current.get()['status'] == 'running':
+ module.exit_json(changed=False, msg="VM %s is running. Stop it before deletion." % vmid)
+
+ if getattr(proxmox.nodes(vm[0]['node']), VZ_TYPE)(vmid).status.current.get()['status'] == 'mounted':
+ module.exit_json(changed=False, msg="VM %s is mounted. Stop it with force option before deletion." % vmid)
+
+ taskid = getattr(proxmox.nodes(vm[0]['node']), VZ_TYPE).delete(vmid)
+ while timeout:
+ if ( proxmox.nodes(vm[0]['node']).tasks(taskid).status.get()['status'] == 'stopped'
+ and proxmox.nodes(vm[0]['node']).tasks(taskid).status.get()['exitstatus'] == 'OK' ):
+ module.exit_json(changed=True, msg="VM %s removed" % vmid)
+ timeout = timeout - 1
+ if timeout == 0:
+ module.fail_json(msg='Reached timeout while waiting for removing VM. Last line in task before timeout: %s'
+ % proxmox.nodes(vm[0]['node']).tasks(taskid).log.get()[:1])
+
+ time.sleep(1)
+ except Exception, e:
+ module.fail_json(msg="deletion of VM %s failed with exception: %s" % ( vmid, e ))
+
+# import module snippets
+from ansible.module_utils.basic import *
+main()
diff --git a/lib/ansible/modules/extras/cloud/misc/proxmox_template.py b/lib/ansible/modules/extras/cloud/misc/proxmox_template.py
new file mode 100644
index 0000000000..6434e59be2
--- /dev/null
+++ b/lib/ansible/modules/extras/cloud/misc/proxmox_template.py
@@ -0,0 +1,232 @@
+#!/usr/bin/python
+# This file is part of Ansible
+#
+# Ansible is free software: you can redistribute it and/or modify
+# it under the terms of the GNU General Public License as published by
+# the Free Software Foundation, either version 3 of the License, or
+# (at your option) any later version.
+#
+# Ansible is distributed in the hope that it will be useful,
+# but WITHOUT ANY WARRANTY; without even the implied warranty of
+# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+# GNU General Public License for more details.
+#
+# You should have received a copy of the GNU General Public License
+# along with Ansible. If not, see <http://www.gnu.org/licenses/>.
+
+DOCUMENTATION = '''
+---
+module: proxmox_template
+short_description: management of OS templates in Proxmox VE cluster
+description:
+ - allows you to upload/delete templates in Proxmox VE cluster
+version_added: "2.0"
+options:
+ api_host:
+ description:
+ - the host of the Proxmox VE cluster
+ required: true
+ api_user:
+ description:
+ - the user to authenticate with
+ required: true
+ api_password:
+ description:
+ - the password to authenticate with
+ - you can use the PROXMOX_PASSWORD environment variable
+ default: null
+ required: false
+ validate_certs:
+ description:
+ - enable / disable https certificate verification
+ default: false
+ required: false
+ type: boolean
+ node:
+ description:
+ - the Proxmox VE node on which to operate with the template
+ default: null
+ required: true
+ src:
+ description:
+ - path to the file to upload
+ - required only for C(state=present)
+ default: null
+ required: false
+ aliases: ['path']
+ template:
+ description:
+ - the template name
+ - required only for C(state=absent)
+ default: null
+ required: false
+ content_type:
+ description:
+ - content type
+ - required only for C(state=present)
+ default: 'vztmpl'
+ required: false
+ choices: ['vztmpl', 'iso']
+ storage:
+ description:
+ - target storage
+ default: 'local'
+ required: false
+ type: string
+ timeout:
+ description:
+ - timeout for operations
+ default: 30
+ required: false
+ type: integer
+ force:
+ description:
+ - can be used only with C(state=present); an existing template will be overwritten
+ default: false
+ required: false
+ type: boolean
+ state:
+ description:
+ - Indicate desired state of the template
+ choices: ['present', 'absent']
+ default: present
+notes:
+ - Requires the proxmoxer and requests modules on the host. These modules can be installed with pip.
+requirements: [ "proxmoxer", "requests" ]
+author: "Sergei Antipov @UnderGreen"
+'''
+
+EXAMPLES = '''
+# Upload new openvz template with minimal options
+- proxmox_template: node='uk-mc02' api_user='root@pam' api_password='1q2w3e' api_host='node1' src='~/ubuntu-14.04-x86_64.tar.gz'
+
+# Upload new openvz template with minimal options using the PROXMOX_PASSWORD environment variable (you should export it beforehand)
+- proxmox_template: node='uk-mc02' api_user='root@pam' api_host='node1' src='~/ubuntu-14.04-x86_64.tar.gz'
+
+# Upload new openvz template with all options and force overwrite
+- proxmox_template: node='uk-mc02' api_user='root@pam' api_password='1q2w3e' api_host='node1' storage='local' content_type='vztmpl' src='~/ubuntu-14.04-x86_64.tar.gz' force=yes
+
+# Delete template with minimal options
+- proxmox_template: node='uk-mc02' api_user='root@pam' api_password='1q2w3e' api_host='node1' template='ubuntu-14.04-x86_64.tar.gz' state=absent
+'''
+
+import os
+import time
+
+try:
+ from proxmoxer import ProxmoxAPI
+ HAS_PROXMOXER = True
+except ImportError:
+ HAS_PROXMOXER = False
+
+def get_template(proxmox, node, storage, content_type, template):
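+ # Proxmox names stored templates '<storage>:<content_type>/<template>',
+ # e.g. 'local:vztmpl/ubuntu-14.04-x86_64.tar.gz'; match on that volid.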
+ return [ True for tmpl in proxmox.nodes(node).storage(storage).content.get()
+ if tmpl['volid'] == '%s:%s/%s' % (storage, content_type, template) ]
+
+def upload_template(module, proxmox, api_host, node, storage, content_type, realpath, timeout):
+ taskid = proxmox.nodes(node).storage(storage).upload.post(content=content_type, filename=open(realpath))
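+ # The upload also runs as an asynchronous task. Note the node name is
+ # assumed to be the short form of api_host (everything before the first
+ # dot) when looking up the task status.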
+ while timeout:
+ task_status = proxmox.nodes(api_host.split('.')[0]).tasks(taskid).status.get()
+ if task_status['status'] == 'stopped' and task_status['exitstatus'] == 'OK':
+ return True
+ timeout = timeout - 1
+ if timeout == 0:
+ module.fail_json(msg='Reached timeout while waiting for uploading template. Last line in task before timeout: %s'
+ % proxmox.nodes(node).tasks(taskid).log.get()[:1])
+
+ time.sleep(1)
+ return False
+
+def delete_template(module, proxmox, node, storage, content_type, template, timeout):
+ volid = '%s:%s/%s' % (storage, content_type, template)
+ proxmox.nodes(node).storage(storage).content.delete(volid)
+ while timeout:
+ if not get_template(proxmox, node, storage, content_type, template):
+ return True
+ timeout = timeout - 1
+ if timeout == 0:
+ module.fail_json(msg='Reached timeout while waiting for template deletion.')
+
+ time.sleep(1)
+ return False
+
+def main():
+ module = AnsibleModule(
+ argument_spec = dict(
+ api_host = dict(required=True),
+ api_user = dict(required=True),
+ api_password = dict(no_log=True),
+ validate_certs = dict(type='bool', default='no'),
+ node = dict(),
+ src = dict(),
+ template = dict(),
+ content_type = dict(default='vztmpl', choices=['vztmpl','iso']),
+ storage = dict(default='local'),
+ timeout = dict(type='int', default=30),
+ force = dict(type='bool', default='no'),
+ state = dict(default='present', choices=['present', 'absent']),
+ )
+ )
+
+ if not HAS_PROXMOXER:
+ module.fail_json(msg='proxmoxer required for this module')
+
+ state = module.params['state']
+ api_user = module.params['api_user']
+ api_host = module.params['api_host']
+ api_password = module.params['api_password']
+ validate_certs = module.params['validate_certs']
+ node = module.params['node']
+ storage = module.params['storage']
+ timeout = module.params['timeout']
+
+ # If password not set get it from PROXMOX_PASSWORD env
+ if not api_password:
+ try:
+ api_password = os.environ['PROXMOX_PASSWORD']
+ except KeyError:
+ module.fail_json(msg='You should set api_password param or use PROXMOX_PASSWORD environment variable')
+
+ try:
+ proxmox = ProxmoxAPI(api_host, user=api_user, password=api_password, verify_ssl=validate_certs)
+ except Exception as e:
+ module.fail_json(msg='authorization on proxmox cluster failed with exception: %s' % e)
+
+ if state == 'present':
+ try:
+ content_type = module.params['content_type']
+ src = module.params['src']
+
+ if not src:
+ module.fail_json(msg='src param for uploading the template file is mandatory')
+
+ from ansible import utils
+ realpath = utils.path_dwim(None, src)
+ template = os.path.basename(realpath)
+ if get_template(proxmox, node, storage, content_type, template) and not module.params['force']:
+ module.exit_json(changed=False, msg='template with volid=%s:%s/%s already exists' % (storage, content_type, template))
+ elif not (os.path.exists(realpath) and os.path.isfile(realpath)):
+ module.fail_json(msg='template file at path %s does not exist' % realpath)
+
+ if upload_template(module, proxmox, api_host, node, storage, content_type, realpath, timeout):
+ module.exit_json(changed=True, msg='template with volid=%s:%s/%s uploaded' % (storage, content_type, template))
+ except Exception as e:
+ module.fail_json(msg="uploading of template %s failed with exception: %s" % (template, e))
+
+ elif state == 'absent':
+ try:
+ content_type = module.params['content_type']
+ template = module.params['template']
+
+ if not template:
+ module.fail_json(msg='template param is mandatory')
+ elif not get_template(proxmox, node, storage, content_type, template):
+ module.exit_json(changed=False, msg='template with volid=%s:%s/%s is already deleted' % (storage, content_type, template))
+
+ if delete_template(module, proxmox, node, storage, content_type, template, timeout):
+ module.exit_json(changed=True, msg='template with volid=%s:%s/%s deleted' % (storage, content_type, template))
+ except Exception as e:
+ module.fail_json(msg="deleting of template %s failed with exception: %s" % (template, e))
+
+# import module snippets
+from ansible.module_utils.basic import *
+main()
diff --git a/lib/ansible/modules/extras/cloud/misc/rhevm.py b/lib/ansible/modules/extras/cloud/misc/rhevm.py
new file mode 100644
index 0000000000..9d12a802af
--- /dev/null
+++ b/lib/ansible/modules/extras/cloud/misc/rhevm.py
@@ -0,0 +1,1530 @@
+#!/usr/bin/python
+
+# (c) 2016, Timothy Vandenbrande <timothy.vandenbrande@gmail.com>
+#
+# This file is part of Ansible
+#
+# Ansible is free software: you can redistribute it and/or modify
+# it under the terms of the GNU General Public License as published by
+# the Free Software Foundation, either version 3 of the License, or
+# (at your option) any later version.
+#
+# Ansible is distributed in the hope that it will be useful,
+# but WITHOUT ANY WARRANTY; without even the implied warranty of
+# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+# GNU General Public License for more details.
+#
+# You should have received a copy of the GNU General Public License
+# along with Ansible. If not, see <http://www.gnu.org/licenses/>.
+
+DOCUMENTATION = '''
+---
+module: rhevm
+author: Timothy Vandenbrande
+short_description: RHEV/oVirt automation
+description:
+ - Allows you to create, remove, update, or power-manage virtual machines on a RHEV/oVirt platform.
+version_added: "2.2"
+requirements:
+ - ovirtsdk
+options:
+ user:
+ description:
+ - The user to authenticate with.
+ default: "admin@internal"
+ required: false
+ server:
+ description:
+ - The name/ip of your RHEV-m/oVirt instance.
+ default: "127.0.0.1"
+ required: false
+ port:
+ description:
+ - The port on which the API is reachable.
+ default: "443"
+ required: false
+ insecure_api:
+ description:
+ - A boolean switch to make a secure or insecure connection to the server.
+ default: false
+ required: false
+ name:
+ description:
+ - The name of the VM.
+ cluster:
+ description:
+ - The rhev/ovirt cluster in which you want your VM to start.
+ required: false
+ datacenter:
+ description:
+ - The rhev/ovirt datacenter in which you want your VM to start.
+ required: false
+ default: "Default"
+ state:
+ description:
+ - This serves to create, remove, update, or power-manage your VM.
+ default: "present"
+ required: false
+ choices: ['ping', 'present', 'absent', 'up', 'down', 'restarted', 'cd', 'info']
+ image:
+ description:
+ - The template to use for the VM.
+ default: null
+ required: false
+ type:
+ description:
+ - To define if the VM is a server, desktop or host.
+ default: server
+ required: false
+ choices: [ 'server', 'desktop', 'host' ]
+ vmhost:
+ description:
+ - The host you wish your VM to run on.
+ required: false
+ vmcpu:
+ description:
+ - The number of CPUs you want in your VM.
+ default: "2"
+ required: false
+ cpu_share:
+ description:
+ - This parameter is used to configure the cpu share.
+ default: "0"
+ required: false
+ vmmem:
+ description:
+ - The amount of memory you want your VM to use (in GB).
+ default: "1"
+ required: false
+ osver:
+ description:
+ - The operating system option in RHEV/oVirt.
+ default: "rhel_6x64"
+ required: false
+ mempol:
+ description:
+ - The minimum amount of memory you wish to reserve for this system (in GB).
+ default: "1"
+ required: false
+ vm_ha:
+ description:
+ - To make your VM highly available.
+ default: true
+ required: false
+ disks:
+ description:
+ - This option uses complex arguments and is a list of disks with the options name, size and domain.
+ required: false
+ ifaces:
+ description:
+ - This option uses complex arguments and is a list of interfaces with the options name and vlan.
+ aliases: ['nics', 'interfaces']
+ required: false
+ boot_order:
+ description:
+ - This option uses complex arguments and is a list of items that specify the boot order.
+ default: ["network","hd"]
+ required: false
+ del_prot:
+ description:
+ - This option sets the delete protection checkbox.
+ default: true
+ required: false
+ cd_drive:
+ description:
+ - The CD you wish to have mounted on the VM when I(state = 'cd').
+ default: null
+ required: false
+ timeout:
+ description:
+ - The timeout you wish to define for power actions.
+ - When I(state = 'up')
+ - When I(state = 'down')
+ - When I(state = 'restarted')
+ default: null
+ required: false
+'''
+
+RETURN = '''
+vm:
+ description: Returns all of the VM's variables and execution details.
+ returned: always
+ type: dict
+ sample: '{
+ "boot_order": [
+ "hd",
+ "network"
+ ],
+ "changed": true,
+ "changes": [
+ "Delete Protection"
+ ],
+ "cluster": "C1",
+ "cpu_share": "0",
+ "created": false,
+ "datacenter": "Default",
+ "del_prot": true,
+ "disks": [
+ {
+ "domain": "ssd-san",
+ "name": "OS",
+ "size": 40
+ }
+ ],
+ "eth0": "00:00:5E:00:53:00",
+ "eth1": "00:00:5E:00:53:01",
+ "eth2": "00:00:5E:00:53:02",
+ "exists": true,
+ "failed": false,
+ "ifaces": [
+ {
+ "name": "eth0",
+ "vlan": "Management"
+ },
+ {
+ "name": "eth1",
+ "vlan": "Internal"
+ },
+ {
+ "name": "eth2",
+ "vlan": "External"
+ }
+ ],
+ "image": false,
+ "mempol": "0",
+ "msg": [
+ "VM exists",
+ "cpu_share was already set to 0",
+ "VM high availability was already set to True",
+ "The boot order has already been set",
+ "VM delete protection has been set to True",
+ "Disk web2_Disk0_OS already exists",
+ "The VM starting host was already set to host416"
+ ],
+ "name": "web2",
+ "type": "server",
+ "uuid": "4ba5a1be-e60b-4368-9533-920f156c817b",
+ "vm_ha": true,
+ "vmcpu": "4",
+ "vmhost": "host416",
+ "vmmem": "16"
+ }'
+'''
+
+EXAMPLES = '''
+# basic example: get info from a VM
+ action: rhevm
+ args:
+ name: "demo"
+ user: "{{ rhev.admin.name }}"
+ password: "{{ rhev.admin.pass }}"
+ server: "rhevm01"
+ state: "info"
+
+# basic create example from image
+ action: rhevm
+ args:
+ name: "demo"
+ user: "{{ rhev.admin.name }}"
+ password: "{{ rhev.admin.pass }}"
+ server: "rhevm01"
+ state: "present"
+ image: "centos7_x64"
+ cluster: "centos"
+
+# power management
+ action: rhevm
+ args:
+ name: "uptime_server"
+ user: "{{ rhev.admin.name }}"
+ password: "{{ rhev.admin.pass }}"
+ server: "rhevm01"
+ cluster: "RH"
+ state: "down"
+ image: "centos7_x64"
+ cluster: "centos
+
+# multi disk, multi nic create example
+ action: rhevm
+ args:
+ name: "server007"
+ user: "{{ rhev.admin.name }}"
+ password: "{{ rhev.admin.pass }}"
+ server: "rhevm01"
+ cluster: "RH"
+ state: "present"
+ type: "server"
+ vmcpu: 4
+ vmmem: 2
+ ifaces:
+ - name: "eth0"
+ vlan: "vlan2202"
+ - name: "eth1"
+ vlan: "vlan36"
+ - name: "eth2"
+ vlan: "vlan38"
+ - name: "eth3"
+ vlan: "vlan2202"
+ disks:
+ - name: "root"
+ size: 10
+ domain: "ssd-san"
+ - name: "swap"
+ size: 10
+ domain: "15kiscsi-san"
+ - name: "opt"
+ size: 10
+ domain: "15kiscsi-san"
+ - name: "var"
+ size: 10
+ domain: "10kiscsi-san"
+ - name: "home"
+ size: 10
+ domain: "sata-san"
+ boot_order:
+ - "network"
+ - "hd"
+
+# attach a CD (ISO image) to a VM's CD drive
+ action: rhevm
+ args:
+ name: 'server007'
+ user: "{{ rhev.admin.name }}"
+ password: "{{ rhev.admin.pass }}"
+ state: 'cd'
+ cd_drive: 'rhev-tools-setup.iso'
+
+# new host deployment + host network configuration
+ action: rhevm
+ args:
+ name: "ovirt_node007"
+ password: "{{ rhevm.admin.pass }}"
+ type: "host"
+ state: present
+ cluster: "rhevm01"
+ ifaces:
+ - name: em1
+ - name: em2
+ - name: p3p1
+ ip: '172.31.224.200'
+ netmask: '255.255.254.0'
+ - name: p3p2
+ ip: '172.31.225.200'
+ netmask: '255.255.254.0'
+ - name: bond0
+ bond:
+ - em1
+ - em2
+ network: 'rhevm'
+ ip: '172.31.222.200'
+ netmask: '255.255.255.0'
+ management: True
+ - name: bond0.36
+ network: 'vlan36'
+ ip: '10.2.36.200'
+ netmask: '255.255.254.0'
+ gateway: '10.2.36.254'
+ - name: bond0.2202
+ network: 'vlan2202'
+ - name: bond0.38
+ network: 'vlan38'
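+
+# remove a VM; a minimal sketch assuming delete protection is lifted first
+# (the module updates del_prot before attempting the removal)
+ action: rhevm
+ args:
+ name: "server007"
+ user: "{{ rhev.admin.name }}"
+ password: "{{ rhev.admin.pass }}"
+ server: "rhevm01"
+ state: "absent"
+ del_prot: false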
+'''
+
+import time
+import sys
+import traceback
+import json
+
+try:
+ from ovirtsdk.api import API
+ from ovirtsdk.xml import params
+ HAS_SDK = True
+except ImportError:
+ HAS_SDK = False
+
+RHEV_FAILED = 1
+RHEV_SUCCESS = 0
+RHEV_UNAVAILABLE = 2
+
+RHEV_TYPE_OPTS = ['server', 'desktop', 'host']
+STATE_OPTS = ['ping', 'present', 'absent', 'up', 'down', 'restarted', 'cd', 'info']
+
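+# Module-wide result state: the setMsg/setChanged/setFailed helpers further
+# down accumulate messages and flags that core() hands back to Ansible.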
+global msg, changed, failed
+msg = []
+changed = False
+failed = False
+
+
+class RHEVConn(object):
+ 'Connection to RHEV-M'
+ def __init__(self, module):
+ self.module = module
+
+ user = module.params.get('user')
+ password = module.params.get('password')
+ server = module.params.get('server')
+ port = module.params.get('port')
+ insecure_api = module.params.get('insecure_api')
+
+ url = "https://%s:%s" % (server, port)
+
+ try:
+ api = API(url=url, username=user, password=password, insecure=insecure_api)
+ api.test()
+ self.conn = api
+ except Exception:
+ raise Exception("Failed to connect to RHEV-M.")
+
+ def __del__(self):
+ self.conn.disconnect()
+
+ def createVMimage(self, name, cluster, template):
+ try:
+ vmparams = params.VM(
+ name=name,
+ cluster=self.conn.clusters.get(name=cluster),
+ template=self.conn.templates.get(name=template),
+ disks=params.Disks(clone=True)
+ )
+ self.conn.vms.add(vmparams)
+ setMsg("VM is created")
+ setChanged()
+ return True
+ except Exception as e:
+ setMsg("Failed to create VM")
+ setMsg(str(e))
+ setFailed()
+ return False
+
+ def createVM(self, name, cluster, os, actiontype):
+ try:
+ vmparams = params.VM(
+ name=name,
+ cluster=self.conn.clusters.get(name=cluster),
+ os=params.OperatingSystem(type_=os),
+ template=self.conn.templates.get(name="Blank"),
+ type_=actiontype
+ )
+ self.conn.vms.add(vmparams)
+ setMsg("VM is created")
+ setChanged()
+ return True
+ except Exception as e:
+ setMsg("Failed to create VM")
+ setMsg(str(e))
+ setFailed()
+ return False
+
+ def createDisk(self, vmname, diskname, disksize, diskdomain, diskinterface, diskformat, diskallocationtype, diskboot):
+ VM = self.get_VM(vmname)
+
+ newdisk = params.Disk(
+ name=diskname,
+ size=1024 * 1024 * 1024 * int(disksize),
+ wipe_after_delete=True,
+ sparse=diskallocationtype,
+ interface=diskinterface,
+ format=diskformat,
+ bootable=diskboot,
+ storage_domains=params.StorageDomains(
+ storage_domain=[self.get_domain(diskdomain)]
+ )
+ )
+
+ try:
+ VM.disks.add(newdisk)
+ VM.update()
+ setMsg("Successfully added disk " + diskname)
+ setChanged()
+ except Exception as e:
+ setFailed()
+ setMsg("Error attaching " + diskname + "disk, please recheck and remove any leftover configuration.")
+ setMsg(str(e))
+ return False
+
+ try:
+ currentdisk = VM.disks.get(name=diskname)
+ attempt = 1
+ while currentdisk.status.state != 'ok':
+ currentdisk = VM.disks.get(name=diskname)
+ if attempt == 100:
+ setMsg("Error, disk %s, state %s" % (diskname, str(currentdisk.status.state)))
+ raise Exception("timed out waiting for disk %s to become ready" % diskname)
+ else:
+ attempt += 1
+ time.sleep(2)
+ setMsg("The disk " + diskname + " is ready.")
+ except Exception as e:
+ setFailed()
+ setMsg("Error getting the state of " + diskname + ".")
+ setMsg(str(e))
+ return False
+ return True
+
+ def createNIC(self, vmname, nicname, vlan, interface):
+ VM = self.get_VM(vmname)
+ CLUSTER = self.get_cluster_byid(VM.cluster.id)
+ DC = self.get_DC_byid(CLUSTER.data_center.id)
+ newnic = params.NIC(
+ name=nicname,
+ network=DC.networks.get(name=vlan),
+ interface=interface
+ )
+
+ try:
+ VM.nics.add(newnic)
+ VM.update()
+ setMsg("Successfully added iface " + nicname)
+ setChanged()
+ except Exception as e:
+ setFailed()
+ setMsg("Error attaching " + nicname + " iface, please recheck and remove any leftover configuration.")
+ setMsg(str(e))
+ return False
+
+ try:
+ currentnic = VM.nics.get(name=nicname)
+ attempt = 1
+ while currentnic.active is not True:
+ currentnic = VM.nics.get(name=nicname)
+ if attempt == 100:
+ setMsg("Error, iface %s, state %s" % (nicname, str(currentnic.active)))
+ raise Exception("timed out waiting for iface %s to become active" % nicname)
+ else:
+ attempt += 1
+ time.sleep(2)
+ setMsg("The iface " + nicname + " is ready.")
+ except Exception as e:
+ setFailed()
+ setMsg("Error getting the state of " + nicname + ".")
+ setMsg(str(e))
+ return False
+ return True
+
+ def get_DC(self, dc_name):
+ return self.conn.datacenters.get(name=dc_name)
+
+ def get_DC_byid(self, dc_id):
+ return self.conn.datacenters.get(id=dc_id)
+
+ def get_VM(self, vm_name):
+ return self.conn.vms.get(name=vm_name)
+
+ def get_cluster_byid(self, cluster_id):
+ return self.conn.clusters.get(id=cluster_id)
+
+ def get_cluster(self, cluster_name):
+ return self.conn.clusters.get(name=cluster_name)
+
+ def get_domain_byid(self, dom_id):
+ return self.conn.storagedomains.get(id=dom_id)
+
+ def get_domain(self, domain_name):
+ return self.conn.storagedomains.get(name=domain_name)
+
+ def get_disk(self, disk):
+ return self.conn.disks.get(disk)
+
+ def get_network(self, dc_name, network_name):
+ return self.get_DC(dc_name).networks.get(network_name)
+
+ def get_network_byid(self, network_id):
+ return self.conn.networks.get(id=network_id)
+
+ def get_NIC(self, vm_name, nic_name):
+ return self.get_VM(vm_name).nics.get(nic_name)
+
+ def get_Host(self, host_name):
+ return self.conn.hosts.get(name=host_name)
+
+ def get_Host_byid(self, host_id):
+ return self.conn.hosts.get(id=host_id)
+
+ def set_Memory(self, name, memory):
+ VM = self.get_VM(name)
+ VM.memory = int(int(memory) * 1024 * 1024 * 1024)
+ try:
+ VM.update()
+ setMsg("The Memory has been updated.")
+ setChanged()
+ return True
+ except Exception as e:
+ setMsg("Failed to update memory.")
+ setMsg(str(e))
+ setFailed()
+ return False
+
+ def set_Memory_Policy(self, name, memory_policy):
+ VM = self.get_VM(name)
+ VM.memory_policy.guaranteed = int(int(memory_policy) * 1024 * 1024 * 1024)
+ try:
+ VM.update()
+ setMsg("The memory policy has been updated.")
+ setChanged()
+ return True
+ except Exception as e:
+ setMsg("Failed to update memory policy.")
+ setMsg(str(e))
+ setFailed()
+ return False
+
+ def set_CPU(self, name, cpu):
+ VM = self.get_VM(name)
+ VM.cpu.topology.cores = int(cpu)
+ try:
+ VM.update()
+ setMsg("The number of CPUs has been updated.")
+ setChanged()
+ return True
+ except Exception as e:
+ setMsg("Failed to update the number of CPUs.")
+ setMsg(str(e))
+ setFailed()
+ return False
+
+ def set_CPU_share(self, name, cpu_share):
+ VM = self.get_VM(name)
+ VM.cpu_shares = int(cpu_share)
+ try:
+ VM.update()
+ setMsg("The CPU share has been updated.")
+ setChanged()
+ return True
+ except Exception as e:
+ setMsg("Failed to update the CPU share.")
+ setMsg(str(e))
+ setFailed()
+ return False
+
+ def set_Disk(self, diskname, disksize, diskinterface, diskboot):
+ DISK = self.get_disk(diskname)
+ setMsg("Checking disk " + diskname)
+ if DISK.get_bootable() != diskboot:
+ try:
+ DISK.set_bootable(diskboot)
+ setMsg("Updated the boot option on the disk.")
+ setChanged()
+ except Exception as e:
+ setMsg("Failed to set the boot option on the disk.")
+ setMsg(str(e))
+ setFailed()
+ return False
+ else:
+ setMsg("The boot option of the disk is correct")
+ if int(DISK.size) < (1024 * 1024 * 1024 * int(disksize)):
+ try:
+ DISK.size = (1024 * 1024 * 1024 * int(disksize))
+ setMsg("Updated the size of the disk.")
+ setChanged()
+ except Exception as e:
+ setMsg("Failed to update the size of the disk.")
+ setMsg(str(e))
+ setFailed()
+ return False
+ elif int(DISK.size) > (1024 * 1024 * 1024 * int(disksize)):
+ setMsg("Shrinking disks is not supported")
+ setFailed()
+ return False
+ else:
+ setMsg("The size of the disk is correct")
+ if str(DISK.interface) != str(diskinterface):
+ try:
+ DISK.interface = diskinterface
+ setMsg("Updated the interface of the disk.")
+ setChanged()
+ except Exception as e:
+ setMsg("Failed to update the interface of the disk.")
+ setMsg(str(e))
+ setFailed()
+ return False
+ else:
+ setMsg("The interface of the disk is correct")
+ return True
+
+ def set_NIC(self, vmname, nicname, newname, vlan, interface):
+ NIC = self.get_NIC(vmname, nicname)
+ VM = self.get_VM(vmname)
+ CLUSTER = self.get_cluster_byid(VM.cluster.id)
+ DC = self.get_DC_byid(CLUSTER.data_center.id)
+ NETWORK = self.get_network(str(DC.name), vlan)
+ checkFail()
+ if NIC.name != newname:
+ NIC.name = newname
+ setMsg('Updating iface name to ' + newname)
+ setChanged()
+ if str(NIC.network.id) != str(NETWORK.id):
+ NIC.set_network(NETWORK)
+ setMsg('Updating iface network to ' + vlan)
+ setChanged()
+ if NIC.interface != interface:
+ NIC.interface = interface
+ setMsg('Updating iface interface to ' + interface)
+ setChanged()
+ try:
+ NIC.update()
+ setMsg('iface has successfully been updated.')
+ except Exception as e:
+ setMsg("Failed to update the iface.")
+ setMsg(str(e))
+ setFailed()
+ return False
+ return True
+
+ def set_DeleteProtection(self, vmname, del_prot):
+ VM = self.get_VM(vmname)
+ VM.delete_protected = del_prot
+ try:
+ VM.update()
+ setChanged()
+ except Exception as e:
+ setMsg("Failed to update delete protection.")
+ setMsg(str(e))
+ setFailed()
+ return False
+ return True
+
+ def set_BootOrder(self, vmname, boot_order):
+ VM = self.get_VM(vmname)
+ bootorder = []
+ for device in boot_order:
+ bootorder.append(params.Boot(dev=device))
+ VM.os.boot = bootorder
+
+ try:
+ VM.update()
+ setChanged()
+ except Exception as e:
+ setMsg("Failed to update the boot order.")
+ setMsg(str(e))
+ setFailed()
+ return False
+ return True
+
+ def set_Host(self, host_name, cluster, ifaces):
+ HOST = self.get_Host(host_name)
+ CLUSTER = self.get_cluster(cluster)
+
+ if HOST is None:
+ setMsg("Host does not exist.")
+ ifacelist = dict()
+ networklist = []
+ manageip = ''
+
+ try:
+ for iface in ifaces:
+ try:
+ setMsg('creating host interface ' + iface['name'])
+ if 'management' in iface.keys():
+ manageip = iface['ip']
+ if 'boot_protocol' not in iface.keys():
+ if 'ip' in iface.keys():
+ iface['boot_protocol'] = 'static'
+ else:
+ iface['boot_protocol'] = 'none'
+ if 'ip' not in iface.keys():
+ iface['ip'] = ''
+ if 'netmask' not in iface.keys():
+ iface['netmask'] = ''
+ if 'gateway' not in iface.keys():
+ iface['gateway'] = ''
+
+ if 'network' in iface.keys():
+ if 'bond' in iface.keys():
+ bond = []
+ for slave in iface['bond']:
+ bond.append(ifacelist[slave])
+ try:
+ tmpiface = params.Bonding(
+ slaves = params.Slaves(host_nic = bond),
+ options = params.Options(
+ option = [
+ params.Option(name = 'miimon', value = '100'),
+ params.Option(name = 'mode', value = '4')
+ ]
+ )
+ )
+ except Exception as e:
+ setMsg('Failed to create the bond for ' + iface['name'])
+ setFailed()
+ setMsg(str(e))
+ return False
+ try:
+ tmpnetwork = params.HostNIC(
+ network = params.Network(name = iface['network']),
+ name = iface['name'],
+ boot_protocol = iface['boot_protocol'],
+ ip = params.IP(
+ address = iface['ip'],
+ netmask = iface['netmask'],
+ gateway = iface['gateway']
+ ),
+ override_configuration = True,
+ bonding = tmpiface)
+ networklist.append(tmpnetwork)
+ setMsg('Applying network ' + iface['name'])
+ except Exception as e:
+ setMsg('Failed to set ' + iface['name'] + ' as network interface')
+ setFailed()
+ setMsg(str(e))
+ return False
+ else:
+ tmpnetwork = params.HostNIC(
+ network = params.Network(name = iface['network']),
+ name = iface['name'],
+ boot_protocol = iface['boot_protocol'],
+ ip = params.IP(
+ address = iface['ip'],
+ netmask = iface['netmask'],
+ gateway = iface['gateway']
+ ))
+ networklist.append(tmpnetwork)
+ setMsg('Applying network ' + iface['name'])
+ else:
+ tmpiface = params.HostNIC(
+ name=iface['name'],
+ network=params.Network(),
+ boot_protocol=iface['boot_protocol'],
+ ip=params.IP(
+ address=iface['ip'],
+ netmask=iface['netmask'],
+ gateway=iface['gateway']
+ ))
+ ifacelist[iface['name']] = tmpiface
+ except Exception as e:
+ setMsg('Failed to set ' + iface['name'])
+ setFailed()
+ setMsg(str(e))
+ return False
+ except Exception as e:
+ setMsg('Failed to set networks')
+ setMsg(str(e))
+ setFailed()
+ return False
+
+ if manageip == '':
+ setMsg('No management network is defined')
+ setFailed()
+ return False
+
+ try:
+ HOST = params.Host(name=host_name, address=manageip, cluster=CLUSTER, ssh=params.SSH(authentication_method='publickey'))
+ if self.conn.hosts.add(HOST):
+ setChanged()
+ HOST = self.get_Host(host_name)
+ state = HOST.status.state
+ while (state != 'non_operational' and state != 'up'):
+ HOST = self.get_Host(host_name)
+ state = HOST.status.state
+ time.sleep(1)
+ if state == 'non_responsive':
+ setMsg('Failed to add host to RHEVM')
+ setFailed()
+ return False
+
+ setMsg('status host: up')
+ time.sleep(5)
+
+ HOST = self.get_Host(host_name)
+ state = HOST.status.state
+ setMsg('State before setting to maintenance: ' + str(state))
+ HOST.deactivate()
+ while state != 'maintenance':
+ HOST = self.get_Host(host_name)
+ state = HOST.status.state
+ time.sleep(1)
+ setMsg('status host: maintenance')
+
+ try:
+ HOST.nics.setupnetworks(params.Action(
+ force=True,
+ check_connectivity = False,
+ host_nics = params.HostNics(host_nic = networklist)
+ ))
+ setMsg('nics are set')
+ except Exception as e:
+ setMsg('Failed to apply networkconfig')
+ setFailed()
+ setMsg(str(e))
+ return False
+
+ try:
+ HOST.commitnetconfig()
+ setMsg('Network config is saved')
+ except Exception as e:
+ setMsg('Failed to save networkconfig')
+ setFailed()
+ setMsg(str(e))
+ return False
+ except Exception as e:
+ if 'The Host name is already in use' in str(e):
+ setMsg("Host already exists")
+ else:
+ setMsg("Failed to add host")
+ setFailed()
+ setMsg(str(e))
+ return False
+
+ HOST.activate()
+ while state != 'up':
+ HOST = self.get_Host(host_name)
+ state = HOST.status.state
+ time.sleep(1)
+ if state == 'non_responsive':
+ setMsg('Failed to apply networkconfig.')
+ setFailed()
+ return False
+ setMsg('status host: up')
+ else:
+ setMsg("Host exists.")
+
+ return True
+
+ def del_NIC(self, vmname, nicname):
+ return self.get_NIC(vmname, nicname).delete()
+
+ def remove_VM(self, vmname):
+ VM = self.get_VM(vmname)
+ try:
+ VM.delete()
+ except Exception as e:
+ setMsg("Failed to remove VM.")
+ setMsg(str(e))
+ setFailed()
+ return False
+ return True
+
+ def start_VM(self, vmname, timeout):
+ VM = self.get_VM(vmname)
+ try:
+ VM.start()
+ except Exception as e:
+ setMsg("Failed to start VM.")
+ setMsg(str(e))
+ setFailed()
+ return False
+ return self.wait_VM(vmname, "up", timeout)
+
+ def wait_VM(self, vmname, state, timeout):
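+ # Poll every 10 seconds until the VM reaches the requested state; a
+ # timeout of False waits indefinitely.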
+ VM = self.get_VM(vmname)
+ while VM.status.state != state:
+ VM = self.get_VM(vmname)
+ time.sleep(10)
+ if timeout is not False:
+ timeout -= 10
+ if timeout <= 0:
+ setMsg("Timeout expired")
+ setFailed()
+ return False
+ return True
+
+ def stop_VM(self, vmname, timeout):
+ VM = self.get_VM(vmname)
+ try:
+ VM.stop()
+ except Exception as e:
+ setMsg("Failed to stop VM.")
+ setMsg(str(e))
+ setFailed()
+ return False
+ return self.wait_VM(vmname, "down", timeout)
+
+ def set_CD(self, vmname, cd_drive):
+ VM = self.get_VM(vmname)
+ try:
+ if str(VM.status.state) == 'down':
+ cdrom = params.CdRom(file=cd_drive)
+ VM.cdroms.add(cdrom)
+ setMsg("Attached the image.")
+ setChanged()
+ else:
+ cdrom = VM.cdroms.get(id="00000000-0000-0000-0000-000000000000")
+ cdrom.set_file(cd_drive)
+ cdrom.update(current=True)
+ setMsg("Attached the image.")
+ setChanged()
+ except Exception as e:
+ setMsg("Failed to attach image.")
+ setMsg(str(e))
+ setFailed()
+ return False
+ return True
+
+ def set_VM_Host(self, vmname, vmhost):
+ VM = self.get_VM(vmname)
+ HOST = self.get_Host(vmhost)
+ try:
+ VM.placement_policy.host = HOST
+ VM.update()
+ setMsg("Set startup host to " + vmhost)
+ setChanged()
+ except Exception as e:
+ setMsg("Failed to set startup host.")
+ setMsg(str(e))
+ setFailed()
+ return False
+ return True
+
+ def migrate_VM(self, vmname, vmhost):
+ VM = self.get_VM(vmname)
+
+ HOST = self.get_Host_byid(VM.host.id)
+ if str(HOST.name) != vmhost:
+ try:
+ VM.migrate(
+ action=params.Action(
+ host=params.Host(
+ name=vmhost,
+ )
+ ),
+ )
+ setChanged()
+ setMsg("VM migrated to " + vmhost)
+ except Exception as e:
+ setMsg("Failed to set startup host.")
+ setMsg(str(e))
+ setFailed()
+ return False
+ return True
+
+ def remove_CD(self, vmname):
+ VM = self.get_VM(vmname)
+ try:
+ VM.cdroms.get(id="00000000-0000-0000-0000-000000000000").delete()
+ setMsg("Removed the image.")
+ setChanged()
+ except Exception as e:
+ setMsg("Failed to remove the image.")
+ setMsg(str(e))
+ setFailed()
+ return False
+ return True
+
+
+class RHEV(object):
+ def __init__(self, module):
+ self.module = module
+
+ def __get_conn(self):
+ self.conn = RHEVConn(self.module)
+ return self.conn
+
+ def test(self):
+ self.__get_conn()
+ return "OK"
+
+ def getVM(self, name):
+ self.__get_conn()
+ VM = self.conn.get_VM(name)
+ if VM:
+ vminfo = dict()
+ vminfo['uuid'] = VM.id
+ vminfo['name'] = VM.name
+ vminfo['status'] = VM.status.state
+ vminfo['cpu_cores'] = VM.cpu.topology.cores
+ vminfo['cpu_sockets'] = VM.cpu.topology.sockets
+ vminfo['cpu_shares'] = VM.cpu_shares
+ vminfo['memory'] = (int(VM.memory) / 1024 / 1024 / 1024)
+ vminfo['mem_pol'] = (int(VM.memory_policy.guaranteed) / 1024 / 1024 / 1024)
+ vminfo['os'] = VM.get_os().type_
+ vminfo['del_prot'] = VM.delete_protected
+ try:
+ vminfo['host'] = str(self.conn.get_Host_byid(str(VM.host.id)).name)
+ except Exception as e:
+ vminfo['host'] = None
+ vminfo['boot_order'] = []
+ for boot_dev in VM.os.get_boot():
+ vminfo['boot_order'].append(str(boot_dev.dev))
+ vminfo['disks'] = []
+ for DISK in VM.disks.list():
+ disk = dict()
+ disk['name'] = DISK.name
+ disk['size'] = (int(DISK.size) / 1024 / 1024 / 1024)
+ disk['domain'] = str((self.conn.get_domain_byid(DISK.get_storage_domains().get_storage_domain()[0].id)).name)
+ disk['interface'] = DISK.interface
+ vminfo['disks'].append(disk)
+ vminfo['ifaces'] = []
+ for NIC in VM.nics.list():
+ iface = dict()
+ iface['name'] = str(NIC.name)
+ iface['vlan'] = str(self.conn.get_network_byid(NIC.get_network().id).name)
+ iface['interface'] = NIC.interface
+ iface['mac'] = NIC.mac.address
+ vminfo['ifaces'].append(iface)
+ vminfo[str(NIC.name)] = NIC.mac.address
+ CLUSTER = self.conn.get_cluster_byid(VM.cluster.id)
+ if CLUSTER:
+ vminfo['cluster'] = CLUSTER.name
+ else:
+ vminfo = False
+ return vminfo
+
+ def createVMimage(self, name, cluster, template, disks):
+ self.__get_conn()
+ return self.conn.createVMimage(name, cluster, template)
+
+ def createVM(self, name, cluster, os, actiontype):
+ self.__get_conn()
+ return self.conn.createVM(name, cluster, os, actiontype)
+
+ def setMemory(self, name, memory):
+ self.__get_conn()
+ return self.conn.set_Memory(name, memory)
+
+ def setMemoryPolicy(self, name, memory_policy):
+ self.__get_conn()
+ return self.conn.set_Memory_Policy(name, memory_policy)
+
+ def setCPU(self, name, cpu):
+ self.__get_conn()
+ return self.conn.set_CPU(name, cpu)
+
+ def setCPUShare(self, name, cpu_share):
+ self.__get_conn()
+ return self.conn.set_CPU_share(name, cpu_share)
+
+ def setDisks(self, name, disks):
+ self.__get_conn()
+ counter = 0
+ bootselect = False
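+ # If any disk is explicitly marked bootable, honour that; otherwise the
+ # first disk in the list is made the boot disk below.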
+ for disk in disks:
+ if 'bootable' in disk:
+ if disk['bootable'] is True:
+ bootselect = True
+
+ for disk in disks:
+ diskname = name + "_Disk" + str(counter) + "_" + disk.get('name', '').replace('/', '_')
+ disksize = disk.get('size', 1)
+ diskdomain = disk.get('domain', None)
+ if diskdomain is None:
+ setMsg("`domain` is a required disk key.")
+ setFailed()
+ return False
+ diskinterface = disk.get('interface', 'virtio')
+ diskformat = disk.get('format', 'raw')
+ diskallocationtype = disk.get('thin', False)
+ diskboot = disk.get('bootable', False)
+
+ if bootselect is False and counter == 0:
+ diskboot = True
+
+ DISK = self.conn.get_disk(diskname)
+
+ if DISK is None:
+ self.conn.createDisk(name, diskname, disksize, diskdomain, diskinterface, diskformat, diskallocationtype, diskboot)
+ else:
+ self.conn.set_Disk(diskname, disksize, diskinterface, diskboot)
+ checkFail()
+ counter += 1
+
+ return True
+
+ def setNetworks(self, vmname, ifaces):
+ self.__get_conn()
+ VM = self.conn.get_VM(vmname)
+
+ counter = 0
+ length = len(ifaces)
+
+ for NIC in VM.nics.list():
+ if counter < length:
+ iface = ifaces[counter]
+ name = iface.get('name', None)
+ if name is None:
+ setMsg("`name` is a required iface key.")
+ setFailed()
+ elif str(name) != str(NIC.name):
+ setMsg("ifaces are in the wrong order, rebuilding everything.")
+ for NIC in VM.nics.list():
+ self.conn.del_NIC(vmname, NIC.name)
+ self.setNetworks(vmname, ifaces)
+ checkFail()
+ return True
+ vlan = iface.get('vlan', None)
+ if vlan is None:
+ setMsg("`vlan` is a required iface key.")
+ setFailed()
+ checkFail()
+ interface = iface.get('interface', 'virtio')
+ self.conn.set_NIC(vmname, str(NIC.name), name, vlan, interface)
+ else:
+ self.conn.del_NIC(vmname, NIC.name)
+ counter += 1
+ checkFail()
+
+ while counter < length:
+ iface = ifaces[counter]
+ name = iface.get('name', None)
+ if name is None:
+ setMsg("`name` is a required iface key.")
+ setFailed()
+ vlan = iface.get('vlan', None)
+ if vlan is None:
+ setMsg("`vlan` is a required iface key.")
+ setFailed()
+ if failed is True:
+ return False
+ interface = iface.get('interface', 'virtio')
+ self.conn.createNIC(vmname, name, vlan, interface)
+
+ counter += 1
+ checkFail()
+ return True
+
+ def setDeleteProtection(self, vmname, del_prot):
+ self.__get_conn()
+ VM = self.conn.get_VM(vmname)
+ if bool(VM.delete_protected) != bool(del_prot):
+ self.conn.set_DeleteProtection(vmname, del_prot)
+ checkFail()
+ setMsg("`delete protection` has been updated.")
+ else:
+ setMsg("`delete protection` already has the right value.")
+ return True
+
+ def setBootOrder(self, vmname, boot_order):
+ self.__get_conn()
+ VM = self.conn.get_VM(vmname)
+ bootorder = []
+ for boot_dev in VM.os.get_boot():
+ bootorder.append(str(boot_dev.dev))
+
+ if boot_order != bootorder:
+ self.conn.set_BootOrder(vmname, boot_order)
+ setMsg('The boot order has been set')
+ else:
+ setMsg('The boot order has already been set')
+ return True
+
+ def removeVM(self, vmname):
+ self.__get_conn()
+ self.setPower(vmname, "down", 300)
+ return self.conn.remove_VM(vmname)
+
+ def setPower(self, vmname, state, timeout):
+ self.__get_conn()
+ VM = self.conn.get_VM(vmname)
+ if VM is None:
+ setMsg("VM does not exist.")
+ setFailed()
+ return False
+
+ if state == VM.status.state:
+ setMsg("VM state was already " + state)
+ else:
+ if state == "up":
+ setMsg("VM is going to start")
+ self.conn.start_VM(vmname, timeout)
+ setChanged()
+ elif state == "down":
+ setMsg("VM is going to stop")
+ self.conn.stop_VM(vmname, timeout)
+ setChanged()
+ elif state == "restarted":
+ self.setPower(vmname, "down", timeout)
+ checkFail()
+ self.setPower(vmname, "up", timeout)
+ checkFail()
+ setMsg("the vm state is set to " + state)
+ return True
+
+ def setCD(self, vmname, cd_drive):
+ self.__get_conn()
+ if cd_drive:
+ return self.conn.set_CD(vmname, cd_drive)
+ else:
+ return self.conn.remove_CD(vmname)
+
+ def setVMHost(self, vmname, vmhost):
+ self.__get_conn()
+ VM = self.conn.get_VM(vmname)
+ HOST = self.conn.get_Host(vmhost)
+
+ if VM.placement_policy.host is None:
+ self.conn.set_VM_Host(vmname, vmhost)
+ elif str(VM.placement_policy.host.id) != str(HOST.id):
+ self.conn.set_VM_Host(vmname, vmhost)
+ else:
+ setMsg("VM's startup host was already set to " + vmhost)
+ checkFail()
+
+ if str(VM.status.state) == "up":
+ self.conn.migrate_VM(vmname, vmhost)
+ checkFail()
+
+ return True
+
+ def setHost(self, hostname, cluster, ifaces):
+ self.__get_conn()
+ return self.conn.set_Host(hostname, cluster, ifaces)
+
+
+def checkFail():
+ if failed:
+ module.fail_json(msg=msg)
+ else:
+ return True
+
+
+def setFailed():
+ global failed
+ failed = True
+
+
+def setChanged():
+ global changed
+ changed = True
+
+
+def setMsg(message):
+ global msg
+ msg.append(message)
+
+
+def core(module):
+
+ r = RHEV(module)
+
+ state = module.params.get('state', 'present')
+
+ if state == 'ping':
+ r.test()
+ return RHEV_SUCCESS, {"ping": "pong"}
+ elif state == 'info':
+ name = module.params.get('name')
+ if not name:
+ setMsg("`name` is a required argument.")
+ return RHEV_FAILED, msg
+ vminfo = r.getVM(name)
+ return RHEV_SUCCESS, {'changed': changed, 'msg': msg, 'vm': vminfo}
+ elif state == 'present':
+ created = False
+ name = module.params.get('name')
+ if not name:
+ setMsg("`name` is a required argument.")
+ return RHEV_FAILED, msg
+ actiontype = module.params.get('type')
+ if actiontype == 'server' or actiontype == 'desktop':
+ vminfo = r.getVM(name)
+ if vminfo:
+ setMsg('VM exists')
+ else:
+ # Create VM
+ cluster = module.params.get('cluster')
+ if cluster is None:
+ setMsg("cluster is a required argument.")
+ setFailed()
+ template = module.params.get('image')
+ if template:
+ disks = module.params.get('disks')
+ if disks is None:
+ setMsg("disks is a required argument.")
+ setFailed()
+ checkFail()
+ if r.createVMimage(name, cluster, template, disks) is False:
+ return RHEV_FAILED, vminfo
+ else:
+ os = module.params.get('osver')
+ if os is None:
+ setMsg("osver is a required argument.")
+ setFailed()
+ checkFail()
+ if r.createVM(name, cluster, os, actiontype) is False:
+ return RHEV_FAILED, vminfo
+ created = True
+
+ # Set MEMORY and MEMORY POLICY
+ vminfo = r.getVM(name)
+ memory = module.params.get('vmmem')
+ if memory is not None:
+ memory_policy = module.params.get('mempol')
+ if int(memory_policy) == 0:
+ memory_policy = memory
+ mem_pol_nok = True
+ if int(vminfo['mem_pol']) == int(memory_policy):
+ setMsg("Memory is correct")
+ mem_pol_nok = False
+
+ mem_nok = True
+ if int(vminfo['memory']) == int(memory):
+ setMsg("Memory is correct")
+ mem_nok = False
+
+ if int(memory_policy) > int(memory):
+ setMsg('memory_policy cannot have a higher value than memory.')
+ return RHEV_FAILED, msg
+
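+ # Apply the two updates in an order that keeps the guaranteed memory
+ # (mempol) from exceeding total memory at any intermediate point.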
+ if mem_nok and mem_pol_nok:
+ if int(memory_policy) > int(vminfo['memory']):
+ r.setMemory(vminfo['name'], memory)
+ r.setMemoryPolicy(vminfo['name'], memory_policy)
+ else:
+ r.setMemoryPolicy(vminfo['name'], memory_policy)
+ r.setMemory(vminfo['name'], memory)
+ elif mem_nok:
+ r.setMemory(vminfo['name'], memory)
+ elif mem_pol_nok:
+ r.setMemoryPolicy(vminfo['name'], memory_policy)
+ checkFail()
+
+ # Set CPU
+ cpu = module.params.get('vmcpu')
+ if int(vminfo['cpu_cores']) == int(cpu):
+ setMsg("Number of CPUs is correct")
+ else:
+ if r.setCPU(vminfo['name'], cpu) is False:
+ return RHEV_FAILED, msg
+
+ # Set CPU SHARE
+ cpu_share = module.params.get('cpu_share')
+ if cpu_share is not None:
+ if int(vminfo['cpu_shares']) == int(cpu_share):
+ setMsg("CPU share is correct.")
+ else:
+ if r.setCPUShare(vminfo['name'], cpu_share) is False:
+ return RHEV_FAILED, msg
+
+ # Set DISKS
+ disks = module.params.get('disks')
+ if disks is not None:
+ if r.setDisks(vminfo['name'], disks) is False:
+ return RHEV_FAILED, msg
+
+ # Set NETWORKS
+ ifaces = module.params.get('ifaces', None)
+ if ifaces is not None:
+ if r.setNetworks(vminfo['name'], ifaces) is False:
+ return RHEV_FAILED, msg
+
+ # Set Delete Protection
+ del_prot = module.params.get('del_prot')
+ if r.setDeleteProtection(vminfo['name'], del_prot) is False:
+ return RHEV_FAILED, msg
+
+ # Set Boot Order
+ boot_order = module.params.get('boot_order')
+ if r.setBootOrder(vminfo['name'], boot_order) is False:
+ return RHEV_FAILED, msg
+
+ # Set VM Host
+ vmhost = module.params.get('vmhost')
+ if vmhost is not False and vmhost is not "False":
+ if r.setVMHost(vminfo['name'], vmhost) is False:
+ return RHEV_FAILED, msg
+
+ vminfo = r.getVM(name)
+ vminfo['created'] = created
+ return RHEV_SUCCESS, {'changed': changed, 'msg': msg, 'vm': vminfo}
+
+ if actiontype == 'host':
+ cluster = module.params.get('cluster')
+ if cluster is None:
+ setMsg("cluster is a required argument.")
+ setFailed()
+ ifaces = module.params.get('ifaces')
+ if ifaces is None:
+ setMsg("ifaces is a required argument.")
+ setFailed()
+ if r.setHost(name, cluster, ifaces) is False:
+ return RHEV_FAILED, msg
+ return RHEV_SUCCESS, {'changed': changed, 'msg': msg}
+
+ elif state == 'absent':
+ name = module.params.get('name')
+ if not name:
+ setMsg("`name` is a required argument.")
+ return RHEV_FAILED, msg
+ actiontype = module.params.get('type')
+ if actiontype == 'server' or actiontype == 'desktop':
+ vminfo = r.getVM(name)
+ if vminfo:
+ setMsg('VM exists')
+
+ # Set Delete Protection
+ del_prot = module.params.get('del_prot')
+ if r.setDeleteProtection(vminfo['name'], del_prot) is False:
+ return RHEV_FAILED, msg
+
+ # Remove VM
+ if r.removeVM(vminfo['name']) is False:
+ return RHEV_FAILED, msg
+ setMsg('VM has been removed.')
+ vminfo['state'] = 'DELETED'
+ else:
+ setMsg('VM was already removed.')
+ return RHEV_SUCCESS, {'changed': changed, 'msg': msg, 'vm': vminfo}
+
+ elif state == 'up' or state == 'down' or state == 'restarted':
+ name = module.params.get('name')
+ if not name:
+ setMsg("`name` is a required argument.")
+ return RHEV_FAILED, msg
+ timeout = module.params.get('timeout')
+ if r.setPower(name, state, timeout) is False:
+ return RHEV_FAILED, msg
+ vminfo = r.getVM(name)
+ return RHEV_SUCCESS, {'changed': changed, 'msg': msg, 'vm': vminfo}
+
+ elif state == 'cd':
+ name = module.params.get('name')
+ cd_drive = module.params.get('cd_drive')
+ if r.setCD(name, cd_drive) is False:
+ return RHEV_FAILED, msg
+ return RHEV_SUCCESS, {'changed': changed, 'msg': msg}
+
+
+def main():
+ global module
+ module = AnsibleModule(
+ argument_spec = dict(
+ state = dict(default='present', choices=['ping', 'present', 'absent', 'up', 'down', 'restarted', 'cd', 'info']),
+ user = dict(default="admin@internal"),
+ password = dict(required=True),
+ server = dict(default="127.0.0.1"),
+ port = dict(default="443"),
+ insecure_api = dict(default=False, type='bool'),
+ name = dict(),
+ image = dict(default=False),
+ datacenter = dict(default="Default"),
+ type = dict(default="server", choices=['server', 'desktop', 'host']),
+ cluster = dict(default=''),
+ vmhost = dict(default=False),
+ vmcpu = dict(default="2"),
+ vmmem = dict(default="1"),
+ disks = dict(),
+ osver = dict(default="rhel_6x64"),
+ ifaces = dict(aliases=['nics', 'interfaces']),
+ timeout = dict(default=False),
+ mempol = dict(default="1"),
+ vm_ha = dict(default=True),
+ cpu_share = dict(default="0"),
+ boot_order = dict(default=["network", "hd"]),
+ del_prot = dict(default=True, type="bool"),
+ cd_drive = dict(default=False)
+ ),
+ )
+
+ if not HAS_SDK:
+ module.fail_json(
+ msg='The `ovirtsdk` module is not importable. Check the requirements.'
+ )
+
+ rc = RHEV_SUCCESS
+ try:
+ rc, result = core(module)
+ except Exception as e:
+ module.fail_json(msg=str(e))
+
+ if rc != 0: # something went wrong, emit the msg
+ module.fail_json(rc=rc, msg=result)
+ else:
+ module.exit_json(**result)
+
+
+# import module snippets
+from ansible.module_utils.basic import *
+
+if __name__ == '__main__':
+ main()
diff --git a/lib/ansible/modules/extras/cloud/misc/virt.py b/lib/ansible/modules/extras/cloud/misc/virt.py
new file mode 100644
index 0000000000..be1a7f9ec6
--- /dev/null
+++ b/lib/ansible/modules/extras/cloud/misc/virt.py
@@ -0,0 +1,524 @@
+#!/usr/bin/python
+# -*- coding: utf-8 -*-
+
+"""
+Virt management features
+
+Copyright 2007, 2012 Red Hat, Inc
+Michael DeHaan <michael.dehaan@gmail.com>
+Seth Vidal <skvidal@fedoraproject.org>
+
+This software may be freely redistributed under the terms of the GNU
+general public license.
+
+You should have received a copy of the GNU General Public License
+along with this program. If not, see <http://www.gnu.org/licenses/>.
+"""
+
+DOCUMENTATION = '''
+---
+module: virt
+short_description: Manages virtual machines supported by libvirt
+description:
+ - Manages virtual machines supported by I(libvirt).
+version_added: "0.2"
+options:
+ name:
+ description:
+ - name of the guest VM being managed. Note that the VM must be previously
+ defined with xml.
+ required: true
+ default: null
+ aliases: []
+ state:
+ description:
+ - Note that there may be some lag for state requests like C(shutdown)
+ since these refer only to VM states. After starting a guest, it may not
+ be immediately accessible.
+ required: false
+ choices: [ "running", "shutdown", "destroyed", "paused" ]
+ default: "no"
+ command:
+ description:
+ - in addition to state management, various non-idempotent commands are available. See examples
+ required: false
+ choices: ["create","status", "start", "stop", "pause", "unpause",
+ "shutdown", "undefine", "destroy", "get_xml", "autostart",
+ "freemem", "list_vms", "info", "nodeinfo", "virttype", "define"]
+ uri:
+ description:
+ - libvirt connection uri
+ required: false
+ default: "qemu:///system"
+ xml:
+ description:
+ - XML document used with the define command
+ required: false
+ default: null
+requirements:
+ - "python >= 2.6"
+ - "libvirt-python"
+author:
+ - "Ansible Core Team"
+ - "Michael DeHaan"
+ - "Seth Vidal"
+'''
+
+EXAMPLES = '''
+# a playbook task line:
+- virt: name=alpha state=running
+
+# /usr/bin/ansible invocations
+ansible host -m virt -a "name=alpha command=status"
+ansible host -m virt -a "name=alpha command=get_xml"
+ansible host -m virt -a "name=alpha command=create uri=lxc:///"
+
+# a playbook example of defining and launching an LXC guest
+tasks:
+ - name: define vm
+ virt: name=foo
+ command=define
+ xml="{{ lookup('template', 'container-template.xml.j2') }}"
+ uri=lxc:///
+ - name: start vm
+ virt: name=foo state=running uri=lxc:///
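+
+# a playbook example of listing all defined VMs; list_vms is one of the host
+# commands above and needs no name argument
+- virt: command=list_vms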
+'''
+
+RETURN = '''
+# for list_vms command
+list_vms:
+ description: The list of vms defined on the remote system
+ type: list
+ returned: success
+ sample: [
+ "build.example.org",
+ "dev.example.org"
+ ]
+# for status command
+status:
+ description: The status of the VM, among running, crashed, paused and shutdown
+ type: string
+ sample: "success"
+ returned: success
+'''
+VIRT_FAILED = 1
+VIRT_SUCCESS = 0
+VIRT_UNAVAILABLE = 2
+
+import sys
+
+try:
+ import libvirt
+except ImportError:
+ HAS_VIRT = False
+else:
+ HAS_VIRT = True
+
+ALL_COMMANDS = []
+VM_COMMANDS = ['create','status', 'start', 'stop', 'pause', 'unpause',
+ 'shutdown', 'undefine', 'destroy', 'get_xml', 'autostart', 'define']
+HOST_COMMANDS = ['freemem', 'list_vms', 'info', 'nodeinfo', 'virttype']
+ALL_COMMANDS.extend(VM_COMMANDS)
+ALL_COMMANDS.extend(HOST_COMMANDS)
+
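+# Map libvirt's virDomainState enum values to human-readable names; the codes
+# follow libvirt (0 NOSTATE, 1 RUNNING, 2 BLOCKED, 3 PAUSED, 4 SHUTDOWN,
+# 5 SHUTOFF, 6 CRASHED), with several collapsed to "running" here.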
+VIRT_STATE_NAME_MAP = {
+ 0 : "running",
+ 1 : "running",
+ 2 : "running",
+ 3 : "paused",
+ 4 : "shutdown",
+ 5 : "shutdown",
+ 6 : "crashed"
+}
+
+class VMNotFound(Exception):
+ pass
+
+class LibvirtConnection(object):
+
+ def __init__(self, uri, module):
+
+ self.module = module
+
+ cmd = "uname -r"
+ rc, stdout, stderr = self.module.run_command(cmd)
+
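+ # Pick a connection style from the environment: Xen dom0 kernels use the
+ # default hypervisor connection, ESX URIs need an auth callback, and
+ # anything else opens the given URI directly.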
+ if "xen" in stdout:
+ conn = libvirt.open(None)
+ elif "esx" in uri:
+ auth = [[libvirt.VIR_CRED_AUTHNAME, libvirt.VIR_CRED_NOECHOPROMPT], [], None]
+ conn = libvirt.openAuth(uri, auth)
+ else:
+ conn = libvirt.open(uri)
+
+ if not conn:
+ raise Exception("hypervisor connection failure")
+
+ self.conn = conn
+
+ def find_vm(self, vmid):
+ """
+ Extra bonus feature: vmid = -1 returns a list of everything
+ """
+ conn = self.conn
+
+ vms = []
+
+ # this block of code borrowed from virt-manager:
+ # get working domain's name
+ ids = conn.listDomainsID()
+ for id in ids:
+ vm = conn.lookupByID(id)
+ vms.append(vm)
+ # get defined domain
+ names = conn.listDefinedDomains()
+ for name in names:
+ vm = conn.lookupByName(name)
+ vms.append(vm)
+
+ if vmid == -1:
+ return vms
+
+ for vm in vms:
+ if vm.name() == vmid:
+ return vm
+
+ raise VMNotFound("virtual machine %s not found" % vmid)
+
+ def shutdown(self, vmid):
+ return self.find_vm(vmid).shutdown()
+
+ def pause(self, vmid):
+ return self.suspend(vmid)
+
+ def unpause(self, vmid):
+ return self.resume(vmid)
+
+ def suspend(self, vmid):
+ return self.find_vm(vmid).suspend()
+
+ def resume(self, vmid):
+ return self.find_vm(vmid).resume()
+
+ def create(self, vmid):
+ return self.find_vm(vmid).create()
+
+ def destroy(self, vmid):
+ return self.find_vm(vmid).destroy()
+
+ def undefine(self, vmid):
+ return self.find_vm(vmid).undefine()
+
+ def get_status2(self, vm):
+ state = vm.info()[0]
+ return VIRT_STATE_NAME_MAP.get(state,"unknown")
+
+ def get_status(self, vmid):
+ state = self.find_vm(vmid).info()[0]
+ return VIRT_STATE_NAME_MAP.get(state,"unknown")
+
+ def nodeinfo(self):
+ return self.conn.getInfo()
+
+ def get_type(self):
+ return self.conn.getType()
+
+ def get_xml(self, vmid):
+ vm = self.conn.lookupByName(vmid)
+ return vm.XMLDesc(0)
+
+ def get_maxVcpus(self, vmid):
+ vm = self.conn.lookupByName(vmid)
+ return vm.maxVcpus()
+
+ def get_maxMemory(self, vmid):
+ vm = self.conn.lookupByName(vmid)
+ return vm.maxMemory()
+
+ def getFreeMemory(self):
+ return self.conn.getFreeMemory()
+
+ def get_autostart(self, vmid):
+ vm = self.conn.lookupByName(vmid)
+ return vm.autostart()
+
+ def set_autostart(self, vmid, val):
+ vm = self.conn.lookupByName(vmid)
+ return vm.setAutostart(val)
+
+ def define_from_xml(self, xml):
+ return self.conn.defineXML(xml)
+
+
+class Virt(object):
+
+ def __init__(self, uri, module):
+ self.module = module
+ self.uri = uri
+
+ def __get_conn(self):
+ self.conn = LibvirtConnection(self.uri, self.module)
+ return self.conn
+
+ def get_vm(self, vmid):
+ self.__get_conn()
+ return self.conn.find_vm(vmid)
+
+ def state(self):
+ vms = self.list_vms()
+ state = []
+ for vm in vms:
+ state_blurb = self.conn.get_status(vm)
+ state.append("%s %s" % (vm,state_blurb))
+ return state
+
+ def info(self):
+ vms = self.list_vms()
+ info = dict()
+ for vm in vms:
+ data = self.conn.find_vm(vm).info()
+ # libvirt returns maxMem, memory, and cpuTime as long()'s, which
+ # xmlrpclib tries to convert to regular int's during serialization.
+ # This throws exceptions, so convert them to strings here and
+ # assume the other end of the xmlrpc connection can figure things
+ # out or doesn't care.
+ info[vm] = {
+ "state" : VIRT_STATE_NAME_MAP.get(data[0],"unknown"),
+ "maxMem" : str(data[1]),
+ "memory" : str(data[2]),
+ "nrVirtCpu" : data[3],
+ "cpuTime" : str(data[4]),
+ }
+ info[vm]["autostart"] = self.conn.get_autostart(vm)
+
+ return info
+
+ def nodeinfo(self):
+ self.__get_conn()
+ info = dict()
+ data = self.conn.nodeinfo()
+ info = {
+ "cpumodel" : str(data[0]),
+ "phymemory" : str(data[1]),
+ "cpus" : str(data[2]),
+ "cpumhz" : str(data[3]),
+ "numanodes" : str(data[4]),
+ "sockets" : str(data[5]),
+ "cpucores" : str(data[6]),
+ "cputhreads" : str(data[7])
+ }
+ return info
+
+ def list_vms(self, state=None):
+ self.conn = self.__get_conn()
+ vms = self.conn.find_vm(-1)
+ results = []
+ for x in vms:
+ try:
+ if state:
+ vmstate = self.conn.get_status2(x)
+ if vmstate == state:
+ results.append(x.name())
+ else:
+ results.append(x.name())
+ except Exception:
+ pass
+ return results
+
+ def virttype(self):
+ return self.__get_conn().get_type()
+
+ def autostart(self, vmid):
+ self.conn = self.__get_conn()
+ return self.conn.set_autostart(vmid, True)
+
+ def freemem(self):
+ self.conn = self.__get_conn()
+ return self.conn.getFreeMemory()
+
+ def shutdown(self, vmid):
+ """ Make the machine with the given vmid stop running. Whatever that takes. """
+ self.__get_conn()
+ self.conn.shutdown(vmid)
+ return 0
+
+
+ def pause(self, vmid):
+ """ Pause the machine with the given vmid. """
+
+ self.__get_conn()
+ return self.conn.suspend(vmid)
+
+ def unpause(self, vmid):
+ """ Unpause the machine with the given vmid. """
+
+ self.__get_conn()
+ return self.conn.resume(vmid)
+
+ def create(self, vmid):
+ """ Start the machine via the given vmid """
+
+ self.__get_conn()
+ return self.conn.create(vmid)
+
+ def start(self, vmid):
+ """ Start the machine via the given id/name """
+
+ self.__get_conn()
+ return self.conn.create(vmid)
+
+ def destroy(self, vmid):
+ """ Pull the virtual power from the virtual domain, giving it virtually no time to virtually shut down. """
+ self.__get_conn()
+ return self.conn.destroy(vmid)
+
+ def undefine(self, vmid):
+ """ Stop a domain, and then wipe it from the face of the earth. (delete disk/config file) """
+
+ self.__get_conn()
+ return self.conn.undefine(vmid)
+
+ def status(self, vmid):
+ """
+ Return a state suitable for server consumption. Aka, codes.py values, not XM output.
+ """
+ self.__get_conn()
+ return self.conn.get_status(vmid)
+
+ def get_xml(self, vmid):
+ """
+ Receive a VM id as input
+ Return the XML describing that VM's config, as returned by a libvirt call
+ """
+
+ self.__get_conn()
+ return self.conn.get_xml(vmid)
+
+ def get_maxVcpus(self, vmid):
+ """
+ Gets the max number of VCPUs on a guest
+ """
+
+ self.__get_conn()
+ return self.conn.get_maxVcpus(vmid)
+
+ def get_max_memory(self, vmid):
+ """
+ Gets the max memory on a guest
+ """
+
+ self.__get_conn()
+ return self.conn.get_maxMemory(vmid)
+
+ def define(self, xml):
+ """
+ Define a guest with the given xml
+ """
+ self.__get_conn()
+ return self.conn.define_from_xml(xml)
+
+def core(module):
+
+ state = module.params.get('state', None)
+ guest = module.params.get('name', None)
+ command = module.params.get('command', None)
+ uri = module.params.get('uri', None)
+ xml = module.params.get('xml', None)
+
+ v = Virt(uri, module)
+ res = {}
+
+ if state and command == 'list_vms':
+ res = v.list_vms(state=state)
+ if type(res) != dict:
+ res = { command: res }
+ return VIRT_SUCCESS, res
+
+ if state:
+ if not guest:
+ module.fail_json(msg = "state change requires a guest specified")
+
+ res['changed'] = False
+ if state == 'running':
+ if v.status(guest) == 'paused':
+ res['changed'] = True
+ res['msg'] = v.unpause(guest)
+ elif v.status(guest) != 'running':
+ res['changed'] = True
+ res['msg'] = v.start(guest)
+ elif state == 'shutdown':
+ if v.status(guest) != 'shutdown':
+ res['changed'] = True
+ res['msg'] = v.shutdown(guest)
+ elif state == 'destroyed':
+ if v.status(guest) != 'shutdown':
+ res['changed'] = True
+ res['msg'] = v.destroy(guest)
+ elif state == 'paused':
+ if v.status(guest) == 'running':
+ res['changed'] = True
+ res['msg'] = v.pause(guest)
+ else:
+ module.fail_json(msg="unexpected state")
+
+ return VIRT_SUCCESS, res
+
+ if command:
+ if command in VM_COMMANDS:
+ if not guest:
+ module.fail_json(msg = "%s requires 1 argument: guest" % command)
+ if command == 'define':
+ if not xml:
+ module.fail_json(msg = "define requires xml argument")
+ try:
+ v.get_vm(guest)
+ except VMNotFound:
+ v.define(xml)
+ res = {'changed': True, 'created': guest}
+ return VIRT_SUCCESS, res
+ res = getattr(v, command)(guest)
+ if type(res) != dict:
+ res = { command: res }
+ return VIRT_SUCCESS, res
+
+ elif hasattr(v, command):
+ res = getattr(v, command)()
+ if type(res) != dict:
+ res = { command: res }
+ return VIRT_SUCCESS, res
+
+ else:
+ module.fail_json(msg="Command %s not recognized" % basecmd)
+
+ module.fail_json(msg="expected state or command parameter to be specified")
+
+def main():
+
+ module = AnsibleModule(argument_spec=dict(
+ name = dict(aliases=['guest']),
+ state = dict(choices=['running', 'shutdown', 'destroyed', 'paused']),
+ command = dict(choices=ALL_COMMANDS),
+ uri = dict(default='qemu:///system'),
+ xml = dict(),
+ ))
+
+ if not HAS_VIRT:
+ module.fail_json(
+ msg='The `libvirt` module is not importable. Check the requirements.'
+ )
+
+ rc = VIRT_SUCCESS
+ try:
+ rc, result = core(module)
+ except Exception as e:
+ module.fail_json(msg=str(e))
+
+ if rc != 0: # something went wrong, emit the msg
+ module.fail_json(rc=rc, msg=result)
+ else:
+ module.exit_json(**result)
+
+
+# import module snippets
+from ansible.module_utils.basic import *
+main()
diff --git a/lib/ansible/modules/extras/cloud/misc/virt_net.py b/lib/ansible/modules/extras/cloud/misc/virt_net.py
new file mode 100755
index 0000000000..e2dd88f4d4
--- /dev/null
+++ b/lib/ansible/modules/extras/cloud/misc/virt_net.py
@@ -0,0 +1,594 @@
+#!/usr/bin/python
+# -*- coding: utf-8 -*-
+
+# (c) 2015, Maciej Delmanowski <drybjed@gmail.com>
+#
+# This file is part of Ansible
+#
+# Ansible is free software: you can redistribute it and/or modify
+# it under the terms of the GNU General Public License as published by
+# the Free Software Foundation, either version 3 of the License, or
+# (at your option) any later version.
+#
+# Ansible is distributed in the hope that it will be useful,
+# but WITHOUT ANY WARRANTY; without even the implied warranty of
+# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+# GNU General Public License for more details.
+#
+# You should have received a copy of the GNU General Public License
+# along with Ansible. If not, see <http://www.gnu.org/licenses/>.
+
+DOCUMENTATION = '''
+---
+module: virt_net
+author: "Maciej Delmanowski (@drybjed)"
+version_added: "2.0"
+short_description: Manage libvirt network configuration
+description:
+ - Manage I(libvirt) networks.
+options:
+ name:
+ required: true
+ aliases: ['network']
+ description:
+ - name of the network being managed. Note that the network must be previously
+ defined with xml.
+ state:
+ required: false
+ choices: [ "active", "inactive", "present", "absent" ]
+ description:
+ - specify which state you want a network to be in.
+ If 'active', network will be started.
+ If 'present', ensure that network is present but do not change its
+ state; if it's missing, you need to specify xml argument.
+ If 'inactive', network will be stopped.
+ If 'absent', network will be removed from I(libvirt) configuration.
+ command:
+ required: false
+ choices: [ "define", "create", "start", "stop", "destroy",
+ "undefine", "get_xml", "list_nets", "facts",
+ "info", "status", "modify"]
+ description:
+ - in addition to state management, various non-idempotent commands are available.
+ See examples.
+ The C(modify) command was added in version 2.1.
+ autostart:
+ required: false
+ choices: ["yes", "no"]
+ description:
+ - Specify if a given network should be started automatically on system boot.
+ uri:
+ required: false
+ default: "qemu:///system"
+ description:
+ - libvirt connection uri.
+ xml:
+ required: false
+ description:
+ - XML document used with the define command.
+requirements:
+ - "python >= 2.6"
+ - "python-libvirt"
+ - "python-lxml"
+'''
+
+EXAMPLES = '''
+# Define a new network
+- virt_net: command=define name=br_nat xml='{{ lookup("template", "network/bridge.xml.j2") }}'
+
+# Start a network
+- virt_net: command=create name=br_nat
+
+# List available networks
+- virt_net: command=list_nets
+
+# Get XML data of a specified network
+- virt_net: command=get_xml name=br_nat
+
+# Stop a network
+- virt_net: command=destroy name=br_nat
+
+# Undefine a network
+- virt_net: command=undefine name=br_nat
+
+# Gather facts about networks
+# Facts will be available as 'ansible_libvirt_networks'
+- virt_net: command=facts
+
+# Gather information about networks managed by 'libvirt' remotely using uri
+- virt_net: command=info uri='{{ item }}'
+ with_items: libvirt_uris
+ register: networks
+
+# Ensure that a network is active (needs to be defined and built first)
+- virt_net: state=active name=br_nat
+
+# Ensure that a network is inactive
+- virt_net: state=inactive name=br_nat
+
+# Ensure that a given network will be started at boot
+- virt_net: autostart=yes name=br_nat
+
+# Disable autostart for a given network
+- virt_net: autostart=no name=br_nat
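+
+# Ensure that a network is present, defining it from xml only if it is missing
+- virt_net: state=present name=br_nat xml='{{ lookup("template", "network/bridge.xml.j2") }}'
+
+# Add or update a DHCP host entry with the modify command (a sketch; the
+# mac/name/ip values below are placeholders, and modify currently handles
+# /network/ip/dhcp/host entries only)
+- virt_net: command=modify name=br_nat xml='<host mac="52:54:00:00:00:01" name="vm1" ip="192.168.122.10"/>'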
+'''
+
+VIRT_FAILED = 1
+VIRT_SUCCESS = 0
+VIRT_UNAVAILABLE = 2
+
+import sys
+
+try:
+ import libvirt
+except ImportError:
+ HAS_VIRT = False
+else:
+ HAS_VIRT = True
+
+try:
+ from lxml import etree
+except ImportError:
+ HAS_XML = False
+else:
+ HAS_XML = True
+
+ALL_COMMANDS = []
+ENTRY_COMMANDS = ['create', 'status', 'start', 'stop',
+ 'undefine', 'destroy', 'get_xml', 'define',
+ 'modify' ]
+HOST_COMMANDS = [ 'list_nets', 'facts', 'info' ]
+ALL_COMMANDS.extend(ENTRY_COMMANDS)
+ALL_COMMANDS.extend(HOST_COMMANDS)
+
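+# libvirt's isActive()/autostart()/isPersistent() return 0 or 1; these maps
+# translate those values into the strings reported by this module.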
+ENTRY_STATE_ACTIVE_MAP = {
+ 0 : "inactive",
+ 1 : "active"
+}
+
+ENTRY_STATE_AUTOSTART_MAP = {
+ 0 : "no",
+ 1 : "yes"
+}
+
+ENTRY_STATE_PERSISTENT_MAP = {
+ 0 : "no",
+ 1 : "yes"
+}
+
+class EntryNotFound(Exception):
+ pass
+
+
+class LibvirtConnection(object):
+
+ def __init__(self, uri, module):
+
+ self.module = module
+
+ conn = libvirt.open(uri)
+
+ if not conn:
+ raise Exception("hypervisor connection failure")
+
+ self.conn = conn
+
+ def find_entry(self, entryid):
+ # entryid = -1 returns a list of everything
+
+ results = []
+
+ # Get active entries
+ for name in self.conn.listNetworks():
+ entry = self.conn.networkLookupByName(name)
+ results.append(entry)
+
+ # Get inactive entries
+ for name in self.conn.listDefinedNetworks():
+ entry = self.conn.networkLookupByName(name)
+ results.append(entry)
+
+ if entryid == -1:
+ return results
+
+ for entry in results:
+ if entry.name() == entryid:
+ return entry
+
+ raise EntryNotFound("network %s not found" % entryid)
+
+ def create(self, entryid):
+ if not self.module.check_mode:
+ return self.find_entry(entryid).create()
+ else:
+ try:
+ state = self.find_entry(entryid).isActive()
+ except EntryNotFound:
+ return self.module.exit_json(changed=True)
+ if not state:
+ return self.module.exit_json(changed=True)
+
+ def modify(self, entryid, xml):
+ network = self.find_entry(entryid)
+ # identify what type of entry is given in the xml
+ new_data = etree.fromstring(xml)
+ old_data = etree.fromstring(network.XMLDesc(0))
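+ # Only <host> entries under /network/ip/dhcp are handled below; the
+ # libvirt call signature is update(command, section, parentIndex, xml, flags).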
+ if new_data.tag == 'host':
+ mac_addr = new_data.get('mac')
+ hosts = old_data.xpath('/network/ip/dhcp/host')
+ # find the one mac we're looking for
+ host = None
+ for h in hosts:
+ if h.get('mac') == mac_addr:
+ host = h
+ break
+ if host is None:
+ # add the host
+ if not self.module.check_mode:
+ res = network.update(libvirt.VIR_NETWORK_UPDATE_COMMAND_ADD_LAST,
+ libvirt.VIR_NETWORK_SECTION_IP_DHCP_HOST,
+ -1, xml, libvirt.VIR_NETWORK_UPDATE_AFFECT_CURRENT)
+ else:
+ # pretend there was a change
+ res = 0
+ if res == 0:
+ return True
+ else:
+ # change the host
+ if host.get('name') == new_data.get('name') and host.get('ip') == new_data.get('ip'):
+ return False
+ else:
+ if not self.module.check_mode:
+ res = network.update(libvirt.VIR_NETWORK_UPDATE_COMMAND_MODIFY,
+ libvirt.VIR_NETWORK_SECTION_IP_DHCP_HOST,
+ -1, xml, libvirt.VIR_NETWORK_UPDATE_AFFECT_CURRENT)
+ else:
+ # pretend there was a change
+ res = 0
+ if res == 0:
+ return True
+ self.module.fail_json(msg='updating this type of entry is not supported yet: ' + unicode(xml))
+
+ def destroy(self, entryid):
+ if not self.module.check_mode:
+ return self.find_entry(entryid).destroy()
+ else:
+ if self.find_entry(entryid).isActive():
+ return self.module.exit_json(changed=True)
+
+ def undefine(self, entryid):
+ if not self.module.check_mode:
+ return self.find_entry(entryid).undefine()
+ else:
+ try:
+ self.find_entry(entryid)
+ except EntryNotFound:
+ return
+ return self.module.exit_json(changed=True)
+
+ def get_status2(self, entry):
+ state = entry.isActive()
+ return ENTRY_STATE_ACTIVE_MAP.get(state,"unknown")
+
+ def get_status(self, entryid):
+ if not self.module.check_mode:
+ state = self.find_entry(entryid).isActive()
+ return ENTRY_STATE_ACTIVE_MAP.get(state,"unknown")
+ else:
+ try:
+ state = self.find_entry(entryid).isActive()
+ return ENTRY_STATE_ACTIVE_MAP.get(state,"unknown")
+ except EntryNotFound:
+ return "inactive"
+
+ def get_uuid(self, entryid):
+ return self.find_entry(entryid).UUIDString()
+
+ def get_xml(self, entryid):
+ return self.find_entry(entryid).XMLDesc(0)
+
+ def get_forward(self, entryid):
+ xml = etree.fromstring(self.find_entry(entryid).XMLDesc(0))
+ try:
+ result = xml.xpath('/network/forward')[0].get('mode')
+ except IndexError:
+ raise ValueError('Forward mode not specified')
+ return result
+
+ def get_domain(self, entryid):
+ xml = etree.fromstring(self.find_entry(entryid).XMLDesc(0))
+ try:
+ result = xml.xpath('/network/domain')[0].get('name')
+ except IndexError:
+ raise ValueError('Domain not specified')
+ return result
+
+ def get_macaddress(self, entryid):
+ xml = etree.fromstring(self.find_entry(entryid).XMLDesc(0))
+ try:
+ result = xml.xpath('/network/mac')[0].get('address')
+ except IndexError:
+ raise ValueError('MAC address not specified')
+ return result
+
+ def get_autostart(self, entryid):
+ state = self.find_entry(entryid).autostart()
+ return ENTRY_STATE_AUTOSTART_MAP.get(state,"unknown")
+
+ def get_autostart2(self, entryid):
+ if not self.module.check_mode:
+ return self.find_entry(entryid).autostart()
+ else:
+ try:
+ return self.find_entry(entryid).autostart()
+ except EntryNotFound:
+ return self.module.exit_json(changed=True)
+
+ def set_autostart(self, entryid, val):
+ if not self.module.check_mode:
+ return self.find_entry(entryid).setAutostart(val)
+ else:
+ try:
+ state = self.find_entry(entryid).autostart()
+ except EntryNotFound:
+ return self.module.exit_json(changed=True)
+ if bool(state) != val:
+ return self.module.exit_json(changed=True)
+
+ def get_bridge(self, entryid):
+ return self.find_entry(entryid).bridgeName()
+
+ def get_persistent(self, entryid):
+ state = self.find_entry(entryid).isPersistent()
+ return ENTRY_STATE_PERSISTENT_MAP.get(state,"unknown")
+
+ def define_from_xml(self, entryid, xml):
+ if not self.module.check_mode:
+ return self.conn.networkDefineXML(xml)
+ else:
+ try:
+ state = self.find_entry(entryid)
+ except EntryNotFound:
+ return self.module.exit_json(changed=True)
+
+
+class VirtNetwork(object):
+
+ def __init__(self, uri, module):
+ self.module = module
+ self.uri = uri
+ self.conn = LibvirtConnection(self.uri, self.module)
+
+ def get_net(self, entryid):
+ return self.conn.find_entry(entryid)
+
+ def list_nets(self, state=None):
+ results = []
+ for entry in self.conn.find_entry(-1):
+ if state:
+ if state == self.conn.get_status2(entry):
+ results.append(entry.name())
+ else:
+ results.append(entry.name())
+ return results
+
+ def state(self):
+ results = []
+ for entry in self.list_nets():
+ state_blurb = self.conn.get_status(entry)
+ results.append("%s %s" % (entry,state_blurb))
+ return results
+
+ def autostart(self, entryid):
+ return self.conn.set_autostart(entryid, True)
+
+ def get_autostart(self, entryid):
+ return self.conn.get_autostart2(entryid)
+
+ def set_autostart(self, entryid, state):
+ return self.conn.set_autostart(entryid, state)
+
+ def create(self, entryid):
+ return self.conn.create(entryid)
+
+ def modify(self, entryid, xml):
+ return self.conn.modify(entryid, xml)
+
+ def start(self, entryid):
+ return self.conn.create(entryid)
+
+ def stop(self, entryid):
+ return self.conn.destroy(entryid)
+
+ def destroy(self, entryid):
+ return self.conn.destroy(entryid)
+
+ def undefine(self, entryid):
+ return self.conn.undefine(entryid)
+
+ def status(self, entryid):
+ return self.conn.get_status(entryid)
+
+ def get_xml(self, entryid):
+ return self.conn.get_xml(entryid)
+
+ def define(self, entryid, xml):
+ return self.conn.define_from_xml(entryid, xml)
+
+ def info(self):
+ return self.facts(facts_mode='info')
+
+ def facts(self, facts_mode='facts'):
+ results = dict()
+ for entry in self.list_nets():
+ results[entry] = dict()
+ results[entry]["autostart"] = self.conn.get_autostart(entry)
+ results[entry]["persistent"] = self.conn.get_persistent(entry)
+ results[entry]["state"] = self.conn.get_status(entry)
+ results[entry]["bridge"] = self.conn.get_bridge(entry)
+ results[entry]["uuid"] = self.conn.get_uuid(entry)
+
+ try:
+ results[entry]["forward_mode"] = self.conn.get_forward(entry)
+ except ValueError as e:
+ pass
+
+ try:
+ results[entry]["domain"] = self.conn.get_domain(entry)
+ except ValueError as e:
+ pass
+
+ try:
+ results[entry]["macaddress"] = self.conn.get_macaddress(entry)
+ except ValueError as e:
+ pass
+
+ facts = dict()
+ if facts_mode == 'facts':
+ facts["ansible_facts"] = dict()
+ facts["ansible_facts"]["ansible_libvirt_networks"] = results
+ elif facts_mode == 'info':
+ facts['networks'] = results
+ return facts
+
+
+def core(module):
+
+ state = module.params.get('state', None)
+ name = module.params.get('name', None)
+ command = module.params.get('command', None)
+ uri = module.params.get('uri', None)
+ xml = module.params.get('xml', None)
+ autostart = module.params.get('autostart', None)
+
+ v = VirtNetwork(uri, module)
+ res = {}
+
+ if state and command == 'list_nets':
+ res = v.list_nets(state=state)
+ if type(res) != dict:
+ res = { command: res }
+ return VIRT_SUCCESS, res
+
+ if state:
+ if not name:
+ module.fail_json(msg = "state change requires a specified name")
+
+ res['changed'] = False
+ if state in [ 'active' ]:
+ if v.status(name) != 'active':
+ res['changed'] = True
+ res['msg'] = v.start(name)
+ elif state in [ 'present' ]:
+ try:
+ v.get_net(name)
+ except EntryNotFound:
+ if not xml:
+ module.fail_json(msg = "network '" + name + "' not present, but xml not specified")
+ v.define(name, xml)
+ res = {'changed': True, 'created': name}
+ elif state in [ 'inactive' ]:
+ entries = v.list_nets()
+ if name in entries:
+ if v.status(name) != 'inactive':
+ res['changed'] = True
+ res['msg'] = v.destroy(name)
+ elif state in [ 'undefined', 'absent' ]:
+ entries = v.list_nets()
+ if name in entries:
+ if v.status(name) != 'inactive':
+ v.destroy(name)
+ res['changed'] = True
+ res['msg'] = v.undefine(name)
+ else:
+ module.fail_json(msg="unexpected state")
+
+ return VIRT_SUCCESS, res
+
+ if command:
+ if command in ENTRY_COMMANDS:
+ if not name:
+ module.fail_json(msg = "%s requires 1 argument: name" % command)
+ if command in ('define', 'modify'):
+ if not xml:
+ module.fail_json(msg = command+" requires xml argument")
+ try:
+ v.get_net(name)
+ except EntryNotFound:
+ v.define(name, xml)
+ res = {'changed': True, 'created': name}
+ else:
+ if command == 'modify':
+ mod = v.modify(name, xml)
+ res = {'changed': mod, 'modified': name}
+ return VIRT_SUCCESS, res
+ res = getattr(v, command)(name)
+ if type(res) != dict:
+ res = { command: res }
+ return VIRT_SUCCESS, res
+
+ elif hasattr(v, command):
+ res = getattr(v, command)()
+ if type(res) != dict:
+ res = { command: res }
+ return VIRT_SUCCESS, res
+
+ else:
+ module.fail_json(msg="Command %s not recognized" % basecmd)
+
+ if autostart is not None:
+ if not name:
+ module.fail_json(msg = "state change requires a specified name")
+
+ res['changed'] = False
+ if autostart:
+ if not v.get_autostart(name):
+ res['changed'] = True
+ res['msg'] = v.set_autostart(name, True)
+ else:
+ if v.get_autostart(name):
+ res['changed'] = True
+ res['msg'] = v.set_autostart(name, False)
+
+ return VIRT_SUCCESS, res
+
+ module.fail_json(msg="expected state or command parameter to be specified")
+
+
+def main():
+
+ module = AnsibleModule(
+ argument_spec = dict(
+ name = dict(aliases=['network']),
+ state = dict(choices=['active', 'inactive', 'present', 'absent']),
+ command = dict(choices=ALL_COMMANDS),
+ uri = dict(default='qemu:///system'),
+ xml = dict(),
+ autostart = dict(type='bool')
+ ),
+ supports_check_mode = True
+ )
+
+ if not HAS_VIRT:
+ module.fail_json(
+ msg='The `libvirt` module is not importable. Check the requirements.'
+ )
+
+ if not HAS_XML:
+ module.fail_json(
+ msg='The `lxml` module is not importable. Check the requirements.'
+ )
+
+ rc = VIRT_SUCCESS
+ try:
+ rc, result = core(module)
+ except Exception as e:
+ module.fail_json(msg=str(e))
+
+ if rc != 0: # something went wrong emit the msg
+ module.fail_json(rc=rc, msg=result)
+ else:
+ module.exit_json(**result)
+
+
+# import module snippets
+from ansible.module_utils.basic import *
+main()
diff --git a/lib/ansible/modules/extras/cloud/misc/virt_pool.py b/lib/ansible/modules/extras/cloud/misc/virt_pool.py
new file mode 100755
index 0000000000..b104ef548d
--- /dev/null
+++ b/lib/ansible/modules/extras/cloud/misc/virt_pool.py
@@ -0,0 +1,690 @@
+#!/usr/bin/python
+# -*- coding: utf-8 -*-
+
+# (c) 2015, Maciej Delmanowski <drybjed@gmail.com>
+#
+# This file is part of Ansible
+#
+# Ansible is free software: you can redistribute it and/or modify
+# it under the terms of the GNU General Public License as published by
+# the Free Software Foundation, either version 3 of the License, or
+# (at your option) any later version.
+#
+# Ansible is distributed in the hope that it will be useful,
+# but WITHOUT ANY WARRANTY; without even the implied warranty of
+# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+# GNU General Public License for more details.
+#
+# You should have received a copy of the GNU General Public License
+# along with Ansible. If not, see <http://www.gnu.org/licenses/>.
+
+DOCUMENTATION = '''
+---
+module: virt_pool
+author: "Maciej Delmanowski (@drybjed)"
+version_added: "2.0"
+short_description: Manage libvirt storage pools
+description:
+ - Manage I(libvirt) storage pools.
+options:
+ name:
+ required: false
+ aliases: [ "pool" ]
+ description:
+ - name of the storage pool being managed. Note that the pool must have been
+ previously defined with xml.
+ state:
+ required: false
+ choices: [ "active", "inactive", "present", "absent", "undefined", "deleted" ]
+ description:
+ - specify which state you want a storage pool to be in.
+ If 'active', the pool will be started.
+ If 'present', ensure that the pool is present but do not change its
+ state; if it is missing, you need to specify the xml argument.
+ If 'inactive', the pool will be stopped.
+ If 'undefined' or 'absent', the pool will be removed from the I(libvirt) configuration.
+ If 'deleted', the pool contents will be deleted and then the pool will be undefined.
+ command:
+ required: false
+ choices: [ "define", "build", "create", "start", "stop", "destroy",
+ "delete", "undefine", "get_xml", "list_pools", "facts",
+ "info", "status" ]
+ description:
+ - in addition to state management, various non-idempotent commands are available.
+ See examples.
+ autostart:
+ required: false
+ choices: ["yes", "no"]
+ description:
+ - Specify if a given storage pool should be started automatically on system boot.
+ uri:
+ required: false
+ default: "qemu:///system"
+ description:
+ - I(libvirt) connection uri.
+ xml:
+ required: false
+ description:
+ - XML document used with the define command.
+ mode:
+ required: false
+ choices: [ 'new', 'repair', 'resize', 'no_overwrite', 'overwrite', 'normal', 'zeroed' ]
+ description:
+ - Pass additional parameters to 'build' or 'delete' commands.
+requirements:
+ - "python >= 2.6"
+ - "python-libvirt"
+ - "python-lxml"
+'''
+
+EXAMPLES = '''
+# Define a new storage pool
+- virt_pool: command=define name=vms xml='{{ lookup("template", "pool/dir.xml.j2") }}'
+
+# Build a storage pool if it does not exist
+- virt_pool: command=build name=vms
+
+# Start a storage pool
+- virt_pool: command=create name=vms
+
+# List available pools
+- virt_pool: command=list_pools
+
+# Get XML data of a specified pool
+- virt_pool: command=get_xml name=vms
+
+# Stop a storage pool
+- virt_pool: command=destroy name=vms
+
+# Delete a storage pool (destroys contents)
+- virt_pool: command=delete name=vms
+
+# Undefine a storage pool
+- virt_pool: command=undefine name=vms
+
+# Gather facts about storage pools
+# Facts will be available as 'ansible_libvirt_pools'
+- virt_pool: command=facts
+
+# Gather information about pools managed by 'libvirt' remotely using uri
+- virt_pool: command=info uri='{{ item }}'
+ with_items: libvirt_uris
+ register: storage_pools
+
+# Ensure that a pool is active (needs to be defined and built first)
+- virt_pool: state=active name=vms
+
+# Ensure that a pool is inactive
+- virt_pool: state=inactive name=vms
+
+# Ensure that a given pool will be started at boot
+- virt_pool: autostart=yes name=vms
+
+# Disable autostart for a given pool
+- virt_pool: autostart=no name=vms
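+
+# Pass a mode through to the build command (a sketch; modes map to libvirt
+# build flags via ENTRY_BUILD_FLAGS_MAP)
+- virt_pool: command=build name=vms mode=no_overwrite
+
+# Remove a pool and its contents, zeroing the data on delete
+- virt_pool: state=deleted name=vms mode=zeroed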
+'''
+
+VIRT_FAILED = 1
+VIRT_SUCCESS = 0
+VIRT_UNAVAILABLE = 2
+
+import sys
+
+try:
+ import libvirt
+except ImportError:
+ HAS_VIRT = False
+else:
+ HAS_VIRT = True
+
+try:
+ from lxml import etree
+except ImportError:
+ HAS_XML = False
+else:
+ HAS_XML = True
+
+ALL_COMMANDS = []
+ENTRY_COMMANDS = ['create', 'status', 'start', 'stop', 'build', 'delete',
+ 'undefine', 'destroy', 'get_xml', 'define', 'refresh']
+HOST_COMMANDS = [ 'list_pools', 'facts', 'info' ]
+ALL_COMMANDS.extend(ENTRY_COMMANDS)
+ALL_COMMANDS.extend(HOST_COMMANDS)
+
+ENTRY_STATE_ACTIVE_MAP = {
+ 0 : "inactive",
+ 1 : "active"
+}
+
+ENTRY_STATE_AUTOSTART_MAP = {
+ 0 : "no",
+ 1 : "yes"
+}
+
+ENTRY_STATE_PERSISTENT_MAP = {
+ 0 : "no",
+ 1 : "yes"
+}
+
+ENTRY_STATE_INFO_MAP = {
+ 0 : "inactive",
+ 1 : "building",
+ 2 : "running",
+ 3 : "degraded",
+ 4 : "inaccessible"
+}
+
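+# The numeric values below are assumed to mirror libvirt's storage pool
+# build/delete flag constants (VIR_STORAGE_POOL_BUILD_* and
+# VIR_STORAGE_POOL_DELETE_*).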
+ENTRY_BUILD_FLAGS_MAP = {
+ "new" : 0,
+ "repair" : 1,
+ "resize" : 2,
+ "no_overwrite" : 4,
+ "overwrite" : 8
+}
+
+ENTRY_DELETE_FLAGS_MAP = {
+ "normal" : 0,
+ "zeroed" : 1
+}
+
+ALL_MODES = []
+ALL_MODES.extend(ENTRY_BUILD_FLAGS_MAP.keys())
+ALL_MODES.extend(ENTRY_DELETE_FLAGS_MAP.keys())
+
+
+class EntryNotFound(Exception):
+ pass
+
+
+class LibvirtConnection(object):
+
+ def __init__(self, uri, module):
+
+ self.module = module
+
+ conn = libvirt.open(uri)
+
+ if not conn:
+ raise Exception("hypervisor connection failure")
+
+ self.conn = conn
+
+ def find_entry(self, entryid):
+ # entryid = -1 returns a list of everything
+
+ results = []
+
+ # Get active entries
+ for name in self.conn.listStoragePools():
+ entry = self.conn.storagePoolLookupByName(name)
+ results.append(entry)
+
+ # Get inactive entries
+ for name in self.conn.listDefinedStoragePools():
+ entry = self.conn.storagePoolLookupByName(name)
+ results.append(entry)
+
+ if entryid == -1:
+ return results
+
+ for entry in results:
+ if entry.name() == entryid:
+ return entry
+
+ raise EntryNotFound("storage pool %s not found" % entryid)
+
+ def create(self, entryid):
+ if not self.module.check_mode:
+ return self.find_entry(entryid).create()
+ else:
+ try:
+ state = self.find_entry(entryid).isActive()
+ except EntryNotFound:
+ return self.module.exit_json(changed=True)
+ if not state:
+ return self.module.exit_json(changed=True)
+
+ def destroy(self, entryid):
+ if not self.module.check_mode:
+ return self.find_entry(entryid).destroy()
+ else:
+ if self.find_entry(entryid).isActive():
+ return self.module.exit_json(changed=True)
+
+ def undefine(self, entryid):
+ if not self.module.check_mode:
+ return self.find_entry(entryid).undefine()
+ else:
+ try:
+ self.find_entry(entryid)
+ except EntryNotFound:
+ return
+ return self.module.exit_json(changed=True)
+
+ def get_status2(self, entry):
+ state = entry.isActive()
+ return ENTRY_STATE_ACTIVE_MAP.get(state,"unknown")
+
+ def get_status(self, entryid):
+ if not self.module.check_mode:
+ state = self.find_entry(entryid).isActive()
+ return ENTRY_STATE_ACTIVE_MAP.get(state,"unknown")
+ else:
+ try:
+ state = self.find_entry(entryid).isActive()
+ return ENTRY_STATE_ACTIVE_MAP.get(state,"unknown")
+ except EntryNotFound:
+ return "inactive"
+
+ def get_uuid(self, entryid):
+ return self.find_entry(entryid).UUIDString()
+
+ def get_xml(self, entryid):
+ return self.find_entry(entryid).XMLDesc(0)
+
+ def get_info(self, entryid):
+ return self.find_entry(entryid).info()
+
+ def get_volume_count(self, entryid):
+ return self.find_entry(entryid).numOfVolumes()
+
+ def get_volume_names(self, entryid):
+ return self.find_entry(entryid).listVolumes()
+
+ def get_devices(self, entryid):
+ xml = etree.fromstring(self.find_entry(entryid).XMLDesc(0))
+ devices = xml.xpath('/pool/source/device')
+ if not devices:
+ raise ValueError('No devices specified')
+ return [device.get('path') for device in devices]
+
+ def get_format(self, entryid):
+ xml = etree.fromstring(self.find_entry(entryid).XMLDesc(0))
+ try:
+ result = xml.xpath('/pool/source/format')[0].get('type')
+ except IndexError:
+ raise ValueError('Format not specified')
+ return result
+
+ def get_host(self, entryid):
+ xml = etree.fromstring(self.find_entry(entryid).XMLDesc(0))
+ try:
+ result = xml.xpath('/pool/source/host')[0].get('name')
+ except IndexError:
+ raise ValueError('Host not specified')
+ return result
+
+ def get_source_path(self, entryid):
+ xml = etree.fromstring(self.find_entry(entryid).XMLDesc(0))
+ try:
+ result = xml.xpath('/pool/source/dir')[0].get('path')
+ except IndexError:
+ raise ValueError('Source path not specified')
+ return result
+
+ def get_path(self, entryid):
+ xml = etree.fromstring(self.find_entry(entryid).XMLDesc(0))
+ return xml.xpath('/pool/target/path')[0].text
+
+ def get_type(self, entryid):
+ xml = etree.fromstring(self.find_entry(entryid).XMLDesc(0))
+ return xml.get('type')
+
+ def build(self, entryid, flags):
+ if not self.module.check_mode:
+ return self.find_entry(entryid).build(flags)
+ else:
+ try:
+ state = self.find_entry(entryid)
+ except EntryNotFound:
+ return self.module.exit_json(changed=True)
+ if not state:
+ return self.module.exit_json(changed=True)
+
+ def delete(self, entryid, flags):
+ if not self.module.check_mode:
+ return self.find_entry(entryid).delete(flags)
+ else:
+ try:
+ state = self.find_entry(entryid)
+ except EntryNotFound:
+ return self.module.exit_json(changed=True)
+ if state:
+ return self.module.exit_json(changed=True)
+
+ def get_autostart(self, entryid):
+ state = self.find_entry(entryid).autostart()
+ return ENTRY_STATE_AUTOSTART_MAP.get(state,"unknown")
+
+ def get_autostart2(self, entryid):
+ if not self.module.check_mode:
+ return self.find_entry(entryid).autostart()
+ else:
+ try:
+ return self.find_entry(entryid).autostart()
+ except EntryNotFound:
+ return self.module.exit_json(changed=True)
+
+ def set_autostart(self, entryid, val):
+ if not self.module.check_mode:
+ return self.find_entry(entryid).setAutostart(val)
+ else:
+ try:
+ state = self.find_entry(entryid).autostart()
+ except EntryNotFound:
+ return self.module.exit_json(changed=True)
+ if bool(state) != val:
+ return self.module.exit_json(changed=True)
+
+ def refresh(self, entryid):
+ return self.find_entry(entryid).refresh()
+
+ def get_persistent(self, entryid):
+ state = self.find_entry(entryid).isPersistent()
+ return ENTRY_STATE_PERSISTENT_MAP.get(state,"unknown")
+
+ def define_from_xml(self, entryid, xml):
+ if not self.module.check_mode:
+ return self.conn.storagePoolDefineXML(xml)
+ else:
+ try:
+ state = self.find_entry(entryid)
+ except EntryNotFound:
+ return self.module.exit_json(changed=True)
+
+
+class VirtStoragePool(object):
+
+ def __init__(self, uri, module):
+ self.module = module
+ self.uri = uri
+ self.conn = LibvirtConnection(self.uri, self.module)
+
+ def get_pool(self, entryid):
+ return self.conn.find_entry(entryid)
+
+ def list_pools(self, state=None):
+ results = []
+ for entry in self.conn.find_entry(-1):
+ if state:
+ if state == self.conn.get_status2(entry):
+ results.append(entry.name())
+ else:
+ results.append(entry.name())
+ return results
+
+ def state(self):
+ results = []
+ for entry in self.list_pools():
+ state_blurb = self.conn.get_status(entry)
+ results.append("%s %s" % (entry,state_blurb))
+ return results
+
+ def autostart(self, entryid):
+ return self.conn.set_autostart(entryid, True)
+
+ def get_autostart(self, entryid):
+ return self.conn.get_autostart2(entryid)
+
+ def set_autostart(self, entryid, state):
+ return self.conn.set_autostart(entryid, state)
+
+ def create(self, entryid):
+ return self.conn.create(entryid)
+
+ def start(self, entryid):
+ return self.conn.create(entryid)
+
+ def stop(self, entryid):
+ return self.conn.destroy(entryid)
+
+ def destroy(self, entryid):
+ return self.conn.destroy(entryid)
+
+ def undefine(self, entryid):
+ return self.conn.undefine(entryid)
+
+ def status(self, entryid):
+ return self.conn.get_status(entryid)
+
+ def get_xml(self, entryid):
+ return self.conn.get_xml(entryid)
+
+ def define(self, entryid, xml):
+ return self.conn.define_from_xml(entryid, xml)
+
+ def build(self, entryid, flags):
+ return self.conn.build(entryid, ENTRY_BUILD_FLAGS_MAP.get(flags,0))
+
+ def delete(self, entryid, flags):
+ return self.conn.delete(entryid, ENTRY_DELETE_FLAGS_MAP.get(flags,0))
+
+ def refresh(self, entryid):
+ return self.conn.refresh(entryid)
+
+ def info(self):
+ return self.facts(facts_mode='info')
+
+ def facts(self, facts_mode='facts'):
+ results = dict()
+ for entry in self.list_pools():
+ results[entry] = dict()
+ if self.conn.find_entry(entry):
+ data = self.conn.get_info(entry)
+ # libvirt returns the pool info values as long()'s, which
+ # xmlrpclib tries to convert to regular int's during serialization.
+ # This throws exceptions, so convert them to strings here and
+ # assume the other end of the xmlrpc connection can figure things
+ # out or doesn't care.
+ results[entry] = {
+ "status" : ENTRY_STATE_INFO_MAP.get(data[0],"unknown"),
+ "size_total" : str(data[1]),
+ "size_used" : str(data[2]),
+ "size_available" : str(data[3]),
+ }
+ results[entry]["autostart"] = self.conn.get_autostart(entry)
+ results[entry]["persistent"] = self.conn.get_persistent(entry)
+ results[entry]["state"] = self.conn.get_status(entry)
+ results[entry]["path"] = self.conn.get_path(entry)
+ results[entry]["type"] = self.conn.get_type(entry)
+ results[entry]["uuid"] = self.conn.get_uuid(entry)
+ if self.conn.find_entry(entry).isActive():
+ results[entry]["volume_count"] = self.conn.get_volume_count(entry)
+ results[entry]["volumes"] = list()
+ for volume in self.conn.get_volume_names(entry):
+ results[entry]["volumes"].append(volume)
+ else:
+ results[entry]["volume_count"] = -1
+
+ try:
+ results[entry]["host"] = self.conn.get_host(entry)
+ except ValueError as e:
+ pass
+
+ try:
+ results[entry]["source_path"] = self.conn.get_source_path(entry)
+ except ValueError as e:
+ pass
+
+ try:
+ results[entry]["format"] = self.conn.get_format(entry)
+ except ValueError as e:
+ pass
+
+ try:
+ devices = self.conn.get_devices(entry)
+ results[entry]["devices"] = devices
+ except ValueError as e:
+ pass
+
+ else:
+ results[entry]["state"] = self.conn.get_status(entry)
+
+ facts = dict()
+ if facts_mode == 'facts':
+ facts["ansible_facts"] = dict()
+ facts["ansible_facts"]["ansible_libvirt_pools"] = results
+ elif facts_mode == 'info':
+ facts['pools'] = results
+ return facts
+
+
+def core(module):
+
+ state = module.params.get('state', None)
+ name = module.params.get('name', None)
+ command = module.params.get('command', None)
+ uri = module.params.get('uri', None)
+ xml = module.params.get('xml', None)
+ autostart = module.params.get('autostart', None)
+ mode = module.params.get('mode', None)
+
+ v = VirtStoragePool(uri, module)
+ res = {}
+
+ if state and command == 'list_pools':
+ res = v.list_pools(state=state)
+ if type(res) != dict:
+ res = { command: res }
+ return VIRT_SUCCESS, res
+
+ if state:
+ if not name:
+ module.fail_json(msg = "state change requires a specified name")
+
+ res['changed'] = False
+ if state in [ 'active' ]:
+ if v.status(name) != 'active':
+ res['changed'] = True
+ res['msg'] = v.start(name)
+ elif state in [ 'present' ]:
+ try:
+ v.get_pool(name)
+ except EntryNotFound:
+ if not xml:
+ module.fail_json(msg = "storage pool '" + name + "' not present, but xml not specified")
+ v.define(name, xml)
+ res = {'changed': True, 'created': name}
+ elif state in [ 'inactive' ]:
+ entries = v.list_pools()
+ if name in entries:
+ if v.status(name) != 'inactive':
+ res['changed'] = True
+ res['msg'] = v.destroy(name)
+ elif state in [ 'undefined', 'absent' ]:
+ entries = v.list_pools()
+ if name in entries:
+ if v.status(name) != 'inactive':
+ v.destroy(name)
+ res['changed'] = True
+ res['msg'] = v.undefine(name)
+ elif state in [ 'deleted' ]:
+ entries = v.list_pools()
+ if name in entries:
+ if v.status(name) != 'inactive':
+ v.destroy(name)
+ v.delete(name, mode)
+ res['changed'] = True
+ res['msg'] = v.undefine(name)
+ else:
+ module.fail_json(msg="unexpected state")
+
+ return VIRT_SUCCESS, res
+
+ if command:
+ if command in ENTRY_COMMANDS:
+ if not name:
+ module.fail_json(msg = "%s requires 1 argument: name" % command)
+ if command == 'define':
+ if not xml:
+ module.fail_json(msg = "define requires xml argument")
+ try:
+ v.get_pool(name)
+ except EntryNotFound:
+ v.define(name, xml)
+ res = {'changed': True, 'created': name}
+ return VIRT_SUCCESS, res
+ elif command == 'build':
+ res = v.build(name, mode)
+ if type(res) != dict:
+ res = { 'changed': True, command: res }
+ return VIRT_SUCCESS, res
+ elif command == 'delete':
+ res = v.delete(name, mode)
+ if type(res) != dict:
+ res = { 'changed': True, command: res }
+ return VIRT_SUCCESS, res
+ res = getattr(v, command)(name)
+ if type(res) != dict:
+ res = { command: res }
+ return VIRT_SUCCESS, res
+
+ elif hasattr(v, command):
+ res = getattr(v, command)()
+ if type(res) != dict:
+ res = { command: res }
+ return VIRT_SUCCESS, res
+
+ else:
+ module.fail_json(msg="Command %s not recognized" % basecmd)
+
+ if autostart is not None:
+ if not name:
+ module.fail_json(msg = "state change requires a specified name")
+
+ res['changed'] = False
+ if autostart:
+ if not v.get_autostart(name):
+ res['changed'] = True
+ res['msg'] = v.set_autostart(name, True)
+ else:
+ if v.get_autostart(name):
+ res['changed'] = True
+ res['msg'] = v.set_autostart(name, False)
+
+ return VIRT_SUCCESS, res
+
+ module.fail_json(msg="expected state or command parameter to be specified")
+
+
+def main():
+
+ module = AnsibleModule(
+ argument_spec = dict(
+ name = dict(aliases=['pool']),
+ state = dict(choices=['active', 'inactive', 'present', 'absent', 'undefined', 'deleted']),
+ command = dict(choices=ALL_COMMANDS),
+ uri = dict(default='qemu:///system'),
+ xml = dict(),
+ autostart = dict(type='bool'),
+ mode = dict(choices=ALL_MODES),
+ ),
+ supports_check_mode = True
+ )
+
+ if not HAS_VIRT:
+ module.fail_json(
+ msg='The `libvirt` module is not importable. Check the requirements.'
+ )
+
+ if not HAS_XML:
+ module.fail_json(
+ msg='The `lxml` module is not importable. Check the requirements.'
+ )
+
+ rc = VIRT_SUCCESS
+ try:
+ rc, result = core(module)
+ except Exception as e:
+ module.fail_json(msg=str(e))
+
+ if rc != 0: # something went wrong emit the msg
+ module.fail_json(rc=rc, msg=result)
+ else:
+ module.exit_json(**result)
+
+
+# import module snippets
+from ansible.module_utils.basic import *
+main()
diff --git a/lib/ansible/modules/extras/cloud/openstack/__init__.py b/lib/ansible/modules/extras/cloud/openstack/__init__.py
new file mode 100644
index 0000000000..e69de29bb2
--- /dev/null
+++ b/lib/ansible/modules/extras/cloud/openstack/__init__.py
diff --git a/lib/ansible/modules/extras/cloud/openstack/os_flavor_facts.py b/lib/ansible/modules/extras/cloud/openstack/os_flavor_facts.py
new file mode 100644
index 0000000000..05a3782be7
--- /dev/null
+++ b/lib/ansible/modules/extras/cloud/openstack/os_flavor_facts.py
@@ -0,0 +1,225 @@
+#!/usr/bin/python
+
+# Copyright (c) 2015 IBM
+#
+# This module is free software: you can redistribute it and/or modify
+# it under the terms of the GNU General Public License as published by
+# the Free Software Foundation, either version 3 of the License, or
+# (at your option) any later version.
+#
+# This software is distributed in the hope that it will be useful,
+# but WITHOUT ANY WARRANTY; without even the implied warranty of
+# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+# GNU General Public License for more details.
+#
+# You should have received a copy of the GNU General Public License
+# along with this software. If not, see <http://www.gnu.org/licenses/>.
+
+import re
+
+try:
+ import shade
+ HAS_SHADE = True
+except ImportError:
+ HAS_SHADE = False
+
+from distutils.version import StrictVersion
+
+
+DOCUMENTATION = '''
+---
+module: os_flavor_facts
+short_description: Retrieve facts about one or more flavors
+author: "David Shrewsbury (@Shrews)"
+version_added: "2.1"
+description:
+ - Retrieve facts about available OpenStack instance flavors. By default,
+ facts about ALL flavors are retrieved. Filters can be applied to get
+ facts for only matching flavors. For example, you can filter on the
+ amount of RAM available to the flavor, or the number of virtual CPUs
+ available to the flavor, or both. When specifying multiple filters,
+ *ALL* filters must match on a flavor before that flavor is returned as
+ a fact.
+notes:
+ - This module creates a new top-level C(openstack_flavors) fact, which
+ contains a list of unsorted flavors.
+requirements:
+ - "python >= 2.6"
+ - "shade"
+options:
+ name:
+ description:
+ - A flavor name. Cannot be used with I(ram) or I(vcpus).
+ required: false
+ default: None
+ ram:
+ description:
+ - "A string used for filtering flavors based on the amount of RAM
+ (in MB) desired. This string accepts the following special values:
+ 'MIN' (return flavors with the minimum amount of RAM), and 'MAX'
+ (return flavors with the maximum amount of RAM)."
+
+ - "A specific amount of RAM may also be specified. Any flavors with this
+ exact amount of RAM will be returned."
+
+ - "A range of acceptable RAM may be given using a special syntax. Simply
+ prefix the amount of RAM with one of these acceptable range values:
+ '<', '>', '<=', '>='. These values represent less than, greater than,
+ less than or equal to, and greater than or equal to, respectively."
+ required: false
+ default: None
+ vcpus:
+ description:
+ - A string used for filtering flavors based on the number of virtual
+ CPUs desired. Format is the same as the I(ram) parameter.
+ required: false
+ default: None
+ limit:
+ description:
+ - Limits the number of flavors returned. All matching flavors are
+ returned by default.
+ required: false
+ default: None
+extends_documentation_fragment: openstack
+'''
+
+EXAMPLES = '''
+# Gather facts about all available flavors
+- os_flavor_facts:
+ cloud: mycloud
+
+# Gather facts for the flavor named "xlarge-flavor"
+- os_flavor_facts:
+ cloud: mycloud
+ name: "xlarge-flavor"
+
+# Get all flavors that have exactly 512 MB of RAM.
+- os_flavor_facts:
+ cloud: mycloud
+ ram: "512"
+
+# Get all flavors that have 1024 MB or more of RAM.
+- os_flavor_facts:
+ cloud: mycloud
+ ram: ">=1024"
+
+# Get a single flavor that has the minimum amount of RAM. Using the 'limit'
+# option will guarantee only a single flavor is returned.
+- os_flavor_facts:
+ cloud: mycloud
+ ram: "MIN"
+ limit: 1
+
+# Get all flavors with 1024 MB of RAM or more, AND exactly 2 virtual CPUs.
+- os_flavor_facts:
+ cloud: mycloud
+ ram: ">=1024"
+ vcpus: "2"
+'''
+
+
+RETURN = '''
+openstack_flavors:
+ description: Dictionary describing the flavors.
+ returned: On success.
+ type: dictionary
+ contains:
+ id:
+ description: Flavor ID.
+ returned: success
+ type: string
+ sample: "515256b8-7027-4d73-aa54-4e30a4a4a339"
+ name:
+ description: Flavor name.
+ returned: success
+ type: string
+ sample: "tiny"
+ disk:
+ description: Size of local disk, in GB.
+ returned: success
+ type: int
+ sample: 10
+ ephemeral:
+ description: Ephemeral space size, in GB.
+ returned: success
+ type: int
+ sample: 10
+ ram:
+ description: Amount of memory, in MB.
+ returned: success
+ type: int
+ sample: 1024
+ swap:
+ description: Swap space size, in MB.
+ returned: success
+ type: int
+ sample: 100
+ vcpus:
+ description: Number of virtual CPUs.
+ returned: success
+ type: int
+ sample: 2
+ is_public:
+ description: Whether the flavor is accessible to the public.
+ returned: success
+ type: bool
+ sample: true
+'''
+
+
+def main():
+ argument_spec = openstack_full_argument_spec(
+ name=dict(required=False, default=None),
+ ram=dict(required=False, default=None),
+ vcpus=dict(required=False, default=None),
+ limit=dict(required=False, default=None, type='int'),
+ )
+ module_kwargs = openstack_module_kwargs(
+ mutually_exclusive=[
+ ['name', 'ram'],
+ ['name', 'vcpus'],
+ ]
+ )
+ module = AnsibleModule(argument_spec, **module_kwargs)
+
+ if not HAS_SHADE:
+ module.fail_json(msg='shade is required for this module')
+
+ name = module.params['name']
+ vcpus = module.params['vcpus']
+ ram = module.params['ram']
+ limit = module.params['limit']
+
+ try:
+ cloud = shade.openstack_cloud(**module.params)
+ if name:
+ flavors = cloud.search_flavors(filters={'name': name})
+ else:
+ flavors = cloud.list_flavors()
+ filters = {}
+ if vcpus:
+ filters['vcpus'] = vcpus
+ if ram:
+ filters['ram'] = ram
+ if filters:
+ # Range search added in 1.5.0
+ if StrictVersion(shade.__version__) < StrictVersion('1.5.0'):
+ module.fail_json(msg="Shade >= 1.5.0 needed for this functionality")
+ flavors = cloud.range_search(flavors, filters)
+
+ if limit is not None:
+ flavors = flavors[:limit]
+
+ module.exit_json(changed=False,
+ ansible_facts=dict(openstack_flavors=flavors))
+
+ except shade.OpenStackCloudException as e:
+ module.fail_json(msg=str(e))
+
+
+from ansible.module_utils.basic import *
+from ansible.module_utils.openstack import *
+
+if __name__ == '__main__':
+ main()
diff --git a/lib/ansible/modules/extras/cloud/openstack/os_group.py b/lib/ansible/modules/extras/cloud/openstack/os_group.py
new file mode 100644
index 0000000000..4f317abccd
--- /dev/null
+++ b/lib/ansible/modules/extras/cloud/openstack/os_group.py
@@ -0,0 +1,167 @@
+#!/usr/bin/python
+# Copyright (c) 2016 IBM
+#
+# This module is free software: you can redistribute it and/or modify
+# it under the terms of the GNU General Public License as published by
+# the Free Software Foundation, either version 3 of the License, or
+# (at your option) any later version.
+#
+# This software is distributed in the hope that it will be useful,
+# but WITHOUT ANY WARRANTY; without even the implied warranty of
+# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+# GNU General Public License for more details.
+#
+# You should have received a copy of the GNU General Public License
+# along with this software. If not, see <http://www.gnu.org/licenses/>.
+
+
+try:
+ import shade
+ HAS_SHADE = True
+except ImportError:
+ HAS_SHADE = False
+
+DOCUMENTATION = '''
+---
+module: os_group
+short_description: Manage OpenStack Identity Groups
+extends_documentation_fragment: openstack
+version_added: "2.1"
+author: "Monty Taylor (@emonty), David Shrewsbury (@Shrews)"
+description:
+ - Manage OpenStack Identity Groups. Groups can be created, deleted or
+ updated. Only the I(description) value can be updated.
+options:
+ name:
+ description:
+ - Group name
+ required: true
+ description:
+ description:
+ - Group description
+ required: false
+ default: None
+ state:
+ description:
+ - Should the resource be present or absent.
+ choices: [present, absent]
+ default: present
+requirements:
+ - "python >= 2.6"
+ - "shade"
+'''
+
+EXAMPLES = '''
+# Create a group named "demo"
+- os_group:
+ cloud: mycloud
+ state: present
+ name: demo
+ description: "Demo Group"
+
+# Update the description on existing "demo" group
+- os_group:
+ cloud: mycloud
+ state: present
+ name: demo
+ description: "Something else"
+
+# Delete group named "demo"
+- os_group:
+ cloud: mycloud
+ state: absent
+ name: demo
+'''
+
+RETURN = '''
+group:
+ description: Dictionary describing the group.
+ returned: On success when I(state) is 'present'.
+ type: dictionary
+ contains:
+ id:
+ description: Unique group ID
+ type: string
+ sample: "ee6156ff04c645f481a6738311aea0b0"
+ name:
+ description: Group name
+ type: string
+ sample: "demo"
+ description:
+ description: Group description
+ type: string
+ sample: "Demo Group"
+ domain_id:
+ description: Domain for the group
+ type: string
+ sample: "default"
+'''
+
+
+def _system_state_change(state, description, group):
+ if state == 'present' and not group:
+ return True
+ if state == 'present' and description is not None and group.description != description:
+ return True
+ if state == 'absent' and group:
+ return True
+ return False
+
+
+def main():
+ argument_spec = openstack_full_argument_spec(
+ name=dict(required=True),
+ description=dict(required=False, default=None),
+ state=dict(default='present', choices=['absent', 'present']),
+ )
+
+ module_kwargs = openstack_module_kwargs()
+ module = AnsibleModule(argument_spec,
+ supports_check_mode=True,
+ **module_kwargs)
+
+ if not HAS_SHADE:
+ module.fail_json(msg='shade is required for this module')
+
+ name = module.params.pop('name')
+ description = module.params.pop('description')
+ state = module.params.pop('state')
+
+ try:
+ cloud = shade.operator_cloud(**module.params)
+ group = cloud.get_group(name)
+
+ if module.check_mode:
+ module.exit_json(changed=_system_state_change(state, description, group))
+
+ if state == 'present':
+ if group is None:
+ group = cloud.create_group(
+ name=name, description=description)
+ changed = True
+ else:
+ if description is not None and group.description != description:
+ group = cloud.update_group(
+ group.id, description=description)
+ changed = True
+ else:
+ changed = False
+ module.exit_json(changed=changed, group=group)
+
+ elif state == 'absent':
+ if group is None:
+ changed = False
+ else:
+ cloud.delete_group(group.id)
+ changed = True
+ module.exit_json(changed=changed)
+
+ except shade.OpenStackCloudException as e:
+ module.fail_json(msg=str(e))
+
+
+from ansible.module_utils.basic import *
+from ansible.module_utils.openstack import *
+
+if __name__ == '__main__':
+ main()
diff --git a/lib/ansible/modules/extras/cloud/openstack/os_ironic_inspect.py b/lib/ansible/modules/extras/cloud/openstack/os_ironic_inspect.py
new file mode 100644
index 0000000000..5299da0933
--- /dev/null
+++ b/lib/ansible/modules/extras/cloud/openstack/os_ironic_inspect.py
@@ -0,0 +1,169 @@
+#!/usr/bin/python
+# coding: utf-8 -*-
+
+# (c) 2015-2016, Hewlett Packard Enterprise Development Company LP
+#
+# This module is free software: you can redistribute it and/or modify
+# it under the terms of the GNU General Public License as published by
+# the Free Software Foundation, either version 3 of the License, or
+# (at your option) any later version.
+#
+# This software is distributed in the hope that it will be useful,
+# but WITHOUT ANY WARRANTY; without even the implied warranty of
+# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+# GNU General Public License for more details.
+#
+# You should have received a copy of the GNU General Public License
+# along with this software. If not, see <http://www.gnu.org/licenses/>.
+
+try:
+ import shade
+ HAS_SHADE = True
+except ImportError:
+ HAS_SHADE = False
+
+from distutils.version import StrictVersion
+
+DOCUMENTATION = '''
+---
+module: os_ironic_inspect
+short_description: Explicitly triggers baremetal node introspection in ironic.
+extends_documentation_fragment: openstack
+author: "Julia Kreger (@juliakreger)"
+version_added: "2.1"
+description:
+ - Requests Ironic to set a node into inspect state in order to collect metadata regarding the node.
+ This command may be out of band or in-band depending on the ironic driver configuration.
+ This is only possible on nodes in 'manageable' and 'available' state.
+options:
+ mac:
+ description:
+ - unique mac address that is used to attempt to identify the host.
+ required: false
+ default: None
+ uuid:
+ description:
+ - globally unique identifier (UUID) to identify the host.
+ required: false
+ default: None
+ name:
+ description:
+ - unique name identifier to identify the host in Ironic.
+ required: false
+ default: None
+ ironic_url:
+ description:
+ - If noauth mode is utilized, this is required to be set to the endpoint URL for the Ironic API.
+ Use with "auth" and "auth_type" settings set to None.
+ required: false
+ default: None
+ timeout:
+ description:
+ - A timeout in seconds to tell the module to wait for the node to complete introspection if wait is set to True.
+ required: false
+ default: 1200
+
+requirements: ["shade"]
+'''
+
+RETURN = '''
+ansible_facts:
+ description: Dictionary of new facts representing discovered properties of the node.
+ returned: changed
+ type: dictionary
+ contains:
+ memory_mb:
+ description: Amount of node memory as updated in the node properties
+ type: string
+ sample: "1024"
+ cpu_arch:
+ description: Detected CPU architecture type
+ type: string
+ sample: "x86_64"
+ local_gb:
+ description: Total size of local disk storage as updated in node properties.
+ type: string
+ sample: "10"
+ cpus:
+ description: Count of cpu cores defined in the updated node properties.
+ type: string
+ sample: "1"
+'''
+
+EXAMPLES = '''
+# Invoke node inspection
+- os_ironic_inspect:
+ name: "testnode1"
+'''
+
+
+def _choose_id_value(module):
+ if module.params['uuid']:
+ return module.params['uuid']
+ if module.params['name']:
+ return module.params['name']
+ return None
+
+
+def main():
+ argument_spec = openstack_full_argument_spec(
+ auth_type=dict(required=False),
+ uuid=dict(required=False),
+ name=dict(required=False),
+ mac=dict(required=False),
+ ironic_url=dict(required=False),
+ timeout=dict(default=1200, type='int', required=False),
+ )
+ module_kwargs = openstack_module_kwargs()
+ module = AnsibleModule(argument_spec, **module_kwargs)
+
+ if not HAS_SHADE:
+ module.fail_json(msg='shade is required for this module')
+ if StrictVersion(shade.__version__) < StrictVersion('1.0.0'):
+ module.fail_json(msg="To utilize this module, the installed version of"
+ "the shade library MUST be >=1.0.0")
+
+ if (module.params['auth_type'] in [None, 'None'] and
+ module.params['ironic_url'] is None):
+ module.fail_json(msg="Authentication appears to be disabled, "
+ "Please define an ironic_url parameter")
+
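+ # In noauth mode, shade is given the bare ironic endpoint via the auth dict.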
+ if (module.params['ironic_url'] and
+ module.params['auth_type'] in [None, 'None']):
+ module.params['auth'] = dict(
+ endpoint=module.params['ironic_url']
+ )
+
+ try:
+ cloud = shade.operator_cloud(**module.params)
+
+ if module.params['name'] or module.params['uuid']:
+ server = cloud.get_machine(_choose_id_value(module))
+ elif module.params['mac']:
+ server = cloud.get_machine_by_mac(module.params['mac'])
+ else:
+ module.fail_json(msg="The worlds did not align, "
+ "the host was not found as "
+ "no name, uuid, or mac was "
+ "defined.")
+ if server:
+ cloud.inspect_machine(server['uuid'], module.params['wait'])
+ # TODO(TheJulia): diff properties, ?and ports? and determine
+ # if a change occurred. In theory, the node is always changed
+ # if introspection is able to update the record.
+ module.exit_json(changed=True,
+ ansible_facts=server['properties'])
+
+ else:
+ module.fail_json(msg="node not found.")
+
+ except shade.OpenStackCloudException as e:
+ module.fail_json(msg=str(e))
+
+
+# this is magic, see lib/ansible/module_common.py
+from ansible.module_utils.basic import *
+from ansible.module_utils.openstack import *
+
+if __name__ == "__main__":
+ main()
diff --git a/lib/ansible/modules/extras/cloud/openstack/os_keystone_domain.py b/lib/ansible/modules/extras/cloud/openstack/os_keystone_domain.py
new file mode 100644
index 0000000000..bed2f0410c
--- /dev/null
+++ b/lib/ansible/modules/extras/cloud/openstack/os_keystone_domain.py
@@ -0,0 +1,191 @@
+#!/usr/bin/python
+# Copyright (c) 2015 Hewlett-Packard Development Company, L.P.
+#
+# This module is free software: you can redistribute it and/or modify
+# it under the terms of the GNU General Public License as published by
+# the Free Software Foundation, either version 3 of the License, or
+# (at your option) any later version.
+#
+# This software is distributed in the hope that it will be useful,
+# but WITHOUT ANY WARRANTY; without even the implied warranty of
+# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+# GNU General Public License for more details.
+#
+# You should have received a copy of the GNU General Public License
+# along with this software. If not, see <http://www.gnu.org/licenses/>.
+
+
+try:
+ import shade
+ HAS_SHADE = True
+except ImportError:
+ HAS_SHADE = False
+
+DOCUMENTATION = '''
+---
+module: os_keystone_domain
+short_description: Manage OpenStack Identity Domains
+extends_documentation_fragment: openstack
+version_added: "2.1"
+description:
+ - Create, update, or delete OpenStack Identity domains. If a domain
+ with the supplied name already exists, it will be updated with the
+ new description and enabled attributes.
+options:
+ name:
+ description:
+ - Name of the domain
+ required: true
+ description:
+ description:
+ - Description of the domain
+ required: false
+ default: None
+ enabled:
+ description:
+ - Is the domain enabled
+ required: false
+ default: True
+ state:
+ description:
+ - Should the resource be present or absent.
+ choices: [present, absent]
+ default: present
+requirements:
+ - "python >= 2.6"
+ - "shade"
+'''
+
+EXAMPLES = '''
+# Create a domain
+- os_keystone_domain:
+ cloud: mycloud
+ state: present
+ name: demo
+ description: Demo Domain
+
+# Delete a domain
+- os_keystone_domain:
+ cloud: mycloud
+ state: absent
+ name: demo
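+
+# Disable a domain; an existing domain is updated in place, so only the
+# enabled attribute changes here
+- os_keystone_domain:
+ cloud: mycloud
+ state: present
+ name: demo
+ enabled: False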
+'''
+
+RETURN = '''
+domain:
+ description: Dictionary describing the domain.
+ returned: On success when I(state) is 'present'
+ type: dictionary
+ contains:
+ id:
+ description: Domain ID.
+ type: string
+ sample: "474acfe5-be34-494c-b339-50f06aa143e4"
+ name:
+ description: Domain name.
+ type: string
+ sample: "demo"
+ description:
+ description: Domain description.
+ type: string
+ sample: "Demo Domain"
+ enabled:
+ description: Flag indicating whether the domain is enabled.
+ type: boolean
+ sample: True
+
+id:
+ description: The domain ID.
+ returned: On success when I(state) is 'present'
+ type: string
+ sample: "474acfe5-be34-494c-b339-50f06aa143e4"
+'''
+
+def _needs_update(module, domain):
+ if domain.description != module.params['description']:
+ return True
+ if domain.enabled != module.params['enabled']:
+ return True
+ return False
+
+def _system_state_change(module, domain):
+ state = module.params['state']
+ if state == 'absent' and domain:
+ return True
+
+ if state == 'present':
+ if domain is None:
+ return True
+ return _needs_update(module, domain)
+
+ return False
+
+def main():
+
+ argument_spec = openstack_full_argument_spec(
+ name=dict(required=True),
+ description=dict(default=None),
+ enabled=dict(default=True, type='bool'),
+ state=dict(default='present', choices=['absent', 'present']),
+ )
+
+ module_kwargs = openstack_module_kwargs()
+ module = AnsibleModule(argument_spec,
+ supports_check_mode=True,
+ **module_kwargs)
+
+ if not HAS_SHADE:
+ module.fail_json(msg='shade is required for this module')
+
+ name = module.params['name']
+ description = module.params['description']
+ enabled = module.params['enabled']
+ state = module.params['state']
+
+ try:
+ cloud = shade.operator_cloud(**module.params)
+
+ domains = cloud.search_domains(filters=dict(name=name))
+
+ if len(domains) > 1:
+ module.fail_json(msg='Domain name %s is not unique' % name)
+ elif len(domains) == 1:
+ domain = domains[0]
+ else:
+ domain = None
+
+ if module.check_mode:
+ module.exit_json(changed=_system_state_change(module, domain))
+
+ if state == 'present':
+ if domain is None:
+ domain = cloud.create_domain(
+ name=name, description=description, enabled=enabled)
+ changed = True
+ else:
+ if _needs_update(module, domain):
+ domain = cloud.update_domain(
+ domain.id, name=name, description=description,
+ enabled=enabled)
+ changed = True
+ else:
+ changed = False
+ module.exit_json(changed=changed, domain=domain, id=domain.id)
+
+ elif state == 'absent':
+ if domain is None:
+ changed = False
+ else:
+ cloud.delete_domain(domain.id)
+ changed = True
+ module.exit_json(changed=changed)
+
+ except shade.OpenStackCloudException as e:
+ module.fail_json(msg=str(e))
+
+
+from ansible.module_utils.basic import *
+from ansible.module_utils.openstack import *
+
+if __name__ == '__main__':
+ main()
diff --git a/lib/ansible/modules/extras/cloud/openstack/os_keystone_domain_facts.py b/lib/ansible/modules/extras/cloud/openstack/os_keystone_domain_facts.py
new file mode 100644
index 0000000000..5df2f2b797
--- /dev/null
+++ b/lib/ansible/modules/extras/cloud/openstack/os_keystone_domain_facts.py
@@ -0,0 +1,137 @@
+#!/usr/bin/python
+# Copyright (c) 2016 Hewlett-Packard Enterprise Corporation
+#
+# This module is free software: you can redistribute it and/or modify
+# it under the terms of the GNU General Public License as published by
+# the Free Software Foundation, either version 3 of the License, or
+# (at your option) any later version.
+#
+# This software is distributed in the hope that it will be useful,
+# but WITHOUT ANY WARRANTY; without even the implied warranty of
+# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+# GNU General Public License for more details.
+#
+# You should have received a copy of the GNU General Public License
+# along with this software. If not, see <http://www.gnu.org/licenses/>.
+
+
+try:
+ import shade
+ HAS_SHADE = True
+except ImportError:
+ HAS_SHADE = False
+
+DOCUMENTATION = '''
+---
+module: os_keystone_domain_facts
+short_description: Retrieve facts about one or more OpenStack domains
+extends_documentation_fragment: openstack
+version_added: "2.1"
+author: "Ricardo Carrillo Cruz (@rcarrillocruz)"
+description:
+ - Retrieve facts about one or more OpenStack domains
+requirements:
+ - "python >= 2.6"
+ - "shade"
+options:
+ name:
+ description:
+ - Name or ID of the domain
+ required: false
+ filters:
+ description:
+ - A dictionary of meta data to use for further filtering. Elements of
+ this dictionary may be additional dictionaries.
+ required: false
+ default: None
+'''
+
+EXAMPLES = '''
+# Gather facts about previously created domain
+- os_keystone_domain_facts:
+ cloud: awesomecloud
+- debug: var=openstack_domains
+
+# Gather facts about a previously created domain by name
+- os_keystone_domain_facts:
+ cloud: awesomecloud
+ name: demodomain
+- debug: var=openstack_domains
+
+# Gather facts about domains matching a filter; note that name and filters
+# are mutually exclusive in this module
+- os_keystone_domain_facts:
+ cloud: awesomecloud
+ filters:
+ enabled: False
+- debug: var=openstack_domains
+'''
+
+
+RETURN = '''
+openstack_domains:
+ description: has all the OpenStack facts about domains
+ returned: always, but can be null
+ type: complex
+ contains:
+ id:
+ description: Unique UUID.
+ returned: success
+ type: string
+ name:
+ description: Name given to the domain.
+ returned: success
+ type: string
+ description:
+ description: Description of the domain.
+ returned: success
+ type: string
+ enabled:
+ description: Flag to indicate if the domain is enabled.
+ returned: success
+ type: bool
+'''
+
+def main():
+
+ argument_spec = openstack_full_argument_spec(
+ name=dict(required=False, default=None),
+ filters=dict(required=False, type='dict', default=None),
+ )
+ module_kwargs = openstack_module_kwargs(
+ mutually_exclusive=[
+ ['name', 'filters'],
+ ]
+ )
+ module = AnsibleModule(argument_spec, **module_kwargs)
+
+ if not HAS_SHADE:
+ module.fail_json(msg='shade is required for this module')
+
+ try:
+ name = module.params['name']
+ filters = module.params['filters']
+
+ opcloud = shade.operator_cloud(**module.params)
+
+ if name:
+ # Let's suppose user is passing domain ID
+ try:
+                domains = opcloud.get_domain(name)
+ except:
+ domains = opcloud.search_domains(filters={'name': name})
+
+ else:
+ domains = opcloud.search_domains(filters)
+
+ module.exit_json(changed=False, ansible_facts=dict(
+ openstack_domains=domains))
+
+ except shade.OpenStackCloudException as e:
+ module.fail_json(msg=str(e))
+
+from ansible.module_utils.basic import *
+from ansible.module_utils.openstack import *
+
+if __name__ == '__main__':
+ main()
diff --git a/lib/ansible/modules/extras/cloud/openstack/os_keystone_role.py b/lib/ansible/modules/extras/cloud/openstack/os_keystone_role.py
new file mode 100644
index 0000000000..def91a8b32
--- /dev/null
+++ b/lib/ansible/modules/extras/cloud/openstack/os_keystone_role.py
@@ -0,0 +1,136 @@
+#!/usr/bin/python
+# Copyright (c) 2016 IBM
+#
+# This module is free software: you can redistribute it and/or modify
+# it under the terms of the GNU General Public License as published by
+# the Free Software Foundation, either version 3 of the License, or
+# (at your option) any later version.
+#
+# This software is distributed in the hope that it will be useful,
+# but WITHOUT ANY WARRANTY; without even the implied warranty of
+# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+# GNU General Public License for more details.
+#
+# You should have received a copy of the GNU General Public License
+# along with this software. If not, see <http://www.gnu.org/licenses/>.
+
+
+try:
+ import shade
+ HAS_SHADE = True
+except ImportError:
+ HAS_SHADE = False
+
+DOCUMENTATION = '''
+---
+module: os_keystone_role
+short_description: Manage OpenStack Identity Roles
+extends_documentation_fragment: openstack
+version_added: "2.1"
+author: "Monty Taylor (@emonty), David Shrewsbury (@Shrews)"
+description:
+ - Manage OpenStack Identity Roles.
+options:
+ name:
+ description:
+ - Role Name
+ required: true
+ state:
+ description:
+ - Should the resource be present or absent.
+ choices: [present, absent]
+ default: present
+requirements:
+ - "python >= 2.6"
+ - "shade"
+'''
+
+EXAMPLES = '''
+# Create a role named "demo"
+- os_keystone_role:
+ cloud: mycloud
+ state: present
+ name: demo
+
+# Delete the role named "demo"
+- os_keystone_role:
+ cloud: mycloud
+ state: absent
+ name: demo
+'''
+
+RETURN = '''
+role:
+ description: Dictionary describing the role.
+ returned: On success when I(state) is 'present'.
+ type: dictionary
+ contains:
+ id:
+ description: Unique role ID.
+ type: string
+ sample: "677bfab34c844a01b88a217aa12ec4c2"
+ name:
+ description: Role name.
+ type: string
+ sample: "demo"
+'''
+
+
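+# Predict whether a change would be made, so check mode can report a
+# result without touching the cloud.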
+def _system_state_change(state, role):
+ if state == 'present' and not role:
+ return True
+ if state == 'absent' and role:
+ return True
+ return False
+
+
+def main():
+ argument_spec = openstack_full_argument_spec(
+ name=dict(required=True),
+ state=dict(default='present', choices=['absent', 'present']),
+ )
+
+ module_kwargs = openstack_module_kwargs()
+ module = AnsibleModule(argument_spec,
+ supports_check_mode=True,
+ **module_kwargs)
+
+ if not HAS_SHADE:
+ module.fail_json(msg='shade is required for this module')
+
+ name = module.params.pop('name')
+ state = module.params.pop('state')
+
+ try:
+ cloud = shade.operator_cloud(**module.params)
+
+ role = cloud.get_role(name)
+
+ if module.check_mode:
+ module.exit_json(changed=_system_state_change(state, role))
+
+ if state == 'present':
+ if role is None:
+ role = cloud.create_role(name)
+ changed = True
+ else:
+ changed = False
+ module.exit_json(changed=changed, role=role)
+ elif state == 'absent':
+ if role is None:
+            changed = False
+ else:
+ cloud.delete_role(name)
+            changed = True
+ module.exit_json(changed=changed)
+
+ except shade.OpenStackCloudException as e:
+ module.fail_json(msg=str(e))
+
+
+from ansible.module_utils.basic import *
+from ansible.module_utils.openstack import *
+
+
+if __name__ == '__main__':
+ main()
diff --git a/lib/ansible/modules/extras/cloud/openstack/os_keystone_service.py b/lib/ansible/modules/extras/cloud/openstack/os_keystone_service.py
new file mode 100644
index 0000000000..4e3e46cc5c
--- /dev/null
+++ b/lib/ansible/modules/extras/cloud/openstack/os_keystone_service.py
@@ -0,0 +1,210 @@
+#!/usr/bin/python
+# Copyright 2016 Sam Yaple
+#
+# This module is free software: you can redistribute it and/or modify
+# it under the terms of the GNU General Public License as published by
+# the Free Software Foundation, either version 3 of the License, or
+# (at your option) any later version.
+#
+# This software is distributed in the hope that it will be useful,
+# but WITHOUT ANY WARRANTY; without even the implied warranty of
+# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+# GNU General Public License for more details.
+#
+# You should have received a copy of the GNU General Public License
+# along with this software. If not, see <http://www.gnu.org/licenses/>.
+
+try:
+ import shade
+ HAS_SHADE = True
+except ImportError:
+ HAS_SHADE = False
+
+from distutils.version import StrictVersion
+
+DOCUMENTATION = '''
+---
+module: os_keystone_service
+short_description: Manage OpenStack Identity services
+extends_documentation_fragment: openstack
+author: "Sam Yaple (@SamYaple)"
+version_added: "2.2"
+description:
+ - Create, update, or delete OpenStack Identity service. If a service
+ with the supplied name already exists, it will be updated with the
+ new description and enabled attributes.
+options:
+ name:
+ description:
+ - Name of the service
+ required: true
+ description:
+ description:
+ - Description of the service
+ required: false
+ default: None
+ enabled:
+ description:
+ - Is the service enabled
+ required: false
+ default: True
+ service_type:
+ description:
+ - The type of service
+ required: true
+ state:
+ description:
+ - Should the resource be present or absent.
+ choices: [present, absent]
+ default: present
+requirements:
+ - "python >= 2.6"
+ - "shade"
+'''
+
+EXAMPLES = '''
+# Create a service for glance
+- os_keystone_service:
+ cloud: mycloud
+ state: present
+ name: glance
+ service_type: image
+ description: OpenStack Image Service
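+# Update the description on the existing service (a minimal sketch; the
+# new description value is illustrative)
+- os_keystone_service:
+    cloud: mycloud
+    state: present
+    name: glance
+    service_type: image
+    description: OpenStack Image Service v2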
+# Delete a service
+- os_keystone_service:
+ cloud: mycloud
+ state: absent
+ name: glance
+ service_type: image
+'''
+
+RETURN = '''
+service:
+ description: Dictionary describing the service.
+ returned: On success when I(state) is 'present'
+ type: dictionary
+ contains:
+ id:
+ description: Service ID.
+ type: string
+ sample: "3292f020780b4d5baf27ff7e1d224c44"
+ name:
+ description: Service name.
+ type: string
+ sample: "glance"
+ service_type:
+ description: Service type.
+ type: string
+ sample: "image"
+ description:
+ description: Service description.
+ type: string
+ sample: "OpenStack Image Service"
+ enabled:
+ description: Service status.
+ type: boolean
+ sample: True
+id:
+ description: The service ID.
+ returned: On success when I(state) is 'present'
+ type: string
+ sample: "3292f020780b4d5baf27ff7e1d224c44"
+'''
+
+
+def _needs_update(module, service):
+ if service.enabled != module.params['enabled']:
+ return True
+    if module.params['description'] is not None and \
+            service.description != module.params['description']:
+ return True
+ return False
+
+
+def _system_state_change(module, service):
+ state = module.params['state']
+ if state == 'absent' and service:
+ return True
+
+ if state == 'present':
+ if service is None:
+ return True
+ return _needs_update(module, service)
+
+ return False
+
+
+def main():
+ argument_spec = openstack_full_argument_spec(
+ description=dict(default=None),
+ enabled=dict(default=True, type='bool'),
+ name=dict(required=True),
+ service_type=dict(required=True),
+ state=dict(default='present', choices=['absent', 'present']),
+ )
+
+ module_kwargs = openstack_module_kwargs()
+ module = AnsibleModule(argument_spec,
+ supports_check_mode=True,
+ **module_kwargs)
+
+ if not HAS_SHADE:
+ module.fail_json(msg='shade is required for this module')
+ if StrictVersion(shade.__version__) < StrictVersion('1.6.0'):
+ module.fail_json(msg="To utilize this module, the installed version of"
+ "the shade library MUST be >=1.6.0")
+
+ description = module.params['description']
+ enabled = module.params['enabled']
+ name = module.params['name']
+ state = module.params['state']
+ service_type = module.params['service_type']
+
+ try:
+ cloud = shade.operator_cloud(**module.params)
+
+ services = cloud.search_services(name_or_id=name,
+ filters=dict(type=service_type))
+
+ if len(services) > 1:
+ module.fail_json(msg='Service name %s and type %s are not unique' %
+ (name, service_type))
+ elif len(services) == 1:
+ service = services[0]
+ else:
+ service = None
+
+ if module.check_mode:
+ module.exit_json(changed=_system_state_change(module, service))
+
+ if state == 'present':
+ if service is None:
+ service = cloud.create_service(name=name,
+                    description=description, type=service_type, enabled=enabled)
+ changed = True
+ else:
+ if _needs_update(module, service):
+ service = cloud.update_service(
+ service.id, name=name, type=service_type, enabled=enabled,
+ description=description)
+ changed = True
+ else:
+ changed = False
+ module.exit_json(changed=changed, service=service, id=service.id)
+
+ elif state == 'absent':
+ if service is None:
+            changed = False
+ else:
+ cloud.delete_service(service.id)
+            changed = True
+ module.exit_json(changed=changed)
+
+ except shade.OpenStackCloudException as e:
+ module.fail_json(msg=str(e))
+
+
+from ansible.module_utils.basic import *
+from ansible.module_utils.openstack import *
+if __name__ == '__main__':
+ main()
diff --git a/lib/ansible/modules/extras/cloud/openstack/os_port_facts.py b/lib/ansible/modules/extras/cloud/openstack/os_port_facts.py
new file mode 100644
index 0000000000..e304821187
--- /dev/null
+++ b/lib/ansible/modules/extras/cloud/openstack/os_port_facts.py
@@ -0,0 +1,225 @@
+#!/usr/bin/python
+
+# Copyright (c) 2016 IBM
+#
+# This module is free software: you can redistribute it and/or modify
+# it under the terms of the GNU General Public License as published by
+# the Free Software Foundation, either version 3 of the License, or
+# (at your option) any later version.
+#
+# This software is distributed in the hope that it will be useful,
+# but WITHOUT ANY WARRANTY; without even the implied warranty of
+# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+# GNU General Public License for more details.
+#
+# You should have received a copy of the GNU General Public License
+# along with this software. If not, see <http://www.gnu.org/licenses/>.
+
+try:
+ import shade
+ HAS_SHADE = True
+except ImportError:
+ HAS_SHADE = False
+
+DOCUMENTATION = '''
+module: os_port_facts
+short_description: Retrieve facts about ports within OpenStack.
+version_added: "2.1"
+author: "David Shrewsbury (@Shrews)"
+description:
+ - Retrieve facts about ports from OpenStack.
+notes:
+ - Facts are placed in the C(openstack_ports) variable.
+requirements:
+ - "python >= 2.6"
+ - "shade"
+options:
+ port:
+ description:
+ - Unique name or ID of a port.
+ required: false
+ default: null
+ filters:
+ description:
+ - A dictionary of meta data to use for further filtering. Elements
+ of this dictionary will be matched against the returned port
+ dictionaries. Matching is currently limited to strings within
+ the port dictionary, or strings within nested dictionaries.
+ required: false
+ default: null
+extends_documentation_fragment: openstack
+'''
+
+EXAMPLES = '''
+# Gather facts about all ports
+- os_port_facts:
+ cloud: mycloud
+
+# Gather facts about a single port
+- os_port_facts:
+ cloud: mycloud
+ port: 6140317d-e676-31e1-8a4a-b1913814a471
+
+# Gather facts about all ports that have device_id set to a specific value
+# and with a status of ACTIVE.
+- os_port_facts:
+ cloud: mycloud
+ filters:
+ device_id: 1038a010-3a37-4a9d-82ea-652f1da36597
+ status: ACTIVE
+'''
+
+RETURN = '''
+openstack_ports:
+ description: List of port dictionaries. A subset of the dictionary keys
+ listed below may be returned, depending on your cloud provider.
+ returned: always, but can be null
+ type: complex
+ contains:
+ admin_state_up:
+            description: The administrative state of the port, which is
+                         up (true) or down (false).
+ returned: success
+ type: boolean
+ sample: true
+ allowed_address_pairs:
+ description: A set of zero or more allowed address pairs. An
+ address pair consists of an IP address and MAC address.
+ returned: success
+ type: list
+ sample: []
+ "binding:host_id":
+ description: The UUID of the host where the port is allocated.
+ returned: success
+ type: string
+ sample: "b4bd682d-234a-4091-aa5b-4b025a6a7759"
+ "binding:profile":
+            description: A dictionary that enables the application running on
+ the host to pass and receive VIF port-specific
+ information to the plug-in.
+ returned: success
+ type: dict
+ sample: {}
+ "binding:vif_details":
+ description: A dictionary that enables the application to pass
+ information about functions that the Networking API
+ provides.
+ returned: success
+ type: dict
+ sample: {"port_filter": true}
+ "binding:vif_type":
+ description: The VIF type for the port.
+ returned: success
+            type: string
+ sample: "ovs"
+ "binding:vnic_type":
+ description: The virtual network interface card (vNIC) type that is
+ bound to the neutron port.
+ returned: success
+ type: string
+ sample: "normal"
+ device_id:
+ description: The UUID of the device that uses this port.
+ returned: success
+ type: string
+ sample: "b4bd682d-234a-4091-aa5b-4b025a6a7759"
+ device_owner:
+            description: The entity that uses this port.
+ returned: success
+ type: string
+ sample: "network:router_interface"
+ dns_assignment:
+ description: DNS assignment information.
+ returned: success
+ type: list
+ dns_name:
+ description: DNS name
+ returned: success
+ type: string
+ sample: ""
+ extra_dhcp_opts:
+ description: A set of zero or more extra DHCP option pairs.
+ An option pair consists of an option value and name.
+ returned: success
+ type: list
+ sample: []
+ fixed_ips:
+ description: The IP addresses for the port. Includes the IP address
+ and UUID of the subnet.
+ returned: success
+ type: list
+ id:
+ description: The UUID of the port.
+ returned: success
+ type: string
+ sample: "3ec25c97-7052-4ab8-a8ba-92faf84148de"
+ ip_address:
+ description: The IP address.
+ returned: success
+ type: string
+ sample: "127.0.0.1"
+ mac_address:
+ description: The MAC address.
+ returned: success
+ type: string
+ sample: "00:00:5E:00:53:42"
+ name:
+ description: The port name.
+ returned: success
+ type: string
+ sample: "port_name"
+ network_id:
+ description: The UUID of the attached network.
+ returned: success
+ type: string
+ sample: "dd1ede4f-3952-4131-aab6-3b8902268c7d"
+ port_security_enabled:
+ description: The port security status. The status is enabled (true) or disabled (false).
+ returned: success
+ type: boolean
+ sample: false
+ security_groups:
+ description: The UUIDs of any attached security groups.
+ returned: success
+ type: list
+ status:
+ description: The port status.
+ returned: success
+ type: string
+ sample: "ACTIVE"
+ tenant_id:
+ description: The UUID of the tenant who owns the network.
+ returned: success
+ type: string
+ sample: "51fce036d7984ba6af4f6c849f65ef00"
+'''
+
+
+def main():
+ argument_spec = openstack_full_argument_spec(
+ port=dict(required=False),
+ filters=dict(type='dict', required=False),
+ )
+ module_kwargs = openstack_module_kwargs()
+ module = AnsibleModule(argument_spec, **module_kwargs)
+
+ if not HAS_SHADE:
+ module.fail_json(msg='shade is required for this module')
+
+ port = module.params.pop('port')
+ filters = module.params.pop('filters')
+
+ try:
+ cloud = shade.openstack_cloud(**module.params)
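+        # Both arguments may be None; with no port and no filters,
+        # search_ports returns every port visible to these credentials.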
+ ports = cloud.search_ports(port, filters)
+ module.exit_json(changed=False, ansible_facts=dict(
+ openstack_ports=ports))
+
+ except shade.OpenStackCloudException as e:
+ module.fail_json(msg=str(e))
+
+from ansible.module_utils.basic import *
+from ansible.module_utils.openstack import *
+
+if __name__ == '__main__':
+ main()
diff --git a/lib/ansible/modules/extras/cloud/openstack/os_project.py b/lib/ansible/modules/extras/cloud/openstack/os_project.py
new file mode 100644
index 0000000000..4c686724c8
--- /dev/null
+++ b/lib/ansible/modules/extras/cloud/openstack/os_project.py
@@ -0,0 +1,228 @@
+#!/usr/bin/python
+# Copyright (c) 2015 IBM Corporation
+#
+# This module is free software: you can redistribute it and/or modify
+# it under the terms of the GNU General Public License as published by
+# the Free Software Foundation, either version 3 of the License, or
+# (at your option) any later version.
+#
+# This software is distributed in the hope that it will be useful,
+# but WITHOUT ANY WARRANTY; without even the implied warranty of
+# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+# GNU General Public License for more details.
+#
+# You should have received a copy of the GNU General Public License
+# along with this software. If not, see <http://www.gnu.org/licenses/>.
+
+
+try:
+ import shade
+ HAS_SHADE = True
+except ImportError:
+ HAS_SHADE = False
+
+from distutils.version import StrictVersion
+
+DOCUMENTATION = '''
+---
+module: os_project
+short_description: Manage OpenStack Projects
+extends_documentation_fragment: openstack
+version_added: "2.0"
+author: "Alberto Gireud (@agireud)"
+description:
+ - Manage OpenStack Projects. Projects can be created,
+ updated or deleted using this module. A project will be updated
+ if I(name) matches an existing project and I(state) is present.
+ The value for I(name) cannot be updated without deleting and
+ re-creating the project.
+options:
+ name:
+ description:
+ - Name for the project
+ required: true
+ description:
+ description:
+ - Description for the project
+ required: false
+ default: None
+ domain_id:
+ description:
+ - Domain id to create the project in if the cloud supports domains.
+ The domain_id parameter requires shade >= 1.8.0
+ required: false
+ default: None
+ aliases: ['domain']
+ enabled:
+ description:
+ - Is the project enabled
+ required: false
+ default: True
+ state:
+ description:
+ - Should the resource be present or absent.
+ choices: [present, absent]
+ default: present
+requirements:
+ - "python >= 2.6"
+ - "shade"
+'''
+
+EXAMPLES = '''
+# Create a project
+- os_project:
+ cloud: mycloud
+ state: present
+ name: demoproject
+ description: demodescription
+ domain_id: demoid
+ enabled: True
+
+# Delete a project
+- os_project:
+ cloud: mycloud
+ state: absent
+ name: demoproject
+'''
+
+
+RETURN = '''
+project:
+ description: Dictionary describing the project.
+ returned: On success when I(state) is 'present'
+ type: dictionary
+ contains:
+ id:
+ description: Project ID
+ type: string
+ sample: "f59382db809c43139982ca4189404650"
+ name:
+ description: Project name
+ type: string
+ sample: "demoproject"
+ description:
+ description: Project description
+ type: string
+ sample: "demodescription"
+ enabled:
+ description: Boolean to indicate if project is enabled
+ type: bool
+ sample: True
+'''
+
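+# Only description and enabled can be updated in place; renaming a project
+# requires delete and re-create, as noted in the module description above.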
+def _needs_update(module, project):
+ keys = ('description', 'enabled')
+ for key in keys:
+ if module.params[key] is not None and module.params[key] != project.get(key):
+ return True
+
+ return False
+
+def _system_state_change(module, project):
+ state = module.params['state']
+ if state == 'present':
+ if project is None:
+ changed = True
+ else:
+ if _needs_update(module, project):
+ changed = True
+ else:
+ changed = False
+
+ elif state == 'absent':
+ if project is None:
+            changed = False
+        else:
+            changed = True
+
+    return changed
+
+def main():
+
+ argument_spec = openstack_full_argument_spec(
+ name=dict(required=True),
+ description=dict(required=False, default=None),
+ domain_id=dict(required=False, default=None, aliases=['domain']),
+ enabled=dict(default=True, type='bool'),
+ state=dict(default='present', choices=['absent', 'present'])
+ )
+
+ module_kwargs = openstack_module_kwargs()
+ module = AnsibleModule(
+ argument_spec,
+ supports_check_mode=True,
+ **module_kwargs
+ )
+
+ if not HAS_SHADE:
+ module.fail_json(msg='shade is required for this module')
+
+ name = module.params['name']
+ description = module.params['description']
+ domain = module.params.pop('domain_id')
+ enabled = module.params['enabled']
+ state = module.params['state']
+
+ if domain and StrictVersion(shade.__version__) < StrictVersion('1.8.0'):
+ module.fail_json(msg="The domain argument requires shade >=1.8.0")
+
+ try:
+ if domain:
+ opcloud = shade.operator_cloud(**module.params)
+ try:
+ # We assume admin is passing domain id
+ dom = opcloud.get_domain(domain)['id']
+ domain = dom
+ except:
+ # If we fail, maybe admin is passing a domain name.
+ # Note that domains have unique names, just like id.
+ try:
+ dom = opcloud.search_domains(filters={'name': domain})[0]['id']
+ domain = dom
+ except:
+ # Ok, let's hope the user is non-admin and passing a sane id
+ pass
+
+ cloud = shade.openstack_cloud(**module.params)
+
+ if domain:
+ project = cloud.get_project(name, domain_id=domain)
+ else:
+ project = cloud.get_project(name)
+
+ if module.check_mode:
+ module.exit_json(changed=_system_state_change(module, project))
+
+ if state == 'present':
+ if project is None:
+ project = cloud.create_project(
+ name=name, description=description,
+ domain_id=domain,
+ enabled=enabled)
+ changed = True
+ else:
+ if _needs_update(module, project):
+ project = cloud.update_project(
+ project['id'], description=description,
+ enabled=enabled)
+ changed = True
+ else:
+ changed = False
+ module.exit_json(changed=changed, project=project)
+
+ elif state == 'absent':
+ if project is None:
+            changed = False
+ else:
+ cloud.delete_project(project['id'])
+            changed = True
+ module.exit_json(changed=changed)
+
+ except shade.OpenStackCloudException as e:
+ module.fail_json(msg=e.message, extra_data=e.extra_data)
+
+from ansible.module_utils.basic import *
+from ansible.module_utils.openstack import *
+
+if __name__ == '__main__':
+ main()
diff --git a/lib/ansible/modules/extras/cloud/openstack/os_project_facts.py b/lib/ansible/modules/extras/cloud/openstack/os_project_facts.py
new file mode 100644
index 0000000000..87d3a1e9d7
--- /dev/null
+++ b/lib/ansible/modules/extras/cloud/openstack/os_project_facts.py
@@ -0,0 +1,163 @@
+#!/usr/bin/python
+# Copyright (c) 2016 Hewlett-Packard Enterprise Corporation
+#
+# This module is free software: you can redistribute it and/or modify
+# it under the terms of the GNU General Public License as published by
+# the Free Software Foundation, either version 3 of the License, or
+# (at your option) any later version.
+#
+# This software is distributed in the hope that it will be useful,
+# but WITHOUT ANY WARRANTY; without even the implied warranty of
+# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+# GNU General Public License for more details.
+#
+# You should have received a copy of the GNU General Public License
+# along with this software. If not, see <http://www.gnu.org/licenses/>.
+
+
+try:
+ import shade
+ HAS_SHADE = True
+except ImportError:
+ HAS_SHADE = False
+
+DOCUMENTATION = '''
+---
+module: os_project_facts
+short_description: Retrieve facts about one or more OpenStack projects
+extends_documentation_fragment: openstack
+version_added: "2.1"
+author: "Ricardo Carrillo Cruz (@rcarrillocruz)"
+description:
+  - Retrieve facts about one or more OpenStack projects
+requirements:
+ - "python >= 2.6"
+ - "shade"
+options:
+ name:
+ description:
+ - Name or ID of the project
+     required: false
+ domain:
+ description:
+ - Name or ID of the domain containing the project if the cloud supports domains
+ required: false
+ default: None
+ filters:
+ description:
+ - A dictionary of meta data to use for further filtering. Elements of
+ this dictionary may be additional dictionaries.
+ required: false
+ default: None
+'''
+
+EXAMPLES = '''
+# Gather facts about previously created projects
+- os_project_facts:
+ cloud: awesomecloud
+- debug: var=openstack_projects
+
+# Gather facts about a previously created project by name
+- os_project_facts:
+ cloud: awesomecloud
+ name: demoproject
+- debug: var=openstack_projects
+
+# Gather facts about a previously created project in a specific domain
+- os_project_facts:
+ cloud: awesomecloud
+ name: demoproject
+ domain: admindomain
+- debug: var=openstack_projects
+
+# Gather facts about a previously created project in a specific domain
+# with filter
+- os_project_facts:
+ cloud: awesomecloud
+ name: demoproject
+ domain: admindomain
+ filters:
+ enabled: False
+- debug: var=openstack_projects
+'''
+
+
+RETURN = '''
+openstack_projects:
+ description: has all the OpenStack facts about projects
+ returned: always, but can be null
+ type: complex
+ contains:
+ id:
+ description: Unique UUID.
+ returned: success
+ type: string
+ name:
+ description: Name given to the project.
+ returned: success
+ type: string
+ description:
+ description: Description of the project
+ returned: success
+ type: string
+ enabled:
+ description: Flag to indicate if the project is enabled
+ returned: success
+ type: bool
+ domain_id:
+ description: Domain ID containing the project (keystone v3 clouds only)
+ returned: success
+      type: string
+'''
+
+def main():
+
+ argument_spec = openstack_full_argument_spec(
+ name=dict(required=False, default=None),
+ domain=dict(required=False, default=None),
+ filters=dict(required=False, type='dict', default=None),
+ )
+
+ module = AnsibleModule(argument_spec)
+
+ if not HAS_SHADE:
+ module.fail_json(msg='shade is required for this module')
+
+ try:
+ name = module.params['name']
+ domain = module.params['domain']
+ filters = module.params['filters']
+
+ opcloud = shade.operator_cloud(**module.params)
+
+ if domain:
+ try:
+ # We assume admin is passing domain id
+ dom = opcloud.get_domain(domain)['id']
+ domain = dom
+ except:
+ # If we fail, maybe admin is passing a domain name.
+ # Note that domains have unique names, just like id.
+ dom = opcloud.search_domains(filters={'name': domain})
+ if dom:
+ domain = dom[0]['id']
+ else:
+ module.fail_json(msg='Domain name or ID does not exist')
+
+            if not filters:
+                filters = {}
+
+            filters['domain_id'] = domain
+
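+        # name may be None here, in which case all projects matching the
+        # filters are returned.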
+ projects = opcloud.search_projects(name, filters)
+ module.exit_json(changed=False, ansible_facts=dict(
+ openstack_projects=projects))
+
+ except shade.OpenStackCloudException as e:
+ module.fail_json(msg=str(e))
+
+from ansible.module_utils.basic import *
+from ansible.module_utils.openstack import *
+
+if __name__ == '__main__':
+ main()
diff --git a/lib/ansible/modules/extras/cloud/openstack/os_recordset.py b/lib/ansible/modules/extras/cloud/openstack/os_recordset.py
new file mode 100644
index 0000000000..0e86020716
--- /dev/null
+++ b/lib/ansible/modules/extras/cloud/openstack/os_recordset.py
@@ -0,0 +1,242 @@
+#!/usr/bin/python
+# Copyright (c) 2016 Hewlett-Packard Enterprise
+#
+# This module is free software: you can redistribute it and/or modify
+# it under the terms of the GNU General Public License as published by
+# the Free Software Foundation, either version 3 of the License, or
+# (at your option) any later version.
+#
+# This software is distributed in the hope that it will be useful,
+# but WITHOUT ANY WARRANTY; without even the implied warranty of
+# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+# GNU General Public License for more details.
+#
+# You should have received a copy of the GNU General Public License
+# along with this software. If not, see <http://www.gnu.org/licenses/>.
+
+
+try:
+ import shade
+ HAS_SHADE = True
+except ImportError:
+ HAS_SHADE = False
+
+from distutils.version import StrictVersion
+
+DOCUMENTATION = '''
+---
+module: os_recordset
+short_description: Manage OpenStack DNS recordsets
+extends_documentation_fragment: openstack
+version_added: "2.2"
+author: "Ricardo Carrillo Cruz (@rcarrillocruz)"
+description:
+ - Manage OpenStack DNS recordsets. Recordsets can be created, deleted or
+ updated. Only the I(records), I(description), and I(ttl) values
+ can be updated.
+options:
+ zone:
+ description:
+ - Zone managing the recordset
+ required: true
+ name:
+ description:
+ - Name of the recordset
+ required: true
+ recordset_type:
+ description:
+ - Recordset type
+ required: true
+ records:
+ description:
+ - List of recordset definitions
+ required: true
+ description:
+ description:
+ - Description of the recordset
+ required: false
+ default: None
+ ttl:
+ description:
+ - TTL (Time To Live) value in seconds
+ required: false
+ default: None
+ state:
+ description:
+ - Should the resource be present or absent.
+ choices: [present, absent]
+ default: present
+requirements:
+ - "python >= 2.6"
+ - "shade"
+'''
+
+EXAMPLES = '''
+# Create a recordset named "www.example.net."
+- os_recordset:
+ cloud: mycloud
+ state: present
+ zone: example.net.
+ name: www
+    recordset_type: a
+ records: ['10.1.1.1']
+ description: test recordset
+ ttl: 3600
+
+# Update the TTL on existing "www.example.net." recordset
+- os_recordset:
+ cloud: mycloud
+ state: present
+ zone: example.net.
+ name: www
+ ttl: 7200
+
+# Delete recordset named "www.example.net."
+- os_recordset:
+ cloud: mycloud
+ state: absent
+ zone: example.net.
+ name: www
+'''
+
+RETURN = '''
+recordset:
+ description: Dictionary describing the recordset.
+ returned: On success when I(state) is 'present'.
+ type: dictionary
+ contains:
+ id:
+ description: Unique recordset ID
+ type: string
+ sample: "c1c530a3-3619-46f3-b0f6-236927b2618c"
+ name:
+ description: Recordset name
+ type: string
+ sample: "www.example.net."
+ zone_id:
+ description: Zone id
+ type: string
+ sample: 9508e177-41d8-434e-962c-6fe6ca880af7
+ type:
+ description: Recordset type
+ type: string
+ sample: "A"
+ description:
+ description: Recordset description
+ type: string
+ sample: "Test description"
+ ttl:
+ description: Zone TTL value
+ type: int
+ sample: 3600
+ records:
+ description: Recordset records
+ type: list
+ sample: ['10.0.0.1']
+'''
+
+
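+# Decide whether a create, update or delete would occur; used both for
+# check mode and to detect whether an existing recordset needs an update.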
+def _system_state_change(state, records, description, ttl, zone, recordset):
+ if state == 'present':
+ if recordset is None:
+ return True
+ if records is not None and recordset.records != records:
+ return True
+ if description is not None and recordset.description != description:
+ return True
+ if ttl is not None and recordset.ttl != ttl:
+ return True
+ if state == 'absent' and recordset:
+ return True
+ return False
+
+def main():
+ argument_spec = openstack_full_argument_spec(
+ zone=dict(required=True),
+ name=dict(required=True),
+ recordset_type=dict(required=False),
+ records=dict(required=False, type='list'),
+ description=dict(required=False, default=None),
+ ttl=dict(required=False, default=None, type='int'),
+ state=dict(default='present', choices=['absent', 'present']),
+ )
+
+ module_kwargs = openstack_module_kwargs()
+ module = AnsibleModule(argument_spec,
+ required_if=[
+ ('state', 'present',
+ ['recordset_type', 'records'])],
+ supports_check_mode=True,
+ **module_kwargs)
+
+ if not HAS_SHADE:
+ module.fail_json(msg='shade is required for this module')
+ if StrictVersion(shade.__version__) <= StrictVersion('1.8.0'):
+ module.fail_json(msg="To utilize this module, the installed version of "
+ "the shade library MUST be >1.8.0")
+
+ zone = module.params.get('zone')
+ name = module.params.get('name')
+ state = module.params.get('state')
+
+ try:
+ cloud = shade.openstack_cloud(**module.params)
+ recordset = cloud.get_recordset(zone, name + '.' + zone)
+
+ if state == 'present':
+ recordset_type = module.params.get('recordset_type')
+ records = module.params.get('records')
+ description = module.params.get('description')
+ ttl = module.params.get('ttl')
+
+ if module.check_mode:
+ module.exit_json(changed=_system_state_change(state,
+ records, description,
+ ttl, zone,
+ recordset))
+
+ if recordset is None:
+ recordset = cloud.create_recordset(
+ zone=zone, name=name, recordset_type=recordset_type,
+ records=records, description=description, ttl=ttl)
+ changed = True
+ else:
+ if records is None:
+ records = []
+
+ pre_update_recordset = recordset
+ changed = _system_state_change(state, records,
+ description, ttl,
+ zone, pre_update_recordset)
+ if changed:
+                recordset = cloud.update_recordset(
+ zone, name + '.' + zone,
+ records=records,
+ description=description,
+ ttl=ttl)
+ module.exit_json(changed=changed, recordset=recordset)
+
+ elif state == 'absent':
+ if module.check_mode:
+ module.exit_json(changed=_system_state_change(state,
+ None, None,
+ None,
+ None, recordset))
+
+ if recordset is None:
+            changed = False
+ else:
+ cloud.delete_recordset(zone, name + '.' + zone)
+            changed = True
+ module.exit_json(changed=changed)
+
+ except shade.OpenStackCloudException as e:
+ module.fail_json(msg=str(e))
+
+
+from ansible.module_utils.basic import *
+from ansible.module_utils.openstack import *
+
+if __name__ == '__main__':
+ main()
diff --git a/lib/ansible/modules/extras/cloud/openstack/os_server_group.py b/lib/ansible/modules/extras/cloud/openstack/os_server_group.py
new file mode 100644
index 0000000000..155c4497cc
--- /dev/null
+++ b/lib/ansible/modules/extras/cloud/openstack/os_server_group.py
@@ -0,0 +1,182 @@
+#!/usr/bin/python
+
+# Copyright (c) 2016 Catalyst IT Limited
+#
+# This module is free software: you can redistribute it and/or modify
+# it under the terms of the GNU General Public License as published by
+# the Free Software Foundation, either version 3 of the License, or
+# (at your option) any later version.
+#
+# This software is distributed in the hope that it will be useful,
+# but WITHOUT ANY WARRANTY; without even the implied warranty of
+# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+# GNU General Public License for more details.
+#
+# You should have received a copy of the GNU General Public License
+# along with this software. If not, see <http://www.gnu.org/licenses/>.
+
+
+try:
+ import shade
+ HAS_SHADE = True
+except ImportError:
+ HAS_SHADE = False
+
+DOCUMENTATION = '''
+---
+module: os_server_group
+short_description: Manage OpenStack server groups
+extends_documentation_fragment: openstack
+version_added: "2.2"
+author: "Lingxian Kong (@kong)"
+description:
+ - Add or remove server groups from OpenStack.
+options:
+ state:
+ description:
+ - Indicate desired state of the resource. When I(state) is 'present',
+ then I(policies) is required.
+ choices: ['present', 'absent']
+ required: false
+ default: present
+ name:
+ description:
+ - Server group name.
+ required: true
+ policies:
+ description:
+ - A list of one or more policy names to associate with the server
+ group. The list must contain at least one policy name. The current
+ valid policy names are anti-affinity, affinity, soft-anti-affinity
+ and soft-affinity.
+ required: false
+requirements:
+ - "python >= 2.6"
+ - "shade"
+'''
+
+EXAMPLES = '''
+# Create a server group with 'affinity' policy.
+- os_server_group:
+ state: present
+ auth:
+ auth_url: https://api.cloud.catalyst.net.nz:5000/v2.0
+ username: admin
+ password: admin
+ project_name: admin
+ name: my_server_group
+ policies:
+ - affinity
+
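+# Create a server group with the 'anti-affinity' policy (a minimal
+# sketch; assumes a cloud named mycloud defined in clouds.yaml).
+- os_server_group:
+    cloud: mycloud
+    state: present
+    name: my_aa_group
+    policies:
+    - anti-affinity
+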
+# Delete 'my_server_group' server group.
+- os_server_group:
+ state: absent
+ auth:
+ auth_url: https://api.cloud.catalyst.net.nz:5000/v2.0
+ username: admin
+ password: admin
+ project_name: admin
+ name: my_server_group
+'''
+
+RETURN = '''
+id:
+ description: Unique UUID.
+ returned: success
+ type: string
+name:
+ description: The name of the server group.
+ returned: success
+ type: string
+policies:
+ description: A list of one or more policy names of the server group.
+ returned: success
+    type: list
+members:
+ description: A list of members in the server group.
+ returned: success
+    type: list
+metadata:
+ description: Metadata key and value pairs.
+ returned: success
+ type: dict
+project_id:
+    description: ID of the project that owns the server group.
+ returned: success
+ type: string
+user_id:
+    description: ID of the user that owns the server group.
+ returned: success
+ type: string
+'''
+
+
+def _system_state_change(state, server_group):
+ if state == 'present' and not server_group:
+ return True
+ if state == 'absent' and server_group:
+ return True
+
+ return False
+
+
+def main():
+ argument_spec = openstack_full_argument_spec(
+ name=dict(required=True),
+ policies=dict(required=False, type='list'),
+ state=dict(default='present', choices=['absent', 'present']),
+ )
+ module_kwargs = openstack_module_kwargs()
+ module = AnsibleModule(
+ argument_spec,
+ supports_check_mode=True,
+ **module_kwargs
+ )
+
+ if not HAS_SHADE:
+ module.fail_json(msg='shade is required for this module')
+
+ name = module.params['name']
+ policies = module.params['policies']
+ state = module.params['state']
+
+ try:
+ cloud = shade.openstack_cloud(**module.params)
+ server_group = cloud.get_server_group(name)
+
+ if module.check_mode:
+ module.exit_json(
+ changed=_system_state_change(state, server_group)
+ )
+
+ changed = False
+ if state == 'present':
+ if not server_group:
+ if not policies:
+ module.fail_json(
+ msg="Parameter 'policies' is required in Server Group "
+ "Create"
+ )
+ server_group = cloud.create_server_group(name, policies)
+ changed = True
+
+ module.exit_json(
+ changed=changed,
+ id=server_group['id'],
+ server_group=server_group
+ )
+ if state == 'absent':
+ if server_group:
+ cloud.delete_server_group(server_group['id'])
+ changed = True
+ module.exit_json(changed=changed)
+ except shade.OpenStackCloudException as e:
+ module.fail_json(msg=str(e), extra_data=e.extra_data)
+
+
+# this is magic, see lib/ansible/module_common.py
+from ansible.module_utils.basic import *
+from ansible.module_utils.openstack import *
+
+if __name__ == '__main__':
+ main()
diff --git a/lib/ansible/modules/extras/cloud/openstack/os_stack.py b/lib/ansible/modules/extras/cloud/openstack/os_stack.py
new file mode 100644
index 0000000000..503ae635db
--- /dev/null
+++ b/lib/ansible/modules/extras/cloud/openstack/os_stack.py
@@ -0,0 +1,262 @@
+#!/usr/bin/python
+# -*- coding: utf-8 -*-
+
+# (c) 2016, Mathieu Bultel <mbultel@redhat.com>
+# (c) 2016, Steve Baker <sbaker@redhat.com>
+#
+# This module is free software: you can redistribute it and/or modify
+# it under the terms of the GNU General Public License as published by
+# the Free Software Foundation, either version 3 of the License, or
+# (at your option) any later version.
+#
+# This software is distributed in the hope that it will be useful,
+# but WITHOUT ANY WARRANTY; without even the implied warranty of
+# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+# GNU General Public License for more details.
+#
+# You should have received a copy of the GNU General Public License
+# along with this software. If not, see <http://www.gnu.org/licenses/>.
+
+from time import sleep
+from distutils.version import StrictVersion
+try:
+ import shade
+ HAS_SHADE = True
+except ImportError:
+ HAS_SHADE = False
+
+DOCUMENTATION = '''
+---
+module: os_stack
+short_description: Add/Remove Heat Stack
+extends_documentation_fragment: openstack
+version_added: "2.2"
+author: "Mathieu Bultel (matbu), Steve Baker (steveb)"
+description:
+  - Add or remove a Heat stack from an OpenStack cloud
+options:
+ state:
+ description:
+ - Indicate desired state of the resource
+ choices: ['present', 'absent']
+ required: false
+ default: present
+ name:
+ description:
+      - Name of the stack to create; the name may contain only letters and digits, with no spaces
+ required: true
+ template:
+ description:
+ - Path of the template file to use for the stack creation
+ required: false
+ default: None
+ environment:
+ description:
+ - List of environment files that should be used for the stack creation
+ required: false
+ default: None
+ parameters:
+ description:
+ - Dictionary of parameters for the stack creation
+ required: false
+ default: None
+ rollback:
+ description:
+ - Rollback stack creation
+ required: false
+ default: false
+ timeout:
+ description:
+ - Maximum number of seconds to wait for the stack creation
+ required: false
+ default: 3600
+requirements:
+ - "python >= 2.6"
+ - "shade"
+'''
+EXAMPLES = '''
+---
+- name: create stack
+ ignore_errors: True
+ register: stack_create
+ os_stack:
+ name: "{{ stack_name }}"
+ state: present
+ template: "/path/to/my_stack.yaml"
+ environment:
+ - /path/to/resource-registry.yaml
+ - /path/to/environment.yaml
+ parameters:
+ bmc_flavor: m1.medium
+ bmc_image: CentOS
+ key_name: default
+      private_net: "{{ private_net_param }}"
+ node_count: 2
+ name: undercloud
+ image: CentOS
+ my_flavor: m1.large
+      external_net: "{{ external_net_param }}"
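+
+# Remove the same stack again (minimal sketch)
+- name: delete stack
+  os_stack:
+    name: "{{ stack_name }}"
+    state: absent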
+'''
+
+RETURN = '''
+id:
+ description: Stack ID.
+ type: string
+ sample: "97a3f543-8136-4570-920e-fd7605c989d6"
+
+stack:
+ action:
+ description: Action, could be Create or Update.
+ type: string
+ sample: "CREATE"
+ creation_time:
+ description: Time when the action has been made.
+ type: string
+ sample: "2016-07-05T17:38:12Z"
+ description:
+ description: Description of the Stack provided in the heat template.
+ type: string
+ sample: "HOT template to create a new instance and networks"
+ id:
+ description: Stack ID.
+ type: string
+ sample: "97a3f543-8136-4570-920e-fd7605c989d6"
+ name:
+ description: Name of the Stack
+ type: string
+ sample: "test-stack"
+ identifier:
+ description: Identifier of the current Stack action.
+ type: string
+ sample: "test-stack/97a3f543-8136-4570-920e-fd7605c989d6"
+ links:
+ description: Links to the current Stack.
+ type: list of dict
+ sample: "[{'href': 'http://foo:8004/v1/7f6a/stacks/test-stack/97a3f543-8136-4570-920e-fd7605c989d6']"
+ outputs:
+ description: Output returned by the Stack.
+ type: list of dict
+ sample: "{'description': 'IP address of server1 in private network',
+ 'output_key': 'server1_private_ip',
+ 'output_value': '10.1.10.103'}"
+ parameters:
+ description: Parameters of the current Stack
+ type: dict
+ sample: "{'OS::project_id': '7f6a3a3e01164a4eb4eecb2ab7742101',
+ 'OS::stack_id': '97a3f543-8136-4570-920e-fd7605c989d6',
+ 'OS::stack_name': 'test-stack',
+ 'stack_status': 'CREATE_COMPLETE',
+ 'stack_status_reason': 'Stack CREATE completed successfully',
+ 'status': 'COMPLETE',
+ 'template_description': 'HOT template to create a new instance and networks',
+ 'timeout_mins': 60,
+ 'updated_time': null}"
+'''
+
+def _create_stack(module, stack, cloud):
+ try:
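+        # Extra template parameters are expanded as keyword arguments;
+        # shade forwards them to Heat as the stack's parameter values.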
+ stack = cloud.create_stack(module.params['name'],
+ template_file=module.params['template'],
+ environment_files=module.params['environment'],
+ timeout=module.params['timeout'],
+ wait=True,
+ rollback=module.params['rollback'],
+ **module.params['parameters'])
+
+ stack = cloud.get_stack(stack.id, None)
+        if stack.stack_status == 'CREATE_COMPLETE':
+            return stack
+        else:
+            module.fail_json(msg="Failure in creating stack: {}".format(stack))
+ except shade.OpenStackCloudException as e:
+ module.fail_json(msg=str(e))
+
+def _update_stack(module, stack, cloud):
+ try:
+ stack = cloud.update_stack(
+ module.params['name'],
+ template_file=module.params['template'],
+ environment_files=module.params['environment'],
+ timeout=module.params['timeout'],
+ rollback=module.params['rollback'],
+ wait=module.params['wait'])
+
+ if stack['stack_status'] == 'UPDATE_COMPLETE':
+ return stack
+ else:
+ module.fail_json(msg = "Failure in updating stack: %s" %
+ stack['stack_status_reason'])
+ except shade.OpenStackCloudException as e:
+ module.fail_json(msg=str(e))
+
+def _system_state_change(module, stack, cloud):
+ state = module.params['state']
+ if state == 'present':
+ if not stack:
+ return True
+ if state == 'absent' and stack:
+ return True
+ return False
+
+def main():
+
+ argument_spec = openstack_full_argument_spec(
+ name=dict(required=True),
+ template=dict(default=None),
+ environment=dict(default=None, type='list'),
+ parameters=dict(default={}, type='dict'),
+ rollback=dict(default=False, type='bool'),
+ timeout=dict(default=3600, type='int'),
+ state=dict(default='present', choices=['absent', 'present']),
+ )
+
+ module_kwargs = openstack_module_kwargs()
+ module = AnsibleModule(argument_spec,
+ supports_check_mode=True,
+ **module_kwargs)
+
+ # stack API introduced in 1.8.0
+ if not HAS_SHADE or (StrictVersion(shade.__version__) < StrictVersion('1.8.0')):
+ module.fail_json(msg='shade 1.8.0 or higher is required for this module')
+
+ state = module.params['state']
+ name = module.params['name']
+ # Check for required parameters when state == 'present'
+ if state == 'present':
+ for p in ['template']:
+ if not module.params[p]:
+ module.fail_json(msg='%s required with present state' % p)
+
+ try:
+ cloud = shade.openstack_cloud(**module.params)
+ stack = cloud.get_stack(name)
+
+ if module.check_mode:
+ module.exit_json(changed=_system_state_change(module, stack,
+ cloud))
+
+ if state == 'present':
+ if not stack:
+ stack = _create_stack(module, stack, cloud)
+ else:
+ stack = _update_stack(module, stack, cloud)
+ changed = True
+ module.exit_json(changed=changed,
+ stack=stack,
+ id=stack.id)
+ elif state == 'absent':
+ if not stack:
+ changed = False
+ else:
+ changed = True
+ if not cloud.delete_stack(name, wait=module.params['wait']):
+ module.fail_json(msg='delete stack failed for stack: %s' % name)
+ module.exit_json(changed=changed)
+ except shade.OpenStackCloudException as e:
+ module.fail_json(msg=str(e))
+
+from ansible.module_utils.basic import *
+from ansible.module_utils.openstack import *
+if __name__ == '__main__':
+ main()
diff --git a/lib/ansible/modules/extras/cloud/openstack/os_user_facts.py b/lib/ansible/modules/extras/cloud/openstack/os_user_facts.py
new file mode 100644
index 0000000000..db8cebe475
--- /dev/null
+++ b/lib/ansible/modules/extras/cloud/openstack/os_user_facts.py
@@ -0,0 +1,172 @@
+#!/usr/bin/python
+# Copyright (c) 2016 Hewlett-Packard Enterprise Corporation
+#
+# This module is free software: you can redistribute it and/or modify
+# it under the terms of the GNU General Public License as published by
+# the Free Software Foundation, either version 3 of the License, or
+# (at your option) any later version.
+#
+# This software is distributed in the hope that it will be useful,
+# but WITHOUT ANY WARRANTY; without even the implied warranty of
+# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+# GNU General Public License for more details.
+#
+# You should have received a copy of the GNU General Public License
+# along with this software. If not, see <http://www.gnu.org/licenses/>.
+
+
+try:
+ import shade
+ HAS_SHADE = True
+except ImportError:
+ HAS_SHADE = False
+
+DOCUMENTATION = '''
+---
+module: os_user_facts
+short_description: Retrieve facts about one or more OpenStack users
+extends_documentation_fragment: openstack
+version_added: "2.1"
+author: "Ricardo Carrillo Cruz (@rcarrillocruz)"
+description:
+  - Retrieve facts about one or more OpenStack users
+requirements:
+ - "python >= 2.6"
+ - "shade"
+options:
+ name:
+ description:
+ - Name or ID of the user
+     required: false
+ domain:
+ description:
+ - Name or ID of the domain containing the user if the cloud supports domains
+ required: false
+ default: None
+ filters:
+ description:
+ - A dictionary of meta data to use for further filtering. Elements of
+ this dictionary may be additional dictionaries.
+ required: false
+ default: None
+'''
+
+EXAMPLES = '''
+# Gather facts about previously created users
+- os_user_facts:
+ cloud: awesomecloud
+- debug: var=openstack_users
+
+# Gather facts about a previously created user by name
+- os_user_facts:
+ cloud: awesomecloud
+ name: demouser
+- debug: var=openstack_users
+
+# Gather facts about a previously created user in a specific domain
+- os_user_facts:
+ cloud: awesomecloud
+ name: demouser
+ domain: admindomain
+- debug: var=openstack_users
+
+# Gather facts about a previously created user in a specific domain
+# with filter
+- os_user_facts:
+ cloud: awesomecloud
+ name: demouser
+ domain: admindomain
+ filters:
+ enabled: False
+- debug: var=openstack_users
+'''
+
+
+RETURN = '''
+openstack_users:
+ description: has all the OpenStack facts about users
+ returned: always, but can be null
+ type: complex
+ contains:
+ id:
+ description: Unique UUID.
+ returned: success
+ type: string
+ name:
+ description: Name given to the user.
+ returned: success
+ type: string
+ enabled:
+ description: Flag to indicate if the user is enabled
+ returned: success
+ type: bool
+ domain_id:
+ description: Domain ID containing the user
+ returned: success
+ type: string
+ default_project_id:
+ description: Default project ID of the user
+ returned: success
+ type: string
+ email:
+ description: Email of the user
+ returned: success
+ type: string
+ username:
+ description: Username of the user
+ returned: success
+ type: string
+'''
+
+def main():
+
+ argument_spec = openstack_full_argument_spec(
+ name=dict(required=False, default=None),
+ domain=dict(required=False, default=None),
+ filters=dict(required=False, type='dict', default=None),
+ )
+
+ module = AnsibleModule(argument_spec)
+
+ if not HAS_SHADE:
+ module.fail_json(msg='shade is required for this module')
+
+ try:
+ name = module.params['name']
+ domain = module.params['domain']
+ filters = module.params['filters']
+
+ opcloud = shade.operator_cloud(**module.params)
+
+ if domain:
+ try:
+ # We assume admin is passing domain id
+ dom = opcloud.get_domain(domain)['id']
+ domain = dom
+ except:
+ # If we fail, maybe admin is passing a domain name.
+ # Note that domains have unique names, just like id.
+ dom = opcloud.search_domains(filters={'name': domain})
+ if dom:
+ domain = dom[0]['id']
+ else:
+ module.fail_json(msg='Domain name or ID does not exist')
+
+ if not filters:
+ filters = {}
+
+ filters['domain_id'] = domain
+
+        users = opcloud.search_users(name, filters)
+ module.exit_json(changed=False, ansible_facts=dict(
+ openstack_users=users))
+
+ except shade.OpenStackCloudException as e:
+ module.fail_json(msg=str(e))
+
+from ansible.module_utils.basic import *
+from ansible.module_utils.openstack import *
+
+if __name__ == '__main__':
+ main()
diff --git a/lib/ansible/modules/extras/cloud/openstack/os_user_role.py b/lib/ansible/modules/extras/cloud/openstack/os_user_role.py
new file mode 100644
index 0000000000..22f41830c6
--- /dev/null
+++ b/lib/ansible/modules/extras/cloud/openstack/os_user_role.py
@@ -0,0 +1,212 @@
+#!/usr/bin/python
+# Copyright (c) 2016 IBM
+#
+# This module is free software: you can redistribute it and/or modify
+# it under the terms of the GNU General Public License as published by
+# the Free Software Foundation, either version 3 of the License, or
+# (at your option) any later version.
+#
+# This software is distributed in the hope that it will be useful,
+# but WITHOUT ANY WARRANTY; without even the implied warranty of
+# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+# GNU General Public License for more details.
+#
+# You should have received a copy of the GNU General Public License
+# along with this software. If not, see <http://www.gnu.org/licenses/>.
+
+
+try:
+ import shade
+ HAS_SHADE = True
+except ImportError:
+ HAS_SHADE = False
+
+from distutils.version import StrictVersion
+
+
+DOCUMENTATION = '''
+---
+module: os_user_role
+short_description: Associate OpenStack Identity users and roles
+extends_documentation_fragment: openstack
+author: "Monty Taylor (@emonty), David Shrewsbury (@Shrews)"
+version_added: "2.1"
+description:
+ - Grant and revoke roles in either project or domain context for
+ OpenStack Identity Users.
+options:
+ role:
+ description:
+ - Name or ID for the role.
+ required: true
+ user:
+ description:
+ - Name or ID for the user. If I(user) is not specified, then
+ I(group) is required. Both may not be specified.
+ required: false
+ default: null
+ group:
+ description:
+ - Name or ID for the group. Valid only with keystone version 3.
+ If I(group) is not specified, then I(user) is required. Both
+ may not be specified.
+ required: false
+ default: null
+ project:
+ description:
+      - Name or ID of the project to scope the role association to.
+ If you are using keystone version 2, then this value is required.
+ required: false
+ default: null
+ domain:
+ description:
+ - ID of the domain to scope the role association to. Valid only with
+ keystone version 3, and required if I(project) is not specified.
+ required: false
+ default: null
+ state:
+ description:
+ - Should the roles be present or absent on the user.
+ choices: [present, absent]
+ default: present
+requirements:
+ - "python >= 2.6"
+ - "shade"
+'''
+
+EXAMPLES = '''
+# Grant an admin role on the user admin in the project project1
+- os_user_role:
+ cloud: mycloud
+ user: admin
+ role: admin
+ project: project1
+
+# Revoke the admin role from the user barney in the newyork domain
+- os_user_role:
+ cloud: mycloud
+ state: absent
+ user: barney
+ role: admin
+ domain: newyork
+'''
+
+RETURN = '''
+#
+'''
+
+def _system_state_change(state, assignment):
+ if state == 'present' and not assignment:
+ return True
+ elif state == 'absent' and assignment:
+ return True
+ return False
+
+
+def _build_kwargs(user, group, project, domain):
+ kwargs = {}
+ if user:
+ kwargs['user'] = user
+ if group:
+ kwargs['group'] = group
+ if project:
+ kwargs['project'] = project
+ if domain:
+ kwargs['domain'] = domain
+ return kwargs
+
+
+def main():
+ argument_spec = openstack_full_argument_spec(
+ role=dict(required=True),
+ user=dict(required=False),
+ group=dict(required=False),
+ project=dict(required=False),
+ domain=dict(required=False),
+ state=dict(default='present', choices=['absent', 'present']),
+ )
+
+ module_kwargs = openstack_module_kwargs(
+ required_one_of=[
+ ['user', 'group']
+ ])
+ module = AnsibleModule(argument_spec,
+ supports_check_mode=True,
+ **module_kwargs)
+
+ # role grant/revoke API introduced in 1.5.0
+ if not HAS_SHADE or (StrictVersion(shade.__version__) < StrictVersion('1.5.0')):
+ module.fail_json(msg='shade 1.5.0 or higher is required for this module')
+
+ role = module.params.pop('role')
+ user = module.params.pop('user')
+ group = module.params.pop('group')
+ project = module.params.pop('project')
+ domain = module.params.pop('domain')
+ state = module.params.pop('state')
+
+ try:
+ cloud = shade.operator_cloud(**module.params)
+
+ filters = {}
+
+ r = cloud.get_role(role)
+ if r is None:
+ module.fail_json(msg="Role %s is not valid" % role)
+ filters['role'] = r['id']
+
+ if user:
+ u = cloud.get_user(user)
+ if u is None:
+ module.fail_json(msg="User %s is not valid" % user)
+ filters['user'] = u['id']
+ if group:
+ g = cloud.get_group(group)
+ if g is None:
+ module.fail_json(msg="Group %s is not valid" % group)
+ filters['group'] = g['id']
+ if domain:
+ d = cloud.get_domain(domain)
+ if d is None:
+ module.fail_json(msg="Domain %s is not valid" % domain)
+ filters['domain'] = d['id']
+ if project:
+ if domain:
+ p = cloud.get_project(project, domain_id=filters['domain'])
+ else:
+ p = cloud.get_project(project)
+
+ if p is None:
+ module.fail_json(msg="Project %s is not valid" % project)
+ filters['project'] = p['id']
+
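+        # An empty assignment list means the role is not yet granted for
+        # this exact user/group and project/domain combination.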
+ assignment = cloud.list_role_assignments(filters=filters)
+
+ if module.check_mode:
+ module.exit_json(changed=_system_state_change(state, assignment))
+
+ changed = False
+
+ if state == 'present':
+ if not assignment:
+ kwargs = _build_kwargs(user, group, project, domain)
+ cloud.grant_role(role, **kwargs)
+ changed = True
+
+ elif state == 'absent':
+ if assignment:
+ kwargs = _build_kwargs(user, group, project, domain)
+ cloud.revoke_role(role, **kwargs)
+                changed = True
+
+ module.exit_json(changed=changed)
+
+ except shade.OpenStackCloudException as e:
+ module.fail_json(msg=str(e))
+
+
+from ansible.module_utils.basic import *
+from ansible.module_utils.openstack import *
+
+if __name__ == '__main__':
+ main()
diff --git a/lib/ansible/modules/extras/cloud/openstack/os_zone.py b/lib/ansible/modules/extras/cloud/openstack/os_zone.py
new file mode 100644
index 0000000000..0a0e7ed3da
--- /dev/null
+++ b/lib/ansible/modules/extras/cloud/openstack/os_zone.py
@@ -0,0 +1,237 @@
+#!/usr/bin/python
+# Copyright (c) 2016 Hewlett-Packard Enterprise
+#
+# This module is free software: you can redistribute it and/or modify
+# it under the terms of the GNU General Public License as published by
+# the Free Software Foundation, either version 3 of the License, or
+# (at your option) any later version.
+#
+# This software is distributed in the hope that it will be useful,
+# but WITHOUT ANY WARRANTY; without even the implied warranty of
+# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+# GNU General Public License for more details.
+#
+# You should have received a copy of the GNU General Public License
+# along with this software. If not, see <http://www.gnu.org/licenses/>.
+
+
+try:
+ import shade
+ HAS_SHADE = True
+except ImportError:
+ HAS_SHADE = False
+
+from distutils.version import StrictVersion
+
+DOCUMENTATION = '''
+---
+module: os_zone
+short_description: Manage OpenStack DNS zones
+extends_documentation_fragment: openstack
+version_added: "2.2"
+author: "Ricardo Carrillo Cruz (@rcarrillocruz)"
+description:
+ - Manage OpenStack DNS zones. Zones can be created, deleted or
+ updated. Only the I(email), I(description), I(ttl) and I(masters) values
+ can be updated.
+options:
+ name:
+ description:
+ - Zone name
+ required: true
+ zone_type:
+ description:
+ - Zone type
+ choices: [primary, secondary]
+ default: None
+ email:
+ description:
+ - Email of the zone owner (only applies if zone_type is primary)
+ required: false
+ description:
+ description:
+ - Zone description
+ required: false
+ default: None
+ ttl:
+ description:
+ - TTL (Time To Live) value in seconds
+ required: false
+ default: None
+ masters:
+ description:
+ - Master nameservers (only applies if zone_type is secondary)
+ required: false
+ default: None
+ state:
+ description:
+ - Should the resource be present or absent.
+ choices: [present, absent]
+ default: present
+requirements:
+ - "python >= 2.6"
+ - "shade"
+'''
+
+EXAMPLES = '''
+# Create a zone named "example.net"
+- os_zone:
+ cloud: mycloud
+ state: present
+ name: example.net.
+ zone_type: primary
+ email: test@example.net
+ description: Test zone
+ ttl: 3600
+
+# Update the TTL on existing "example.net." zone
+- os_zone:
+ cloud: mycloud
+ state: present
+ name: example.net.
+ ttl: 7200
+
+# Delete zone named "example.net."
+- os_zone:
+ cloud: mycloud
+ state: absent
+ name: example.net.
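+
+# Create a secondary zone fed from external master nameservers
+# (the master address below is an illustrative placeholder):
+- os_zone:
+ cloud: mycloud
+ state: present
+ name: example.net.
+ zone_type: secondary
+ masters:
+ - 192.0.2.1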
+'''
+
+RETURN = '''
+zone:
+ description: Dictionary describing the zone.
+ returned: On success when I(state) is 'present'.
+ type: dictionary
+ contains:
+ id:
+ description: Unique zone ID
+ type: string
+ sample: "c1c530a3-3619-46f3-b0f6-236927b2618c"
+ name:
+ description: Zone name
+ type: string
+ sample: "example.net."
+ type:
+ description: Zone type
+ type: string
+ sample: "PRIMARY"
+ email:
+ description: Zone owner email
+ type: string
+ sample: "test@example.net"
+ description:
+ description: Zone description
+ type: string
+ sample: "Test description"
+ ttl:
+ description: Zone TTL value
+ type: int
+ sample: 3600
+ masters:
+ description: Zone master nameservers
+ type: list
+ sample: []
+'''
+
+
+def _system_state_change(state, email, description, ttl, masters, zone):
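+ # Report a change when the zone is missing (state=present), exists
+ # (state=absent), or when any of the updatable fields differs from
+ # the requested values.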
+ if state == 'present':
+ if not zone:
+ return True
+ if email is not None and zone.email != email:
+ return True
+ if description is not None and zone.description != description:
+ return True
+ if ttl is not None and zone.ttl != ttl:
+ return True
+ if masters is not None and zone.masters != masters:
+ return True
+ if state == 'absent' and zone:
+ return True
+ return False
+
+
+def main():
+ argument_spec = openstack_full_argument_spec(
+ name=dict(required=True),
+ zone_type=dict(required=False, choices=['primary', 'secondary']),
+ email=dict(required=False, default=None),
+ description=dict(required=False, default=None),
+ ttl=dict(required=False, default=None, type='int'),
+ masters=dict(required=False, default=None, type='list'),
+ state=dict(default='present', choices=['absent', 'present']),
+ )
+
+ module_kwargs = openstack_module_kwargs()
+ module = AnsibleModule(argument_spec,
+ supports_check_mode=True,
+ **module_kwargs)
+
+ if not HAS_SHADE:
+ module.fail_json(msg='shade is required for this module')
+ if StrictVersion(shade.__version__) < StrictVersion('1.8.0'):
+ module.fail_json(msg="To utilize this module, the installed version of"
+ "the shade library MUST be >=1.8.0")
+
+ name = module.params.get('name')
+ state = module.params.get('state')
+
+ try:
+ cloud = shade.openstack_cloud(**module.params)
+ zone = cloud.get_zone(name)
+
+ if state == 'present':
+ zone_type = module.params.get('zone_type')
+ email = module.params.get('email')
+ description = module.params.get('description')
+ ttl = module.params.get('ttl')
+ masters = module.params.get('masters')
+
+ if module.check_mode:
+ module.exit_json(changed=_system_state_change(state, email,
+ description, ttl,
+ masters, zone))
+
+ if zone is None:
+ zone = cloud.create_zone(
+ name=name, zone_type=zone_type, email=email,
+ description=description, ttl=ttl, masters=masters)
+ changed = True
+ else:
+ if masters is None:
+ masters = []
+
+ pre_update_zone = zone
+ changed = _system_state_change(state, email,
+ description, ttl,
+ masters, pre_update_zone)
+ if changed:
+ zone = cloud.update_zone(
+ name, email=email,
+ description=description,
+ ttl=ttl, masters=masters)
+ module.exit_json(changed=changed, zone=zone)
+
+ elif state == 'absent':
+ if module.check_mode:
+ module.exit_json(changed=_system_state_change(state, None,
+ None, None,
+ None, zone))
+
+ if zone is None:
+ changed = False
+ else:
+ cloud.delete_zone(name)
+ changed = True
+ module.exit_json(changed=changed)
+
+ except shade.OpenStackCloudException as e:
+ module.fail_json(msg=str(e))
+
+
+from ansible.module_utils.basic import *
+from ansible.module_utils.openstack import *
+
+if __name__ == '__main__':
+ main()
diff --git a/lib/ansible/modules/extras/cloud/ovh/__init__.py b/lib/ansible/modules/extras/cloud/ovh/__init__.py
new file mode 100644
index 0000000000..e69de29bb2
--- /dev/null
+++ b/lib/ansible/modules/extras/cloud/ovh/__init__.py
diff --git a/lib/ansible/modules/extras/cloud/ovh/ovh_ip_loadbalancing_backend.py b/lib/ansible/modules/extras/cloud/ovh/ovh_ip_loadbalancing_backend.py
new file mode 100644
index 0000000000..7f2c5d5963
--- /dev/null
+++ b/lib/ansible/modules/extras/cloud/ovh/ovh_ip_loadbalancing_backend.py
@@ -0,0 +1,312 @@
+#!/usr/bin/python
+# This file is part of Ansible
+#
+# Ansible is free software: you can redistribute it and/or modify
+# it under the terms of the GNU General Public License as published by
+# the Free Software Foundation, either version 3 of the License, or
+# (at your option) any later version.
+#
+# Ansible is distributed in the hope that it will be useful,
+# but WITHOUT ANY WARRANTY; without even the implied warranty of
+# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+# GNU General Public License for more details.
+#
+# You should have received a copy of the GNU General Public License
+# along with Ansible. If not, see <http://www.gnu.org/licenses/>.
+DOCUMENTATION = '''
+---
+module: ovh_ip_loadbalancing_backend
+short_description: Manage OVH IP LoadBalancing backends
+description:
+ - Manage OVH (French European hosting provider) LoadBalancing IP backends
+version_added: "2.2"
+author: "Pascal Heraud (@pascalheraud)"
+notes:
+ - Uses the python OVH Api U(https://github.com/ovh/python-ovh).
+ You have to create an application (a key and secret) with a consumer
+ key as described at U(https://eu.api.ovh.com/g934.first_step_with_api)
+requirements:
+ - ovh > 0.3.5
+options:
+ name:
+ required: true
+ description:
+ - Internal name of the LoadBalancing IP (ip-X.X.X.X)
+ backend:
+ required: true
+ description:
+ - The IP address of the backend to update / modify / delete
+ state:
+ required: false
+ default: present
+ choices: ['present', 'absent']
+ description:
+ - Determines whether the backend is to be created/modified
+ or deleted
+ probe:
+ required: false
+ default: none
+ choices: ['none', 'http', 'icmp' , 'oco']
+ description:
+ - Determines the type of probe to use for this backend
+ weight:
+ required: false
+ default: 8
+ description:
+ - Determines the weight for this backend
+ endpoint:
+ required: true
+ description:
+ - The endpoint to use (for instance ovh-eu)
+ application_key:
+ required: true
+ description:
+ - The application key to use
+ application_secret:
+ required: true
+ description:
+ - The application secret to use
+ consumer_key:
+ required: true
+ description:
+ - The consumer key to use
+ timeout:
+ required: false
+ type: "int"
+ default: 120
+ description:
+ - The timeout in seconds used to wait for a task to be
+ completed. Default is 120 seconds.
+
+'''
+
+EXAMPLES = '''
+# Add or modify the backend '212.1.1.1' of a
+# loadbalancing 'ip-1.1.1.1'
+- ovh_ip_loadbalancing_backend:
+ name: ip-1.1.1.1
+ backend: 212.1.1.1
+ state: present
+ probe: none
+ weight: 8
+ endpoint: ovh-eu
+ application_key: yourkey
+ application_secret: yoursecret
+ consumer_key: yourconsumerkey
+
+# Remove the backend '212.1.1.1' from a loadbalancing 'ip-1.1.1.1'
+- ovh_ip_loadbalancing_backend:
+ name: ip-1.1.1.1
+ backend: 212.1.1.1
+ state: absent
+ endpoint: ovh-eu
+ application_key: yourkey
+ application_secret: yoursecret
+ consumer_key: yourconsumerkey
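+
+# Change the weight of the existing backend '212.1.1.1'
+# (illustrative values; other settings keep their defaults):
+- ovh_ip_loadbalancing_backend:
+ name: ip-1.1.1.1
+ backend: 212.1.1.1
+ state: present
+ weight: 16
+ endpoint: ovh-eu
+ application_key: yourkey
+ application_secret: yoursecret
+ consumer_key: yourconsumerkey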
+'''
+
+RETURN = '''
+'''
+
+import time
+try:
+ import ovh
+ import ovh.exceptions
+ from ovh.exceptions import APIError
+ HAS_OVH = True
+except ImportError:
+ HAS_OVH = False
+
+def getOvhClient(ansibleModule):
+ endpoint = ansibleModule.params.get('endpoint')
+ application_key = ansibleModule.params.get('application_key')
+ application_secret = ansibleModule.params.get('application_secret')
+ consumer_key = ansibleModule.params.get('consumer_key')
+
+ return ovh.Client(
+ endpoint=endpoint,
+ application_key=application_key,
+ application_secret=application_secret,
+ consumer_key=consumer_key
+ )
+
+
+def waitForNoTask(client, name, timeout):
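+ # Poll the load balancer's pending-task list once per second until it
+ # drains; returns False when the timeout budget runs out first.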
+ currentTimeout = timeout
+ while len(client.get('/ip/loadBalancing/{0}/task'.format(name))) > 0:
+ time.sleep(1) # Delay for 1 sec
+ currentTimeout -= 1
+ if currentTimeout < 0:
+ return False
+ return True
+
+
+def main():
+ module = AnsibleModule(
+ argument_spec=dict(
+ name=dict(required=True),
+ backend=dict(required=True),
+ weight=dict(default=8, type='int'),
+ probe=dict(default='none',
+ choices=['none', 'http', 'icmp', 'oco']),
+ state=dict(default='present', choices=['present', 'absent']),
+ endpoint=dict(required=True),
+ application_key=dict(required=True, no_log=True),
+ application_secret=dict(required=True, no_log=True),
+ consumer_key=dict(required=True, no_log=True),
+ timeout=dict(default=120, type='int')
+ )
+ )
+
+ if not HAS_OVH:
+ module.fail_json(msg='ovh-api python module '
+ 'is required to run this module')
+
+ # Get parameters
+ name = module.params.get('name')
+ state = module.params.get('state')
+ backend = module.params.get('backend')
+ weight = module.params.get('weight')  # already an int via type='int'
+ probe = module.params.get('probe')
+ timeout = module.params.get('timeout')
+
+ # Connect to OVH API
+ client = getOvhClient(module)
+
+ # Check that the load balancing exists
+ try:
+ loadBalancings = client.get('/ip/loadBalancing')
+ except APIError as apiError:
+ module.fail_json(
+ msg='Unable to call OVH api for getting the list of loadBalancing, '
+ 'check application key, secret, consumerkey and parameters. '
+ 'Error returned by OVH api was : {0}'.format(apiError))
+
+ if name not in loadBalancings:
+ module.fail_json(msg='IP LoadBalancing {0} does not exist'.format(name))
+
+ # Check that no task is pending before going on
+ try:
+ if not waitForNoTask(client, name, timeout):
+ module.fail_json(
+ msg='Timeout of {0} seconds while waiting for no pending '
+ 'tasks before executing the module '.format(timeout))
+ except APIError as apiError:
+ module.fail_json(
+ msg='Unable to call OVH api for getting the list of pending tasks '
+ 'of the loadBalancing, check application key, secret, consumerkey '
+ 'and parameters. Error returned by OVH api was : {0}'
+ .format(apiError))
+
+ try:
+ backends = client.get('/ip/loadBalancing/{0}/backend'.format(name))
+ except APIError as apiError:
+ module.fail_json(
+ msg='Unable to call OVH api for getting the list of backends '
+ 'of the loadBalancing, check application key, secret, consumerkey '
+ 'and parameters. Error returned by OVH api was : {0}'
+ .format(apiError))
+
+ backendExists = backend in backends
+ moduleChanged = False
+ if state == "absent":
+ if backendExists:
+ # Remove backend
+ try:
+ client.delete(
+ '/ip/loadBalancing/{0}/backend/{1}'.format(name, backend))
+ if not waitForNoTask(client, name, timeout):
+ module.fail_json(
+ msg='Timeout of {0} seconds while waiting for completion '
+ 'of removing backend task'.format(timeout))
+ except APIError as apiError:
+ module.fail_json(
+ msg='Unable to call OVH api for deleting the backend, '
+ 'check application key, secret, consumerkey and '
+ 'parameters. Error returned by OVH api was : {0}'
+ .format(apiError))
+ moduleChanged = True
+ else:
+ if backendExists:
+ # Get properties
+ try:
+ backendProperties = client.get(
+ '/ip/loadBalancing/{0}/backend/{1}'.format(name, backend))
+ except APIError as apiError:
+ module.fail_json(
+ msg='Unable to call OVH api for getting the backend properties, '
+ 'check application key, secret, consumerkey and '
+ 'parameters. Error returned by OVH api was : {0}'
+ .format(apiError))
+
+ if backendProperties['weight'] != weight:
+ # Change weight
+ try:
+ client.post(
+ '/ip/loadBalancing/{0}/backend/{1}/setWeight'
+ .format(name, backend), weight=weight)
+ if not waitForNoTask(client, name, timeout):
+ module.fail_json(
+ msg='Timeout of {0} seconds while waiting for completion '
+ 'of setWeight to backend task'
+ .format(timeout))
+ except APIError as apiError:
+ module.fail_json(
+ msg='Unable to call OVH api for updating the weight of the '
+ 'backend, check application key, secret, consumerkey '
+ 'and parameters. Error returned by OVH api was : {0}'
+ .format(apiError))
+ moduleChanged = True
+
+ if backendProperties['probe'] != probe:
+ # Change probe
+ try:
+ client.put(
+ '/ip/loadBalancing/{0}/backend/{1}'
+ .format(name, backend), probe=probe)
+ if not waitForNoTask(client, name, timeout):
+ module.fail_json(
+ msg='Timeout of {0} seconds while waiting for completion of '
+ 'setProbe to backend task'
+ .format(timeout))
+ except APIError as apiError:
+ module.fail_json(
+ msg='Unable to call OVH api for updating the probe of '
+ 'the backend, check application key, secret, '
+ 'consumerkey and parameters. Error returned by OVH api '
+ 'was : {0}'
+ .format(apiError))
+ moduleChanged = True
+
+ else:
+ # Create backend
+ try:
+ try:
+ client.post('/ip/loadBalancing/{0}/backend'.format(name),
+ ipBackend=backend, probe=probe, weight=weight)
+ except APIError as apiError:
+ module.fail_json(
+ msg='Unable to call OVH api for creating the backend, check '
+ 'application key, secret, consumerkey and parameters. '
+ 'Error returned by OVH api was : {0}'
+ .format(apiError))
+
+ if not waitForNoTask(client, name, timeout):
+ module.fail_json(
+ msg='Timeout of {0} seconds while waiting for completion of '
+ 'backend creation task'.format(timeout))
+ except APIError as apiError:
+ module.fail_json(
+ msg='Unable to call OVH api for creating the backend, check '
+ 'application key, secret, consumerkey and parameters. '
+ 'Error returned by OVH api was : {0}'.format(apiError))
+ moduleChanged = True
+
+ module.exit_json(changed=moduleChanged)
+
+# import module snippets
+from ansible.module_utils.basic import AnsibleModule
+
+if __name__ == '__main__':
+ main()
diff --git a/lib/ansible/modules/extras/cloud/ovirt/__init__.py b/lib/ansible/modules/extras/cloud/ovirt/__init__.py
new file mode 100644
index 0000000000..e69de29bb2
--- /dev/null
+++ b/lib/ansible/modules/extras/cloud/ovirt/__init__.py
diff --git a/lib/ansible/modules/extras/cloud/ovirt/ovirt_auth.py b/lib/ansible/modules/extras/cloud/ovirt/ovirt_auth.py
new file mode 100644
index 0000000000..19ab2e1641
--- /dev/null
+++ b/lib/ansible/modules/extras/cloud/ovirt/ovirt_auth.py
@@ -0,0 +1,233 @@
+#!/usr/bin/python
+# -*- coding: utf-8 -*-
+#
+# Copyright (c) 2016 Red Hat, Inc.
+#
+# This file is part of Ansible
+#
+# Ansible is free software: you can redistribute it and/or modify
+# it under the terms of the GNU General Public License as published by
+# the Free Software Foundation, either version 3 of the License, or
+# (at your option) any later version.
+#
+# Ansible is distributed in the hope that it will be useful,
+# but WITHOUT ANY WARRANTY; without even the implied warranty of
+# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+# GNU General Public License for more details.
+#
+# You should have received a copy of the GNU General Public License
+# along with Ansible. If not, see <http://www.gnu.org/licenses/>.
+#
+
+try:
+ import ovirtsdk4 as sdk
+except ImportError:
+ pass
+
+
+DOCUMENTATION = '''
+---
+module: ovirt_auth
+short_description: "Module to manage authentication to oVirt."
+author: "Ondra Machacek (@machacekondra)"
+version_added: "2.2"
+description:
+ - "This module authenticates to oVirt engine and creates SSO token, which should be later used in
+ all other oVirt modules, so all modules don't need to perform login and logout.
+ This module returns an Ansible fact called I(ovirt_auth). Every module can use this
+ fact as C(auth) parameter, to perform authentication."
+options:
+ state:
+ default: present
+ choices: ['present', 'absent']
+ description:
+ - "Specifies if a token should be created or revoked."
+ username:
+ required: True
+ description:
+ - "The name of the user. For example: I(admin@internal)."
+ password:
+ required: True
+ description:
+ - "The password of the user."
+ url:
+ required: True
+ description:
+ - "A string containing the base URL of the server.
+ For example: I(https://server.example.com/ovirt-engine/api)."
+ insecure:
+ required: False
+ description:
+ - "A boolean flag that indicates if the server TLS certificate and host name should be checked."
+ ca_file:
+ required: False
+ description:
+ - "A PEM file containing the trusted CA certificates. The
+ certificate presented by the server will be verified using these CA
+ certificates. If C(ca_file) parameter is not set, system wide
+ CA certificate store is used."
+ timeout:
+ required: False
+ description:
+ - "The maximum total time to wait for the response, in
+ seconds. A value of zero (the default) means wait forever. If
+ the timeout expires before the response is received an exception
+ will be raised."
+ compress:
+ required: False
+ description:
+ - "A boolean flag indicating if the SDK should ask
+ the server to send compressed responses. The default is I(True).
+ Note that this is a hint for the server, and that it may return
+ uncompressed data even when this parameter is set to I(True)."
+ kerberos:
+ required: False
+ description:
+ - "A boolean flag indicating if Kerberos authentication
+ should be used instead of the default basic authentication."
+notes:
+ - "Everytime you use ovirt_auth module to obtain ticket, you need to also revoke the ticket,
+ when you no longer need it, otherwise the ticket would be revoked by engine when it expires.
+ For an example of how to achieve that, please take a look at I(examples) section."
+'''
+
+EXAMPLES = '''
+tasks:
+ - block:
+ # Create a vault with `ovirt_password` variable which store your
+ # oVirt user's password, and include that yaml file with variable:
+ - include_vars: ovirt_password.yml
+
+ # Always be sure to pass 'no_log: true' to ovirt_auth task,
+ # so the oVirt user's password is not logged:
+ - name: Obtain SSO token using username/password credentials
+ no_log: true
+ ovirt_auth:
+ url: https://ovirt.example.com/ovirt-engine/api
+ username: admin@internal
+ ca_file: ca.pem
+ password: "{{ ovirt_password }}"
+
+ # Previous task generated I(ovirt_auth) fact, which you can later use
+ # in different modules as follows:
+ - ovirt_vms:
+ auth: "{{ ovirt_auth }}"
+ state: absent
+ name: myvm
+
+ always:
+ - name: Always revoke the SSO token
+ ovirt_auth:
+ state: absent
+ ovirt_auth: "{{ ovirt_auth }}"
+'''
+
+RETURN = '''
+ovirt_auth:
+ description: Authentication facts, needed to perform authentication to oVirt.
+ returned: success
+ type: dictionary
+ contains:
+ token:
+ description: SSO token which is used for connection to oVirt engine.
+ returned: success
+ type: string
+ sample: "kdfVWp9ZgeewBXV-iq3Js1-xQJZPSEQ334FLb3eksoEPRaab07DhZ8ED8ghz9lJd-MQ2GqtRIeqhvhCkrUWQPw"
+ url:
+ description: URL of the oVirt engine API endpoint.
+ returned: success
+ type: string
+ sample: "https://ovirt.example.com/ovirt-engine/api"
+ ca_file:
+ description: CA file, which is used to verify SSL/TLS connection.
+ returned: success
+ type: string
+ sample: "ca.pem"
+ insecure:
+ description: Flag indicating if insecure connection is used.
+ returned: success
+ type: bool
+ sample: False
+ timeout:
+ description: Number of seconds to wait for response.
+ returned: success
+ type: int
+ sample: 0
+ compress:
+ description: Flag indicating if compression is used for connection.
+ returned: success
+ type: bool
+ sample: True
+ kerberos:
+ description: Flag indicating if kerberos is used for authentication.
+ returned: success
+ type: bool
+ sample: False
+'''
+
+
+def main():
+ module = AnsibleModule(
+ argument_spec=dict(
+ url=dict(default=None),
+ username=dict(default=None),
+ password=dict(default=None),
+ ca_file=dict(default=None),
+ insecure=dict(required=False, type='bool', default=False),
+ timeout=dict(required=False, type='int', default=0),
+ compress=dict(required=False, type='bool', default=True),
+ kerberos=dict(required=False, type='bool', default=False),
+ state=dict(default='present', choices=['present', 'absent']),
+ ovirt_auth=dict(required=False, type='dict'),
+ ),
+ required_if=[
+ ('state', 'absent', ['ovirt_auth']),
+ ('state', 'present', ['username', 'password', 'url']),
+ ],
+ )
+ check_sdk(module)
+
+ state = module.params.get('state')
+ if state == 'present':
+ params = module.params
+ elif state == 'absent':
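+ # Reuse the connection details (including the SSO token) from the
+ # ovirt_auth fact that a previous run of this module returned: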
+ params = module.params['ovirt_auth']
+
+ connection = sdk.Connection(
+ url=params.get('url'),
+ username=params.get('username'),
+ password=params.get('password'),
+ ca_file=params.get('ca_file'),
+ insecure=params.get('insecure'),
+ timeout=params.get('timeout'),
+ compress=params.get('compress'),
+ kerberos=params.get('kerberos'),
+ token=params.get('token'),
+ )
+ try:
+ token = connection.authenticate()
+ module.exit_json(
+ changed=False,
+ ansible_facts=dict(
+ ovirt_auth=dict(
+ token=token,
+ url=params.get('url'),
+ ca_file=params.get('ca_file'),
+ insecure=params.get('insecure'),
+ timeout=params.get('timeout'),
+ compress=params.get('compress'),
+ kerberos=params.get('kerberos'),
+ ) if state == 'present' else dict()
+ )
+ )
+ except Exception as e:
+ module.fail_json(msg="Error: %s" % e)
+ finally:
+ # Close the connection; revoke the token only when state is 'absent':
+ connection.close(logout=state == 'absent')
+
+
+from ansible.module_utils.basic import *
+from ansible.module_utils.ovirt import *
+if __name__ == "__main__":
+ main()
diff --git a/lib/ansible/modules/extras/cloud/ovirt/ovirt_disks.py b/lib/ansible/modules/extras/cloud/ovirt/ovirt_disks.py
new file mode 100644
index 0000000000..a8f84c26e8
--- /dev/null
+++ b/lib/ansible/modules/extras/cloud/ovirt/ovirt_disks.py
@@ -0,0 +1,316 @@
+#!/usr/bin/python
+# -*- coding: utf-8 -*-
+#
+# Copyright (c) 2016 Red Hat, Inc.
+#
+# This file is part of Ansible
+#
+# Ansible is free software: you can redistribute it and/or modify
+# it under the terms of the GNU General Public License as published by
+# the Free Software Foundation, either version 3 of the License, or
+# (at your option) any later version.
+#
+# Ansible is distributed in the hope that it will be useful,
+# but WITHOUT ANY WARRANTY; without even the implied warranty of
+# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+# GNU General Public License for more details.
+#
+# You should have received a copy of the GNU General Public License
+# along with Ansible. If not, see <http://www.gnu.org/licenses/>.
+#
+
+try:
+ import ovirtsdk4 as sdk
+ import ovirtsdk4.types as otypes
+except ImportError:
+ pass
+
+from ansible.module_utils.ovirt import *
+
+
+DOCUMENTATION = '''
+---
+module: ovirt_disks
+short_description: "Module to manage Virtual Machine and floating disks in oVirt."
+version_added: "2.2"
+author: "Ondra Machacek (@machacekondra)"
+description:
+ - "Module to manage Virtual Machine and floating disks in oVirt."
+options:
+ id:
+ description:
+ - "ID of the disk to manage. Either C(id) or C(name) is required."
+ name:
+ description:
+ - "Name of the disk to manage. Either C(id) or C(name)/C(alias) is required."
+ aliases: ['alias']
+ vm_name:
+ description:
+ - "Name of the Virtual Machine to manage. Either C(vm_id) or C(vm_name) is required if C(state) is I(attached) or I(detached)."
+ vm_id:
+ description:
+ - "ID of the Virtual Machine to manage. Either C(vm_id) or C(vm_name) is required if C(state) is I(attached) or I(detached)."
+ state:
+ description:
+ - "Should the Virtual Machine disk be present/absent/attached/detached."
+ choices: ['present', 'absent', 'attached', 'detached']
+ default: 'present'
+ size:
+ description:
+ - "Size of the disk. Size should be specified using IEC standard units. For example 10GiB, 1024MiB, etc."
+ interface:
+ description:
+ - "Driver of the storage interface."
+ choices: ['virtio', 'ide', 'virtio_scsi']
+ default: 'virtio'
+ format:
+ description:
+ - "Format of the disk. Either copy-on-write or raw."
+ choices: ['raw', 'cow']
+ storage_domain:
+ description:
+ - "Storage domain name where disk should be created. By default storage is chosen by oVirt engine."
+ profile:
+ description:
+ - "Disk profile name to be attached to disk. By default profile is chosen by oVirt engine."
+ bootable:
+ description:
+ - "I(True) if the disk should be bootable. By default when disk is created it isn't bootable."
+ shareable:
+ description:
+ - "I(True) if the disk should be shareable. By default when disk is created it isn't shareable."
+ logical_unit:
+ description:
+ - "Dictionary which describes LUN to be directly attached to VM:"
+ - "C(address) - Address of the storage server. Used by iSCSI."
+ - "C(port) - Port of the storage server. Used by iSCSI."
+ - "C(target) - iSCSI target."
+ - "C(lun_id) - LUN id."
+ - "C(username) - CHAP Username to be used to access storage server. Used by iSCSI."
+ - "C(password) - CHAP Password of the user to be used to access storage server. Used by iSCSI."
+ - "C(storage_type) - Storage type either I(fcp) or I(iscsi)."
+extends_documentation_fragment: ovirt
+'''
+
+
+EXAMPLES = '''
+# Examples don't contain auth parameter for simplicity,
+# look at ovirt_auth module to see how to reuse authentication:
+
+# Create and attach new disk to VM
+- ovirt_disks:
+ name: myvm_disk
+ vm_name: rhel7
+ size: 10GiB
+ format: cow
+ interface: virtio
+
+# Attach logical unit to VM rhel7
+- ovirt_disks:
+ vm_name: rhel7
+ logical_unit:
+ target: iqn.2016-08-09.brq.str-01:omachace
+ id: 1IET_000d0001
+ address: 10.34.63.204
+ interface: virtio
+
+# Detach disk from VM
+- ovirt_disks:
+ state: detached
+ name: myvm_disk
+ vm_name: rhel7
+ size: 10GiB
+ format: cow
+ interface: virtio
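+
+# Remove disk named 'myvm_disk'
+# (a minimal sketch using the documented 'absent' state):
+- ovirt_disks:
+ state: absent
+ name: myvm_disk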
+'''
+
+
+RETURN = '''
+id:
+ description: "ID of the managed disk"
+ returned: "On success if disk is found."
+ type: str
+ sample: 7de90f31-222c-436c-a1ca-7e655bd5b60c
+disk:
+ description: "Dictionary of all the disk attributes. Disk attributes can be found on your oVirt instance
+ at following url: https://ovirt.example.com/ovirt-engine/api/model#types/disk."
+ returned: "On success if disk is found and C(vm_id) or C(vm_name) wasn't passed."
+
+disk_attachment:
+ description: "Dictionary of all the disk attachment attributes. Disk attachment attributes can be found
+ on your oVirt instance at following url:
+ https://ovirt.example.com/ovirt-engine/api/model#types/disk_attachment."
+ returned: "On success if disk is found and C(vm_id) or C(vm_name) was passed and VM was found."
+'''
+
+
+def _search_by_lun(disks_service, lun_id):
+ """
+ Find disk by LUN ID.
+ """
+ res = [
+ disk for disk in disks_service.list(search='disk_type=lun') if (
+ disk.lun_storage.id == lun_id
+ )
+ ]
+ return res[0] if res else None
+
+
+class DisksModule(BaseModule):
+
+ def build_entity(self):
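+ # A plain image disk leaves lun_storage as None; when a logical_unit
+ # dict is passed, the direct-LUN storage is described instead.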
+ logical_unit = self._module.params.get('logical_unit')
+ return otypes.Disk(
+ id=self._module.params.get('id'),
+ name=self._module.params.get('name'),
+ description=self._module.params.get('description'),
+ format=otypes.DiskFormat(
+ self._module.params.get('format')
+ ) if self._module.params.get('format') else None,
+ provisioned_size=convert_to_bytes(
+ self._module.params.get('size')
+ ),
+ storage_domains=[
+ otypes.StorageDomain(
+ name=self._module.params.get('storage_domain'),
+ ),
+ ],
+ shareable=self._module.params.get('shareable'),
+ lun_storage=otypes.HostStorage(
+ type=otypes.StorageType(
+ logical_unit.get('storage_type', 'iscsi')
+ ),
+ logical_units=[
+ otypes.LogicalUnit(
+ address=logical_unit.get('address'),
+ port=logical_unit.get('port', 3260),
+ target=logical_unit.get('target'),
+ id=logical_unit.get('id'),
+ username=logical_unit.get('username'),
+ password=logical_unit.get('password'),
+ )
+ ],
+ ) if logical_unit else None,
+ )
+
+ def update_check(self, entity):
+ return (
+ equal(self._module.params.get('description'), entity.description) and
+ equal(convert_to_bytes(self._module.params.get('size')), entity.provisioned_size) and
+ equal(self._module.params.get('format'), str(entity.format)) and
+ equal(self._module.params.get('shareable'), entity.shareable)
+ )
+
+
+class DiskAttachmentsModule(DisksModule):
+
+ def build_entity(self):
+ return otypes.DiskAttachment(
+ disk=super(DiskAttachmentsModule, self).build_entity(),
+ interface=otypes.DiskInterface(
+ self._module.params.get('interface')
+ ) if self._module.params.get('interface') else None,
+ bootable=self._module.params.get('bootable'),
+ active=True,
+ )
+
+ def update_check(self, entity):
+ return (
+ equal(self._module.params.get('interface'), str(entity.interface)) and
+ equal(self._module.params.get('bootable'), entity.bootable)
+ )
+
+
+def main():
+ argument_spec = ovirt_full_argument_spec(
+ state=dict(
+ choices=['present', 'absent', 'attached', 'detached'],
+ default='present'
+ ),
+ id=dict(default=None),
+ name=dict(default=None, aliases=['alias']),
+ vm_name=dict(default=None),
+ vm_id=dict(default=None),
+ size=dict(default=None),
+ interface=dict(default=None,),
+ allocation_policy=dict(default=None),
+ storage_domain=dict(default=None),
+ profile=dict(default=None),
+ format=dict(default=None, choices=['raw', 'cow']),
+ bootable=dict(default=None, type='bool'),
+ shareable=dict(default=None, type='bool'),
+ logical_unit=dict(default=None, type='dict'),
+ )
+ module = AnsibleModule(
+ argument_spec=argument_spec,
+ supports_check_mode=True,
+ )
+ check_sdk(module)
+ check_params(module)
+
+ try:
+ disk = None
+ state = module.params['state']
+ connection = create_connection(module.params.pop('auth'))
+ disks_service = connection.system_service().disks_service()
+ disks_module = DisksModule(
+ connection=connection,
+ module=module,
+ service=disks_service,
+ )
+
+ lun = module.params.get('logical_unit')
+ if lun:
+ disk = _search_by_lun(disks_service, lun.get('id'))
+
+ ret = None
+ # First take care of creating the disk, if needed:
+ if state in ('present', 'detached', 'attached'):
+ ret = disks_module.create(
+ entity=disk,
+ result_state=otypes.DiskStatus.OK if lun is None else None,
+ )
+ # We need to pass ID to the module, so in case we want detach/attach disk
+ # we have this ID specified to attach/detach method:
+ module.params['id'] = ret['id'] if disk is None else disk.id
+ elif state == 'absent':
+ ret = disks_module.remove()
+
+ # If VM was passed attach/detach disks to/from the VM:
+ if (module.params.get('vm_id') or module.params.get('vm_name')) and state != 'absent':
+ vms_service = connection.system_service().vms_service()
+
+ # If `vm_id` isn't specified, find VM by name:
+ vm_id = module.params['vm_id']
+ if vm_id is None:
+ vm_id = getattr(search_by_name(vms_service, module.params['vm_name']), 'id', None)
+
+ if vm_id is None:
+ module.fail_json(
+ msg="VM don't exists, please create it first."
+ )
+
+ disk_attachments_service = vms_service.vm_service(vm_id).disk_attachments_service()
+ disk_attachments_module = DiskAttachmentsModule(
+ connection=connection,
+ module=module,
+ service=disk_attachments_service,
+ changed=ret['changed'] if ret else False,
+ )
+
+ if state == 'present' or state == 'attached':
+ ret = disk_attachments_module.create()
+ elif state == 'detached':
+ ret = disk_attachments_module.remove()
+
+ module.exit_json(**ret)
+ except Exception as e:
+ module.fail_json(msg=str(e))
+ finally:
+ connection.close(logout=False)
+
+
+from ansible.module_utils.basic import *
+if __name__ == "__main__":
+ main()
diff --git a/lib/ansible/modules/extras/cloud/ovirt/ovirt_vms.py b/lib/ansible/modules/extras/cloud/ovirt/ovirt_vms.py
new file mode 100644
index 0000000000..0652b24c10
--- /dev/null
+++ b/lib/ansible/modules/extras/cloud/ovirt/ovirt_vms.py
@@ -0,0 +1,806 @@
+#!/usr/bin/python
+# -*- coding: utf-8 -*-
+#
+# Copyright (c) 2016 Red Hat, Inc.
+#
+# This file is part of Ansible
+#
+# Ansible is free software: you can redistribute it and/or modify
+# it under the terms of the GNU General Public License as published by
+# the Free Software Foundation, either version 3 of the License, or
+# (at your option) any later version.
+#
+# Ansible is distributed in the hope that it will be useful,
+# but WITHOUT ANY WARRANTY; without even the implied warranty of
+# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+# GNU General Public License for more details.
+#
+# You should have received a copy of the GNU General Public License
+# along with Ansible. If not, see <http://www.gnu.org/licenses/>.
+#
+
+try:
+ import ovirtsdk4 as sdk
+ import ovirtsdk4.types as otypes
+except ImportError:
+ pass
+
+from ansible.module_utils.ovirt import *
+
+
+DOCUMENTATION = '''
+---
+module: ovirt_vms
+short_description: "Module to manage Virtual Machines in oVirt."
+version_added: "2.2"
+author: "Ondra Machacek (@machacekondra)"
+description:
+ - "This module manages whole lifecycle of the Virtual Machine(VM) in oVirt. Since VM can hold many states in oVirt,
+ this see notes to see how the states of the VM are handled."
+options:
+ name:
+ description:
+ - "Name of the the Virtual Machine to manage. If VM don't exists C(name) is required.
+ Otherwise C(id) or C(name) can be used."
+ id:
+ description:
+ - "ID of the the Virtual Machine to manage."
+ state:
+ description:
+ - "Should the Virtual Machine be running/stopped/present/absent/suspended/next_run."
+ - "I(present) and I(running) are equal states."
+ - "I(next_run) state updates the VM and if the VM has next run configuration it will be rebooted."
+ - "Please check I(notes) to more detailed description of states."
+ choices: ['running', 'stopped', 'present', 'absent', 'suspended', 'next_run']
+ default: present
+ cluster:
+ description:
+ - "Name of the cluster, where Virtual Machine should be created. Required if creating VM."
+ template:
+ description:
+ - "Name of the template, which should be used to create Virtual Machine. Required if creating VM."
+ - "If template is not specified and VM doesn't exist, VM will be created from I(Blank) template."
+ memory:
+ description:
+ - "Amount of memory of the Virtual Machine. Prefix uses IEC 60027-2 standard (for example 1GiB, 1024MiB)."
+ - "Default value is set by engine."
+ memory_guaranteed:
+ description:
+ - "Amount of minimal guaranteed memory of the Virtual Machine.
+ Prefix uses IEC 60027-2 standard (for example 1GiB, 1024MiB)."
+ - "C(memory_guaranteed) parameter can't be lower than C(memory) parameter. Default value is set by engine."
+ cpu_shares:
+ description:
+ - "Set a CPU shares for this Virtual Machine. Default value is set by oVirt engine."
+ cpu_cores:
+ description:
+ - "Number of virtual CPUs cores of the Virtual Machine. Default value is set by oVirt engine."
+ cpu_sockets:
+ description:
+ - "Number of virtual CPUs sockets of the Virtual Machine. Default value is set by oVirt engine."
+ type:
+ description:
+ - "Type of the Virtual Machine. Default value is set by oVirt engine."
+ choices: [server, desktop]
+ operating_system:
+ description:
+ - "Operating system of the Virtual Machine. Default value is set by oVirt engine."
+ choices: [
+ rhel_6_ppc64, other, freebsd, windows_2003x64, windows_10, rhel_6x64, rhel_4x64, windows_2008x64,
+ windows_2008R2x64, debian_7, windows_2012x64, ubuntu_14_04, ubuntu_12_04, ubuntu_13_10, windows_8x64,
+ other_linux_ppc64, windows_2003, other_linux, windows_10x64, windows_2008, rhel_3, rhel_5, rhel_4,
+ other_ppc64, sles_11, rhel_6, windows_xp, rhel_7x64, freebsdx64, rhel_7_ppc64, windows_7, rhel_5x64,
+ ubuntu_14_04_ppc64, sles_11_ppc64, windows_8, windows_2012R2x64, windows_2008r2x64, ubuntu_13_04,
+ ubuntu_12_10, windows_7x64
+ ]
+ boot_devices:
+ description:
+ - "List of boot devices which should be used to boot. Choices I(network), I(hd) and I(cdrom)."
+ - "For example: ['cdrom', 'hd']. Default value is set by oVirt engine."
+ host:
+ description:
+ - "Specify host where Virtual Machine should be running. By default the host is chosen by engine scheduler."
+ - "This parameter is used only when C(state) is I(running) or I(present)."
+ high_availability:
+ description:
+ - "If I(True) Virtual Machine will be set as highly available."
+ - "If I(False) Virtual Machine won't be set as highly available."
+ - "If no value is passed, default value is set by oVirt engine."
+ delete_protected:
+ description:
+ - "If I(True) Virtual Machine will be set as delete protected."
+ - "If I(False) Virtual Machine won't be set as delete protected."
+ - "If no value is passed, default value is set by oVirt engine."
+ stateless:
+ description:
+ - "If I(True) Virtual Machine will be set as stateless."
+ - "If I(False) Virtual Machine will be unset as stateless."
+ - "If no value is passed, default value is set by oVirt engine."
+ clone:
+ description:
+ - "If I(True) then the disks of the created virtual machine will be cloned and independent of the template."
+ - "This parameter is used only when C(state) is I(running) or I(present) and VM didn't exist before."
+ default: False
+ clone_permissions:
+ description:
+ - "If I(True) then the permissions of the template (only the direct ones, not the inherited ones)
+ will be copied to the created virtual machine."
+ - "This parameter is used only when C(state) is I(running) or I(present) and VM didn't exist before."
+ default: False
+ cd_iso:
+ description:
+ - "ISO file from ISO storage domain which should be attached to Virtual Machine."
+ - "If you pass empty string the CD will be ejected from VM."
+ - "If used with C(state) I(running) or I(present) and VM is running the CD will be attached to VM."
+ - "If used with C(state) I(running) or I(present) and VM is down the CD will be attached to VM persistently."
+ force:
+ description:
+ - "Please check to I(Synopsis) to more detailed description of force parameter, it can behave differently
+ in different situations."
+ default: False
+ nics:
+ description:
+ - "List of NICs, which should be attached to Virtual Machine. NIC is described by following dictionary:"
+ - "C(name) - Name of the NIC."
+ - "C(profile_name) - Profile name where NIC should be attached."
+ - "C(interface) - Type of the network interface. One of following: I(virtio), I(e1000), I(rtl8139), default is I(virtio)."
+ - "C(mac_address) - Custom MAC address of the network interface, by default it's obtained from MAC pool."
+ - "C(Note:)"
+ - "This parameter is used only when C(state) is I(running) or I(present) and is able to only create NICs.
+ To manage NICs of the VM in more depth please use M(ovirt_nics) module instead."
+ disks:
+ description:
+ - "List of disks, which should be attached to Virtual Machine. Disk is described by following dictionary:"
+ - "C(name) - Name of the disk. Either C(name) or C(id) is reuqired."
+ - "C(id) - ID of the disk. Either C(name) or C(id) is reuqired."
+ - "C(interface) - Interface of the disk, either I(virtio) or I(IDE), default is I(virtio)."
+ - "C(bootable) - I(True) if the disk should be bootable, default is non bootable."
+ - "C(activate) - I(True) if the disk should be activated, default is activated."
+ - "C(Note:)"
+ - "This parameter is used only when C(state) is I(running) or I(present) and is able to only attach disks.
+ To manage disks of the VM in more depth please use M(ovirt_disks) module instead."
+ sysprep:
+ description:
+ - "Dictionary with values for Windows Virtual Machine initialization using sysprep:"
+ - "C(host_name) - Hostname to be set to Virtual Machine when deployed."
+ - "C(active_directory_ou) - Active Directory Organizational Unit, to be used for login of user."
+ - "C(org_name) - Organization name to be set to Windows Virtual Machine."
+ - "C(domain) - Domain to be set to Windows Virtual Machine."
+ - "C(timezone) - Timezone to be set to Windows Virtual Machine."
+ - "C(ui_language) - UI language of the Windows Virtual Machine."
+ - "C(system_locale) - System localization of the Windows Virtual Machine."
+ - "C(input_locale) - Input localization of the Windows Virtual Machine."
+ - "C(windows_license_key) - License key to be set to Windows Virtual Machine."
+ - "C(user_name) - Username to be used for set password to Windows Virtual Machine."
+ - "C(root_password) - Password to be set for username to Windows Virtual Machine."
+ cloud_init:
+ description:
+ - "Dictionary with values for Unix-like Virtual Machine initialization using cloud init:"
+ - "C(host_name) - Hostname to be set to Virtual Machine when deployed."
+ - "C(timezone) - Timezone to be set to Virtual Machine when deployed."
+ - "C(user_name) - Username to be used to set password to Virtual Machine when deployed."
+ - "C(root_password) - Password to be set for user specified by C(user_name) parameter."
+ - "C(authorized_ssh_keys) - Use this SSH keys to login to Virtual Machine."
+ - "C(regenerate_ssh_keys) - If I(True) SSH keys will be regenerated on Virtual Machine."
+ - "C(custom_script) - Cloud-init script which will be executed on Virtual Machine when deployed."
+ - "C(dns_servers) - DNS servers to be configured on Virtual Machine."
+ - "C(dns_search) - DNS search domains to be configured on Virtual Machine."
+ - "C(nic_boot_protocol) - Set boot protocol of the network interface of Virtual Machine. Can be one of None, DHCP or Static."
+ - "C(nic_ip_address) - If boot protocol is static, set this IP address to network interface of Virtual Machine."
+ - "C(nic_netmask) - If boot protocol is static, set this netmask to network interface of Virtual Machine."
+ - "C(nic_gateway) - If boot protocol is static, set this gateway to network interface of Virtual Machine."
+ - "C(nic_name) - Set name to network interface of Virtual Machine."
+ - "C(nic_on_boot) - If I(True) network interface will be set to start on boot."
+notes:
+ - "If VM is in I(UNASSIGNED) or I(UNKNOWN) state before any operation, the module will fail.
+ If VM is in I(IMAGE_LOCKED) state before any operation, we try to wait for VM to be I(DOWN).
+ If VM is in I(SAVING_STATE) state before any operation, we try to wait for VM to be I(SUSPENDED).
+ If VM is in I(POWERING_DOWN) state before any operation, we try to wait for VM to be I(UP) or I(DOWN). VM can
+ get into I(UP) state from I(POWERING_DOWN) state, when there is no ACPI or guest agent running inside VM, or
+ if the shutdown operation fails.
+ When the user specifies the I(UP) C(state), we always wait for the VM to be in the I(UP) state in case the VM is
+ I(MIGRATING), I(REBOOTING), I(POWERING_UP), I(RESTORING_STATE) or I(WAIT_FOR_LAUNCH). In other states we run the start operation on the VM.
+ When the user specifies the I(stopped) C(state) and passes the C(force) parameter set to I(true), we forcibly stop the VM in
+ any state. If the user doesn't pass the C(force) parameter, we always wait for the VM to be in the UP state in case the VM is
+ I(MIGRATING), I(REBOOTING), I(POWERING_UP), I(RESTORING_STATE) or I(WAIT_FOR_LAUNCH). If the VM is in the I(PAUSED) or
+ I(SUSPENDED) state, we start the VM. Then we gracefully shut down the VM.
+ When the user specifies the I(suspended) C(state), we always wait for the VM to be in the UP state in case the VM is
+ I(MIGRATING), I(REBOOTING), I(POWERING_UP), I(RESTORING_STATE) or I(WAIT_FOR_LAUNCH). If the VM is in the I(PAUSED) or I(DOWN) state,
+ we start the VM. Then we suspend the VM.
+ When the user specifies the I(absent) C(state), we forcibly stop the VM in any state and remove it."
+extends_documentation_fragment: ovirt
+'''
+
+EXAMPLES = '''
+# Examples don't contain auth parameter for simplicity,
+# look at ovirt_auth module to see how to reuse authentication:
+
+# Creates a new Virtual Machine from template named 'rhel7_template'
+ovirt_vms:
+ state: present
+ name: myvm
+ template: rhel7_template
+
+# Creates a new server rhel7 Virtual Machine from Blank template
+# on brq01 cluster with 2GiB memory and 2 vcpu cores/sockets
+# and attach bootable disk with name rhel7_disk and attach virtio NIC
+ovirt_vms:
+ state: present
+ cluster: brq01
+ name: myvm
+ memory: 2GiB
+ cpu_cores: 2
+ cpu_sockets: 2
+ cpu_shares: 1024
+ type: server
+ operating_system: rhel_7x64
+ disks:
+ - name: rhel7_disk
+ bootable: True
+ nics:
+ - name: nic1
+
+# Run VM with cloud init:
+ovirt_vms:
+ name: rhel7
+ template: rhel7
+ cluster: Default
+ memory: 1GiB
+ high_availability: true
+ cloud_init:
+ nic_boot_protocol: static
+ nic_ip_address: 10.34.60.86
+ nic_netmask: 255.255.252.0
+ nic_gateway: 10.34.63.254
+ nic_name: eth1
+ nic_on_boot: true
+ host_name: example.com
+ custom_script: |
+ write_files:
+ - content: |
+ Hello, world!
+ path: /tmp/greeting.txt
+ permissions: '0644'
+ user_name: root
+ root_password: super_password
+
+# Run VM with sysprep:
+ovirt_vms:
+ name: windows2012R2_AD
+ template: windows2012R2
+ cluster: Default
+ memory: 3GiB
+ high_availability: true
+ sysprep:
+ host_name: windowsad.example.com
+ user_name: Administrator
+ root_password: SuperPassword123
+
+# Migrate/Run VM to/on host named 'host1'
+ovirt_vms:
+ state: running
+ name: myvm
+ host: host1
+
+# Change VM's CD:
+ovirt_vms:
+ name: myvm
+ cd_iso: drivers.iso
+
+# Eject VM's CD:
+ovirt_vms:
+ name: myvm
+ cd_iso: ''
+
+# Boot VM from CD:
+ovirt_vms:
+ name: myvm
+ cd_iso: centos7_x64.iso
+ boot_devices:
+ - cdrom
+
+# Stop VM:
+ovirt_vms:
+ state: stopped
+ name: myvm
+
+# Increase memory of an already created VM:
+ovirt_vms:
+ name: myvm
+ memory: 4GiB
+
+# Hot plug memory to already created and running VM:
+# (VM won't be restarted)
+ovirt_vms:
+ name: myvm
+ memory: 4GiB
+
+# When change on the VM needs restart of the VM, use next_run state,
+# The VM will be updated and rebooted if there are any changes.
+# If present state would be used, VM won't be restarted.
+ovirt_vms:
+ state: next_run
+ name: myvm
+ boot_devices:
+ - network
+
+# Remove VM, if VM is running it will be stopped:
+ovirt_vms:
+ state: absent
+ name: myvm
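+
+# Suspend the VM, starting it first if it is paused or down
+# (a minimal sketch using the documented 'suspended' state):
+ovirt_vms:
+ state: suspended
+ name: myvm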
+'''
+
+
+RETURN = '''
+id:
+ description: ID of the VM which is managed
+ returned: On success if VM is found.
+ type: str
+ sample: 7de90f31-222c-436c-a1ca-7e655bd5b60c
+vm:
+ description: "Dictionary of all the VM attributes. VM attributes can be found on your oVirt instance
+ at following url: https://ovirt.example.com/ovirt-engine/api/model#types/vm."
+ returned: On success if VM is found.
+'''
+
+
+class VmsModule(BaseModule):
+
+ def build_entity(self):
+ return otypes.Vm(
+ name=self._module.params['name'],
+ cluster=otypes.Cluster(
+ name=self._module.params['cluster']
+ ) if self._module.params['cluster'] else None,
+ template=otypes.Template(
+ name=self._module.params['template']
+ ) if self._module.params['template'] else None,
+ stateless=self._module.params['stateless'],
+ delete_protected=self._module.params['delete_protected'],
+ high_availability=otypes.HighAvailability(
+ enabled=self._module.params['high_availability']
+ ) if self._module.params['high_availability'] is not None else None,
+ cpu=otypes.Cpu(
+ topology=otypes.CpuTopology(
+ cores=self._module.params['cpu_cores'],
+ sockets=self._module.params['cpu_sockets'],
+ )
+ ) if (
+ self._module.params['cpu_cores'] or self._module.params['cpu_sockets']
+ ) else None,
+ cpu_shares=self._module.params['cpu_shares'],
+ os=otypes.OperatingSystem(
+ type=self._module.params['operating_system'],
+ boot=otypes.Boot(
+ devices=[
+ otypes.BootDevice(dev) for dev in self._module.params['boot_devices']
+ ],
+ ) if self._module.params['boot_devices'] else None,
+ ) if (
+ self._module.params['operating_system'] or self._module.params['boot_devices']
+ ) else None,
+ type=otypes.VmType(
+ self._module.params['type']
+ ) if self._module.params['type'] else None,
+ memory=convert_to_bytes(
+ self._module.params['memory']
+ ) if self._module.params['memory'] else None,
+ memory_policy=otypes.MemoryPolicy(
+ guaranteed=convert_to_bytes(self._module.params['memory_guaranteed']),
+ ) if self._module.params['memory_guaranteed'] else None,
+ )
+
+ def update_check(self, entity):
+ return (
+ equal(self._module.params.get('cluster'), get_link_name(self._connection, entity.cluster)) and
+ equal(convert_to_bytes(self._module.params['memory']), entity.memory) and
+ equal(convert_to_bytes(self._module.params['memory_guaranteed']), entity.memory_policy.guaranteed) and
+ equal(self._module.params.get('cpu_cores'), entity.cpu.topology.cores) and
+ equal(self._module.params.get('cpu_sockets'), entity.cpu.topology.sockets) and
+ equal(self._module.params.get('type'), str(entity.type)) and
+ equal(self._module.params.get('operating_system'), str(entity.os.type)) and
+ equal(self._module.params.get('high_availability'), entity.high_availability.enabled) and
+ equal(self._module.params.get('stateless'), entity.stateless) and
+ equal(self._module.params.get('cpu_shares'), entity.cpu_shares) and
+ equal(self._module.params.get('delete_protected'), entity.delete_protected) and
+ equal(self._module.params.get('boot_devices'), [str(dev) for dev in getattr(entity.os, 'devices', [])])
+ )
+
+ def pre_create(self, entity):
+ # If the VM doesn't exist and template is not specified, set it to Blank:
+ if entity is None:
+ if self._module.params.get('template') is None:
+ self._module.params['template'] = 'Blank'
+
+ def post_update(self, entity):
+ self.post_create(entity)
+
+ def post_create(self, entity):
+ # After creation of the VM, attach disks and NICs:
+ # Both helpers flip self.changed themselves and return None,
+ # so their return value must not be assigned back to it:
+ self.__attach_disks(entity)
+ self.__attach_nics(entity)
+
+ def pre_remove(self, entity):
+ # Forcibly stop the VM, if it's not in DOWN state:
+ if entity.status != otypes.VmStatus.DOWN:
+ if not self._module.check_mode:
+ self.changed = self.action(
+ action='stop',
+ action_condition=lambda vm: vm.status != otypes.VmStatus.DOWN,
+ wait_condition=lambda vm: vm.status == otypes.VmStatus.DOWN,
+ )['changed']
+
+ def __suspend_shutdown_common(self, vm_service):
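+ # If the VM is transitioning towards UP, wait for it to settle there
+ # before a shutdown or suspend is attempted: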
+ if vm_service.get().status in [
+ otypes.VmStatus.MIGRATING,
+ otypes.VmStatus.POWERING_UP,
+ otypes.VmStatus.REBOOT_IN_PROGRESS,
+ otypes.VmStatus.WAIT_FOR_LAUNCH,
+ otypes.VmStatus.UP,
+ otypes.VmStatus.RESTORING_STATE,
+ ]:
+ self._wait_for_UP(vm_service)
+
+ def _pre_shutdown_action(self, entity):
+ vm_service = self._service.vm_service(entity.id)
+ self.__suspend_shutdown_common(vm_service)
+ if entity.status in [otypes.VmStatus.SUSPENDED, otypes.VmStatus.PAUSED]:
+ vm_service.start()
+ self._wait_for_UP(vm_service)
+ return vm_service.get()
+
+ def _pre_suspend_action(self, entity):
+ vm_service = self._service.vm_service(entity.id)
+ self.__suspend_shutdown_common(vm_service)
+ if entity.status in [otypes.VmStatus.PAUSED, otypes.VmStatus.DOWN]:
+ vm_service.start()
+ self._wait_for_UP(vm_service)
+ return vm_service.get()
+
+ def _post_start_action(self, entity):
+ vm_service = self._service.service(entity.id)
+ self._wait_for_UP(vm_service)
+ self._attach_cd(vm_service.get())
+ self._migrate_vm(vm_service.get())
+
+ def _attach_cd(self, entity):
+ cd_iso = self._module.params['cd_iso']
+ if cd_iso is not None:
+ vm_service = self._service.service(entity.id)
+ current = vm_service.get().status == otypes.VmStatus.UP
+ cdroms_service = vm_service.cdroms_service()
+ cdrom_device = cdroms_service.list()[0]
+ cdrom_service = cdroms_service.cdrom_service(cdrom_device.id)
+ cdrom = cdrom_service.get(current=current)
+ if getattr(cdrom.file, 'id', '') != cd_iso:
+ if not self._module.check_mode:
+ cdrom_service.update(
+ cdrom=otypes.Cdrom(
+ file=otypes.File(id=cd_iso)
+ ),
+ current=current,
+ )
+ self.changed = True
+
+ return entity
+
+ def _migrate_vm(self, entity):
+ vm_host = self._module.params['host']
+ vm_service = self._service.vm_service(entity.id)
+ if vm_host is not None:
+ # In case VM is preparing to be UP, wait to be up, to migrate it:
+ if entity.status == otypes.VmStatus.UP:
+ hosts_service = self._connection.system_service().hosts_service()
+ current_vm_host = hosts_service.host_service(entity.host.id).get().name
+ if vm_host != current_vm_host:
+ if not self._module.check_mode:
+ vm_service.migrate(host=otypes.Host(name=vm_host))
+ self._wait_for_UP(vm_service)
+ self.changed = True
+
+ return entity
+
+ def _wait_for_UP(self, vm_service):
+ wait(
+ service=vm_service,
+ condition=lambda vm: vm.status == otypes.VmStatus.UP,
+ wait=self._module.params['wait'],
+ timeout=self._module.params['timeout'],
+ )
+
+ def __attach_disks(self, entity):
+ disks_service = self._connection.system_service().disks_service()
+
+ for disk in self._module.params['disks']:
+ # If disk ID is not specified, find disk by name:
+ disk_id = disk.get('id')
+ if disk_id is None:
+ disk_id = getattr(
+ search_by_name(
+ service=disks_service,
+ name=disk.get('name')
+ ),
+ 'id',
+ None
+ )
+
+ # Attach disk to VM:
+ disk_attachments_service = self._service.service(entity.id).disk_attachments_service()
+ if disk_attachments_service.attachment_service(disk_id).get() is None:
+ if not self._module.check_mode:
+ disk_attachments_service.add(
+ otypes.DiskAttachment(
+ disk=otypes.Disk(
+ id=disk_id,
+ active=disk.get('activate', True),
+ ),
+ interface=otypes.DiskInterface(
+ disk.get('interface', 'virtio')
+ ),
+ bootable=disk.get('bootable', False),
+ )
+ )
+ self.changed = True
+
+ def __attach_nics(self, entity):
+ # Attach NICs to VM, if specified:
+ vnic_profiles_service = self._connection.system_service().vnic_profiles_service()
+ nics_service = self._service.service(entity.id).nics_service()
+ for nic in self._module.params['nics']:
+ if search_by_name(nics_service, nic.get('name')) is None:
+ if not self._module.check_mode:
+ nics_service.add(
+ otypes.Nic(
+ name=nic.get('name'),
+ interface=otypes.NicInterface(
+ nic.get('interface', 'virtio')
+ ),
+ vnic_profile=otypes.VnicProfile(
+ id=search_by_name(
+ vnic_profiles_service,
+ nic.get('profile_name'),
+ ).id
+ ) if nic.get('profile_name') else None,
+ mac=otypes.Mac(
+ address=nic.get('mac_address')
+ ) if nic.get('mac_address') else None,
+ )
+ )
+ self.changed = True
+
+
+def _get_initialization(sysprep, cloud_init):
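+ # Build otypes.Initialization from the cloud_init or sysprep dict: the
+ # nic_* keys are popped into a NicConfiguration, and everything left in
+ # the dict is passed straight through as Initialization attributes.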
+ initialization = None
+ if cloud_init:
+ initialization = otypes.Initialization(
+ nic_configurations=[
+ otypes.NicConfiguration(
+ boot_protocol=otypes.BootProtocol(
+ cloud_init.pop('nic_boot_protocol').lower()
+ ) if cloud_init.get('nic_boot_protocol') else None,
+ name=cloud_init.pop('nic_name', None),
+ on_boot=cloud_init.pop('nic_on_boot', None),
+ ip=otypes.Ip(
+ address=cloud_init.pop('nic_ip_address', None),
+ netmask=cloud_init.pop('nic_netmask', None),
+ gateway=cloud_init.pop('nic_gateway', None),
+ ) if (
+ cloud_init.get('nic_gateway') is not None or
+ cloud_init.get('nic_netmask') is not None or
+ cloud_init.get('nic_ip_address') is not None
+ ) else None,
+ )
+ ] if (
+ cloud_init.get('nic_gateway') is not None or
+ cloud_init.get('nic_netmask') is not None or
+ cloud_init.get('nic_ip_address') is not None or
+ cloud_init.get('nic_boot_protocol') is not None or
+ cloud_init.get('nic_on_boot') is not None
+ ) else None,
+ **cloud_init
+ )
+ elif sysprep:
+ initialization = otypes.Initialization(
+ **sysprep
+ )
+ return initialization
+
+
+def control_state(vm, vms_service, module):
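+ # Settle transient VM states before the main state machine runs: wait
+ # out IMAGE_LOCKED/SAVING_STATE, fail on unmanageable states, and resolve
+ # POWERING_DOWN either by forcing a stop or waiting for UP/DOWN.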
+ if vm is None:
+ return
+
+ force = module.params['force']
+ state = module.params['state']
+
+ vm_service = vms_service.vm_service(vm.id)
+ if vm.status == otypes.VmStatus.IMAGE_LOCKED:
+ wait(
+ service=vm_service,
+ condition=lambda vm: vm.status == otypes.VmStatus.DOWN,
+ )
+ elif vm.status == otypes.VmStatus.SAVING_STATE:
+ # Result state is SUSPENDED, we should wait to be suspended:
+ wait(
+ service=vm_service,
+ condition=lambda vm: vm.status == otypes.VmStatus.SUSPENDED,
+ )
+ elif (
+ vm.status == otypes.VmStatus.UNASSIGNED or
+ vm.status == otypes.VmStatus.UNKNOWN
+ ):
+ # Invalid states:
+ module.fail_json("Not possible to control VM, if it's in '{}' status".format(vm.status))
+ elif vm.status == otypes.VmStatus.POWERING_DOWN:
+ if (force and state == 'stopped') or state == 'absent':
+ vm_service.stop()
+ wait(
+ service=vm_service,
+ condition=lambda vm: vm.status == otypes.VmStatus.DOWN,
+ )
+ else:
+ # If VM is powering down, wait to be DOWN or UP.
+ # VM can end in UP state in case there is no GA
+ # or ACPI on the VM or shutdown operation crashed:
+ wait(
+ service=vm_service,
+ condition=lambda vm: vm.status in [otypes.VmStatus.DOWN, otypes.VmStatus.UP],
+ )
+
+
+def main():
+ argument_spec = ovirt_full_argument_spec(
+ state=dict(
+ choices=['running', 'stopped', 'present', 'absent', 'suspended', 'next_run'],
+ default='present',
+ ),
+ name=dict(default=None),
+ id=dict(default=None),
+ cluster=dict(default=None),
+ template=dict(default=None),
+ disks=dict(default=[], type='list'),
+ memory=dict(default=None),
+ memory_guaranteed=dict(default=None),
+ cpu_sockets=dict(default=None, type='int'),
+ cpu_cores=dict(default=None, type='int'),
+ cpu_shares=dict(default=None, type='int'),
+ type=dict(choices=['server', 'desktop']),
+ operating_system=dict(
+ default=None,
+ choices=[
+ 'rhel_6_ppc64', 'other', 'freebsd', 'windows_2003x64', 'windows_10',
+ 'rhel_6x64', 'rhel_4x64', 'windows_2008x64', 'windows_2008R2x64',
+ 'debian_7', 'windows_2012x64', 'ubuntu_14_04', 'ubuntu_12_04',
+ 'ubuntu_13_10', 'windows_8x64', 'other_linux_ppc64', 'windows_2003',
+ 'other_linux', 'windows_10x64', 'windows_2008', 'rhel_3', 'rhel_5',
+ 'rhel_4', 'other_ppc64', 'sles_11', 'rhel_6', 'windows_xp', 'rhel_7x64',
+ 'freebsdx64', 'rhel_7_ppc64', 'windows_7', 'rhel_5x64',
+ 'ubuntu_14_04_ppc64', 'sles_11_ppc64', 'windows_8',
+ 'windows_2012R2x64', 'windows_2008r2x64', 'ubuntu_13_04',
+ 'ubuntu_12_10', 'windows_7x64',
+ ],
+ ),
+ cd_iso=dict(default=None),
+ boot_devices=dict(default=None, type='list'),
+ high_availability=dict(type='bool'),
+ stateless=dict(type='bool'),
+ delete_protected=dict(type='bool'),
+ force=dict(type='bool', default=False),
+ nics=dict(default=[], type='list'),
+ cloud_init=dict(type='dict'),
+ sysprep=dict(type='dict'),
+ host=dict(default=None),
+ clone=dict(type='bool', default=False),
+ clone_permissions=dict(type='bool', default=False),
+ )
+ module = AnsibleModule(
+ argument_spec=argument_spec,
+ supports_check_mode=True,
+ )
+ check_sdk(module)
+ check_params(module)
+
+    connection = None
+    try:
+        state = module.params['state']
+        connection = create_connection(module.params.pop('auth'))
+ vms_service = connection.system_service().vms_service()
+ vms_module = VmsModule(
+ connection=connection,
+ module=module,
+ service=vms_service,
+ )
+ vm = vms_module.search_entity()
+
+ control_state(vm, vms_service, module)
+        if state in ('present', 'running', 'next_run'):
+ cloud_init = module.params['cloud_init']
+ sysprep = module.params['sysprep']
+
+            # If the VM doesn't exist, wait for it to reach the DOWN state;
+            # otherwise don't wait for any state, just update the VM:
+ vms_module.create(
+ entity=vm,
+ result_state=otypes.VmStatus.DOWN if vm is None else None,
+ clone=module.params['clone'],
+ clone_permissions=module.params['clone_permissions'],
+ )
+ ret = vms_module.action(
+ action='start',
+ post_action=vms_module._post_start_action,
+ action_condition=lambda vm: (
+ vm.status not in [
+ otypes.VmStatus.MIGRATING,
+ otypes.VmStatus.POWERING_UP,
+ otypes.VmStatus.REBOOT_IN_PROGRESS,
+ otypes.VmStatus.WAIT_FOR_LAUNCH,
+ otypes.VmStatus.UP,
+ otypes.VmStatus.RESTORING_STATE,
+ ]
+ ),
+ wait_condition=lambda vm: vm.status == otypes.VmStatus.UP,
+ # Start action kwargs:
+ use_cloud_init=cloud_init is not None,
+ use_sysprep=sysprep is not None,
+ vm=otypes.Vm(
+ placement_policy=otypes.VmPlacementPolicy(
+ hosts=[otypes.Host(name=module.params['host'])]
+ ) if module.params['host'] else None,
+ initialization=_get_initialization(sysprep, cloud_init),
+ ),
+ )
+
+ if state == 'next_run':
+ # Apply next run configuration, if needed:
+ vm = vms_service.vm_service(ret['id']).get()
+ if vm.next_run_configuration_exists:
+ ret = vms_module.action(
+ action='reboot',
+ entity=vm,
+ action_condition=lambda vm: vm.status == otypes.VmStatus.UP,
+ wait_condition=lambda vm: vm.status == otypes.VmStatus.UP,
+ )
+ elif state == 'stopped':
+ vms_module.create(
+ result_state=otypes.VmStatus.DOWN if vm is None else None,
+ clone=module.params['clone'],
+ clone_permissions=module.params['clone_permissions'],
+ )
+ if module.params['force']:
+ ret = vms_module.action(
+ action='stop',
+ post_action=vms_module._attach_cd,
+ action_condition=lambda vm: vm.status != otypes.VmStatus.DOWN,
+ wait_condition=lambda vm: vm.status == otypes.VmStatus.DOWN,
+ )
+ else:
+ ret = vms_module.action(
+ action='shutdown',
+ pre_action=vms_module._pre_shutdown_action,
+ post_action=vms_module._attach_cd,
+ action_condition=lambda vm: vm.status != otypes.VmStatus.DOWN,
+ wait_condition=lambda vm: vm.status == otypes.VmStatus.DOWN,
+ )
+ elif state == 'suspended':
+ vms_module.create(
+ result_state=otypes.VmStatus.DOWN if vm is None else None,
+ clone=module.params['clone'],
+ clone_permissions=module.params['clone_permissions'],
+ )
+ ret = vms_module.action(
+ action='suspend',
+ pre_action=vms_module._pre_suspend_action,
+ action_condition=lambda vm: vm.status != otypes.VmStatus.SUSPENDED,
+ wait_condition=lambda vm: vm.status == otypes.VmStatus.SUSPENDED,
+ )
+ elif state == 'absent':
+ ret = vms_module.remove()
+
+ module.exit_json(**ret)
+ except Exception as e:
+ module.fail_json(msg=str(e))
+    finally:
+        if connection is not None:
+            connection.close(logout=False)
+
+from ansible.module_utils.basic import *
+if __name__ == "__main__":
+ main()
diff --git a/lib/ansible/modules/extras/cloud/profitbricks/__init__.py b/lib/ansible/modules/extras/cloud/profitbricks/__init__.py
new file mode 100644
index 0000000000..e69de29bb2
--- /dev/null
+++ b/lib/ansible/modules/extras/cloud/profitbricks/__init__.py
diff --git a/lib/ansible/modules/extras/cloud/profitbricks/profitbricks.py b/lib/ansible/modules/extras/cloud/profitbricks/profitbricks.py
new file mode 100644
index 0000000000..7c9f23f6bb
--- /dev/null
+++ b/lib/ansible/modules/extras/cloud/profitbricks/profitbricks.py
@@ -0,0 +1,662 @@
+#!/usr/bin/python
+# This file is part of Ansible
+#
+# Ansible is free software: you can redistribute it and/or modify
+# it under the terms of the GNU General Public License as published by
+# the Free Software Foundation, either version 3 of the License, or
+# (at your option) any later version.
+#
+# Ansible is distributed in the hope that it will be useful,
+# but WITHOUT ANY WARRANTY; without even the implied warranty of
+# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+# GNU General Public License for more details.
+#
+# You should have received a copy of the GNU General Public License
+# along with Ansible. If not, see <http://www.gnu.org/licenses/>.
+
+DOCUMENTATION = '''
+---
+module: profitbricks
+short_description: Create, destroy, start, stop, and reboot a ProfitBricks virtual machine.
+description:
+  - Create, destroy, update, start, stop, and reboot a ProfitBricks virtual machine. When the virtual machine is created, the module can optionally wait for it to be 'running' before returning. This module has a dependency on profitbricks >= 1.0.0
+version_added: "2.0"
+options:
+ auto_increment:
+ description:
+ - Whether or not to increment a single number in the name for created virtual machines.
+ default: yes
+ choices: ["yes", "no"]
+ name:
+ description:
+ - The name of the virtual machine.
+ required: true
+ image:
+ description:
+ - The system image ID for creating the virtual machine, e.g. a3eae284-a2fe-11e4-b187-5f1f641608c8.
+ required: true
+ image_password:
+ description:
+ - Password set for the administrative user.
+ required: false
+ version_added: '2.2'
+ ssh_keys:
+ description:
+ - Public SSH keys allowing access to the virtual machine.
+ required: false
+ version_added: '2.2'
+ datacenter:
+ description:
+ - The datacenter to provision this virtual machine.
+ required: false
+ default: null
+ cores:
+ description:
+ - The number of CPU cores to allocate to the virtual machine.
+ required: false
+ default: 2
+ ram:
+ description:
+      - The amount of memory, in MB, to allocate to the virtual machine.
+ required: false
+ default: 2048
+ cpu_family:
+ description:
+ - The CPU family type to allocate to the virtual machine.
+ required: false
+ default: AMD_OPTERON
+ choices: [ "AMD_OPTERON", "INTEL_XEON" ]
+ version_added: '2.2'
+ volume_size:
+ description:
+ - The size in GB of the boot volume.
+ required: false
+ default: 10
+ bus:
+ description:
+ - The bus type for the volume.
+ required: false
+ default: VIRTIO
+ choices: [ "IDE", "VIRTIO"]
+ instance_ids:
+ description:
+ - list of instance ids, currently only used when state='absent' to remove instances.
+ required: false
+ count:
+ description:
+ - The number of virtual machines to create.
+ required: false
+ default: 1
+ location:
+ description:
+      - The datacenter location. Used only when the datacenter is created; otherwise this value is ignored.
+ required: false
+ default: us/las
+ choices: [ "us/las", "de/fra", "de/fkb" ]
+ assign_public_ip:
+ description:
+ - This will assign the machine to the public LAN. If no LAN exists with public Internet access it is created.
+ required: false
+ default: false
+ lan:
+ description:
+ - The ID of the LAN you wish to add the servers to.
+ required: false
+ default: 1
+  subscription_user:
+    description:
+      - The ProfitBricks username. Overrides the PB_SUBSCRIPTION_ID environment variable.
+    required: false
+    default: null
+  subscription_password:
+    description:
+      - The ProfitBricks password. Overrides the PB_PASSWORD environment variable.
+    required: false
+    default: null
+ wait:
+ description:
+ - wait for the instance to be in state 'running' before returning
+ required: false
+ default: "yes"
+ choices: [ "yes", "no" ]
+ wait_timeout:
+ description:
+ - how long before wait gives up, in seconds
+ default: 600
+ remove_boot_volume:
+ description:
+ - remove the bootVolume of the virtual machine you're destroying.
+ required: false
+ default: "yes"
+ choices: ["yes", "no"]
+ state:
+ description:
+      - create, terminate, start, or stop instances
+ required: false
+ default: 'present'
+ choices: [ "running", "stopped", "absent", "present" ]
+
+requirements:
+ - "profitbricks"
+ - "python >= 2.6"
+author: Matt Baldwin (baldwin@stackpointcloud.com)
+'''
+
+EXAMPLES = '''
+
+# Note: These examples do not set authentication details; see the subscription_user and subscription_password options.
+
+# Provisioning example. This will create three servers and enumerate their names.
+
+- profitbricks:
+ datacenter: Tardis One
+ name: web%02d.stackpointcloud.com
+ cores: 4
+ ram: 2048
+ volume_size: 50
+ cpu_family: INTEL_XEON
+ image: a3eae284-a2fe-11e4-b187-5f1f641608c8
+ location: us/las
+ count: 3
+ assign_public_ip: true
+
+# Removing Virtual Machines
+
+- profitbricks:
+ datacenter: Tardis One
+ instance_ids:
+ - 'web001.stackpointcloud.com'
+ - 'web002.stackpointcloud.com'
+ - 'web003.stackpointcloud.com'
+ wait_timeout: 500
+ state: absent
+
+# Starting Virtual Machines
+
+- profitbricks:
+ datacenter: Tardis One
+ instance_ids:
+ - 'web001.stackpointcloud.com'
+ - 'web002.stackpointcloud.com'
+ - 'web003.stackpointcloud.com'
+ wait_timeout: 500
+ state: running
+
+# Stopping Virtual Machines
+
+- profitbricks:
+ datacenter: Tardis One
+ instance_ids:
+ - 'web001.stackpointcloud.com'
+ - 'web002.stackpointcloud.com'
+ - 'web003.stackpointcloud.com'
+ wait_timeout: 500
+ state: stopped
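+
+# Provisioning with SSH keys and an administrative password. A sketch using the
+# ssh_keys and image_password options documented above; the key and image
+# values are placeholders.
+
+- profitbricks:
+    datacenter: Tardis One
+    name: jump01.stackpointcloud.com
+    image: a3eae284-a2fe-11e4-b187-5f1f641608c8
+    ssh_keys:
+      - ssh-rsa AAAAB3NzaC1yc2E... admin@example.com
+    image_password: secretpassword
+    state: present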
+
+'''
+
+import re
+import uuid
+import time
+
+HAS_PB_SDK = True
+
+try:
+ from profitbricks.client import ProfitBricksService, Volume, Server, Datacenter, NIC, LAN
+except ImportError:
+ HAS_PB_SDK = False
+
+LOCATIONS = ['us/las',
+ 'de/fra',
+ 'de/fkb']
+
+uuid_match = re.compile(
+ '[\w]{8}-[\w]{4}-[\w]{4}-[\w]{4}-[\w]{12}', re.I)
+
+
+def _wait_for_completion(profitbricks, promise, wait_timeout, msg):
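+    # Poll the request referenced by the promise every 5 seconds until its
+    # status is DONE, raising on FAILED or once wait_timeout seconds elapse.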
+    if not promise:
+        return
+    wait_timeout = time.time() + wait_timeout
+    while wait_timeout > time.time():
+        time.sleep(5)
+        operation_result = profitbricks.get_request(
+            request_id=promise['requestId'],
+            status=True)
+
+        if operation_result['metadata']['status'] == "DONE":
+            return
+        elif operation_result['metadata']['status'] == "FAILED":
+            raise Exception(
+                'Request ' + msg + ' "' + str(
+                    promise['requestId']) + '" failed to complete.')
+
+ raise Exception(
+ 'Timed out waiting for async operation ' + msg + ' "' + str(
+ promise['requestId']
+ ) + '" to complete.')
+
+
+def _create_machine(module, profitbricks, datacenter, name):
+ cores = module.params.get('cores')
+ ram = module.params.get('ram')
+ cpu_family = module.params.get('cpu_family')
+ volume_size = module.params.get('volume_size')
+ disk_type = module.params.get('disk_type')
+ image_password = module.params.get('image_password')
+ ssh_keys = module.params.get('ssh_keys')
+ bus = module.params.get('bus')
+ lan = module.params.get('lan')
+    subscription_user = module.params.get('subscription_user')
+    subscription_password = module.params.get('subscription_password')
+    location = module.params.get('location')
+    image = module.params.get('image')
+    assign_public_ip = module.boolean(module.params.get('assign_public_ip'))
+ wait = module.params.get('wait')
+ wait_timeout = module.params.get('wait_timeout')
+
+ if assign_public_ip:
+ public_found = False
+
+        lans = profitbricks.list_lans(datacenter)
+        for lan_item in lans['items']:
+            if lan_item['properties']['public']:
+                public_found = True
+                lan = lan_item['id']
+
+ if not public_found:
+ i = LAN(
+ name='public',
+ public=True)
+
+ lan_response = profitbricks.create_lan(datacenter, i)
+ _wait_for_completion(profitbricks, lan_response,
+ wait_timeout, "_create_machine")
+ lan = lan_response['id']
+
+ v = Volume(
+ name=str(uuid.uuid4()).replace('-', '')[:10],
+ size=volume_size,
+ image=image,
+ image_password=image_password,
+ ssh_keys=ssh_keys,
+ disk_type=disk_type,
+ bus=bus)
+
+ n = NIC(
+ lan=int(lan)
+ )
+
+ s = Server(
+ name=name,
+ ram=ram,
+ cores=cores,
+ cpu_family=cpu_family,
+ create_volumes=[v],
+ nics=[n],
+ )
+
+ try:
+ create_server_response = profitbricks.create_server(
+ datacenter_id=datacenter, server=s)
+
+ _wait_for_completion(profitbricks, create_server_response,
+ wait_timeout, "create_virtual_machine")
+
+ server_response = profitbricks.get_server(
+ datacenter_id=datacenter,
+ server_id=create_server_response['id'],
+ depth=3
+ )
+ except Exception as e:
+ module.fail_json(msg="failed to create the new server: %s" % str(e))
+ else:
+ return server_response
+
+
+def _startstop_machine(module, profitbricks, datacenter_id, server_id):
+ state = module.params.get('state')
+
+ try:
+ if state == 'running':
+ profitbricks.start_server(datacenter_id, server_id)
+ else:
+ profitbricks.stop_server(datacenter_id, server_id)
+
+ return True
+ except Exception as e:
+        module.fail_json(msg="failed to start or stop the virtual machine %s: %s" % (server_id, str(e)))
+
+
+def _create_datacenter(module, profitbricks):
+ datacenter = module.params.get('datacenter')
+ location = module.params.get('location')
+ wait_timeout = module.params.get('wait_timeout')
+
+ i = Datacenter(
+ name=datacenter,
+ location=location
+ )
+
+ try:
+ datacenter_response = profitbricks.create_datacenter(datacenter=i)
+
+ _wait_for_completion(profitbricks, datacenter_response,
+ wait_timeout, "_create_datacenter")
+
+ return datacenter_response
+ except Exception as e:
+ module.fail_json(msg="failed to create the new server(s): %s" % str(e))
+
+
+def create_virtual_machine(module, profitbricks):
+ """
+ Create new virtual machine
+
+ module : AnsibleModule object
+ profitbricks: authenticated profitbricks object
+
+ Returns:
+ True if a new virtual machine was created, false otherwise
+ """
+ datacenter = module.params.get('datacenter')
+ name = module.params.get('name')
+ auto_increment = module.params.get('auto_increment')
+ count = module.params.get('count')
+ lan = module.params.get('lan')
+ wait_timeout = module.params.get('wait_timeout')
+ failed = True
+ datacenter_found = False
+
+ virtual_machines = []
+ virtual_machine_ids = []
+
+ # Locate UUID for datacenter if referenced by name.
+ datacenter_list = profitbricks.list_datacenters()
+ datacenter_id = _get_datacenter_id(datacenter_list, datacenter)
+ if datacenter_id:
+ datacenter_found = True
+
+ if not datacenter_found:
+ datacenter_response = _create_datacenter(module, profitbricks)
+ datacenter_id = datacenter_response['id']
+
+ _wait_for_completion(profitbricks, datacenter_response,
+ wait_timeout, "create_virtual_machine")
+
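+    # If 'name' contains no printf-style placeholder, append '%d' so that a
+    # count of N enumerates names as name1..nameN (e.g. 'web%02d' -> web01).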
+ if auto_increment:
+ numbers = set()
+ count_offset = 1
+
+        try:
+            name % 0
+        except TypeError as e:
+            if str(e).startswith('not all'):
+                name = '%s%%d' % name
+            else:
+                module.fail_json(msg=str(e))
+
+ number_range = xrange(count_offset, count_offset + count + len(numbers))
+ available_numbers = list(set(number_range).difference(numbers))
+ names = []
+ numbers_to_use = available_numbers[:count]
+ for number in numbers_to_use:
+ names.append(name % number)
+ else:
+ names = [name]
+
+ # Prefetch a list of servers for later comparison.
+ server_list = profitbricks.list_servers(datacenter_id)
+ for name in names:
+ # Skip server creation if the server already exists.
+ if _get_server_id(server_list, name):
+ continue
+
+ create_response = _create_machine(module, profitbricks, str(datacenter_id), name)
+ nics = profitbricks.list_nics(datacenter_id, create_response['id'])
+ for n in nics['items']:
+ if lan == n['properties']['lan']:
+ create_response.update({'public_ip': n['properties']['ips'][0]})
+
+ virtual_machines.append(create_response)
+
+ failed = False
+
+ results = {
+ 'failed': failed,
+ 'machines': virtual_machines,
+ 'action': 'create',
+ 'instance_ids': {
+ 'instances': [i['id'] for i in virtual_machines],
+ }
+ }
+
+ return results
+
+
+def remove_virtual_machine(module, profitbricks):
+ """
+ Removes a virtual machine.
+
+ This will remove the virtual machine along with the bootVolume.
+
+ module : AnsibleModule object
+ profitbricks: authenticated profitbricks object.
+
+ Not yet supported: handle deletion of attached data disks.
+
+ Returns:
+ True if a new virtual server was deleted, false otherwise
+ """
+ datacenter = module.params.get('datacenter')
+ instance_ids = module.params.get('instance_ids')
+ remove_boot_volume = module.params.get('remove_boot_volume')
+ changed = False
+
+ if not isinstance(module.params.get('instance_ids'), list) or len(module.params.get('instance_ids')) < 1:
+ module.fail_json(msg='instance_ids should be a list of virtual machine ids or names, aborting')
+
+ # Locate UUID for datacenter if referenced by name.
+ datacenter_list = profitbricks.list_datacenters()
+ datacenter_id = _get_datacenter_id(datacenter_list, datacenter)
+ if not datacenter_id:
+ module.fail_json(msg='Virtual data center \'%s\' not found.' % str(datacenter))
+
+ # Prefetch server list for later comparison.
+ server_list = profitbricks.list_servers(datacenter_id)
+ for instance in instance_ids:
+ # Locate UUID for server if referenced by name.
+ server_id = _get_server_id(server_list, instance)
+ if server_id:
+ # Remove the server's boot volume
+ if remove_boot_volume:
+ _remove_boot_volume(module, profitbricks, datacenter_id, server_id)
+
+ # Remove the server
+ try:
+ server_response = profitbricks.delete_server(datacenter_id, server_id)
+ except Exception as e:
+ module.fail_json(msg="failed to terminate the virtual server: %s" % str(e))
+ else:
+ changed = True
+
+ return changed
+
+
+def _remove_boot_volume(module, profitbricks, datacenter_id, server_id):
+ """
+ Remove the boot volume from the server
+ """
+ try:
+ server = profitbricks.get_server(datacenter_id, server_id)
+ volume_id = server['properties']['bootVolume']['id']
+ volume_response = profitbricks.delete_volume(datacenter_id, volume_id)
+ except Exception as e:
+ module.fail_json(msg="failed to remove the server's boot volume: %s" % str(e))
+
+
+def startstop_machine(module, profitbricks, state):
+ """
+ Starts or Stops a virtual machine.
+
+ module : AnsibleModule object
+ profitbricks: authenticated profitbricks object.
+
+ Returns:
+ True when the servers process the action successfully, false otherwise.
+ """
+ if not isinstance(module.params.get('instance_ids'), list) or len(module.params.get('instance_ids')) < 1:
+ module.fail_json(msg='instance_ids should be a list of virtual machine ids or names, aborting')
+
+ wait = module.params.get('wait')
+ wait_timeout = module.params.get('wait_timeout')
+ changed = False
+
+ datacenter = module.params.get('datacenter')
+ instance_ids = module.params.get('instance_ids')
+
+ # Locate UUID for datacenter if referenced by name.
+ datacenter_list = profitbricks.list_datacenters()
+ datacenter_id = _get_datacenter_id(datacenter_list, datacenter)
+ if not datacenter_id:
+ module.fail_json(msg='Virtual data center \'%s\' not found.' % str(datacenter))
+
+ # Prefetch server list for later comparison.
+ server_list = profitbricks.list_servers(datacenter_id)
+ for instance in instance_ids:
+ # Locate UUID of server if referenced by name.
+ server_id = _get_server_id(server_list, instance)
+ if server_id:
+ _startstop_machine(module, profitbricks, datacenter_id, server_id)
+ changed = True
+
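+    # Optionally poll the server list until every requested instance reports
+    # the target vmState ('running', or 'shutoff' for state=stopped), failing
+    # if wait_timeout seconds pass first.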
+ if wait:
+ wait_timeout = time.time() + wait_timeout
+ while wait_timeout > time.time():
+ matched_instances = []
+ for res in profitbricks.list_servers(datacenter_id)['items']:
+ if state == 'running':
+ if res['properties']['vmState'].lower() == state:
+ matched_instances.append(res)
+ elif state == 'stopped':
+ if res['properties']['vmState'].lower() == 'shutoff':
+ matched_instances.append(res)
+
+ if len(matched_instances) < len(instance_ids):
+ time.sleep(5)
+ else:
+ break
+
+ if wait_timeout <= time.time():
+ # waiting took too long
+ module.fail_json(msg="wait for virtual machine state timeout on %s" % time.asctime())
+
+    return changed
+
+
+def _get_datacenter_id(datacenters, identity):
+ """
+ Fetch and return datacenter UUID by datacenter name if found.
+ """
+ for datacenter in datacenters['items']:
+ if identity in (datacenter['properties']['name'], datacenter['id']):
+ return datacenter['id']
+ return None
+
+
+def _get_server_id(servers, identity):
+ """
+ Fetch and return server UUID by server name if found.
+ """
+ for server in servers['items']:
+ if identity in (server['properties']['name'], server['id']):
+ return server['id']
+ return None
+
+
+def main():
+ module = AnsibleModule(
+ argument_spec=dict(
+ datacenter=dict(),
+ name=dict(),
+ image=dict(),
+ cores=dict(type='int', default=2),
+ ram=dict(type='int', default=2048),
+ cpu_family=dict(choices=['AMD_OPTERON', 'INTEL_XEON'],
+ default='AMD_OPTERON'),
+ volume_size=dict(type='int', default=10),
+ disk_type=dict(choices=['HDD', 'SSD'], default='HDD'),
+ image_password=dict(default=None),
+ ssh_keys=dict(type='list', default=[]),
+ bus=dict(choices=['VIRTIO', 'IDE'], default='VIRTIO'),
+ lan=dict(type='int', default=1),
+ count=dict(type='int', default=1),
+ auto_increment=dict(type='bool', default=True),
+ instance_ids=dict(type='list', default=[]),
+ subscription_user=dict(),
+ subscription_password=dict(),
+ location=dict(choices=LOCATIONS, default='us/las'),
+ assign_public_ip=dict(type='bool', default=False),
+ wait=dict(type='bool', default=True),
+ wait_timeout=dict(type='int', default=600),
+ remove_boot_volume=dict(type='bool', default=True),
+ state=dict(default='present'),
+ )
+ )
+
+ if not HAS_PB_SDK:
+ module.fail_json(msg='profitbricks required for this module')
+
+ subscription_user = module.params.get('subscription_user')
+ subscription_password = module.params.get('subscription_password')
+ wait = module.params.get('wait')
+ wait_timeout = module.params.get('wait_timeout')
+
+ profitbricks = ProfitBricksService(
+ username=subscription_user,
+ password=subscription_password)
+
+ state = module.params.get('state')
+
+ if state == 'absent':
+ if not module.params.get('datacenter'):
+            module.fail_json(msg='datacenter parameter is required ' +
+                             'for removing machines.')
+
+ try:
+ (changed) = remove_virtual_machine(module, profitbricks)
+ module.exit_json(changed=changed)
+ except Exception as e:
+ module.fail_json(msg='failed to set instance state: %s' % str(e))
+
+ elif state in ('running', 'stopped'):
+ if not module.params.get('datacenter'):
+ module.fail_json(msg='datacenter parameter is required for ' +
+ 'running or stopping machines.')
+ try:
+ (changed) = startstop_machine(module, profitbricks, state)
+ module.exit_json(changed=changed)
+ except Exception as e:
+ module.fail_json(msg='failed to set instance state: %s' % str(e))
+
+ elif state == 'present':
+ if not module.params.get('name'):
+ module.fail_json(msg='name parameter is required for new instance')
+ if not module.params.get('image'):
+ module.fail_json(msg='image parameter is required for new instance')
+ if not module.params.get('subscription_user'):
+ module.fail_json(msg='subscription_user parameter is ' +
+ 'required for new instance')
+ if not module.params.get('subscription_password'):
+ module.fail_json(msg='subscription_password parameter is ' +
+ 'required for new instance')
+
+ try:
+ (machine_dict_array) = create_virtual_machine(module, profitbricks)
+ module.exit_json(**machine_dict_array)
+ except Exception as e:
+ module.fail_json(msg='failed to set instance state: %s' % str(e))
+
+from ansible.module_utils.basic import *
+
+main()
diff --git a/lib/ansible/modules/extras/cloud/profitbricks/profitbricks_datacenter.py b/lib/ansible/modules/extras/cloud/profitbricks/profitbricks_datacenter.py
new file mode 100644
index 0000000000..0b21d3e4cd
--- /dev/null
+++ b/lib/ansible/modules/extras/cloud/profitbricks/profitbricks_datacenter.py
@@ -0,0 +1,258 @@
+#!/usr/bin/python
+# This file is part of Ansible
+#
+# Ansible is free software: you can redistribute it and/or modify
+# it under the terms of the GNU General Public License as published by
+# the Free Software Foundation, either version 3 of the License, or
+# (at your option) any later version.
+#
+# Ansible is distributed in the hope that it will be useful,
+# but WITHOUT ANY WARRANTY; without even the implied warranty of
+# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+# GNU General Public License for more details.
+#
+# You should have received a copy of the GNU General Public License
+# along with Ansible. If not, see <http://www.gnu.org/licenses/>.
+
+DOCUMENTATION = '''
+---
+module: profitbricks_datacenter
+short_description: Create or destroy a ProfitBricks Virtual Datacenter.
+description:
+ - This is a simple module that supports creating or removing vDCs. A vDC is required before you can create servers. This module has a dependency on profitbricks >= 1.0.0
+version_added: "2.0"
+options:
+ name:
+ description:
+ - The name of the virtual datacenter.
+ required: true
+ description:
+ description:
+ - The description of the virtual datacenter.
+ required: false
+ location:
+ description:
+ - The datacenter location.
+ required: false
+ default: us/las
+ choices: [ "us/las", "de/fra", "de/fkb" ]
+  subscription_user:
+    description:
+      - The ProfitBricks username. Overrides the PB_SUBSCRIPTION_ID environment variable.
+    required: false
+  subscription_password:
+    description:
+      - The ProfitBricks password. Overrides the PB_PASSWORD environment variable.
+    required: false
+ wait:
+ description:
+ - wait for the datacenter to be created before returning
+ required: false
+ default: "yes"
+ choices: [ "yes", "no" ]
+ wait_timeout:
+ description:
+ - how long before wait gives up, in seconds
+ default: 600
+ state:
+ description:
+ - create or terminate datacenters
+ required: false
+ default: 'present'
+ choices: [ "present", "absent" ]
+
+requirements: [ "profitbricks" ]
+author: Matt Baldwin (baldwin@stackpointcloud.com)
+'''
+
+EXAMPLES = '''
+
+# Create a Datacenter
+- profitbricks_datacenter:
+    name: Tardis One
+    wait_timeout: 500
+
+# Destroy a Datacenter. This will remove all servers, volumes, and other objects in the datacenter.
+- profitbricks_datacenter:
+    name: Tardis One
+    wait_timeout: 500
+    state: absent
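+
+# Create a Datacenter with an explicit location and description. A sketch using
+# the location and description options documented above; the values are
+# placeholders.
+- profitbricks_datacenter:
+    name: Tardis Two
+    description: Staging environment
+    location: de/fkb
+    wait_timeout: 500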
+
+'''
+
+import re
+import time
+
+HAS_PB_SDK = True
+
+try:
+ from profitbricks.client import ProfitBricksService, Datacenter
+except ImportError:
+ HAS_PB_SDK = False
+
+LOCATIONS = ['us/las',
+ 'de/fra',
+ 'de/fkb']
+
+uuid_match = re.compile(
+ '[\w]{8}-[\w]{4}-[\w]{4}-[\w]{4}-[\w]{12}', re.I)
+
+
+def _wait_for_completion(profitbricks, promise, wait_timeout, msg):
+    if not promise:
+        return
+    wait_timeout = time.time() + wait_timeout
+    while wait_timeout > time.time():
+        time.sleep(5)
+        operation_result = profitbricks.get_request(
+            request_id=promise['requestId'],
+            status=True)
+
+        if operation_result['metadata']['status'] == "DONE":
+            return
+        elif operation_result['metadata']['status'] == "FAILED":
+            raise Exception(
+                'Request ' + msg + ' "' + str(
+                    promise['requestId']) + '" failed to complete.')
+
+ raise Exception(
+ 'Timed out waiting for async operation ' + msg + ' "' + str(
+ promise['requestId']
+ ) + '" to complete.')
+
+def _remove_datacenter(module, profitbricks, datacenter):
+ try:
+ profitbricks.delete_datacenter(datacenter)
+ except Exception as e:
+ module.fail_json(msg="failed to remove the datacenter: %s" % str(e))
+
+def create_datacenter(module, profitbricks):
+ """
+ Creates a Datacenter
+
+ This will create a new Datacenter in the specified location.
+
+ module : AnsibleModule object
+ profitbricks: authenticated profitbricks object.
+
+ Returns:
+ True if a new datacenter was created, false otherwise
+ """
+ name = module.params.get('name')
+ location = module.params.get('location')
+ description = module.params.get('description')
+ wait = module.params.get('wait')
+ wait_timeout = int(module.params.get('wait_timeout'))
+ virtual_datacenters = []
+
+ i = Datacenter(
+ name=name,
+ location=location,
+ description=description
+ )
+
+ try:
+ datacenter_response = profitbricks.create_datacenter(datacenter=i)
+
+ if wait:
+ _wait_for_completion(profitbricks, datacenter_response,
+ wait_timeout, "_create_datacenter")
+
+ results = {
+ 'datacenter_id': datacenter_response['id']
+ }
+
+ return results
+
+ except Exception as e:
+ module.fail_json(msg="failed to create the new datacenter: %s" % str(e))
+
+def remove_datacenter(module, profitbricks):
+ """
+ Removes a Datacenter.
+
+ This will remove a datacenter.
+
+ module : AnsibleModule object
+ profitbricks: authenticated profitbricks object.
+
+ Returns:
+ True if the datacenter was deleted, false otherwise
+ """
+ name = module.params.get('name')
+ changed = False
+
+    if uuid_match.match(name):
+ _remove_datacenter(module, profitbricks, name)
+ changed = True
+ else:
+ datacenters = profitbricks.list_datacenters()
+
+ for d in datacenters['items']:
+ vdc = profitbricks.get_datacenter(d['id'])
+
+ if name == vdc['properties']['name']:
+ name = d['id']
+ _remove_datacenter(module, profitbricks, name)
+ changed = True
+
+ return changed
+
+def main():
+ module = AnsibleModule(
+ argument_spec=dict(
+ name=dict(),
+ description=dict(),
+ location=dict(choices=LOCATIONS, default='us/las'),
+ subscription_user=dict(),
+ subscription_password=dict(),
+ wait=dict(type='bool', default=True),
+            wait_timeout=dict(type='int', default=600),
+ state=dict(default='present'),
+ )
+ )
+ if not HAS_PB_SDK:
+ module.fail_json(msg='profitbricks required for this module')
+
+ if not module.params.get('subscription_user'):
+ module.fail_json(msg='subscription_user parameter is required')
+ if not module.params.get('subscription_password'):
+ module.fail_json(msg='subscription_password parameter is required')
+
+ subscription_user = module.params.get('subscription_user')
+ subscription_password = module.params.get('subscription_password')
+
+ profitbricks = ProfitBricksService(
+ username=subscription_user,
+ password=subscription_password)
+
+ state = module.params.get('state')
+
+ if state == 'absent':
+ if not module.params.get('name'):
+            module.fail_json(msg='name parameter is required for deleting a virtual datacenter.')
+
+ try:
+ (changed) = remove_datacenter(module, profitbricks)
+ module.exit_json(
+ changed=changed)
+ except Exception as e:
+ module.fail_json(msg='failed to set datacenter state: %s' % str(e))
+
+ elif state == 'present':
+ if not module.params.get('name'):
+ module.fail_json(msg='name parameter is required for a new datacenter')
+ if not module.params.get('location'):
+ module.fail_json(msg='location parameter is required for a new datacenter')
+
+ try:
+ (datacenter_dict_array) = create_datacenter(module, profitbricks)
+ module.exit_json(**datacenter_dict_array)
+ except Exception as e:
+ module.fail_json(msg='failed to set datacenter state: %s' % str(e))
+
+from ansible.module_utils.basic import *
+
+main()
diff --git a/lib/ansible/modules/extras/cloud/profitbricks/profitbricks_nic.py b/lib/ansible/modules/extras/cloud/profitbricks/profitbricks_nic.py
new file mode 100644
index 0000000000..902d526684
--- /dev/null
+++ b/lib/ansible/modules/extras/cloud/profitbricks/profitbricks_nic.py
@@ -0,0 +1,290 @@
+#!/usr/bin/python
+# This file is part of Ansible
+#
+# Ansible is free software: you can redistribute it and/or modify
+# it under the terms of the GNU General Public License as published by
+# the Free Software Foundation, either version 3 of the License, or
+# (at your option) any later version.
+#
+# Ansible is distributed in the hope that it will be useful,
+# but WITHOUT ANY WARRANTY; without even the implied warranty of
+# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+# GNU General Public License for more details.
+#
+# You should have received a copy of the GNU General Public License
+# along with Ansible. If not, see <http://www.gnu.org/licenses/>.
+
+DOCUMENTATION = '''
+---
+module: profitbricks_nic
+short_description: Create or remove a NIC.
+description:
+  - This module allows you to create or remove a NIC. This module has a dependency on profitbricks >= 1.0.0
+version_added: "2.0"
+options:
+ datacenter:
+ description:
+ - The datacenter in which to operate.
+ required: true
+ server:
+ description:
+ - The server name or ID.
+ required: true
+ name:
+ description:
+      - The name or ID of the NIC. This is only required on delete, not on create.
+    required: false
+ lan:
+ description:
+ - The LAN to place the NIC on. You can pass a LAN that doesn't exist and it will be created. Required on create.
+ required: true
+  subscription_user:
+    description:
+      - The ProfitBricks username. Overrides the PB_SUBSCRIPTION_ID environment variable.
+    required: false
+  subscription_password:
+    description:
+      - The ProfitBricks password. Overrides the PB_PASSWORD environment variable.
+    required: false
+ wait:
+ description:
+ - wait for the operation to complete before returning
+ required: false
+ default: "yes"
+ choices: [ "yes", "no" ]
+ wait_timeout:
+ description:
+ - how long before wait gives up, in seconds
+ default: 600
+ state:
+ description:
+ - Indicate desired state of the resource
+ required: false
+ default: 'present'
+ choices: ["present", "absent"]
+
+requirements: [ "profitbricks" ]
+author: Matt Baldwin (baldwin@stackpointcloud.com)
+'''
+
+EXAMPLES = '''
+
+# Create a NIC
+- profitbricks_nic:
+ datacenter: Tardis One
+ server: node002
+ lan: 2
+ wait_timeout: 500
+ state: present
+
+# Remove a NIC
+- profitbricks_nic:
+ datacenter: Tardis One
+ server: node002
+ name: 7341c2454f
+ wait_timeout: 500
+ state: absent
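+
+# Create a NIC with an explicit name. A sketch; the name option is optional on
+# create and a random name is generated when it is omitted.
+- profitbricks_nic:
+    datacenter: Tardis One
+    server: node002
+    name: nic01
+    lan: 2
+    state: present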
+
+'''
+
+import re
+import uuid
+import time
+
+HAS_PB_SDK = True
+
+try:
+ from profitbricks.client import ProfitBricksService, NIC
+except ImportError:
+ HAS_PB_SDK = False
+
+uuid_match = re.compile(
+ '[\w]{8}-[\w]{4}-[\w]{4}-[\w]{4}-[\w]{12}', re.I)
+
+
+def _wait_for_completion(profitbricks, promise, wait_timeout, msg):
+    if not promise:
+        return
+    wait_timeout = time.time() + wait_timeout
+    while wait_timeout > time.time():
+        time.sleep(5)
+        operation_result = profitbricks.get_request(
+            request_id=promise['requestId'],
+            status=True)
+
+        if operation_result['metadata']['status'] == "DONE":
+            return
+        elif operation_result['metadata']['status'] == "FAILED":
+            raise Exception(
+                'Request ' + msg + ' "' + str(
+                    promise['requestId']) + '" failed to complete.')
+
+ raise Exception(
+ 'Timed out waiting for async operation ' + msg + ' "' + str(
+ promise['requestId']
+ ) + '" to complete.')
+
+def create_nic(module, profitbricks):
+ """
+ Creates a NIC.
+
+ module : AnsibleModule object
+ profitbricks: authenticated profitbricks object.
+
+ Returns:
+        True if the NIC was created, false otherwise
+ """
+ datacenter = module.params.get('datacenter')
+ server = module.params.get('server')
+ lan = module.params.get('lan')
+ name = module.params.get('name')
+ wait = module.params.get('wait')
+ wait_timeout = module.params.get('wait_timeout')
+
+ # Locate UUID for Datacenter
+ if not (uuid_match.match(datacenter)):
+ datacenter_list = profitbricks.list_datacenters()
+ for d in datacenter_list['items']:
+ dc = profitbricks.get_datacenter(d['id'])
+ if datacenter == dc['properties']['name']:
+ datacenter = d['id']
+ break
+
+ # Locate UUID for Server
+ if not (uuid_match.match(server)):
+ server_list = profitbricks.list_servers(datacenter)
+ for s in server_list['items']:
+ if server == s['properties']['name']:
+ server = s['id']
+ break
+ try:
+ n = NIC(
+ name=name,
+ lan=lan
+ )
+
+ nic_response = profitbricks.create_nic(datacenter, server, n)
+
+ if wait:
+ _wait_for_completion(profitbricks, nic_response,
+ wait_timeout, "create_nic")
+
+ return nic_response
+
+ except Exception as e:
+ module.fail_json(msg="failed to create the NIC: %s" % str(e))
+
+def delete_nic(module, profitbricks):
+ """
+ Removes a NIC
+
+ module : AnsibleModule object
+ profitbricks: authenticated profitbricks object.
+
+ Returns:
+ True if the NIC was removed, false otherwise
+ """
+ datacenter = module.params.get('datacenter')
+ server = module.params.get('server')
+ name = module.params.get('name')
+
+ # Locate UUID for Datacenter
+ if not (uuid_match.match(datacenter)):
+ datacenter_list = profitbricks.list_datacenters()
+ for d in datacenter_list['items']:
+ dc = profitbricks.get_datacenter(d['id'])
+ if datacenter == dc['properties']['name']:
+ datacenter = d['id']
+ break
+
+ # Locate UUID for Server
+ server_found = False
+ if not (uuid_match.match(server)):
+ server_list = profitbricks.list_servers(datacenter)
+ for s in server_list['items']:
+ if server == s['properties']['name']:
+ server_found = True
+ server = s['id']
+ break
+
+ if not server_found:
+ return False
+
+ # Locate UUID for NIC
+ nic_found = False
+ if not (uuid_match.match(name)):
+ nic_list = profitbricks.list_nics(datacenter, server)
+ for n in nic_list['items']:
+ if name == n['properties']['name']:
+ nic_found = True
+ name = n['id']
+ break
+
+ if not nic_found:
+ return False
+
+ try:
+ nic_response = profitbricks.delete_nic(datacenter, server, name)
+ return nic_response
+ except Exception as e:
+ module.fail_json(msg="failed to remove the NIC: %s" % str(e))
+
+def main():
+ module = AnsibleModule(
+ argument_spec=dict(
+ datacenter=dict(),
+ server=dict(),
+            name=dict(default=str(uuid.uuid4()).replace('-', '')[:10]),
+ lan=dict(),
+ subscription_user=dict(),
+ subscription_password=dict(),
+ wait=dict(type='bool', default=True),
+ wait_timeout=dict(type='int', default=600),
+ state=dict(default='present'),
+ )
+ )
+
+ if not HAS_PB_SDK:
+ module.fail_json(msg='profitbricks required for this module')
+
+ if not module.params.get('subscription_user'):
+ module.fail_json(msg='subscription_user parameter is required')
+ if not module.params.get('subscription_password'):
+ module.fail_json(msg='subscription_password parameter is required')
+ if not module.params.get('datacenter'):
+ module.fail_json(msg='datacenter parameter is required')
+ if not module.params.get('server'):
+ module.fail_json(msg='server parameter is required')
+
+ subscription_user = module.params.get('subscription_user')
+ subscription_password = module.params.get('subscription_password')
+
+ profitbricks = ProfitBricksService(
+ username=subscription_user,
+ password=subscription_password)
+
+ state = module.params.get('state')
+
+ if state == 'absent':
+ if not module.params.get('name'):
+ module.fail_json(msg='name parameter is required')
+
+ try:
+ (changed) = delete_nic(module, profitbricks)
+ module.exit_json(changed=changed)
+ except Exception as e:
+ module.fail_json(msg='failed to set nic state: %s' % str(e))
+
+ elif state == 'present':
+ if not module.params.get('lan'):
+ module.fail_json(msg='lan parameter is required')
+
+ try:
+ (nic_dict) = create_nic(module, profitbricks)
+ module.exit_json(nics=nic_dict)
+ except Exception as e:
+ module.fail_json(msg='failed to set nic state: %s' % str(e))
+
+from ansible.module_utils.basic import *
+
+main() \ No newline at end of file
diff --git a/lib/ansible/modules/extras/cloud/profitbricks/profitbricks_volume.py b/lib/ansible/modules/extras/cloud/profitbricks/profitbricks_volume.py
new file mode 100644
index 0000000000..1cee967675
--- /dev/null
+++ b/lib/ansible/modules/extras/cloud/profitbricks/profitbricks_volume.py
@@ -0,0 +1,423 @@
+#!/usr/bin/python
+# This file is part of Ansible
+#
+# Ansible is free software: you can redistribute it and/or modify
+# it under the terms of the GNU General Public License as published by
+# the Free Software Foundation, either version 3 of the License, or
+# (at your option) any later version.
+#
+# Ansible is distributed in the hope that it will be useful,
+# but WITHOUT ANY WARRANTY; without even the implied warranty of
+# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+# GNU General Public License for more details.
+#
+# You should have received a copy of the GNU General Public License
+# along with Ansible. If not, see <http://www.gnu.org/licenses/>.
+
+DOCUMENTATION = '''
+---
+module: profitbricks_volume
+short_description: Create or destroy a volume.
+description:
+  - Allows you to create a volume in, or remove a volume from, a ProfitBricks datacenter. This module has a dependency on profitbricks >= 1.0.0
+version_added: "2.0"
+options:
+ datacenter:
+ description:
+ - The datacenter in which to create the volumes.
+ required: true
+ name:
+ description:
+ - The name of the volumes. You can enumerate the names using auto_increment.
+ required: true
+ size:
+ description:
+      - The size of the volume in GB.
+ required: false
+ default: 10
+ bus:
+ description:
+ - The bus type.
+ required: false
+ default: VIRTIO
+ choices: [ "IDE", "VIRTIO"]
+ image:
+ description:
+ - The system image ID for the volume, e.g. a3eae284-a2fe-11e4-b187-5f1f641608c8. This can also be a snapshot image ID.
+ required: true
+ image_password:
+ description:
+ - Password set for the administrative user.
+ required: false
+ version_added: '2.2'
+ ssh_keys:
+ description:
+ - Public SSH keys allowing access to the virtual machine.
+ required: false
+ version_added: '2.2'
+ disk_type:
+ description:
+ - The disk type of the volume.
+ required: false
+ default: HDD
+ choices: [ "HDD", "SSD" ]
+ licence_type:
+ description:
+ - The licence type for the volume. This is used when the image is non-standard.
+ required: false
+ default: UNKNOWN
+ choices: ["LINUX", "WINDOWS", "UNKNOWN" , "OTHER"]
+ count:
+ description:
+ - The number of volumes you wish to create.
+ required: false
+ default: 1
+ auto_increment:
+ description:
+ - Whether or not to increment a single number in the name for created virtual machines.
+ default: yes
+ choices: ["yes", "no"]
+ instance_ids:
+ description:
+ - list of instance ids, currently only used when state='absent' to remove instances.
+ required: false
+  subscription_user:
+    description:
+      - The ProfitBricks username. Overrides the PB_SUBSCRIPTION_ID environment variable.
+    required: false
+  subscription_password:
+    description:
+      - The ProfitBricks password. Overrides the PB_PASSWORD environment variable.
+    required: false
+ wait:
+ description:
+      - wait for the volume to be created before returning
+ required: false
+ default: "yes"
+ choices: [ "yes", "no" ]
+ wait_timeout:
+ description:
+ - how long before wait gives up, in seconds
+ default: 600
+ state:
+ description:
+      - create or terminate volumes
+ required: false
+ default: 'present'
+ choices: ["present", "absent"]
+
+requirements: [ "profitbricks" ]
+author: Matt Baldwin (baldwin@stackpointcloud.com)
+'''
+
+EXAMPLES = '''
+
+# Create Multiple Volumes
+
+- profitbricks_volume:
+ datacenter: Tardis One
+ name: vol%02d
+ count: 5
+ auto_increment: yes
+ wait_timeout: 500
+ state: present
+
+# Remove Volumes
+
+- profitbricks_volume:
+ datacenter: Tardis One
+ instance_ids:
+ - 'vol01'
+ - 'vol02'
+ wait_timeout: 500
+ state: absent
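+
+# Create volumes and attach them to a server. A sketch: the module also accepts
+# a server parameter (name or UUID), which the create path uses to attach each
+# new volume; node002 is a placeholder.
+- profitbricks_volume:
+    datacenter: Tardis One
+    server: node002
+    name: vol%02d
+    count: 2
+    state: present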
+
+'''
+
+import re
+import time
+
+HAS_PB_SDK = True
+
+try:
+ from profitbricks.client import ProfitBricksService, Volume
+except ImportError:
+ HAS_PB_SDK = False
+
+uuid_match = re.compile(
+ '[\w]{8}-[\w]{4}-[\w]{4}-[\w]{4}-[\w]{12}', re.I)
+
+
+def _wait_for_completion(profitbricks, promise, wait_timeout, msg):
+    if not promise:
+        return
+    wait_timeout = time.time() + wait_timeout
+    while wait_timeout > time.time():
+        time.sleep(5)
+        operation_result = profitbricks.get_request(
+            request_id=promise['requestId'],
+            status=True)
+
+        if operation_result['metadata']['status'] == "DONE":
+            return
+        elif operation_result['metadata']['status'] == "FAILED":
+            raise Exception(
+                'Request ' + msg + ' "' + str(
+                    promise['requestId']) + '" failed to complete.')
+
+ raise Exception(
+ 'Timed out waiting for async operation ' + msg + ' "' + str(
+ promise['requestId']
+ ) + '" to complete.')
+
+
+def _create_volume(module, profitbricks, datacenter, name):
+ size = module.params.get('size')
+ bus = module.params.get('bus')
+ image = module.params.get('image')
+ image_password = module.params.get('image_password')
+ ssh_keys = module.params.get('ssh_keys')
+ disk_type = module.params.get('disk_type')
+ licence_type = module.params.get('licence_type')
+ wait_timeout = module.params.get('wait_timeout')
+ wait = module.params.get('wait')
+
+ try:
+ v = Volume(
+ name=name,
+ size=size,
+ bus=bus,
+ image=image,
+ image_password=image_password,
+ ssh_keys=ssh_keys,
+ disk_type=disk_type,
+ licence_type=licence_type
+ )
+
+ volume_response = profitbricks.create_volume(datacenter, v)
+
+ if wait:
+ _wait_for_completion(profitbricks, volume_response,
+ wait_timeout, "_create_volume")
+
+ except Exception as e:
+ module.fail_json(msg="failed to create the volume: %s" % str(e))
+
+ return volume_response
+
+
+def _delete_volume(module, profitbricks, datacenter, volume):
+ try:
+ profitbricks.delete_volume(datacenter, volume)
+ except Exception as e:
+ module.fail_json(msg="failed to remove the volume: %s" % str(e))
+
+
+def create_volume(module, profitbricks):
+ """
+ Creates a volume.
+
+ This will create a volume in a datacenter.
+
+ module : AnsibleModule object
+ profitbricks: authenticated profitbricks object.
+
+ Returns:
+ True if the volume was created, false otherwise
+ """
+ datacenter = module.params.get('datacenter')
+ name = module.params.get('name')
+ auto_increment = module.params.get('auto_increment')
+ count = module.params.get('count')
+
+ datacenter_found = False
+ failed = True
+ volumes = []
+
+ # Locate UUID for Datacenter
+ if not (uuid_match.match(datacenter)):
+ datacenter_list = profitbricks.list_datacenters()
+ for d in datacenter_list['items']:
+ dc = profitbricks.get_datacenter(d['id'])
+ if datacenter == dc['properties']['name']:
+ datacenter = d['id']
+ datacenter_found = True
+ break
+
+ if not datacenter_found:
+ module.fail_json(msg='datacenter could not be found.')
+
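+    # If 'name' contains no printf-style placeholder, append '%d' so that a
+    # count of N enumerates names as name1..nameN (e.g. 'vol%02d' -> vol01).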
+ if auto_increment:
+ numbers = set()
+ count_offset = 1
+
+        try:
+            name % 0
+        except TypeError as e:
+            if str(e).startswith('not all'):
+                name = '%s%%d' % name
+            else:
+                module.fail_json(msg=str(e))
+
+ number_range = xrange(count_offset, count_offset + count + len(numbers))
+ available_numbers = list(set(number_range).difference(numbers))
+ names = []
+ numbers_to_use = available_numbers[:count]
+ for number in numbers_to_use:
+ names.append(name % number)
+ else:
+ names = [name] * count
+
+ for name in names:
+ create_response = _create_volume(module, profitbricks, str(datacenter), name)
+ volumes.append(create_response)
+ _attach_volume(module, profitbricks, datacenter, create_response['id'])
+ failed = False
+
+ results = {
+ 'failed': failed,
+ 'volumes': volumes,
+ 'action': 'create',
+ 'instance_ids': {
+ 'instances': [i['id'] for i in volumes],
+ }
+ }
+
+ return results
+
+
+def delete_volume(module, profitbricks):
+ """
+ Removes a volume.
+
+    This will remove a volume from a datacenter.
+
+ module : AnsibleModule object
+ profitbricks: authenticated profitbricks object.
+
+ Returns:
+ True if the volume was removed, false otherwise
+ """
+ if not isinstance(module.params.get('instance_ids'), list) or len(module.params.get('instance_ids')) < 1:
+        module.fail_json(msg='instance_ids should be a list of volume ids or names, aborting')
+
+ datacenter = module.params.get('datacenter')
+ changed = False
+ instance_ids = module.params.get('instance_ids')
+
+ # Locate UUID for Datacenter
+ if not (uuid_match.match(datacenter)):
+ datacenter_list = profitbricks.list_datacenters()
+ for d in datacenter_list['items']:
+ dc = profitbricks.get_datacenter(d['id'])
+ if datacenter == dc['properties']['name']:
+ datacenter = d['id']
+ break
+
+    for n in instance_ids:
+        if uuid_match.match(n):
+            _delete_volume(module, profitbricks, datacenter, n)
+            changed = True
+ else:
+ volumes = profitbricks.list_volumes(datacenter)
+ for v in volumes['items']:
+ if n == v['properties']['name']:
+ volume_id = v['id']
+ _delete_volume(module, profitbricks, datacenter, volume_id)
+ changed = True
+
+ return changed
+
+
+def _attach_volume(module, profitbricks, datacenter, volume):
+ """
+ Attaches a volume.
+
+ This will attach a volume to the server.
+
+ module : AnsibleModule object
+ profitbricks: authenticated profitbricks object.
+
+ Returns:
+ True if the volume was attached, false otherwise
+ """
+ server = module.params.get('server')
+
+ # Locate UUID for Server
+ if server:
+ if not (uuid_match.match(server)):
+ server_list = profitbricks.list_servers(datacenter)
+ for s in server_list['items']:
+ if server == s['properties']['name']:
+ server = s['id']
+ break
+
+ try:
+ return profitbricks.attach_volume(datacenter, server, volume)
+ except Exception as e:
+ module.fail_json(msg='failed to attach volume: %s' % str(e))
+
+
+def main():
+ module = AnsibleModule(
+ argument_spec=dict(
+ datacenter=dict(),
+ server=dict(),
+ name=dict(),
+ size=dict(type='int', default=10),
+ bus=dict(choices=['VIRTIO', 'IDE'], default='VIRTIO'),
+ image=dict(),
+ image_password=dict(default=None),
+ ssh_keys=dict(type='list', default=[]),
+ disk_type=dict(choices=['HDD', 'SSD'], default='HDD'),
+ licence_type=dict(default='UNKNOWN'),
+ count=dict(type='int', default=1),
+ auto_increment=dict(type='bool', default=True),
+ instance_ids=dict(type='list', default=[]),
+ subscription_user=dict(),
+ subscription_password=dict(),
+ wait=dict(type='bool', default=True),
+ wait_timeout=dict(type='int', default=600),
+ state=dict(default='present'),
+ )
+ )
+
+    if not HAS_PB_SDK:
+        module.fail_json(msg='profitbricks required for this module')
+
+    if not module.params.get('subscription_user'):
+ module.fail_json(msg='subscription_user parameter is required')
+ if not module.params.get('subscription_password'):
+ module.fail_json(msg='subscription_password parameter is required')
+
+ subscription_user = module.params.get('subscription_user')
+ subscription_password = module.params.get('subscription_password')
+
+ profitbricks = ProfitBricksService(
+ username=subscription_user,
+ password=subscription_password)
+
+ state = module.params.get('state')
+
+ if state == 'absent':
+ if not module.params.get('datacenter'):
+            module.fail_json(msg='datacenter parameter is required for removing volumes.')
+
+ try:
+ (changed) = delete_volume(module, profitbricks)
+ module.exit_json(changed=changed)
+ except Exception as e:
+ module.fail_json(msg='failed to set volume state: %s' % str(e))
+
+ elif state == 'present':
+ if not module.params.get('datacenter'):
+ module.fail_json(msg='datacenter parameter is required for new instance')
+ if not module.params.get('name'):
+ module.fail_json(msg='name parameter is required for new instance')
+
+ try:
+ (volume_dict_array) = create_volume(module, profitbricks)
+ module.exit_json(**volume_dict_array)
+ except Exception as e:
+ module.fail_json(msg='failed to set volume state: %s' % str(e))
+
+from ansible.module_utils.basic import *
+
+main()
diff --git a/lib/ansible/modules/extras/cloud/profitbricks/profitbricks_volume_attachments.py b/lib/ansible/modules/extras/cloud/profitbricks/profitbricks_volume_attachments.py
new file mode 100644
index 0000000000..fe87594fdd
--- /dev/null
+++ b/lib/ansible/modules/extras/cloud/profitbricks/profitbricks_volume_attachments.py
@@ -0,0 +1,262 @@
+#!/usr/bin/python
+# This file is part of Ansible
+#
+# Ansible is free software: you can redistribute it and/or modify
+# it under the terms of the GNU General Public License as published by
+# the Free Software Foundation, either version 3 of the License, or
+# (at your option) any later version.
+#
+# Ansible is distributed in the hope that it will be useful,
+# but WITHOUT ANY WARRANTY; without even the implied warranty of
+# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+# GNU General Public License for more details.
+#
+# You should have received a copy of the GNU General Public License
+# along with Ansible. If not, see <http://www.gnu.org/licenses/>.
+
+DOCUMENTATION = '''
+---
+module: profitbricks_volume_attachments
+short_description: Attach or detach a volume.
+description:
+  - Allows you to attach a volume to, or detach a volume from, a ProfitBricks server. This module has a dependency on profitbricks >= 1.0.0
+version_added: "2.0"
+options:
+ datacenter:
+ description:
+ - The datacenter in which to operate.
+ required: true
+ server:
+ description:
+      - The name of the server you wish to attach the volume to or detach it from.
+ required: true
+ volume:
+ description:
+ - The volume name or ID.
+ required: true
+  subscription_user:
+    description:
+      - The ProfitBricks username. Overrides the PB_SUBSCRIPTION_ID environment variable.
+    required: false
+  subscription_password:
+    description:
+      - The ProfitBricks password. Overrides the PB_PASSWORD environment variable.
+    required: false
+ wait:
+ description:
+ - wait for the operation to complete before returning
+ required: false
+ default: "yes"
+ choices: [ "yes", "no" ]
+ wait_timeout:
+ description:
+ - how long before wait gives up, in seconds
+ default: 600
+ state:
+ description:
+ - Indicate desired state of the resource
+ required: false
+ default: 'present'
+ choices: ["present", "absent"]
+
+requirements: [ "profitbricks" ]
+author: Matt Baldwin (baldwin@stackpointcloud.com)
+'''
+
+EXAMPLES = '''
+
+# Attach a Volume
+
+- profitbricks_volume_attachments:
+ datacenter: Tardis One
+ server: node002
+ volume: vol01
+ wait_timeout: 500
+ state: present
+
+# Detach a Volume
+
+- profitbricks_volume_attachments:
+ datacenter: Tardis One
+ server: node002
+ volume: vol01
+ wait_timeout: 500
+ state: absent
+
+'''
+
+import re
+import time
+
+HAS_PB_SDK = True
+
+try:
+ from profitbricks.client import ProfitBricksService, Volume
+except ImportError:
+ HAS_PB_SDK = False
+
+uuid_match = re.compile(
+ '[\w]{8}-[\w]{4}-[\w]{4}-[\w]{4}-[\w]{12}', re.I)
+
+
+def _wait_for_completion(profitbricks, promise, wait_timeout, msg):
+    if not promise:
+        return
+    wait_timeout = time.time() + wait_timeout
+    while wait_timeout > time.time():
+        time.sleep(5)
+        operation_result = profitbricks.get_request(
+            request_id=promise['requestId'],
+            status=True)
+
+        if operation_result['metadata']['status'] == "DONE":
+            return
+        elif operation_result['metadata']['status'] == "FAILED":
+            raise Exception(
+                'Request ' + msg + ' "' + str(
+                    promise['requestId']) + '" failed to complete.')
+
+ raise Exception(
+ 'Timed out waiting for async operation ' + msg + ' "' + str(
+ promise['requestId']
+ ) + '" to complete.')
+
+def attach_volume(module, profitbricks):
+ """
+ Attaches a volume.
+
+ This will attach a volume to the server.
+
+ module : AnsibleModule object
+ profitbricks: authenticated profitbricks object.
+
+ Returns:
+ True if the volume was attached, false otherwise
+ """
+ datacenter = module.params.get('datacenter')
+ server = module.params.get('server')
+ volume = module.params.get('volume')
+
+ # Locate UUID for Datacenter
+ if not (uuid_match.match(datacenter)):
+ datacenter_list = profitbricks.list_datacenters()
+ for d in datacenter_list['items']:
+ dc = profitbricks.get_datacenter(d['id'])
+ if datacenter == dc['properties']['name']:
+ datacenter = d['id']
+ break
+
+ # Locate UUID for Server
+ if not (uuid_match.match(server)):
+ server_list = profitbricks.list_servers(datacenter)
+ for s in server_list['items']:
+ if server == s['properties']['name']:
+ server = s['id']
+ break
+
+ # Locate UUID for Volume
+ if not (uuid_match.match(volume)):
+ volume_list = profitbricks.list_volumes(datacenter)
+ for v in volume_list['items']:
+ if volume == v['properties']['name']:
+ volume = v['id']
+ break
+
+ return profitbricks.attach_volume(datacenter, server, volume)
+
+def detach_volume(module, profitbricks):
+ """
+ Detaches a volume.
+
+ This will remove a volume from the server.
+
+ module : AnsibleModule object
+ profitbricks: authenticated profitbricks object.
+
+ Returns:
+ True if the volume was detached, false otherwise
+ """
+ datacenter = module.params.get('datacenter')
+ server = module.params.get('server')
+ volume = module.params.get('volume')
+
+ # Locate UUID for Datacenter
+ if not (uuid_match.match(datacenter)):
+ datacenter_list = profitbricks.list_datacenters()
+ for d in datacenter_list['items']:
+ dc = profitbricks.get_datacenter(d['id'])
+ if datacenter == dc['properties']['name']:
+ datacenter = d['id']
+ break
+
+ # Locate UUID for Server
+ if not (uuid_match.match(server)):
+ server_list = profitbricks.list_servers(datacenter)
+ for s in server_list['items']:
+ if server == s['properties']['name']:
+ server = s['id']
+ break
+
+ # Locate UUID for Volume
+ if not (uuid_match.match(volume)):
+ volume_list = profitbricks.list_volumes(datacenter)
+ for v in volume_list['items']:
+ if volume == v['properties']['name']:
+ volume = v['id']
+ break
+
+ return profitbricks.detach_volume(datacenter, server, volume)
+
+def main():
+ module = AnsibleModule(
+ argument_spec=dict(
+ datacenter=dict(),
+ server=dict(),
+ volume=dict(),
+ subscription_user=dict(),
+ subscription_password=dict(),
+ wait=dict(type='bool', default=True),
+ wait_timeout=dict(type='int', default=600),
+ state=dict(default='present'),
+ )
+ )
+
+ if not HAS_PB_SDK:
+ module.fail_json(msg='profitbricks required for this module')
+
+ if not module.params.get('subscription_user'):
+ module.fail_json(msg='subscription_user parameter is required')
+ if not module.params.get('subscription_password'):
+ module.fail_json(msg='subscription_password parameter is required')
+ if not module.params.get('datacenter'):
+ module.fail_json(msg='datacenter parameter is required')
+ if not module.params.get('server'):
+ module.fail_json(msg='server parameter is required')
+ if not module.params.get('volume'):
+ module.fail_json(msg='volume parameter is required')
+
+ subscription_user = module.params.get('subscription_user')
+ subscription_password = module.params.get('subscription_password')
+
+ profitbricks = ProfitBricksService(
+ username=subscription_user,
+ password=subscription_password)
+
+ state = module.params.get('state')
+
+ if state == 'absent':
+ try:
+ (changed) = detach_volume(module, profitbricks)
+ module.exit_json(changed=changed)
+ except Exception as e:
+ module.fail_json(msg='failed to set volume_attach state: %s' % str(e))
+ elif state == 'present':
+ try:
+ attach_volume(module, profitbricks)
+ module.exit_json()
+ except Exception as e:
+ module.fail_json(msg='failed to set volume_attach state: %s' % str(e))
+
+from ansible.module_utils.basic import *
+
+main() \ No newline at end of file
diff --git a/lib/ansible/modules/extras/cloud/rackspace/__init__.py b/lib/ansible/modules/extras/cloud/rackspace/__init__.py
new file mode 100644
index 0000000000..e69de29bb2
--- /dev/null
+++ b/lib/ansible/modules/extras/cloud/rackspace/__init__.py
diff --git a/lib/ansible/modules/extras/cloud/rackspace/rax_clb_ssl.py b/lib/ansible/modules/extras/cloud/rackspace/rax_clb_ssl.py
new file mode 100644
index 0000000000..2013b8c4d8
--- /dev/null
+++ b/lib/ansible/modules/extras/cloud/rackspace/rax_clb_ssl.py
@@ -0,0 +1,269 @@
+#!/usr/bin/python
+# This file is part of Ansible
+#
+# Ansible is free software: you can redistribute it and/or modify
+# it under the terms of the GNU General Public License as published by
+# the Free Software Foundation, either version 3 of the License, or
+# (at your option) any later version.
+#
+# Ansible is distributed in the hope that it will be useful,
+# but WITHOUT ANY WARRANTY; without even the implied warranty of
+# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+# GNU General Public License for more details.
+#
+# You should have received a copy of the GNU General Public License
+# along with Ansible. If not, see <http://www.gnu.org/licenses/>.
+
+# This is a DOCUMENTATION stub specific to this module, it extends
+# a documentation fragment located in ansible.utils.module_docs_fragments
+DOCUMENTATION='''
+module: rax_clb_ssl
+short_description: Manage SSL termination for a Rackspace Cloud Load Balancer.
+description:
+- Set up, reconfigure, or remove SSL termination for an existing load balancer.
+version_added: "2.0"
+options:
+ loadbalancer:
+ description:
+ - Name or ID of the load balancer on which to manage SSL termination.
+ required: true
+ state:
+ description:
+ - If set to "present", SSL termination will be added to this load balancer.
+ - If "absent", SSL termination will be removed instead.
+ choices:
+ - present
+ - absent
+ default: present
+ enabled:
+ description:
+ - If set to "false", temporarily disable SSL termination without discarding
+ - existing credentials.
+ default: true
+ private_key:
+ description:
+ - The private SSL key as a string in PEM format.
+ certificate:
+ description:
+ - The public SSL certificates as a string in PEM format.
+ intermediate_certificate:
+ description:
+ - One or more intermediate certificate authorities as a string in PEM
+ - format, concatenated into a single string.
+ secure_port:
+ description:
+ - The port on which to listen for secure traffic.
+ default: 443
+ secure_traffic_only:
+ description:
+ - If "true", the load balancer will *only* accept secure traffic.
+ default: false
+ https_redirect:
+ description:
+ - If "true", the load balancer will redirect HTTP traffic to HTTPS.
+ - Requires "secure_traffic_only" to be true. Incurs an implicit wait if SSL
+ - termination is also applied or removed.
+ wait:
+ description:
+ - Wait for the balancer to be in state "running" before returning.
+ default: false
+ wait_timeout:
+ description:
+ - How long before "wait" gives up, in seconds.
+ default: 300
+author: Ash Wilson
+extends_documentation_fragment: rackspace
+'''
+
+EXAMPLES = '''
+- name: Enable SSL termination on a load balancer
+ rax_clb_ssl:
+ loadbalancer: the_loadbalancer
+ state: present
+ private_key: "{{ lookup('file', 'credentials/server.key' ) }}"
+ certificate: "{{ lookup('file', 'credentials/server.crt' ) }}"
+ intermediate_certificate: "{{ lookup('file', 'credentials/trust-chain.crt') }}"
+ secure_traffic_only: true
+ wait: true
+
+- name: Disable SSL termination
+ rax_clb_ssl:
+ loadbalancer: "{{ registered_lb.balancer.id }}"
+ state: absent
+ wait: true
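+
+# Illustrative: terminate SSL and force an HTTPS redirect in one task. Per
+# the option docs above, https_redirect requires secure_traffic_only.
+- name: Terminate SSL and force HTTPS
+ rax_clb_ssl:
+ loadbalancer: the_loadbalancer
+ state: present
+ private_key: "{{ lookup('file', 'credentials/server.key') }}"
+ certificate: "{{ lookup('file', 'credentials/server.crt') }}"
+ secure_traffic_only: true
+ https_redirect: true
+ wait: true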
+'''
+
+try:
+ import pyrax
+ HAS_PYRAX = True
+except ImportError:
+ HAS_PYRAX = False
+
+def cloud_load_balancer_ssl(module, loadbalancer, state, enabled, private_key,
+ certificate, intermediate_certificate, secure_port,
+ secure_traffic_only, https_redirect,
+ wait, wait_timeout):
+ # Validate arguments.
+
+ if state == 'present':
+ if not private_key:
+ module.fail_json(msg="private_key must be provided.")
+ else:
+ private_key = private_key.strip()
+
+ if not certificate:
+ module.fail_json(msg="certificate must be provided.")
+ else:
+ certificate = certificate.strip()
+
+ attempts = wait_timeout / 5
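+ # e.g. the default wait_timeout of 300 yields 60 polling attempts at the
+ # 5-second interval passed to wait_for_build() below.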
+
+ # Locate the load balancer.
+
+ balancer = rax_find_loadbalancer(module, pyrax, loadbalancer)
+ existing_ssl = balancer.get_ssl_termination()
+
+ changed = False
+
+ if state == 'present':
+ # Apply or reconfigure SSL termination on the load balancer.
+ ssl_attrs = dict(
+ securePort=secure_port,
+ privatekey=private_key,
+ certificate=certificate,
+ intermediateCertificate=intermediate_certificate,
+ enabled=enabled,
+ secureTrafficOnly=secure_traffic_only
+ )
+
+ needs_change = False
+
+ if existing_ssl:
+ for ssl_attr, value in ssl_attrs.iteritems():
+ if ssl_attr == 'privatekey':
+ # The private key is not included in get_ssl_termination's
+ # output (as it shouldn't be). Also, if you're changing the
+ # private key, you'll also be changing the certificate,
+ # so we don't lose anything by not checking it.
+ continue
+
+ if value is not None and existing_ssl.get(ssl_attr) != value:
+ needs_change = True
+ else:
+ needs_change = True
+
+ if needs_change:
+ try:
+ balancer.add_ssl_termination(**ssl_attrs)
+ except pyrax.exceptions.PyraxException as e:
+ module.fail_json(msg='%s' % e.message)
+ changed = True
+ elif state == 'absent':
+ # Remove SSL termination if it's already configured.
+ if existing_ssl:
+ try:
+ balancer.delete_ssl_termination()
+ except pyrax.exceptions.PyraxException as e:
+ module.fail_json(msg='%s' % e.message)
+ changed = True
+
+ if https_redirect is not None and balancer.httpsRedirect != https_redirect:
+ if changed:
+ # This wait is unavoidable because load balancers are immutable
+ # while the SSL termination changes above are being applied.
+ pyrax.utils.wait_for_build(balancer, interval=5, attempts=attempts)
+
+ try:
+ balancer.update(httpsRedirect=https_redirect)
+ except pyrax.exceptions.PyraxException as e:
+ module.fail_json(msg='%s' % e.message)
+ changed = True
+
+ if changed and wait:
+ pyrax.utils.wait_for_build(balancer, interval=5, attempts=attempts)
+
+ balancer.get()
+ new_ssl_termination = balancer.get_ssl_termination()
+
+ # Intentionally omit the private key from the module output, so you don't
+ # accidentally echo it with `ansible-playbook -v` or `debug`, and the
+ # certificate, which is just long. Convert other attributes to snake_case
+ # and include https_redirect at the top-level.
+ if new_ssl_termination:
+ new_ssl = dict(
+ enabled=new_ssl_termination['enabled'],
+ secure_port=new_ssl_termination['securePort'],
+ secure_traffic_only=new_ssl_termination['secureTrafficOnly']
+ )
+ else:
+ new_ssl = None
+
+ result = dict(
+ changed=changed,
+ https_redirect=balancer.httpsRedirect,
+ ssl_termination=new_ssl,
+ balancer=rax_to_dict(balancer, 'clb')
+ )
+ success = True
+
+ if balancer.status == 'ERROR':
+ result['msg'] = '%s failed to build' % balancer.id
+ success = False
+ elif wait and balancer.status not in ('ACTIVE', 'ERROR'):
+ result['msg'] = 'Timeout waiting on %s' % balancer.id
+ success = False
+
+ if success:
+ module.exit_json(**result)
+ else:
+ module.fail_json(**result)
+
+def main():
+ argument_spec = rax_argument_spec()
+ argument_spec.update(dict(
+ loadbalancer=dict(required=True),
+ state=dict(default='present', choices=['present', 'absent']),
+ enabled=dict(type='bool', default=True),
+ private_key=dict(),
+ certificate=dict(),
+ intermediate_certificate=dict(),
+ secure_port=dict(type='int', default=443),
+ secure_traffic_only=dict(type='bool', default=False),
+ https_redirect=dict(type='bool'),
+ wait=dict(type='bool', default=False),
+ wait_timeout=dict(type='int', default=300)
+ ))
+
+ module = AnsibleModule(
+ argument_spec=argument_spec,
+ required_together=rax_required_together(),
+ )
+
+ if not HAS_PYRAX:
+ module.fail_json(msg='pyrax is required for this module.')
+
+ loadbalancer = module.params.get('loadbalancer')
+ state = module.params.get('state')
+ enabled = module.boolean(module.params.get('enabled'))
+ private_key = module.params.get('private_key')
+ certificate = module.params.get('certificate')
+ intermediate_certificate = module.params.get('intermediate_certificate')
+ secure_port = module.params.get('secure_port')
+ secure_traffic_only = module.boolean(module.params.get('secure_traffic_only'))
+ https_redirect = module.boolean(module.params.get('https_redirect'))
+ wait = module.boolean(module.params.get('wait'))
+ wait_timeout = module.params.get('wait_timeout')
+
+ setup_rax_module(module, pyrax)
+
+ cloud_load_balancer_ssl(
+ module, loadbalancer, state, enabled, private_key, certificate,
+ intermediate_certificate, secure_port, secure_traffic_only,
+ https_redirect, wait, wait_timeout
+ )
+
+from ansible.module_utils.basic import *
+from ansible.module_utils.rax import *
+
+main()
diff --git a/lib/ansible/modules/extras/cloud/rackspace/rax_mon_alarm.py b/lib/ansible/modules/extras/cloud/rackspace/rax_mon_alarm.py
new file mode 100644
index 0000000000..a3f29e22f5
--- /dev/null
+++ b/lib/ansible/modules/extras/cloud/rackspace/rax_mon_alarm.py
@@ -0,0 +1,227 @@
+#!/usr/bin/python
+# This file is part of Ansible
+#
+# Ansible is free software: you can redistribute it and/or modify
+# it under the terms of the GNU General Public License as published by
+# the Free Software Foundation, either version 3 of the License, or
+# (at your option) any later version.
+#
+# Ansible is distributed in the hope that it will be useful,
+# but WITHOUT ANY WARRANTY; without even the implied warranty of
+# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+# GNU General Public License for more details.
+#
+# You should have received a copy of the GNU General Public License
+# along with Ansible. If not, see <http://www.gnu.org/licenses/>.
+
+# This is a DOCUMENTATION stub specific to this module, it extends
+# a documentation fragment located in ansible.utils.module_docs_fragments
+DOCUMENTATION = '''
+---
+module: rax_mon_alarm
+short_description: Create or delete a Rackspace Cloud Monitoring alarm.
+description:
+- Create or delete a Rackspace Cloud Monitoring alarm that associates an
+ existing rax_mon_entity, rax_mon_check, and rax_mon_notification_plan with
+ criteria that specify what conditions will trigger which levels of
+ notifications. Rackspace monitoring module flow | rax_mon_entity ->
+ rax_mon_check -> rax_mon_notification -> rax_mon_notification_plan ->
+ *rax_mon_alarm*
+version_added: "2.0"
+options:
+ state:
+ description:
+ - Ensure that the alarm with this C(label) exists or does not exist.
+ choices: [ "present", "absent" ]
+ required: false
+ default: present
+ label:
+ description:
+ - Friendly name for this alarm, used to achieve idempotence. Must be a String
+ between 1 and 255 characters long.
+ required: true
+ entity_id:
+ description:
+ - ID of the entity this alarm is attached to. May be acquired by registering
+ the value of a rax_mon_entity task.
+ required: true
+ check_id:
+ description:
+ - ID of the check that should be alerted on. May be acquired by registering
+ the value of a rax_mon_check task.
+ required: true
+ notification_plan_id:
+ description:
+ - ID of the notification plan to trigger if this alarm fires. May be acquired
+ by registering the value of a rax_mon_notification_plan task.
+ required: true
+ criteria:
+ description:
+ - Alarm DSL that describes alerting conditions and their output states. Must
+ be between 1 and 16384 characters long. See
+ http://docs.rackspace.com/cm/api/v1.0/cm-devguide/content/alerts-language.html
+ for a reference on the alerting language.
+ disabled:
+ description:
+ - If yes, create this alarm, but leave it in an inactive state. Defaults to
+ no.
+ choices: [ "yes", "no" ]
+ metadata:
+ description:
+ - Arbitrary key/value pairs to accompany the alarm. Must be a hash of String
+ keys and values between 1 and 255 characters long.
+author: Ash Wilson
+extends_documentation_fragment: rackspace.openstack
+'''
+
+EXAMPLES = '''
+- name: Alarm example
+ gather_facts: False
+ hosts: local
+ connection: local
+ tasks:
+ - name: Ensure that a specific alarm exists.
+ rax_mon_alarm:
+ credentials: ~/.rax_pub
+ state: present
+ label: uhoh
+ entity_id: "{{ the_entity['entity']['id'] }}"
+ check_id: "{{ the_check['check']['id'] }}"
+ notification_plan_id: "{{ defcon1['notification_plan']['id'] }}"
+ criteria: >
+ if (rate(metric['average']) > 10) {
+ return new AlarmStatus(WARNING);
+ }
+ return new AlarmStatus(OK);
+ register: the_alarm
+'''
+
+try:
+ import pyrax
+ HAS_PYRAX = True
+except ImportError:
+ HAS_PYRAX = False
+
+def alarm(module, state, label, entity_id, check_id, notification_plan_id, criteria,
+ disabled, metadata):
+
+ if len(label) < 1 or len(label) > 255:
+ module.fail_json(msg='label must be between 1 and 255 characters long')
+
+ if criteria and (len(criteria) < 1 or len(criteria) > 16384):
+ module.fail_json(msg='criteria must be between 1 and 16384 characters long')
+
+ # Coerce attributes.
+
+ changed = False
+ alarm = None
+
+ cm = pyrax.cloud_monitoring
+ if not cm:
+ module.fail_json(msg='Failed to instantiate client. This typically '
+ 'indicates an invalid region or an incorrectly '
+ 'capitalized region name.')
+
+ existing = [a for a in cm.list_alarms(entity_id) if a.label == label]
+
+ if existing:
+ alarm = existing[0]
+
+ if state == 'present':
+ should_create = False
+ should_update = False
+ should_delete = False
+
+ if len(existing) > 1:
+ module.fail_json(msg='%s existing alarms have the label %s.' %
+ (len(existing), label))
+
+ if alarm:
+ if check_id != alarm.check_id or notification_plan_id != alarm.notification_plan_id:
+ should_delete = should_create = True
+
+ should_update = (disabled and disabled != alarm.disabled) or \
+ (metadata and metadata != alarm.metadata) or \
+ (criteria and criteria != alarm.criteria)
+
+ if should_update and not should_delete:
+ cm.update_alarm(entity=entity_id, alarm=alarm,
+ criteria=criteria, disabled=disabled,
+ label=label, metadata=metadata)
+ changed = True
+
+ if should_delete:
+ alarm.delete()
+ changed = True
+ else:
+ should_create = True
+
+ if should_create:
+ alarm = cm.create_alarm(entity=entity_id, check=check_id,
+ notification_plan=notification_plan_id,
+ criteria=criteria, disabled=disabled, label=label,
+ metadata=metadata)
+ changed = True
+ else:
+ for a in existing:
+ a.delete()
+ changed = True
+
+ if alarm:
+ alarm_dict = {
+ "id": alarm.id,
+ "label": alarm.label,
+ "check_id": alarm.check_id,
+ "notification_plan_id": alarm.notification_plan_id,
+ "criteria": alarm.criteria,
+ "disabled": alarm.disabled,
+ "metadata": alarm.metadata
+ }
+ module.exit_json(changed=changed, alarm=alarm_dict)
+ else:
+ module.exit_json(changed=changed)
+
+def main():
+ argument_spec = rax_argument_spec()
+ argument_spec.update(
+ dict(
+ state=dict(default='present', choices=['present', 'absent']),
+ label=dict(required=True),
+ entity_id=dict(required=True),
+ check_id=dict(required=True),
+ notification_plan_id=dict(required=True),
+ criteria=dict(),
+ disabled=dict(type='bool', default=False),
+ metadata=dict(type='dict')
+ )
+ )
+
+ module = AnsibleModule(
+ argument_spec=argument_spec,
+ required_together=rax_required_together()
+ )
+
+ if not HAS_PYRAX:
+ module.fail_json(msg='pyrax is required for this module')
+
+ state = module.params.get('state')
+ label = module.params.get('label')
+ entity_id = module.params.get('entity_id')
+ check_id = module.params.get('check_id')
+ notification_plan_id = module.params.get('notification_plan_id')
+ criteria = module.params.get('criteria')
+ disabled = module.boolean(module.params.get('disabled'))
+ metadata = module.params.get('metadata')
+
+ setup_rax_module(module, pyrax)
+
+ alarm(module, state, label, entity_id, check_id, notification_plan_id,
+ criteria, disabled, metadata)
+
+
+# Import module snippets
+from ansible.module_utils.basic import *
+from ansible.module_utils.rax import *
+
+# Invoke the module.
+main()
diff --git a/lib/ansible/modules/extras/cloud/rackspace/rax_mon_check.py b/lib/ansible/modules/extras/cloud/rackspace/rax_mon_check.py
new file mode 100644
index 0000000000..14b86864e2
--- /dev/null
+++ b/lib/ansible/modules/extras/cloud/rackspace/rax_mon_check.py
@@ -0,0 +1,313 @@
+#!/usr/bin/python
+# This file is part of Ansible
+#
+# Ansible is free software: you can redistribute it and/or modify
+# it under the terms of the GNU General Public License as published by
+# the Free Software Foundation, either version 3 of the License, or
+# (at your option) any later version.
+#
+# Ansible is distributed in the hope that it will be useful,
+# but WITHOUT ANY WARRANTY; without even the implied warranty of
+# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+# GNU General Public License for more details.
+#
+# You should have received a copy of the GNU General Public License
+# along with Ansible. If not, see <http://www.gnu.org/licenses/>.
+
+# This is a DOCUMENTATION stub specific to this module, it extends
+# a documentation fragment located in ansible.utils.module_docs_fragments
+DOCUMENTATION = '''
+---
+module: rax_mon_check
+short_description: Create or delete a Rackspace Cloud Monitoring check for an
+ existing entity.
+description:
+- Create or delete a Rackspace Cloud Monitoring check associated with an
+ existing rax_mon_entity. A check is a specific test or measurement that is
+ performed, possibly from different monitoring zones, on the systems you
+ monitor. Rackspace monitoring module flow | rax_mon_entity ->
+ *rax_mon_check* -> rax_mon_notification -> rax_mon_notification_plan ->
+ rax_mon_alarm
+version_added: "2.0"
+options:
+ state:
+ description:
+ - Ensure that a check with this C(label) exists or does not exist.
+ choices: ["present", "absent"]
+ entity_id:
+ description:
+ - ID of the rax_mon_entity to target with this check.
+ required: true
+ label:
+ description:
+ - Defines a label for this check, between 1 and 64 characters long.
+ required: true
+ check_type:
+ description:
+ - The type of check to create. C(remote.) checks may be created on any
+ rax_mon_entity. C(agent.) checks may only be created on rax_mon_entities
+ that have a non-null C(agent_id).
+ choices:
+ - remote.dns
+ - remote.ftp-banner
+ - remote.http
+ - remote.imap-banner
+ - remote.mssql-banner
+ - remote.mysql-banner
+ - remote.ping
+ - remote.pop3-banner
+ - remote.postgresql-banner
+ - remote.smtp-banner
+ - remote.smtp
+ - remote.ssh
+ - remote.tcp
+ - remote.telnet-banner
+ - agent.filesystem
+ - agent.memory
+ - agent.load_average
+ - agent.cpu
+ - agent.disk
+ - agent.network
+ - agent.plugin
+ required: true
+ monitoring_zones_poll:
+ description:
+ - Comma-separated list of the names of the monitoring zones the check should
+ run from. Available monitoring zones include mzdfw, mzhkg, mziad, mzlon,
+ mzord and mzsyd. Required for remote.* checks; prohibited for agent.* checks.
+ target_hostname:
+ description:
+ - One of C(target_hostname) and C(target_alias) is required for remote.* checks,
+ but prohibited for agent.* checks. The hostname this check should target.
+ Must be a valid IPv4 address, IPv6 address, or FQDN.
+ target_alias:
+ description:
+ - One of C(target_alias) and C(target_hostname) is required for remote.* checks,
+ but prohibited for agent.* checks. Use the corresponding key in the entity's
+ C(ip_addresses) hash to resolve an IP address to target.
+ details:
+ description:
+ - Additional details specific to the check type. Must be a hash of strings
+ between 1 and 255 characters long, or an array or object containing 0 to
+ 256 items.
+ disabled:
+ description:
+ - If "yes", ensure the check is created, but don't actually use it yet.
+ choices: [ "yes", "no" ]
+ metadata:
+ description:
+ - Hash of arbitrary key-value pairs to accompany this check if it fires.
+ Keys and values must be strings between 1 and 255 characters long.
+ period:
+ description:
+ - The number of seconds between each time the check is performed. Must be
+ greater than the minimum period set on your account.
+ timeout:
+ description:
+ - The number of seconds this check will wait when attempting to collect
+ results. Must be less than the period.
+author: Ash Wilson
+extends_documentation_fragment: rackspace.openstack
+'''
+
+EXAMPLES = '''
+- name: Create a monitoring check
+ gather_facts: False
+ hosts: local
+ connection: local
+ tasks:
+ - name: Associate a check with an existing entity.
+ rax_mon_check:
+ credentials: ~/.rax_pub
+ state: present
+ entity_id: "{{ the_entity['entity']['id'] }}"
+ label: the_check
+ check_type: remote.ping
+ monitoring_zones_poll: mziad,mzord,mzdfw
+ details:
+ count: 10
+ metadata:
+ hurf: durf
+ register: the_check
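+
+ # Illustrative agent-based check; per the option docs above, agent.*
+ # checks require an entity with a non-null agent_id and take neither
+ # monitoring zones nor a target:
+ - name: Watch CPU usage on the same entity.
+ rax_mon_check:
+ credentials: ~/.rax_pub
+ state: present
+ entity_id: "{{ the_entity['entity']['id'] }}"
+ label: the_cpu_check
+ check_type: agent.cpu
+ register: the_cpu_check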
+'''
+
+try:
+ import pyrax
+ HAS_PYRAX = True
+except ImportError:
+ HAS_PYRAX = False
+
+def cloud_check(module, state, entity_id, label, check_type,
+ monitoring_zones_poll, target_hostname, target_alias, details,
+ disabled, metadata, period, timeout):
+
+ # Coerce attributes.
+
+ if monitoring_zones_poll and not isinstance(monitoring_zones_poll, list):
+ monitoring_zones_poll = [monitoring_zones_poll]
+
+ if period:
+ period = int(period)
+
+ if timeout:
+ timeout = int(timeout)
+
+ changed = False
+ check = None
+
+ cm = pyrax.cloud_monitoring
+ if not cm:
+ module.fail_json(msg='Failed to instantiate client. This typically '
+ 'indicates an invalid region or an incorrectly '
+ 'capitalized region name.')
+
+ entity = cm.get_entity(entity_id)
+ if not entity:
+ module.fail_json(msg='Failed to instantiate entity. "%s" may not be'
+ ' a valid entity id.' % entity_id)
+
+ existing = [e for e in entity.list_checks() if e.label == label]
+
+ if existing:
+ check = existing[0]
+
+ if state == 'present':
+ if len(existing) > 1:
+ module.fail_json(msg='%s existing checks have a label of %s.' %
+ (len(existing), label))
+
+ should_delete = False
+ should_create = False
+ should_update = False
+
+ if check:
+ # Details may include keys set to default values that are not
+ # included in the initial creation.
+ #
+ # Only force a recreation of the check if one of the *specified*
+ # keys is missing or has a different value.
+ if details:
+ for (key, value) in details.iteritems():
+ if key not in check.details:
+ should_delete = should_create = True
+ elif value != check.details[key]:
+ should_delete = should_create = True
+
+ should_update = label != check.label or \
+ (target_hostname and target_hostname != check.target_hostname) or \
+ (target_alias and target_alias != check.target_alias) or \
+ (disabled != check.disabled) or \
+ (metadata and metadata != check.metadata) or \
+ (period and period != check.period) or \
+ (timeout and timeout != check.timeout) or \
+ (monitoring_zones_poll and monitoring_zones_poll != check.monitoring_zones_poll)
+
+ if should_update and not should_delete:
+ check.update(label=label,
+ disabled=disabled,
+ metadata=metadata,
+ monitoring_zones_poll=monitoring_zones_poll,
+ timeout=timeout,
+ period=period,
+ target_alias=target_alias,
+ target_hostname=target_hostname)
+ changed = True
+ else:
+ # The check doesn't exist yet.
+ should_create = True
+
+ if should_delete:
+ check.delete()
+
+ if should_create:
+ check = cm.create_check(entity,
+ label=label,
+ check_type=check_type,
+ target_hostname=target_hostname,
+ target_alias=target_alias,
+ monitoring_zones_poll=monitoring_zones_poll,
+ details=details,
+ disabled=disabled,
+ metadata=metadata,
+ period=period,
+ timeout=timeout)
+ changed = True
+ elif state == 'absent':
+ if check:
+ check.delete()
+ changed = True
+ else:
+ module.fail_json(msg='state must be either present or absent.')
+
+ if check:
+ check_dict = {
+ "id": check.id,
+ "label": check.label,
+ "type": check.type,
+ "target_hostname": check.target_hostname,
+ "target_alias": check.target_alias,
+ "monitoring_zones_poll": check.monitoring_zones_poll,
+ "details": check.details,
+ "disabled": check.disabled,
+ "metadata": check.metadata,
+ "period": check.period,
+ "timeout": check.timeout
+ }
+ module.exit_json(changed=changed, check=check_dict)
+ else:
+ module.exit_json(changed=changed)
+
+def main():
+ argument_spec = rax_argument_spec()
+ argument_spec.update(
+ dict(
+ entity_id=dict(required=True),
+ label=dict(required=True),
+ check_type=dict(required=True),
+ monitoring_zones_poll=dict(),
+ target_hostname=dict(),
+ target_alias=dict(),
+ details=dict(type='dict', default={}),
+ disabled=dict(type='bool', default=False),
+ metadata=dict(type='dict', default={}),
+ period=dict(type='int'),
+ timeout=dict(type='int'),
+ state=dict(default='present', choices=['present', 'absent'])
+ )
+ )
+
+ module = AnsibleModule(
+ argument_spec=argument_spec,
+ required_together=rax_required_together()
+ )
+
+ if not HAS_PYRAX:
+ module.fail_json(msg='pyrax is required for this module')
+
+ entity_id = module.params.get('entity_id')
+ label = module.params.get('label')
+ check_type = module.params.get('check_type')
+ monitoring_zones_poll = module.params.get('monitoring_zones_poll')
+ target_hostname = module.params.get('target_hostname')
+ target_alias = module.params.get('target_alias')
+ details = module.params.get('details')
+ disabled = module.boolean(module.params.get('disabled'))
+ metadata = module.params.get('metadata')
+ period = module.params.get('period')
+ timeout = module.params.get('timeout')
+
+ state = module.params.get('state')
+
+ setup_rax_module(module, pyrax)
+
+ cloud_check(module, state, entity_id, label, check_type,
+ monitoring_zones_poll, target_hostname, target_alias, details,
+ disabled, metadata, period, timeout)
+
+
+# Import module snippets
+from ansible.module_utils.basic import *
+from ansible.module_utils.rax import *
+
+# Invoke the module.
+main()
diff --git a/lib/ansible/modules/extras/cloud/rackspace/rax_mon_entity.py b/lib/ansible/modules/extras/cloud/rackspace/rax_mon_entity.py
new file mode 100644
index 0000000000..7369aaafa3
--- /dev/null
+++ b/lib/ansible/modules/extras/cloud/rackspace/rax_mon_entity.py
@@ -0,0 +1,192 @@
+#!/usr/bin/python
+# This file is part of Ansible
+#
+# Ansible is free software: you can redistribute it and/or modify
+# it under the terms of the GNU General Public License as published by
+# the Free Software Foundation, either version 3 of the License, or
+# (at your option) any later version.
+#
+# Ansible is distributed in the hope that it will be useful,
+# but WITHOUT ANY WARRANTY; without even the implied warranty of
+# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+# GNU General Public License for more details.
+#
+# You should have received a copy of the GNU General Public License
+# along with Ansible. If not, see <http://www.gnu.org/licenses/>.
+
+# This is a DOCUMENTATION stub specific to this module, it extends
+# a documentation fragment located in ansible.utils.module_docs_fragments
+DOCUMENTATION = '''
+---
+module: rax_mon_entity
+short_description: Create or delete a Rackspace Cloud Monitoring entity
+description:
+- Create or delete a Rackspace Cloud Monitoring entity, which represents a device
+ to monitor. Entities associate checks and alarms with a target system and
+ provide a convenient, centralized place to store IP addresses. Rackspace
+ monitoring module flow | *rax_mon_entity* -> rax_mon_check ->
+ rax_mon_notification -> rax_mon_notification_plan -> rax_mon_alarm
+version_added: "2.0"
+options:
+ label:
+ description:
+ - Defines a name for this entity. Must be a non-empty string between 1 and
+ 255 characters long.
+ required: true
+ state:
+ description:
+ - Ensure that an entity with this C(name) exists or does not exist.
+ choices: ["present", "absent"]
+ agent_id:
+ description:
+ - Rackspace monitoring agent on the target device to which this entity is
+ bound. Necessary to collect C(agent.) rax_mon_checks against this entity.
+ named_ip_addresses:
+ description:
+ - Hash of IP addresses that may be referenced by name by rax_mon_checks
+ added to this entity. Must be a dictionary with keys that are names
+ between 1 and 64 characters long, and values that are valid IPv4 or IPv6
+ addresses.
+ metadata:
+ description:
+ - Hash of arbitrary C(name), C(value) pairs that are passed to associated
+ rax_mon_alarms. Names and values must all be between 1 and 255 characters
+ long.
+author: Ash Wilson
+extends_documentation_fragment: rackspace.openstack
+'''
+
+EXAMPLES = '''
+- name: Entity example
+ gather_facts: False
+ hosts: local
+ connection: local
+ tasks:
+ - name: Ensure an entity exists
+ rax_mon_entity:
+ credentials: ~/.rax_pub
+ state: present
+ label: my_entity
+ named_ip_addresses:
+ web_box: 192.0.2.4
+ db_box: 192.0.2.5
+ metadata:
+ hurf: durf
+ register: the_entity
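+
+ # Illustrative: bind a monitoring agent so that agent.* checks can
+ # target this entity ('my_agent_token' is a placeholder agent id):
+ - name: Ensure an agent-bound entity exists
+ rax_mon_entity:
+ credentials: ~/.rax_pub
+ state: present
+ label: my_agent_entity
+ agent_id: "{{ my_agent_token }}"
+ register: the_agent_entity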
+'''
+
+try:
+ import pyrax
+ HAS_PYRAX = True
+except ImportError:
+ HAS_PYRAX = False
+
+def cloud_monitoring(module, state, label, agent_id, named_ip_addresses,
+ metadata):
+
+ if len(label) < 1 or len(label) > 255:
+ module.fail_json(msg='label must be between 1 and 255 characters long')
+
+ changed = False
+
+ cm = pyrax.cloud_monitoring
+ if not cm:
+ module.fail_json(msg='Failed to instantiate client. This typically '
+ 'indicates an invalid region or an incorrectly '
+ 'capitalized region name.')
+
+ existing = []
+ for entity in cm.list_entities():
+ if label == entity.label:
+ existing.append(entity)
+
+ entity = None
+
+ if existing:
+ entity = existing[0]
+
+ if state == 'present':
+ should_update = False
+ should_delete = False
+ should_create = False
+
+ if len(existing) > 1:
+ module.fail_json(msg='%s existing entities have the label %s.' %
+ (len(existing), label))
+
+ if entity:
+ if named_ip_addresses and named_ip_addresses != entity.ip_addresses:
+ should_delete = should_create = True
+
+ # Change an existing Entity, unless there's nothing to do.
+ should_update = (agent_id and agent_id != entity.agent_id) or \
+ (metadata and metadata != entity.metadata)
+
+ if should_update and not should_delete:
+ entity.update(agent_id, metadata)
+ changed = True
+
+ if should_delete:
+ entity.delete()
+ else:
+ should_create = True
+
+ if should_create:
+ # Create a new Entity.
+ entity = cm.create_entity(label=label, agent=agent_id,
+ ip_addresses=named_ip_addresses,
+ metadata=metadata)
+ changed = True
+ else:
+ # Delete the existing Entities.
+ for e in existing:
+ e.delete()
+ changed = True
+
+ if entity:
+ entity_dict = {
+ "id": entity.id,
+ "name": entity.name,
+ "agent_id": entity.agent_id,
+ }
+ module.exit_json(changed=changed, entity=entity_dict)
+ else:
+ module.exit_json(changed=changed)
+
+def main():
+ argument_spec = rax_argument_spec()
+ argument_spec.update(
+ dict(
+ state=dict(default='present', choices=['present', 'absent']),
+ label=dict(required=True),
+ agent_id=dict(),
+ named_ip_addresses=dict(type='dict', default={}),
+ metadata=dict(type='dict', default={})
+ )
+ )
+
+ module = AnsibleModule(
+ argument_spec=argument_spec,
+ required_together=rax_required_together()
+ )
+
+ if not HAS_PYRAX:
+ module.fail_json(msg='pyrax is required for this module')
+
+ state = module.params.get('state')
+
+ label = module.params.get('label')
+ agent_id = module.params.get('agent_id')
+ named_ip_addresses = module.params.get('named_ip_addresses')
+ metadata = module.params.get('metadata')
+
+ setup_rax_module(module, pyrax)
+
+ cloud_monitoring(module, state, label, agent_id, named_ip_addresses, metadata)
+
+# Import module snippets
+from ansible.module_utils.basic import *
+from ansible.module_utils.rax import *
+
+# Invoke the module.
+main()
diff --git a/lib/ansible/modules/extras/cloud/rackspace/rax_mon_notification.py b/lib/ansible/modules/extras/cloud/rackspace/rax_mon_notification.py
new file mode 100644
index 0000000000..d7b6692dc2
--- /dev/null
+++ b/lib/ansible/modules/extras/cloud/rackspace/rax_mon_notification.py
@@ -0,0 +1,176 @@
+#!/usr/bin/python
+# This file is part of Ansible
+#
+# Ansible is free software: you can redistribute it and/or modify
+# it under the terms of the GNU General Public License as published by
+# the Free Software Foundation, either version 3 of the License, or
+# (at your option) any later version.
+#
+# Ansible is distributed in the hope that it will be useful,
+# but WITHOUT ANY WARRANTY; without even the implied warranty of
+# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+# GNU General Public License for more details.
+#
+# You should have received a copy of the GNU General Public License
+# along with Ansible. If not, see <http://www.gnu.org/licenses/>.
+
+# This is a DOCUMENTATION stub specific to this module, it extends
+# a documentation fragment located in ansible.utils.module_docs_fragments
+DOCUMENTATION = '''
+---
+module: rax_mon_notification
+short_description: Create or delete a Rackspace Cloud Monitoring notification.
+description:
+- Create or delete a Rackspace Cloud Monitoring notification that specifies a
+ channel that can be used to communicate alarms, such as email, webhooks, or
+ PagerDuty. Rackspace monitoring module flow | rax_mon_entity -> rax_mon_check ->
+ *rax_mon_notification* -> rax_mon_notification_plan -> rax_mon_alarm
+version_added: "2.0"
+options:
+ state:
+ description:
+ - Ensure that the notification with this C(label) exists or does not exist.
+ choices: ['present', 'absent']
+ label:
+ description:
+ - Defines a friendly name for this notification. String between 1 and 255
+ characters long.
+ required: true
+ notification_type:
+ description:
+ - A supported notification type.
+ choices: ["webhook", "email", "pagerduty"]
+ required: true
+ details:
+ description:
+ - Dictionary of key-value pairs used to initialize the notification.
+ Required keys and meanings vary with notification type. See
+ http://docs.rackspace.com/cm/api/v1.0/cm-devguide/content/
+ service-notification-types-crud.html for details.
+ required: true
+author: Ash Wilson
+extends_documentation_fragment: rackspace.openstack
+'''
+
+EXAMPLES = '''
+- name: Monitoring notification example
+ gather_facts: False
+ hosts: local
+ connection: local
+ tasks:
+ - name: Email me when something goes wrong.
+ rax_mon_notification:
+ credentials: ~/.rax_pub
+ label: omg
+ notification_type: email
+ details:
+ address: me@mailhost.com
+ register: the_notification
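+
+ # Illustrative webhook notification; required details keys vary by
+ # notification type ('url' here is assumed, per the notification-type
+ # reference linked above):
+ - name: Hit a webhook when something goes wrong.
+ rax_mon_notification:
+ credentials: ~/.rax_pub
+ label: omg-hook
+ notification_type: webhook
+ details:
+ url: https://example.com/monitoring-hook
+ register: the_webhook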
+'''
+
+try:
+ import pyrax
+ HAS_PYRAX = True
+except ImportError:
+ HAS_PYRAX = False
+
+def notification(module, state, label, notification_type, details):
+
+ if len(label) < 1 or len(label) > 255:
+ module.fail_json(msg='label must be between 1 and 255 characters long')
+
+ changed = False
+ notification = None
+
+ cm = pyrax.cloud_monitoring
+ if not cm:
+ module.fail_json(msg='Failed to instantiate client. This typically '
+ 'indicates an invalid region or an incorrectly '
+ 'capitalized region name.')
+
+ existing = []
+ for n in cm.list_notifications():
+ if n.label == label:
+ existing.append(n)
+
+ if existing:
+ notification = existing[0]
+
+ if state == 'present':
+ should_update = False
+ should_delete = False
+ should_create = False
+
+ if len(existing) > 1:
+ module.fail_json(msg='%s existing notifications are labelled %s.' %
+ (len(existing), label))
+
+ if notification:
+ should_delete = (notification_type != notification.type)
+
+ should_update = (details != notification.details)
+
+ if should_update and not should_delete:
+ notification.update(details=details)
+ changed = True
+
+ if should_delete:
+ notification.delete()
+ else:
+ should_create = True
+
+ if should_create:
+ notification = cm.create_notification(notification_type,
+ label=label, details=details)
+ changed = True
+ else:
+ for n in existing:
+ n.delete()
+ changed = True
+
+ if notification:
+ notification_dict = {
+ "id": notification.id,
+ "type": notification.type,
+ "label": notification.label,
+ "details": notification.details
+ }
+ module.exit_json(changed=changed, notification=notification_dict)
+ else:
+ module.exit_json(changed=changed)
+
+def main():
+ argument_spec = rax_argument_spec()
+ argument_spec.update(
+ dict(
+ state=dict(default='present', choices=['present', 'absent']),
+ label=dict(required=True),
+ notification_type=dict(required=True, choices=['webhook', 'email', 'pagerduty']),
+ details=dict(required=True, type='dict')
+ )
+ )
+
+ module = AnsibleModule(
+ argument_spec=argument_spec,
+ required_together=rax_required_together()
+ )
+
+ if not HAS_PYRAX:
+ module.fail_json(msg='pyrax is required for this module')
+
+ state = module.params.get('state')
+
+ label = module.params.get('label')
+ notification_type = module.params.get('notification_type')
+ details = module.params.get('details')
+
+ setup_rax_module(module, pyrax)
+
+ notification(module, state, label, notification_type, details)
+
+# Import module snippets
+from ansible.module_utils.basic import *
+from ansible.module_utils.rax import *
+
+# Invoke the module.
+main()
diff --git a/lib/ansible/modules/extras/cloud/rackspace/rax_mon_notification_plan.py b/lib/ansible/modules/extras/cloud/rackspace/rax_mon_notification_plan.py
new file mode 100644
index 0000000000..5bb3fa1652
--- /dev/null
+++ b/lib/ansible/modules/extras/cloud/rackspace/rax_mon_notification_plan.py
@@ -0,0 +1,181 @@
+#!/usr/bin/python
+# This file is part of Ansible
+#
+# Ansible is free software: you can redistribute it and/or modify
+# it under the terms of the GNU General Public License as published by
+# the Free Software Foundation, either version 3 of the License, or
+# (at your option) any later version.
+#
+# Ansible is distributed in the hope that it will be useful,
+# but WITHOUT ANY WARRANTY; without even the implied warranty of
+# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+# GNU General Public License for more details.
+#
+# You should have received a copy of the GNU General Public License
+# along with Ansible. If not, see <http://www.gnu.org/licenses/>.
+
+# This is a DOCUMENTATION stub specific to this module, it extends
+# a documentation fragment located in ansible.utils.module_docs_fragments
+DOCUMENTATION = '''
+---
+module: rax_mon_notification_plan
+short_description: Create or delete a Rackspace Cloud Monitoring notification
+ plan.
+description:
+- Create or delete a Rackspace Cloud Monitoring notification plan by
+ associating existing rax_mon_notifications with severity levels. Rackspace
+ monitoring module flow | rax_mon_entity -> rax_mon_check ->
+ rax_mon_notification -> *rax_mon_notification_plan* -> rax_mon_alarm
+version_added: "2.0"
+options:
+ state:
+ description:
+ - Ensure that the notification plan with this C(label) exists or does not
+ exist.
+ choices: ['present', 'absent']
+ label:
+ description:
+ - Defines a friendly name for this notification plan. String between 1 and
+ 255 characters long.
+ required: true
+ critical_state:
+ description:
+ - Notification list to use when the alarm state is CRITICAL. Must be an
+ array of valid rax_mon_notification ids.
+ warning_state:
+ description:
+ - Notification list to use when the alarm state is WARNING. Must be an array
+ of valid rax_mon_notification ids.
+ ok_state:
+ description:
+ - Notification list to use when the alarm state is OK. Must be an array of
+ valid rax_mon_notification ids.
+author: Ash Wilson
+extends_documentation_fragment: rackspace.openstack
+'''
+
+EXAMPLES = '''
+- name: Example notification plan
+ gather_facts: False
+ hosts: local
+ connection: local
+ tasks:
+ - name: Establish who gets called when.
+ rax_mon_notification_plan:
+ credentials: ~/.rax_pub
+ state: present
+ label: defcon1
+ critical_state:
+ - "{{ everyone['notification']['id'] }}"
+ warning_state:
+ - "{{ opsfloor['notification']['id'] }}"
+ register: defcon1
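+
+ # Illustrative: also route recovery (OK) transitions, per the ok_state
+ # option documented above:
+ - name: Tell the ops floor when things recover.
+ rax_mon_notification_plan:
+ credentials: ~/.rax_pub
+ state: present
+ label: defcon2
+ critical_state:
+ - "{{ everyone['notification']['id'] }}"
+ ok_state:
+ - "{{ opsfloor['notification']['id'] }}"
+ register: defcon2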
+'''
+
+try:
+ import pyrax
+ HAS_PYRAX = True
+except ImportError:
+ HAS_PYRAX = False
+
+def notification_plan(module, state, label, critical_state, warning_state, ok_state):
+
+ if len(label) < 1 or len(label) > 255:
+ module.fail_json(msg='label must be between 1 and 255 characters long')
+
+ changed = False
+ notification_plan = None
+
+ cm = pyrax.cloud_monitoring
+ if not cm:
+ module.fail_json(msg='Failed to instantiate client. This typically '
+ 'indicates an invalid region or an incorrectly '
+ 'capitalized region name.')
+
+ existing = []
+ for n in cm.list_notification_plans():
+ if n.label == label:
+ existing.append(n)
+
+ if existing:
+ notification_plan = existing[0]
+
+ if state == 'present':
+ should_create = False
+ should_delete = False
+
+ if len(existing) > 1:
+ module.fail_json(msg='%s notification plans are labelled %s.' %
+ (len(existing), label))
+
+ if notification_plan:
+ should_delete = (critical_state and critical_state != notification_plan.critical_state) or \
+ (warning_state and warning_state != notification_plan.warning_state) or \
+ (ok_state and ok_state != notification_plan.ok_state)
+
+ if should_delete:
+ notification_plan.delete()
+ should_create = True
+ else:
+ should_create = True
+
+ if should_create:
+ notification_plan = cm.create_notification_plan(label=label,
+ critical_state=critical_state,
+ warning_state=warning_state,
+ ok_state=ok_state)
+ changed = True
+ else:
+ for np in existing:
+ np.delete()
+ changed = True
+
+ if notification_plan:
+ notification_plan_dict = {
+ "id": notification_plan.id,
+ "critical_state": notification_plan.critical_state,
+ "warning_state": notification_plan.warning_state,
+ "ok_state": notification_plan.ok_state,
+ "metadata": notification_plan.metadata
+ }
+ module.exit_json(changed=changed, notification_plan=notification_plan_dict)
+ else:
+ module.exit_json(changed=changed)
+
+def main():
+ argument_spec = rax_argument_spec()
+ argument_spec.update(
+ dict(
+ state=dict(default='present', choices=['present', 'absent']),
+ label=dict(required=True),
+ critical_state=dict(type='list'),
+ warning_state=dict(type='list'),
+ ok_state=dict(type='list')
+ )
+ )
+
+ module = AnsibleModule(
+ argument_spec=argument_spec,
+ required_together=rax_required_together()
+ )
+
+ if not HAS_PYRAX:
+ module.fail_json(msg='pyrax is required for this module')
+
+ state = module.params.get('state')
+
+ label = module.params.get('label')
+ critical_state = module.params.get('critical_state')
+ warning_state = module.params.get('warning_state')
+ ok_state = module.params.get('ok_state')
+
+ setup_rax_module(module, pyrax)
+
+ notification_plan(module, state, label, critical_state, warning_state, ok_state)
+
+# Import module snippets
+from ansible.module_utils.basic import *
+from ansible.module_utils.rax import *
+
+# Invoke the module.
+main()
diff --git a/lib/ansible/modules/extras/cloud/smartos/__init__.py b/lib/ansible/modules/extras/cloud/smartos/__init__.py
new file mode 100644
index 0000000000..e69de29bb2
--- /dev/null
+++ b/lib/ansible/modules/extras/cloud/smartos/__init__.py
diff --git a/lib/ansible/modules/extras/cloud/smartos/smartos_image_facts.py b/lib/ansible/modules/extras/cloud/smartos/smartos_image_facts.py
new file mode 100644
index 0000000000..189389de72
--- /dev/null
+++ b/lib/ansible/modules/extras/cloud/smartos/smartos_image_facts.py
@@ -0,0 +1,117 @@
+#!/usr/bin/python
+# -*- coding: utf-8 -*-
+
+# (c) 2015, Adam Števko <adam.stevko@gmail.com>
+#
+# This file is part of Ansible
+#
+# Ansible is free software: you can redistribute it and/or modify
+# it under the terms of the GNU General Public License as published by
+# the Free Software Foundation, either version 3 of the License, or
+# (at your option) any later version.
+#
+# Ansible is distributed in the hope that it will be useful,
+# but WITHOUT ANY WARRANTY; without even the implied warranty of
+# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+# GNU General Public License for more details.
+#
+# You should have received a copy of the GNU General Public License
+# along with Ansible. If not, see <http://www.gnu.org/licenses/>.
+#
+
+DOCUMENTATION = '''
+---
+module: smartos_image_facts
+short_description: Get SmartOS image details.
+description:
+ - Retrieve facts about all installed images on SmartOS. Facts are
+ inserted into the ansible_facts key.
+version_added: "2.2"
+author: Adam Števko (@xen0l)
+options:
+ filters:
+ description:
+ - Criteria for selecting an image. Can be any value from the image
+ manifest plus 'published_date', 'published', 'source', 'clones',
+ and 'size'. More information can be found at U(https://smartos.org/man/1m/imgadm)
+ under 'imgadm list'.
+ required: false
+ default: None
+'''
+
+EXAMPLES = '''
+# Return facts about all installed images.
+smartos_image_facts:
+
+# Return all private active Linux images.
+smartos_image_facts: filters="os=linux state=active public=false"
+
+# Show how many clones each image has.
+smartos_image_facts:
+
+debug: msg="{{ smartos_images[item]['name'] }}-{{ smartos_images[item]['version'] }}
+ has {{ smartos_images[item]['clones'] }} VM(s)"
+with_items: smartos_images.keys()
+'''
+
+RETURN = '''
+# this module returns ansible_facts
+'''
+
+try:
+ import json
+except ImportError:
+ import simplejson as json
+
+
+class ImageFacts(object):
+
+ def __init__(self, module):
+ self.module = module
+
+ self.filters = module.params['filters']
+
+ def return_all_installed_images(self):
+ cmd = [self.module.get_bin_path('imgadm')]
+
+ cmd.append('list')
+ cmd.append('-j')
+
+ if self.filters:
+ cmd.append(self.filters)
+
+ (rc, out, err) = self.module.run_command(cmd)
+
+ if rc != 0:
+ self.module.fail_json(
+ msg='Failed to get all installed images', stderr=err)
+
+ images = json.loads(out)
+
+ result = {}
+ for image in images:
+ result[image['manifest']['uuid']] = image['manifest']
+ # Merge additional attributes with the image manifest.
+ for attrib in ['clones', 'source', 'zpool']:
+ result[image['manifest']['uuid']][attrib] = image[attrib]
+
+ return result
+
+
+def main():
+ module = AnsibleModule(
+ argument_spec=dict(
+ filters=dict(default=None),
+ ),
+ supports_check_mode=False,
+ )
+
+ image_facts = ImageFacts(module)
+
+ data = {}
+ data['smartos_images'] = image_facts.return_all_installed_images()
+
+ module.exit_json(ansible_facts=data)
+
+from ansible.module_utils.basic import *
+main()
diff --git a/lib/ansible/modules/extras/cloud/softlayer/__init__.py b/lib/ansible/modules/extras/cloud/softlayer/__init__.py
new file mode 100644
index 0000000000..e69de29bb2
--- /dev/null
+++ b/lib/ansible/modules/extras/cloud/softlayer/__init__.py
diff --git a/lib/ansible/modules/extras/cloud/softlayer/sl_vm.py b/lib/ansible/modules/extras/cloud/softlayer/sl_vm.py
new file mode 100644
index 0000000000..d82b1da72d
--- /dev/null
+++ b/lib/ansible/modules/extras/cloud/softlayer/sl_vm.py
@@ -0,0 +1,360 @@
+#!/usr/bin/python
+# This file is part of Ansible
+#
+# Ansible is free software: you can redistribute it and/or modify
+# it under the terms of the GNU General Public License as published by
+# the Free Software Foundation, either version 3 of the License, or
+# (at your option) any later version.
+#
+# Ansible is distributed in the hope that it will be useful,
+# but WITHOUT ANY WARRANTY; without even the implied warranty of
+# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+# GNU General Public License for more details.
+#
+# You should have received a copy of the GNU General Public License
+# along with Ansible. If not, see <http://www.gnu.org/licenses/>.
+
+DOCUMENTATION = '''
+---
+module: sl_vm
+short_description: create or cancel a virtual instance in SoftLayer
+description:
+ - Creates or cancels SoftLayer instances. When created, optionally waits for it to be 'running'.
+version_added: "2.1"
+options:
+ instance_id:
+ description:
+ - Instance ID of the virtual instance on which to perform the action
+ required: false
+ default: null
+ hostname:
+ description:
+ - Hostname to be provided to a virtual instance
+ required: false
+ default: null
+ domain:
+ description:
+ - Domain name to be provided to a virtual instance
+ required: false
+ default: null
+ datacenter:
+ description:
+ - Datacenter for the virtual instance to be deployed
+ required: false
+ default: null
+ tags:
+ description:
+ - Tag or list of tags to be provided to a virtual instance
+ required: false
+ default: null
+ hourly:
+ description:
+ - Flag to determine if the instance should be hourly billed
+ required: false
+ default: true
+ private:
+ description:
+ - Flag to determine if the instance should be private only
+ required: false
+ default: false
+ dedicated:
+ description:
+ - Flag to determine if the instance should be deployed in dedicated space
+ required: false
+ default: false
+ local_disk:
+ description:
+ - Flag to determine if local disk should be used for the new instance
+ required: false
+ default: true
+ cpus:
+ description:
+ - Count of cpus to be assigned to new virtual instance
+ required: true
+ default: null
+ memory:
+ description:
+ - Amount of memory to be assigned to new virtual instance
+ required: true
+ default: null
+ disks:
+ description:
+ - List of disk sizes to be assigned to new virtual instance
+ required: true
+ default: [25]
+ os_code:
+ description:
+ - OS Code to be used for new virtual instance
+ required: false
+ default: null
+ image_id:
+ description:
+ - Image Template to be used for new virtual instance
+ required: false
+ default: null
+ nic_speed:
+ description:
+ - NIC Speed to be assigned to new virtual instance
+ required: false
+ default: 10
+ public_vlan:
+ description:
+ - VLAN by its Id to be assigned to the public NIC
+ required: false
+ default: null
+ private_vlan:
+ description:
+ - VLAN by its Id to be assigned to the private NIC
+ required: false
+ default: null
+ ssh_keys:
+ description:
+ - List of ssh keys by their Id to be assigned to a virtual instance
+ required: false
+ default: null
+ post_uri:
+ description:
+ - URL of a post-provisioning script to be loaded and executed on the virtual instance
+ required: false
+ default: null
+ state:
+ description:
+ - Create, or cancel a virtual instance. Specify "present" for create, "absent" to cancel.
+ required: false
+ default: 'present'
+ wait:
+ description:
+ - Flag used to wait for active status before returning
+ required: false
+ default: true
+ wait_time:
+ description:
+ - Time in seconds before the wait gives up and returns
+ required: false
+ default: 600
+
+requirements:
+ - "python >= 2.6"
+ - "softlayer >= 4.1.1"
+author: "Matt Colton (@mcltn)"
+'''
+
+EXAMPLES = '''
+- name: Build instance
+ hosts: localhost
+ gather_facts: False
+ tasks:
+ - name: Build instance request
+ local_action:
+ module: sl_vm
+ hostname: instance-1
+ domain: anydomain.com
+ datacenter: dal09
+ tags: ansible-module-test
+ hourly: True
+ private: False
+ dedicated: False
+ local_disk: True
+ cpus: 1
+ memory: 1024
+ disks: [25]
+ os_code: UBUNTU_LATEST
+ wait: False
+
+- name: Build additional instances
+ hosts: localhost
+ gather_facts: False
+ tasks:
+ - name: Build instances request
+ local_action:
+ module: sl_vm
+ hostname: "{{ item.hostname }}"
+ domain: "{{ item.domain }}"
+ datacenter: "{{ item.datacenter }}"
+ tags: "{{ item.tags }}"
+ hourly: "{{ item.hourly }}"
+ private: "{{ item.private }}"
+ dedicated: "{{ item.dedicated }}"
+ local_disk: "{{ item.local_disk }}"
+ cpus: "{{ item.cpus }}"
+ memory: "{{ item.memory }}"
+ disks: "{{ item.disks }}"
+ os_code: "{{ item.os_code }}"
+ ssh_keys: "{{ item.ssh_keys }}"
+ wait: "{{ item.wait }}"
+ with_items:
+ - { hostname: 'instance-2', domain: 'anydomain.com', datacenter: 'dal09', tags: ['ansible-module-test', 'ansible-module-test-slaves'], hourly: True, private: False, dedicated: False, local_disk: True, cpus: 1, memory: 1024, disks: [25,100], os_code: 'UBUNTU_LATEST', ssh_keys: [], wait: True }
+ - { hostname: 'instance-3', domain: 'anydomain.com', datacenter: 'dal09', tags: ['ansible-module-test', 'ansible-module-test-slaves'], hourly: True, private: False, dedicated: False, local_disk: True, cpus: 1, memory: 1024, disks: [25,100], os_code: 'UBUNTU_LATEST', ssh_keys: [], wait: True }
+
+
+- name: Cancel instances
+ hosts: localhost
+ gather_facts: False
+ tasks:
+ - name: Cancel by tag
+ local_action:
+ module: sl_vm
+ state: absent
+ tags: ansible-module-test
+'''
+
+# TODO: Disabled RETURN as it is breaking the build for docs. Needs to be fixed.
+RETURN = '''# '''
+
+import time
+
+# TODO: get this info from the API
+STATES = ['present', 'absent']
+DATACENTERS = ['ams01','ams03','dal01','dal05','dal06','dal09','fra02','hkg02','hou02','lon02','mel01','mex01','mil01','mon01','par01','sjc01','sjc03','sao01','sea01','sng01','syd01','tok02','tor01','wdc01','wdc04']
+CPU_SIZES = [1,2,4,8,16]
+MEMORY_SIZES = [1024,2048,4096,6144,8192,12288,16384,32768,49152,65536]
+INITIALDISK_SIZES = [25,100]
+LOCALDISK_SIZES = [25,100,150,200,300]
+SANDISK_SIZES = [10,20,25,30,40,50,75,100,125,150,175,200,250,300,350,400,500,750,1000,1500,2000]
+NIC_SPEEDS = [10,100,1000]
+
+try:
+ import SoftLayer
+ from SoftLayer import VSManager
+
+ HAS_SL = True
+ vsManager = VSManager(SoftLayer.create_client_from_env())
+except ImportError:
+ HAS_SL = False
+
+
+def create_virtual_instance(module):
+
+ instances = vsManager.list_instances(
+ hostname = module.params.get('hostname'),
+ domain = module.params.get('domain'),
+ datacenter = module.params.get('datacenter')
+ )
+
+ if instances:
+ return False, None
+
+
+ # Check if OS or Image Template is provided (can't be both; defaults to OS)
+ if module.params.get('os_code'):
+ module.params['image_id'] = ''
+ elif module.params.get('image_id'):
+ module.params['os_code'] = ''
+ module.params['disks'] = [] # Blank out disks since the image template provides them
+ else:
+ return False, None
+
+ tags = module.params.get('tags')
+ if isinstance(tags, list):
+ tags = ','.join(map(str, module.params.get('tags')))
+
+ instance = vsManager.create_instance(
+ hostname = module.params.get('hostname'),
+ domain = module.params.get('domain'),
+ cpus = module.params.get('cpus'),
+ memory = module.params.get('memory'),
+ hourly = module.params.get('hourly'),
+ datacenter = module.params.get('datacenter'),
+ os_code = module.params.get('os_code'),
+ image_id = module.params.get('image_id'),
+ local_disk = module.params.get('local_disk'),
+ disks = module.params.get('disks'),
+ ssh_keys = module.params.get('ssh_keys'),
+ nic_speed = module.params.get('nic_speed'),
+ private = module.params.get('private'),
+ public_vlan = module.params.get('public_vlan'),
+ private_vlan = module.params.get('private_vlan'),
+ dedicated = module.params.get('dedicated'),
+ post_uri = module.params.get('post_uri'),
+ tags = tags)
+
+ if instance is not None and instance['id'] > 0:
+ return True, instance
+ else:
+ return False, None
+
+
+def wait_for_instance(module,id):
+ instance = None
+ completed = False
+ wait_timeout = time.time() + module.params.get('wait_timeout')
+ while not completed and wait_timeout > time.time():
+ try:
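+ # wait_for_ready is polled in short cycles (limit=10, delay=2)
+ # so this loop can enforce the module-level timeout itself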
+ completed = vsManager.wait_for_ready(id, 10, 2)
+ if completed:
+ instance = vsManager.get_instance(id)
+ except:
+ completed = False
+
+ return completed, instance
+
+
+def cancel_instance(module):
+ canceled = True
+ if module.params.get('instance_id') is None and (module.params.get('tags') or module.params.get('hostname') or module.params.get('domain')):
+ tags = module.params.get('tags')
+ if isinstance(tags, basestring):
+ tags = [module.params.get('tags')]
+ instances = vsManager.list_instances(tags = tags, hostname = module.params.get('hostname'), domain = module.params.get('domain'))
+ for instance in instances:
+ try:
+ vsManager.cancel_instance(instance['id'])
+ except:
+ canceled = False
+ elif module.params.get('instance_id') and module.params.get('instance_id') != 0:
+ try:
+ vsManager.cancel_instance(module.params.get('instance_id'))
+ except:
+ canceled = False
+ else:
+ return False, None
+
+ return canceled, None
+
+
+def main():
+
+ module = AnsibleModule(
+ argument_spec=dict(
+ instance_id=dict(),
+ hostname=dict(),
+ domain=dict(),
+ datacenter=dict(choices=DATACENTERS),
+ tags=dict(),
+ hourly=dict(type='bool', default=True),
+ private=dict(type='bool', default=False),
+ dedicated=dict(type='bool', default=False),
+ local_disk=dict(type='bool', default=True),
+ cpus=dict(type='int', choices=CPU_SIZES),
+ memory=dict(type='int', choices=MEMORY_SIZES),
+ disks=dict(type='list', default=[25]),
+ os_code=dict(),
+ image_id=dict(),
+ nic_speed=dict(type='int', choices=NIC_SPEEDS),
+ public_vlan=dict(),
+ private_vlan=dict(),
+ ssh_keys=dict(type='list', default=[]),
+ post_uri=dict(),
+ state=dict(default='present', choices=STATES),
+ wait=dict(type='bool', default=True),
+ wait_timeout=dict(type='int', default=600)
+ )
+ )
+
+ if not HAS_SL:
+ module.fail_json(msg='softlayer python library required for this module')
+
+ if module.params.get('state') == 'absent':
+ (changed, instance) = cancel_instance(module)
+
+ elif module.params.get('state') == 'present':
+ (changed, instance) = create_virtual_instance(module)
+ if module.params.get('wait') and instance:
+ (changed, instance) = wait_for_instance(module, instance['id'])
+
+ module.exit_json(changed=changed, instance=json.loads(json.dumps(instance, default=lambda o: o.__dict__)))
+
+from ansible.module_utils.basic import *
+
+if __name__ == '__main__':
+ main()
diff --git a/lib/ansible/modules/extras/cloud/vmware/__init__.py b/lib/ansible/modules/extras/cloud/vmware/__init__.py
new file mode 100644
index 0000000000..e69de29bb2
--- /dev/null
+++ b/lib/ansible/modules/extras/cloud/vmware/__init__.py
diff --git a/lib/ansible/modules/extras/cloud/vmware/vca_fw.py b/lib/ansible/modules/extras/cloud/vmware/vca_fw.py
new file mode 100644
index 0000000000..17cc093eb5
--- /dev/null
+++ b/lib/ansible/modules/extras/cloud/vmware/vca_fw.py
@@ -0,0 +1,245 @@
+#!/usr/bin/python
+
+# Copyright (c) 2015 VMware, Inc. All Rights Reserved.
+#
+# This file is part of Ansible
+#
+# Ansible is free software: you can redistribute it and/or modify
+# it under the terms of the GNU General Public License as published by
+# the Free Software Foundation, either version 3 of the License, or
+# (at your option) any later version.
+#
+# Ansible is distributed in the hope that it will be useful,
+# but WITHOUT ANY WARRANTY; without even the implied warranty of
+# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+# GNU General Public License for more details.
+#
+# You should have received a copy of the GNU General Public License
+# along with Ansible. If not, see <http://www.gnu.org/licenses/>.
+
+
+DOCUMENTATION = '''
+---
+module: vca_fw
+short_description: Add or remove firewall rules in a gateway in a vCA
+description:
+ - Adds or removes firewall rules from a gateway in a vCloud Air environment
+version_added: "2.0"
+author: Peter Sprygada (@privateip)
+options:
+ fw_rules:
+ description:
+ - A list of firewall rules to be added to the gateway. See the examples for valid entries.
+ required: True
+extends_documentation_fragment: vca.documentation
+'''
+
+EXAMPLES = '''
+
+#Add a set of firewall rules
+
+- hosts: localhost
+ connection: local
+ tasks:
+ - vca_fw:
+ instance_id: 'b15ff1e5-1024-4f55-889f-ea0209726282'
+ vdc_name: 'benz_ansible'
+ state: 'absent'
+ fw_rules:
+ - description: "ben testing"
+ source_ip: "Any"
+ dest_ip: 192.0.2.23
+ - description: "ben testing 2"
+ source_ip: 192.0.2.50
+ source_port: "Any"
+ dest_port: "22"
+ dest_ip: 192.0.2.101
+ is_enable: "true"
+ enable_logging: "false"
+ protocol: "Tcp"
+ policy: "allow"
+
+'''
+
+try:
+ from pyvcloud.schema.vcd.v1_5.schemas.vcloud.networkType import FirewallRuleType
+ from pyvcloud.schema.vcd.v1_5.schemas.vcloud.networkType import ProtocolsType
+except ImportError:
+ # normally set a flag here but it will be caught when testing for
+ # the existence of pyvcloud (see module_utils/vca.py). This just
+ # protects against generating an exception at runtime
+ pass
+
+VALID_PROTO = ['Tcp', 'Udp', 'Icmp', 'Other', 'Any']
+VALID_RULE_KEYS = ['policy', 'is_enable', 'enable_logging', 'description',
+ 'dest_ip', 'dest_port', 'source_ip', 'source_port',
+ 'protocol']
+
+def protocol_to_tuple(protocol):
+ return (protocol.get_Tcp(),
+ protocol.get_Udp(),
+ protocol.get_Icmp(),
+ protocol.get_Other(),
+ protocol.get_Any())
+
+def protocol_to_string(protocol):
+ protocol = protocol_to_tuple(protocol)
+ if protocol[0] is True:
+ return 'Tcp'
+ elif protocol[1] is True:
+ return 'Udp'
+ elif protocol[2] is True:
+ return 'Icmp'
+ elif protocol[3] is True:
+ return 'Other'
+ elif protocol[4] is True:
+ return 'Any'
+
+def protocol_to_type(protocol):
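+ # map the protocol name ('Tcp', 'Udp', ...) onto the matching flag
+ # attribute of a pyvcloud ProtocolsType; an invalid name is reported
+ # back to the caller as a VcaError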
+ try:
+ protocols = ProtocolsType()
+ setattr(protocols, protocol, True)
+ return protocols
+ except AttributeError:
+ raise VcaError("The value in protocol is not valid")
+
+def validate_fw_rules(fw_rules):
+ for rule in fw_rules:
+ for k in rule.keys():
+ if k not in VALID_RULE_KEYS:
+ raise VcaError("%s is not a valid key in fw rules, please "
+ "check above.." % k, valid_keys=VALID_RULE_KEYS)
+
+ rule['dest_port'] = str(rule.get('dest_port', 'Any')).lower()
+ rule['dest_ip'] = rule.get('dest_ip', 'Any').lower()
+ rule['source_port'] = str(rule.get('source_port', 'Any')).lower()
+ rule['source_ip'] = rule.get('source_ip', 'Any').lower()
+ rule['protocol'] = rule.get('protocol', 'Any').lower()
+ rule['policy'] = rule.get('policy', 'allow').lower()
+ rule['is_enable'] = rule.get('is_enable', True)
+ rule['enable_logging'] = rule.get('enable_logging', False)
+ rule['description'] = rule.get('description', 'rule added by Ansible')
+
+ return fw_rules
+
+def fw_rules_to_dict(rules):
+ fw_rules = list()
+ for rule in rules:
+ fw_rules.append(
+ dict(
+ dest_port=rule.get_DestinationPortRange().lower(),
+ dest_ip=rule.get_DestinationIp().lower(),
+ source_port=rule.get_SourcePortRange().lower(),
+ source_ip=rule.get_SourceIp().lower(),
+ protocol=protocol_to_string(rule.get_Protocols()).lower(),
+ policy=rule.get_Policy().lower(),
+ is_enable=rule.get_IsEnabled(),
+ enable_logging=rule.get_EnableLogging(),
+ description=rule.get_Description()
+ )
+ )
+ return fw_rules
+
+def create_fw_rule(is_enable, description, policy, protocol, dest_port,
+ dest_ip, source_port, source_ip, enable_logging):
+
+ return FirewallRuleType(IsEnabled=is_enable,
+ Description=description,
+ Policy=policy,
+ Protocols=protocol_to_type(protocol),
+ DestinationPortRange=dest_port,
+ DestinationIp=dest_ip,
+ SourcePortRange=source_port,
+ SourceIp=source_ip,
+ EnableLogging=enable_logging)
+
+def main():
+ argument_spec = vca_argument_spec()
+ argument_spec.update(
+ dict(
+ fw_rules = dict(required=True, type='list'),
+ gateway_name = dict(default='gateway'),
+ state = dict(default='present', choices=['present', 'absent'])
+ )
+ )
+
+ module = AnsibleModule(argument_spec, supports_check_mode=True)
+
+ fw_rules = module.params.get('fw_rules')
+ gateway_name = module.params.get('gateway_name')
+ vdc_name = module.params['vdc_name']
+
+ vca = vca_login(module)
+
+ gateway = vca.get_gateway(vdc_name, gateway_name)
+ if not gateway:
+ module.fail_json(msg="Not able to find the gateway %s, please check "
+ "the gateway_name param" % gateway_name)
+
+ fwservice = gateway._getFirewallService()
+
+ rules = gateway.get_fw_rules()
+ current_rules = fw_rules_to_dict(rules)
+
+ try:
+ desired_rules = validate_fw_rules(fw_rules)
+ except VcaError as e:
+ module.fail_json(msg=e.message)
+
+ result = dict(changed=False)
+ result['current_rules'] = current_rules
+ result['desired_rules'] = desired_rules
+
+ updates = list()
+ additions = list()
+ deletions = list()
+
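+ # Compare rules positionally: a desired rule that differs from the
+ # current rule at the same index is an update; desired rules past the
+ # end of the current list are additions, and any surplus current
+ # rules are deletions.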
+ for (index, rule) in enumerate(desired_rules):
+ try:
+ if rule != current_rules[index]:
+ updates.append((index, rule))
+ except IndexError:
+ additions.append(rule)
+
+ if len(current_rules) > len(desired_rules):
+ for rule in current_rules[len(desired_rules):]:
+ deletions.append(rule)
+
+ for rule in additions:
+ if not module.check_mode:
+ rule['protocol'] = rule['protocol'].capitalize()
+ gateway.add_fw_rule(**rule)
+ result['changed'] = True
+
+ for index, rule in updates:
+ if not module.check_mode:
+ rule = create_fw_rule(**rule)
+ fwservice.replace_FirewallRule_at(index, rule)
+ result['changed'] = True
+
+ keys = ['protocol', 'dest_port', 'dest_ip', 'source_port', 'source_ip']
+ for rule in deletions:
+ if not module.check_mode:
+ kwargs = dict([(k, v) for k, v in rule.items() if k in keys])
+ kwargs['protocol'] = kwargs['protocol'].capitalize()
+ gateway.delete_fw_rule(**kwargs)
+ result['changed'] = True
+
+ if not module.check_mode and result['changed'] == True:
+ task = gateway.save_services_configuration()
+ if task:
+ vca.block_until_completed(task)
+
+ result['rules_updated'] = len(updates)
+ result['rules_added'] = len(additions)
+ result['rules_deleted'] = len(deletions)
+
+ return module.exit_json(**result)
+
+# import module snippets
+from ansible.module_utils.basic import *
+from ansible.module_utils.vca import *
+if __name__ == '__main__':
+ main()
diff --git a/lib/ansible/modules/extras/cloud/vmware/vca_nat.py b/lib/ansible/modules/extras/cloud/vmware/vca_nat.py
new file mode 100644
index 0000000000..3381b3ced2
--- /dev/null
+++ b/lib/ansible/modules/extras/cloud/vmware/vca_nat.py
@@ -0,0 +1,215 @@
+#!/usr/bin/python
+
+# Copyright (c) 2015 VMware, Inc. All Rights Reserved.
+#
+# This file is part of Ansible
+#
+# Ansible is free software: you can redistribute it and/or modify
+# it under the terms of the GNU General Public License as published by
+# the Free Software Foundation, either version 3 of the License, or
+# (at your option) any later version.
+#
+# Ansible is distributed in the hope that it will be useful,
+# but WITHOUT ANY WARRANTY; without even the implied warranty of
+# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+# GNU General Public License for more details.
+#
+# You should have received a copy of the GNU General Public License
+# along with Ansible. If not, see <http://www.gnu.org/licenses/>.
+
+
+DOCUMENTATION = '''
+---
+module: vca_nat
+short_description: Add or remove NAT rules in a gateway in a vCA
+description:
+ - Adds or removes NAT rules from a gateway in a vCloud Air environment
+version_added: "2.0"
+author: Peter Sprygada (@privateip)
+options:
+ purge_rules:
+ description:
+ - If set to true, it will delete all rules in the gateway that are not given as a parameter to this module.
+ required: false
+ default: false
+ nat_rules:
+ description:
+ - A list of NAT rules to be added to the gateway. See the examples for valid entries.
+ required: True
+extends_documentation_fragment: vca.documentation
+'''
+
+EXAMPLES = '''
+
+#An example for a source nat
+
+- hosts: localhost
+ connection: local
+ tasks:
+ - vca_nat:
+ instance_id: 'b15ff1e5-1024-4f55-889f-ea0209726282'
+ vdc_name: 'benz_ansible'
+ state: 'present'
+ nat_rules:
+ - rule_type: SNAT
+ original_ip: 192.0.2.42
+ translated_ip: 203.0.113.23
+
+#example for a DNAT
+- hosts: localhost
+ connection: local
+ tasks:
+ - vca_nat:
+ instance_id: 'b15ff1e5-1024-4f55-889f-ea0209726282'
+ vdc_name: 'benz_ansible'
+ state: 'present'
+ nat_rules:
+ - rule_type: DNAT
+ original_ip: 203.0.113.23
+ original_port: 22
+ translated_ip: 192.0.2.42
+ translated_port: 22
+
+'''
+
+import time
+import xmltodict
+
+VALID_RULE_KEYS = ['rule_type', 'original_ip', 'original_port',
+ 'translated_ip', 'translated_port', 'protocol']
+
+
+def validate_nat_rules(nat_rules):
+ for rule in nat_rules:
+ if not isinstance(rule, dict):
+ raise VcaError("nat rules must be a list of dictionaries, "
+ "Please check", valid_keys=VALID_RULE_KEYS)
+
+ for k in rule.keys():
+ if k not in VALID_RULE_KEYS:
+ raise VcaError("%s is not a valid key in nat rules, please "
+ "check above.." % k, valid_keys=VALID_RULE_KEYS)
+
+ rule['original_port'] = str(rule.get('original_port', 'any')).lower()
+ rule['original_ip'] = rule.get('original_ip', 'any').lower()
+ rule['translated_ip'] = rule.get('translated_ip', 'any').lower()
+ rule['translated_port'] = str(rule.get('translated_port', 'any')).lower()
+ rule['protocol'] = rule.get('protocol', 'any').lower()
+ rule['rule_type'] = rule.get('rule_type', 'DNAT').lower()
+
+ return nat_rules
+
+
+def nat_rules_to_dict(nat_rules):
+ result = []
+ for rule in nat_rules:
+ gw_rule = rule.get_GatewayNatRule()
+ result.append(
+ dict(
+ rule_type=rule.get_RuleType().lower(),
+ original_ip=gw_rule.get_OriginalIp().lower(),
+ original_port=(gw_rule.get_OriginalPort().lower() or 'any'),
+ translated_ip=gw_rule.get_TranslatedIp().lower(),
+ translated_port=(gw_rule.get_TranslatedPort().lower() or 'any'),
+ protocol=(gw_rule.get_Protocol().lower() or 'any')
+ )
+ )
+ return result
+
+def rule_to_string(rule):
+ strings = list()
+ for key, value in rule.items():
+ strings.append('%s=%s' % (key, value))
+ return ', '.join(strings)
+
+def main():
+ argument_spec = vca_argument_spec()
+ argument_spec.update(
+ dict(
+ nat_rules = dict(type='list', default=[]),
+ gateway_name = dict(default='gateway'),
+ purge_rules = dict(default=False, type='bool'),
+ state = dict(default='present', choices=['present', 'absent'])
+ )
+ )
+
+ module = AnsibleModule(argument_spec, supports_check_mode=True)
+
+ vdc_name = module.params.get('vdc_name')
+ state = module.params['state']
+ nat_rules = module.params['nat_rules']
+ gateway_name = module.params['gateway_name']
+ purge_rules = module.params['purge_rules']
+
+ if not purge_rules and not nat_rules:
+ module.fail_json(msg='Must define purge_rules or nat_rules')
+
+ vca = vca_login(module)
+
+ gateway = vca.get_gateway(vdc_name, gateway_name)
+ if not gateway:
+ module.fail_json(msg="Not able to find the gateway %s, please check "
+ "the gateway_name param" % gateway_name)
+
+ try:
+ desired_rules = validate_nat_rules(nat_rules)
+ except VcaError as e:
+ module.fail_json(msg=e.message)
+
+ rules = gateway.get_nat_rules()
+
+ result = dict(changed=False, rules_purged=0)
+
+ deletions = 0
+ additions = 0
+
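+ # When purging, all existing rules are removed up front; any desired
+ # rules are then re-added by the loop further below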
+ if purge_rules is True and len(rules) > 0:
+ result['rules_purged'] = len(rules)
+ deletions = result['rules_purged']
+ rules = list()
+ if not module.check_mode:
+ gateway.del_all_nat_rules()
+ task = gateway.save_services_configuration()
+ vca.block_until_completed(task)
+ rules = gateway.get_nat_rules()
+ result['changed'] = True
+
+ current_rules = nat_rules_to_dict(rules)
+
+ result['current_rules'] = current_rules
+ result['desired_rules'] = desired_rules
+
+ for rule in desired_rules:
+ if rule not in current_rules:
+ additions += 1
+ if not module.check_mode:
+ gateway.add_nat_rule(**rule)
+ result['changed'] = True
+ result['rules_added'] = additions
+
+ result['delete_rule'] = list()
+ result['delete_rule_rc'] = list()
+ for rule in current_rules:
+ if rule not in desired_rules:
+ deletions += 1
+ if not module.check_mode:
+ result['delete_rule'].append(rule)
+ rc = gateway.del_nat_rule(**rule)
+ result['delete_rule_rc'].append(rc)
+ result['changed'] = True
+ result['rules_deleted'] = deletions
+
+ if not module.check_mode and (additions > 0 or deletions > 0):
+ task = gateway.save_services_configuration()
+ vca.block_until_completed(task)
+
+ module.exit_json(**result)
+
+
+# import module snippets
+from ansible.module_utils.basic import *
+from ansible.module_utils.vca import *
+
+if __name__ == '__main__':
+ main()
diff --git a/lib/ansible/modules/extras/cloud/vmware/vca_vapp.py b/lib/ansible/modules/extras/cloud/vmware/vca_vapp.py
new file mode 100644
index 0000000000..68ed5f255d
--- /dev/null
+++ b/lib/ansible/modules/extras/cloud/vmware/vca_vapp.py
@@ -0,0 +1,282 @@
+#!/usr/bin/python
+
+# Copyright (c) 2015 Ansible, Inc.
+#
+# This file is part of Ansible
+#
+# Ansible is free software: you can redistribute it and/or modify
+# it under the terms of the GNU General Public License as published by
+# the Free Software Foundation, either version 3 of the License, or
+# (at your option) any later version.
+#
+# Ansible is distributed in the hope that it will be useful,
+# but WITHOUT ANY WARRANTY; without even the implied warranty of
+# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+# GNU General Public License for more details.
+#
+# You should have received a copy of the GNU General Public License
+# along with Ansible. If not, see <http://www.gnu.org/licenses/>.
+
+
+DOCUMENTATION = '''
+---
+module: vca_vapp
+short_description: Manages vCloud Air vApp instances.
+description:
+ - This module will actively manage vCloud Air vApp instances. Instances
+ can be created and deleted as well as both deployed and undeployed.
+version_added: "2.0"
+author: Peter Sprygada (@privateip)
+options:
+ vapp_name:
+ description:
+ - The name of the vCloud Air vApp instance
+ required: yes
+ template_name:
+ description:
+ - The name of the vApp template to use to create the vApp instance. If
+ the I(state) is not `absent` then the I(template_name) value must be
+ provided. The I(template_name) must be previously uploaded to the
+ catalog specified by I(catalog_name)
+ required: no
+ default: None
+ network_name:
+ description:
+ - The name of the network that should be attached to the virtual machine
+ in the vApp. The virtual network specified must already be created in
+ the vCloud Air VDC. If the I(state) is not 'absent' then the
+ I(network_name) argument must be provided.
+ required: no
+ default: None
+ network_mode:
+ description:
+ - Configures the mode of the network connection.
+ required: no
+ default: pool
+ choices: ['pool', 'dhcp', 'static']
+ vm_name:
+ description:
+ - The name of the virtual machine instance in the vApp to manage.
+ required: no
+ default: None
+ vm_cpus:
+ description:
+ - The number of vCPUs to configure for the VM in the vApp. If the
+ I(vm_name) argument is provided, then this becomes a per VM setting
+ otherwise it is applied to all VMs in the vApp.
+ required: no
+ default: None
+ vm_memory:
+ description:
+ - The amount of memory in MB to allocate to VMs in the vApp. If the
+ I(vm_name) argument is provided, then this becomes a per VM setting
+ otherwise it is applied to all VMs in the vApp.
+ required: no
+ default: None
+ operation:
+ description:
+ - Specifies an operation to be performed on the vApp.
+ required: no
+ default: noop
+ choices: ['noop', 'poweron', 'poweroff', 'suspend', 'shutdown', 'reboot', 'reset']
+ state:
+ description:
+ - Configures the state of the vApp.
+ required: no
+ default: present
+ choices: ['present', 'absent', 'deployed', 'undeployed']
+ username:
+ description:
+ - The vCloud Air username to use during authentication
+ required: false
+ default: None
+ password:
+ description:
+ - The vCloud Air password to use during authentication
+ required: false
+ default: None
+ org:
+ description:
+ - The org to log in to for creating the vApp; mostly set when the service_type is vcd.
+ required: false
+ default: None
+ instance_id:
+ description:
+ - The instance id in a vchs environment to be used for creating the vapp
+ required: false
+ default: None
+ host:
+ description:
+ - The authentication host to be used when service type is vcd.
+ required: false
+ default: None
+ api_version:
+ description:
+ - The api version to be used with the vca
+ required: false
+ default: "5.7"
+ service_type:
+ description:
+ - The type of service we are authenticating against
+ required: false
+ default: vca
+ choices: [ "vca", "vchs", "vcd" ]
+ vdc_name:
+ description:
+ - The name of the virtual data center (VDC) where the vApp should be created, or that contains the vApp.
+ required: false
+ default: None
+'''
+
+EXAMPLES = '''
+
+- name: Creates a new vApp in a VCA instance
+ vca_vapp:
+ vapp_name: tower
+ state: present
+ template_name: 'Ubuntu Server 12.04 LTS (amd64 20150127)'
+ vdc_name: VDC1
+ instance_id: <your instance id here>
+ username: <your username here>
+ password: <your password here>
+
+'''
+
+DEFAULT_VAPP_OPERATION = 'noop'
+
+VAPP_STATUS = {
+ 'Powered off': 'poweroff',
+ 'Powered on': 'poweron',
+ 'Suspended': 'suspend'
+}
+
+VAPP_STATES = ['present', 'absent', 'deployed', 'undeployed']
+VAPP_OPERATIONS = ['poweron', 'poweroff', 'suspend', 'shutdown',
+ 'reboot', 'reset', 'noop']
+
+
+def get_instance(module):
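+ # Describe the vApp's current state; if the lookup fails the vApp
+ # is reported as absent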
+ vapp_name = module.params['vapp_name']
+ inst = dict(vapp_name=vapp_name, state='absent')
+ try:
+ vapp = module.get_vapp(vapp_name)
+ if vapp:
+ status = module.vca.get_status(vapp.me.get_status())
+ inst['status'] = VAPP_STATUS.get(status, 'unknown')
+ inst['state'] = 'deployed' if vapp.me.deployed else 'undeployed'
+ return inst
+ except VcaError:
+ return inst
+
+def create(module):
+ vdc_name = module.params['vdc_name']
+ vapp_name = module.params['vapp_name']
+ template_name = module.params['template_name']
+ catalog_name = module.params['catalog_name']
+ network_name = module.params['network_name']
+ network_mode = module.params['network_mode']
+ vm_name = module.params['vm_name']
+ vm_cpus = module.params['vm_cpus']
+ vm_memory = module.params['vm_memory']
+ deploy = module.params['state'] == 'deployed'
+ poweron = module.params['operation'] == 'poweron'
+
+ task = module.vca.create_vapp(vdc_name, vapp_name, template_name,
+ catalog_name, network_name, network_mode,
+ vm_name, vm_cpus, vm_memory, deploy, poweron)
+
+ module.vca.block_until_completed(task)
+
+def delete(module):
+ vdc_name = module.params['vdc_name']
+ vapp_name = module.params['vapp_name']
+ module.vca.delete_vapp(vdc_name, vapp_name)
+
+def do_operation(module):
+ vapp_name = module.params['vapp_name']
+ operation = module.params['operation']
+
+ vm_name = module.params.get('vm_name')
+ vm = None
+ if vm_name:
+ vm = module.get_vm(vapp_name, vm_name)
+
+ if operation == 'poweron':
+ operation = 'powerOn'
+ elif operation == 'poweroff':
+ operation = 'powerOff'
+
+ cmd = 'power:%s' % operation
+ module.get_vapp(vapp_name).execute(cmd, 'post', targetVM=vm)
+
+def set_state(module):
+ state = module.params['state']
+ vapp = module.get_vapp(module.params['vapp_name'])
+ if state == 'deployed':
+ action = module.params['operation'] == 'poweron'
+ if not vapp.deploy(action):
+ module.fail('unable to deploy vapp')
+ elif state == 'undeployed':
+ action = module.params['operation']
+ if action == 'poweroff':
+ action = 'powerOff'
+ elif action != 'suspend':
+ action = None
+ if not vapp.undeploy(action):
+ module.fail('unable to undeploy vapp')
+
+
+def main():
+
+ argument_spec = dict(
+ vapp_name=dict(required=True),
+ vdc_name=dict(required=True),
+ template_name=dict(),
+ catalog_name=dict(default='Public Catalog'),
+ network_name=dict(),
+ network_mode=dict(default='pool', choices=['dhcp', 'static', 'pool']),
+ vm_name=dict(),
+ vm_cpus=dict(),
+ vm_memory=dict(),
+ operation=dict(default=DEFAULT_VAPP_OPERATION, choices=VAPP_OPERATIONS),
+ state=dict(default='present', choices=VAPP_STATES)
+ )
+
+ module = VcaAnsibleModule(argument_spec=argument_spec,
+ supports_check_mode=True)
+
+ state = module.params['state']
+ operation = module.params['operation']
+
+ instance = get_instance(module)
+
+ result = dict(changed=False)
+
+ if instance and state == 'absent':
+ if not module.check_mode:
+ delete(module)
+ result['changed'] = True
+
+ elif state != 'absent':
+ if instance['state'] == 'absent':
+ if not module.check_mode:
+ create(module)
+ result['changed'] = True
+
+ elif instance['state'] != state and state != 'present':
+ if not module.check_mode:
+ set_state(module)
+ result['changed'] = True
+
+ if operation != instance.get('status') and operation != 'noop':
+ if not module.check_mode:
+ do_operation(module)
+ result['changed'] = True
+
+ return module.exit(**result)
+
+# import module snippets
+from ansible.module_utils.basic import *
+from ansible.module_utils.vca import *
+if __name__ == '__main__':
+ main()
diff --git a/lib/ansible/modules/extras/cloud/vmware/vmware_cluster.py b/lib/ansible/modules/extras/cloud/vmware/vmware_cluster.py
new file mode 100644
index 0000000000..8067d36de2
--- /dev/null
+++ b/lib/ansible/modules/extras/cloud/vmware/vmware_cluster.py
@@ -0,0 +1,251 @@
+#!/usr/bin/python
+# -*- coding: utf-8 -*-
+
+# (c) 2015, Joseph Callen <jcallen () csc.com>
+#
+# This file is part of Ansible
+#
+# Ansible is free software: you can redistribute it and/or modify
+# it under the terms of the GNU General Public License as published by
+# the Free Software Foundation, either version 3 of the License, or
+# (at your option) any later version.
+#
+# Ansible is distributed in the hope that it will be useful,
+# but WITHOUT ANY WARRANTY; without even the implied warranty of
+# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+# GNU General Public License for more details.
+#
+# You should have received a copy of the GNU General Public License
+# along with Ansible. If not, see <http://www.gnu.org/licenses/>.
+
+DOCUMENTATION = '''
+---
+module: vmware_cluster
+short_description: Create VMware vSphere Cluster
+description:
+ - Create VMware vSphere Cluster
+version_added: 2.0
+author: Joseph Callen (@jcpowermac)
+notes:
+ - Tested on ESXi 5.5
+requirements:
+ - "python >= 2.6"
+ - PyVmomi
+options:
+ datacenter_name:
+ description:
+ - The name of the datacenter the cluster will be created in.
+ required: True
+ cluster_name:
+ description:
+ - The name of the cluster that will be created
+ required: True
+ enable_ha:
+ description:
+ - If set to True, HA will be enabled when the cluster is created.
+ required: False
+ default: False
+ enable_drs:
+ description:
+ - If set to True, DRS will be enabled when the cluster is created.
+ required: False
+ default: False
+ enable_vsan:
+ description:
+ - If set to True, vSAN will be enabled when the cluster is created.
+ required: False
+ default: False
+extends_documentation_fragment: vmware.documentation
+'''
+
+EXAMPLES = '''
+# Example vmware_cluster command from Ansible Playbooks
+- name: Create Cluster
+ local_action: >
+ vmware_cluster
+ hostname="{{ ansible_ssh_host }}" username=root password=vmware
+ datacenter_name="datacenter"
+ cluster_name="cluster"
+ enable_ha=True
+ enable_drs=True
+ enable_vsan=True
+'''
+
+try:
+ from pyVmomi import vim, vmodl
+ HAS_PYVMOMI = True
+except ImportError:
+ HAS_PYVMOMI = False
+
+
+class VMwareCluster(object):
+ def __init__(self, module):
+ self.module = module
+ self.enable_ha = module.params['enable_ha']
+ self.enable_drs = module.params['enable_drs']
+ self.enable_vsan = module.params['enable_vsan']
+ self.cluster_name = module.params['cluster_name']
+ self.desired_state = module.params['state']
+ self.datacenter = None
+ self.cluster = None
+ self.content = connect_to_api(module)
+ self.datacenter_name = module.params['datacenter_name']
+
+ def process_state(self):
+ cluster_states = {
+ 'absent': {
+ 'present': self.state_destroy_cluster,
+ 'absent': self.state_exit_unchanged,
+ },
+ 'present': {
+ 'update': self.state_update_cluster,
+ 'present': self.state_exit_unchanged,
+ 'absent': self.state_create_cluster,
+ }
+ }
+ current_state = self.check_cluster_configuration()
+ # Based on the desired_state and the current_state call
+ # the appropriate method from the dictionary
+ cluster_states[self.desired_state][current_state]()
+
+ def configure_ha(self):
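+ # Build the HA (DAS) config; admission control is hardcoded to a
+ # failover level of 2 (tolerate two host failures)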
+ das_config = vim.cluster.DasConfigInfo()
+ das_config.enabled = self.enable_ha
+ das_config.admissionControlPolicy = vim.cluster.FailoverLevelAdmissionControlPolicy()
+ das_config.admissionControlPolicy.failoverLevel = 2
+ return das_config
+
+ def configure_drs(self):
+ drs_config = vim.cluster.DrsConfigInfo()
+ drs_config.enabled = self.enable_drs
+ # Set to partially automated
+ drs_config.vmotionRate = 3
+ return drs_config
+
+ def configure_vsan(self):
+ vsan_config = vim.vsan.cluster.ConfigInfo()
+ vsan_config.enabled = self.enable_vsan
+ vsan_config.defaultConfig = vim.vsan.cluster.ConfigInfo.HostDefaultInfo()
+ vsan_config.defaultConfig.autoClaimStorage = False
+ return vsan_config
+
+ def state_create_cluster(self):
+ try:
+ cluster_config_spec = vim.cluster.ConfigSpecEx()
+ cluster_config_spec.dasConfig = self.configure_ha()
+ cluster_config_spec.drsConfig = self.configure_drs()
+ if self.enable_vsan:
+ cluster_config_spec.vsanConfig = self.configure_vsan()
+ if not self.module.check_mode:
+ self.datacenter.hostFolder.CreateClusterEx(self.cluster_name, cluster_config_spec)
+ self.module.exit_json(changed=True)
+ except vim.fault.DuplicateName:
+ self.module.fail_json(msg="A cluster with the name %s already exists" % self.cluster_name)
+ except vmodl.fault.InvalidArgument:
+ self.module.fail_json(msg="Cluster configuration specification parameter is invalid")
+ except vim.fault.InvalidName:
+ self.module.fail_json(msg="%s is an invalid name for a cluster" % self.cluster_name)
+ except vmodl.fault.NotSupported:
+ # This should never happen
+ self.module.fail_json(msg="Trying to create a cluster on an incorrect folder object")
+ except vmodl.RuntimeFault as runtime_fault:
+ self.module.fail_json(msg=runtime_fault.msg)
+ except vmodl.MethodFault as method_fault:
+ # This should never happen either
+ self.module.fail_json(msg=method_fault.msg)
+
+ def state_destroy_cluster(self):
+ changed = True
+ result = None
+
+ try:
+ if not self.module.check_mode:
+ task = self.cluster.Destroy_Task()
+ changed, result = wait_for_task(task)
+ self.module.exit_json(changed=changed, result=result)
+ except vim.fault.VimFault as vim_fault:
+ self.module.fail_json(msg=vim_fault.msg)
+ except vmodl.RuntimeFault as runtime_fault:
+ self.module.fail_json(msg=runtime_fault.msg)
+ except vmodl.MethodFault as method_fault:
+ self.module.fail_json(msg=method_fault.msg)
+
+ def state_exit_unchanged(self):
+ self.module.exit_json(changed=False)
+
+ def state_update_cluster(self):
+ cluster_config_spec = vim.cluster.ConfigSpecEx()
+ changed = True
+ result = None
+
+ if self.cluster.configurationEx.dasConfig.enabled != self.enable_ha:
+ cluster_config_spec.dasConfig = self.configure_ha()
+ if self.cluster.configurationEx.drsConfig.enabled != self.enable_drs:
+ cluster_config_spec.drsConfig = self.configure_drs()
+ if self.cluster.configurationEx.vsanConfigInfo.enabled != self.enable_vsan:
+ cluster_config_spec.vsanConfig = self.configure_vsan()
+
+ try:
+ if not self.module.check_mode:
+ task = self.cluster.ReconfigureComputeResource_Task(cluster_config_spec, True)
+ changed, result = wait_for_task(task)
+ self.module.exit_json(changed=changed, result=result)
+ except vmodl.RuntimeFault as runtime_fault:
+ self.module.fail_json(msg=runtime_fault.msg)
+ except vmodl.MethodFault as method_fault:
+ self.module.fail_json(msg=method_fault.msg)
+ except TaskError as task_e:
+ self.module.fail_json(msg=str(task_e))
+
+ def check_cluster_configuration(self):
+ try:
+ self.datacenter = find_datacenter_by_name(self.content, self.datacenter_name)
+ if self.datacenter is None:
+ self.module.fail_json(msg="Datacenter %s does not exist, "
+ "please create first with Ansible Module vmware_datacenter or manually."
+ % self.datacenter_name)
+ self.cluster = find_cluster_by_name_datacenter(self.datacenter, self.cluster_name)
+
+ if self.cluster is None:
+ return 'absent'
+ else:
+ desired_state = (self.enable_ha,
+ self.enable_drs,
+ self.enable_vsan)
+
+ current_state = (self.cluster.configurationEx.dasConfig.enabled,
+ self.cluster.configurationEx.drsConfig.enabled,
+ self.cluster.configurationEx.vsanConfigInfo.enabled)
+
+ if desired_state != current_state:
+ return 'update'
+ else:
+ return 'present'
+ except vmodl.RuntimeFault as runtime_fault:
+ self.module.fail_json(msg=runtime_fault.msg)
+ except vmodl.MethodFault as method_fault:
+ self.module.fail_json(msg=method_fault.msg)
+
+
+def main():
+
+ argument_spec = vmware_argument_spec()
+ argument_spec.update(dict(datacenter_name=dict(required=True, type='str'),
+ cluster_name=dict(required=True, type='str'),
+ enable_ha=dict(default=False, required=False, type='bool'),
+ enable_drs=dict(default=False, required=False, type='bool'),
+ enable_vsan=dict(default=False, required=False, type='bool'),
+ state=dict(default='present', choices=['present', 'absent'], type='str')))
+
+ module = AnsibleModule(argument_spec=argument_spec, supports_check_mode=True)
+
+ if not HAS_PYVMOMI:
+ module.fail_json(msg='pyvmomi is required for this module')
+
+ vmware_cluster = VMwareCluster(module)
+ vmware_cluster.process_state()
+
+from ansible.module_utils.vmware import *
+from ansible.module_utils.basic import *
+
+if __name__ == '__main__':
+ main()
diff --git a/lib/ansible/modules/extras/cloud/vmware/vmware_datacenter.py b/lib/ansible/modules/extras/cloud/vmware/vmware_datacenter.py
new file mode 100644
index 0000000000..ef2fd2f1f7
--- /dev/null
+++ b/lib/ansible/modules/extras/cloud/vmware/vmware_datacenter.py
@@ -0,0 +1,160 @@
+#!/usr/bin/python
+# -*- coding: utf-8 -*-
+
+# (c) 2015, Joseph Callen <jcallen () csc.com>
+#
+# This file is part of Ansible
+#
+# Ansible is free software: you can redistribute it and/or modify
+# it under the terms of the GNU General Public License as published by
+# the Free Software Foundation, either version 3 of the License, or
+# (at your option) any later version.
+#
+# Ansible is distributed in the hope that it will be useful,
+# but WITHOUT ANY WARRANTY; without even the implied warranty of
+# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+# GNU General Public License for more details.
+#
+# You should have received a copy of the GNU General Public License
+# along with Ansible. If not, see <http://www.gnu.org/licenses/>.
+
+DOCUMENTATION = '''
+---
+module: vmware_datacenter
+short_description: Manage VMware vSphere Datacenters
+description:
+ - Manage VMware vSphere Datacenters
+version_added: 2.0
+author: "Joseph Callen (@jcpowermac), Kamil Szczygiel (@kamsz)"
+notes:
+ - Tested on vSphere 6.0
+requirements:
+ - "python >= 2.6"
+ - PyVmomi
+options:
+ hostname:
+ description:
+ - The hostname or IP address of the vSphere vCenter API server
+ required: True
+ username:
+ description:
+ - The username of the vSphere vCenter
+ required: True
+ aliases: ['user', 'admin']
+ password:
+ description:
+ - The password of the vSphere vCenter
+ required: True
+ aliases: ['pass', 'pwd']
+ datacenter_name:
+ description:
+ - The name of the datacenter to be managed.
+ required: True
+ state:
+ description:
+ - If the datacenter should be present or absent
+ choices: ['present', 'absent']
+ default: present
+extends_documentation_fragment: vmware.documentation
+'''
+
+EXAMPLES = '''
+# Example vmware_datacenter command from Ansible Playbooks
+- name: Create Datacenter
+ local_action: >
+ vmware_datacenter
+ hostname="{{ ansible_ssh_host }}" username=root password=vmware
+ datacenter_name="datacenter" state=present
+'''
+
+try:
+ from pyVmomi import vim, vmodl
+ HAS_PYVMOMI = True
+except ImportError:
+ HAS_PYVMOMI = False
+
+
+def get_datacenter(context, module):
+ try:
+ datacenter_name = module.params.get('datacenter_name')
+ datacenter = find_datacenter_by_name(context, datacenter_name)
+ return datacenter
+ except vmodl.RuntimeFault as runtime_fault:
+ module.fail_json(msg=runtime_fault.msg)
+ except vmodl.MethodFault as method_fault:
+ module.fail_json(msg=method_fault.msg)
+
+
+def create_datacenter(context, module):
+ datacenter_name = module.params.get('datacenter_name')
+ folder = context.rootFolder
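+ # new datacenters are always created in the root inventory folder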
+
+ try:
+ datacenter = get_datacenter(context, module)
+ changed = False
+ if not datacenter:
+ changed = True
+ if not module.check_mode:
+ folder.CreateDatacenter(name=datacenter_name)
+ module.exit_json(changed=changed)
+ except vim.fault.DuplicateName:
+ module.fail_json(msg="A datacenter with the name %s already exists" % datacenter_name)
+ except vim.fault.InvalidName:
+ module.fail_json(msg="%s is an invalid name for a cluster" % datacenter_name)
+ except vmodl.fault.NotSupported:
+ # This should never happen
+ module.fail_json(msg="Trying to create a datacenter on an incorrect folder object")
+ except vmodl.RuntimeFault as runtime_fault:
+ module.fail_json(msg=runtime_fault.msg)
+ except vmodl.MethodFault as method_fault:
+ module.fail_json(msg=method_fault.msg)
+
+
+def destroy_datacenter(context, module):
+ result = None
+
+ try:
+ datacenter = get_datacenter(context, module)
+ changed = False
+ if datacenter:
+ changed = True
+ if not module.check_mode:
+ task = datacenter.Destroy_Task()
+ changed, result = wait_for_task(task)
+ module.exit_json(changed=changed, result=result)
+ except vim.fault.VimFault as vim_fault:
+ module.fail_json(msg=vim_fault.msg)
+ except vmodl.RuntimeFault as runtime_fault:
+ module.fail_json(msg=runtime_fault.msg)
+ except vmodl.MethodFault as method_fault:
+ module.fail_json(msg=method_fault.msg)
+
+
+def main():
+
+ argument_spec = vmware_argument_spec()
+ argument_spec.update(
+ dict(
+ datacenter_name=dict(required=True, type='str'),
+ state=dict(default='present', choices=['present', 'absent'], type='str')
+ )
+ )
+ module = AnsibleModule(argument_spec=argument_spec, supports_check_mode=True)
+
+ if not HAS_PYVMOMI:
+ module.fail_json(msg='pyvmomi is required for this module')
+
+ context = connect_to_api(module)
+ state = module.params.get('state')
+
+ if state == 'present':
+ create_datacenter(context, module)
+
+ if state == 'absent':
+ destroy_datacenter(context, module)
+
+from ansible.module_utils.basic import *
+from ansible.module_utils.vmware import *
+
+if __name__ == '__main__':
+ main()
diff --git a/lib/ansible/modules/extras/cloud/vmware/vmware_dns_config.py b/lib/ansible/modules/extras/cloud/vmware/vmware_dns_config.py
new file mode 100644
index 0000000000..57eda23b7d
--- /dev/null
+++ b/lib/ansible/modules/extras/cloud/vmware/vmware_dns_config.py
@@ -0,0 +1,130 @@
+#!/usr/bin/python
+# -*- coding: utf-8 -*-
+
+# (c) 2015, Joseph Callen <jcallen () csc.com>
+#
+# This file is part of Ansible
+#
+# Ansible is free software: you can redistribute it and/or modify
+# it under the terms of the GNU General Public License as published by
+# the Free Software Foundation, either version 3 of the License, or
+# (at your option) any later version.
+#
+# Ansible is distributed in the hope that it will be useful,
+# but WITHOUT ANY WARRANTY; without even the implied warranty of
+# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+# GNU General Public License for more details.
+#
+# You should have received a copy of the GNU General Public License
+# along with Ansible. If not, see <http://www.gnu.org/licenses/>.
+
+DOCUMENTATION = '''
+---
+module: vmware_dns_config
+short_description: Manage VMware ESXi DNS Configuration
+description:
+ - Manage VMware ESXi DNS Configuration
+version_added: 2.0
+author: "Joseph Callen (@jcpowermac)"
+notes:
+ - Tested on vSphere 5.5
+requirements:
+ - "python >= 2.6"
+ - PyVmomi
+options:
+ change_hostname_to:
+ description:
+ - The hostname that an ESXi host should be changed to.
+ required: True
+ domainname:
+ description:
+ - The domain the ESXi host should be a part of.
+ required: True
+ dns_servers:
+ description:
+ - The DNS servers that the host should be configured to use.
+ required: True
+extends_documentation_fragment: vmware.documentation
+'''
+
+EXAMPLES = '''
+# Example vmware_dns_config command from Ansible Playbooks
+- name: Configure ESXi hostname and DNS servers
+ local_action:
+ module: vmware_dns_config
+ hostname: esxi_hostname
+ username: root
+ password: your_password
+ change_hostname_to: esx01
+ domainname: foo.org
+ dns_servers:
+ - 8.8.8.8
+ - 8.8.4.4
+'''
+try:
+ from pyVmomi import vim, vmodl
+ HAS_PYVMOMI = True
+except ImportError:
+ HAS_PYVMOMI = False
+
+
+def configure_dns(host_system, hostname, domainname, dns_servers):
+
+ changed = False
+ host_config_manager = host_system.configManager
+ host_network_system = host_config_manager.networkSystem
+ config = host_network_system.dnsConfig
+
+ config.dhcp = False
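+ # force a static DNS configuration; DHCP-assigned DNS is disabled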
+
+ if config.address != dns_servers:
+ config.address = dns_servers
+ changed = True
+ if config.domainName != domainname:
+ config.domainName = domainname
+ changed = True
+ if config.hostName != hostname:
+ config.hostName = hostname
+ changed = True
+ if changed:
+ host_network_system.UpdateDnsConfig(config)
+
+ return changed
+
+
+def main():
+
+ argument_spec = vmware_argument_spec()
+ argument_spec.update(dict(change_hostname_to=dict(required=True, type='str'),
+ domainname=dict(required=True, type='str'),
+ dns_servers=dict(required=True, type='list')))
+
+ module = AnsibleModule(argument_spec=argument_spec, supports_check_mode=False)
+
+ if not HAS_PYVMOMI:
+ module.fail_json(msg='pyvmomi is required for this module')
+
+ change_hostname_to = module.params['change_hostname_to']
+ domainname = module.params['domainname']
+ dns_servers = module.params['dns_servers']
+ try:
+ content = connect_to_api(module)
+ host = get_all_objs(content, [vim.HostSystem])
+ if not host:
+ module.fail_json(msg="Unable to locate Physical Host.")
+ host_system = host.keys()[0]
+ changed = configure_dns(host_system, change_hostname_to, domainname, dns_servers)
+ module.exit_json(changed=changed)
+ except vmodl.RuntimeFault as runtime_fault:
+ module.fail_json(msg=runtime_fault.msg)
+ except vmodl.MethodFault as method_fault:
+ module.fail_json(msg=method_fault.msg)
+ except Exception as e:
+ module.fail_json(msg=str(e))
+
+
+from ansible.module_utils.vmware import *
+from ansible.module_utils.basic import *
+
+if __name__ == '__main__':
+ main()
diff --git a/lib/ansible/modules/extras/cloud/vmware/vmware_dvs_host.py b/lib/ansible/modules/extras/cloud/vmware/vmware_dvs_host.py
new file mode 100644
index 0000000000..dcfb4ba7f5
--- /dev/null
+++ b/lib/ansible/modules/extras/cloud/vmware/vmware_dvs_host.py
@@ -0,0 +1,249 @@
+#!/usr/bin/python
+# -*- coding: utf-8 -*-
+
+# (c) 2015, Joseph Callen <jcallen () csc.com>
+#
+# This file is part of Ansible
+#
+# Ansible is free software: you can redistribute it and/or modify
+# it under the terms of the GNU General Public License as published by
+# the Free Software Foundation, either version 3 of the License, or
+# (at your option) any later version.
+#
+# Ansible is distributed in the hope that it will be useful,
+# but WITHOUT ANY WARRANTY; without even the implied warranty of
+# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+# GNU General Public License for more details.
+#
+# You should have received a copy of the GNU General Public License
+# along with Ansible. If not, see <http://www.gnu.org/licenses/>.
+
+DOCUMENTATION = '''
+---
+module: vmware_dvs_host
+short_description: Add or remove a host from distributed virtual switch
+description:
+ - Add or remove a host from distributed virtual switch
+version_added: 2.0
+author: "Joseph Callen (@jcpowermac)"
+notes:
+ - Tested on vSphere 5.5
+requirements:
+ - "python >= 2.6"
+ - PyVmomi
+options:
+ esxi_hostname:
+ description:
+ - The ESXi hostname
+ required: True
+ switch_name:
+ description:
+ - The name of the Distributed vSwitch
+ required: True
+ vmnics:
+ description:
+ - The ESXi hosts vmnics to use with the Distributed vSwitch
+ required: True
+ state:
+ description:
+ - If the host should be present or absent attached to the vSwitch
+ choices: ['present', 'absent']
+ required: True
+extends_documentation_fragment: vmware.documentation
+'''
+
+EXAMPLES = '''
+# Example vmware_dvs_host command from Ansible Playbooks
+- name: Add Host to dVS
+ local_action:
+ module: vmware_dvs_host
+ hostname: vcenter_ip_or_hostname
+ username: vcenter_username
+ password: vcenter_password
+ esxi_hostname: esxi_hostname_as_listed_in_vcenter
+ switch_name: dvSwitch
+ vmnics:
+ - vmnic0
+ - vmnic1
+ state: present
+'''
+
+try:
+ import collections
+ from pyVmomi import vim, vmodl
+ HAS_PYVMOMI = True
+except ImportError:
+ HAS_PYVMOMI = False
+
+
+class VMwareDvsHost(object):
+ def __init__(self, module):
+ self.module = module
+ self.dv_switch = None
+ self.uplink_portgroup = None
+ self.host = None
+ self.dv_switch = None
+ self.nic = None
+ self.content = connect_to_api(self.module)
+ self.state = self.module.params['state']
+ self.switch_name = self.module.params['switch_name']
+ self.esxi_hostname = self.module.params['esxi_hostname']
+ self.vmnics = self.module.params['vmnics']
+
+ def process_state(self):
+ try:
+ dvs_host_states = {
+ 'absent': {
+ 'present': self.state_destroy_dvs_host,
+ 'absent': self.state_exit_unchanged,
+ },
+ 'present': {
+ 'update': self.state_update_dvs_host,
+ 'present': self.state_exit_unchanged,
+ 'absent': self.state_create_dvs_host,
+ }
+ }
+
+ dvs_host_states[self.state][self.check_dvs_host_state()]()
+ except vmodl.RuntimeFault as runtime_fault:
+ self.module.fail_json(msg=runtime_fault.msg)
+ except vmodl.MethodFault as method_fault:
+ self.module.fail_json(msg=method_fault.msg)
+ except Exception as e:
+ self.module.fail_json(msg=str(e))
+
+ def find_dvspg_by_name(self):
+ portgroups = self.dv_switch.portgroup
+
+ for pg in portgroups:
+ if pg.name == self.portgroup_name:
+ return pg
+ return None
+
+ def find_dvs_uplink_pg(self):
+ # There should always be exactly one uplink port group on
+ # a distributed virtual switch
+
+ if len(self.dv_switch.config.uplinkPortgroup):
+ return self.dv_switch.config.uplinkPortgroup[0]
+ else:
+ return None
+
+ # operation should be edit, add and remove
+ def modify_dvs_host(self, operation):
+ spec = vim.DistributedVirtualSwitch.ConfigSpec()
+ spec.configVersion = self.dv_switch.config.configVersion
+ spec.host = [vim.dvs.HostMember.ConfigSpec()]
+ spec.host[0].operation = operation
+ spec.host[0].host = self.host
+
+ if operation in ("edit", "add"):
+ spec.host[0].backing = vim.dvs.HostMember.PnicBacking()
+ count = 0
+
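+ # attach each requested vmnic to the switch's uplink port group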
+ for nic in self.vmnics:
+ spec.host[0].backing.pnicSpec.append(vim.dvs.HostMember.PnicSpec())
+ spec.host[0].backing.pnicSpec[count].pnicDevice = nic
+ spec.host[0].backing.pnicSpec[count].uplinkPortgroupKey = self.uplink_portgroup.key
+ count += 1
+
+ task = self.dv_switch.ReconfigureDvs_Task(spec)
+ changed, result = wait_for_task(task)
+ return changed, result
+
+ def state_destroy_dvs_host(self):
+ operation = "remove"
+ changed = True
+ result = None
+
+ if not self.module.check_mode:
+ changed, result = self.modify_dvs_host(operation)
+ self.module.exit_json(changed=changed, result=str(result))
+
+ def state_exit_unchanged(self):
+ self.module.exit_json(changed=False)
+
+ def state_update_dvs_host(self):
+ operation = "edit"
+ changed = True
+ result = None
+
+ if not self.module.check_mode:
+ changed, result = self.modify_dvs_host(operation)
+ self.module.exit_json(changed=changed, result=str(result))
+
+ def state_create_dvs_host(self):
+ operation = "add"
+ changed = True
+ result = None
+
+ if not self.module.check_mode:
+ changed, result = self.modify_dvs_host(operation)
+ self.module.exit_json(changed=changed, result=str(result))
+
+ def find_host_attached_dvs(self):
+ for dvs_host_member in self.dv_switch.config.host:
+ if dvs_host_member.config.host.name == self.esxi_hostname:
+ return dvs_host_member.config.host
+
+ return None
+
+ def check_uplinks(self):
+ pnic_device = []
+
+ for dvs_host_member in self.dv_switch.config.host:
+ if dvs_host_member.config.host == self.host:
+ for pnicSpec in dvs_host_member.config.backing.pnicSpec:
+ pnic_device.append(pnicSpec.pnicDevice)
+
+ return collections.Counter(pnic_device) == collections.Counter(self.vmnics)
+
+ def check_dvs_host_state(self):
+ self.dv_switch = find_dvs_by_name(self.content, self.switch_name)
+
+ if self.dv_switch is None:
+ raise Exception("A distributed virtual switch %s does not exist" % self.switch_name)
+
+ self.uplink_portgroup = self.find_dvs_uplink_pg()
+
+ if self.uplink_portgroup is None:
+ raise Exception("An uplink portgroup does not exist on the distributed virtual switch %s"
+ % self.switch_name)
+
+ self.host = self.find_host_attached_dvs()
+
+ if self.host is None:
+ # We still need the HostSystem object to add the host
+ # to the distributed vswitch
+ self.host = find_hostsystem_by_name(self.content, self.esxi_hostname)
+ if self.host is None:
+ self.module.fail_json(msg="The esxi_hostname %s does not exist in vCenter" % self.esxi_hostname)
+ return 'absent'
+ else:
+ if self.check_uplinks():
+ return 'present'
+ else:
+ return 'update'
+
+
+def main():
+
+ argument_spec = vmware_argument_spec()
+ argument_spec.update(dict(esxi_hostname=dict(required=True, type='str'),
+ switch_name=dict(required=True, type='str'),
+ vmnics=dict(required=True, type='list'),
+ state=dict(default='present', choices=['present', 'absent'], type='str')))
+
+ module = AnsibleModule(argument_spec=argument_spec, supports_check_mode=True)
+
+ if not HAS_PYVMOMI:
+ module.fail_json(msg='pyvmomi is required for this module')
+
+ vmware_dvs_host = VMwareDvsHost(module)
+ vmware_dvs_host.process_state()
+
+from ansible.module_utils.vmware import *
+from ansible.module_utils.basic import *
+
+if __name__ == '__main__':
+ main()
diff --git a/lib/ansible/modules/extras/cloud/vmware/vmware_dvs_portgroup.py b/lib/ansible/modules/extras/cloud/vmware/vmware_dvs_portgroup.py
new file mode 100644
index 0000000000..06b39672ed
--- /dev/null
+++ b/lib/ansible/modules/extras/cloud/vmware/vmware_dvs_portgroup.py
@@ -0,0 +1,198 @@
+#!/usr/bin/python
+# -*- coding: utf-8 -*-
+
+# (c) 2015, Joseph Callen <jcallen () csc.com>
+#
+# This file is part of Ansible
+#
+# Ansible is free software: you can redistribute it and/or modify
+# it under the terms of the GNU General Public License as published by
+# the Free Software Foundation, either version 3 of the License, or
+# (at your option) any later version.
+#
+# Ansible is distributed in the hope that it will be useful,
+# but WITHOUT ANY WARRANTY; without even the implied warranty of
+# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+# GNU General Public License for more details.
+#
+# You should have received a copy of the GNU General Public License
+# along with Ansible. If not, see <http://www.gnu.org/licenses/>.
+
+DOCUMENTATION = '''
+---
+module: vmware_dvs_portgroup
+short_description: Create or remove a Distributed vSwitch portgroup
+description:
+ - Create or remove a Distributed vSwitch portgroup
+version_added: 2.0
+author: "Joseph Callen (@jcpowermac)"
+notes:
+ - Tested on vSphere 5.5
+requirements:
+ - "python >= 2.6"
+ - PyVmomi
+options:
+ portgroup_name:
+ description:
+ - The name of the portgroup that is to be created or deleted
+ required: True
+ switch_name:
+ description:
+ - The name of the distributed vSwitch the port group should be created on.
+ required: True
+ vlan_id:
+ description:
+ - The VLAN ID that should be configured with the portgroup
+ required: True
+ num_ports:
+ description:
+ - The number of ports the portgroup should contain
+ required: True
+ portgroup_type:
+ description:
+ - See VMware KB 1022312 regarding portgroup types
+ required: True
+ choices:
+ - 'earlyBinding'
+ - 'lateBinding'
+ - 'ephemeral'
+extends_documentation_fragment: vmware.documentation
+'''
+
+EXAMPLES = '''
+ - name: Create Management portgroup
+ local_action:
+ module: vmware_dvs_portgroup
+ hostname: vcenter_ip_or_hostname
+ username: vcenter_username
+ password: vcenter_password
+ portgroup_name: Management
+ switch_name: dvSwitch
+ vlan_id: 123
+ num_ports: 120
+ portgroup_type: earlyBinding
+ state: present
+'''
+
+try:
+ from pyVmomi import vim, vmodl
+ HAS_PYVMOMI = True
+except ImportError:
+ HAS_PYVMOMI = False
+
+
+class VMwareDvsPortgroup(object):
+ def __init__(self, module):
+ self.module = module
+ self.dvs_portgroup = None
+ self.switch_name = self.module.params['switch_name']
+ self.portgroup_name = self.module.params['portgroup_name']
+ self.vlan_id = self.module.params['vlan_id']
+ self.num_ports = self.module.params['num_ports']
+ self.portgroup_type = self.module.params['portgroup_type']
+ self.dv_switch = None
+ self.state = self.module.params['state']
+ self.content = connect_to_api(module)
+
+ def process_state(self):
+ try:
+ dvspg_states = {
+ 'absent': {
+ 'present': self.state_destroy_dvspg,
+ 'absent': self.state_exit_unchanged,
+ },
+ 'present': {
+ 'update': self.state_update_dvspg,
+ 'present': self.state_exit_unchanged,
+ 'absent': self.state_create_dvspg,
+ }
+ }
+ dvspg_states[self.state][self.check_dvspg_state()]()
+ except vmodl.RuntimeFault as runtime_fault:
+ self.module.fail_json(msg=runtime_fault.msg)
+ except vmodl.MethodFault as method_fault:
+ self.module.fail_json(msg=method_fault.msg)
+ except Exception as e:
+ self.module.fail_json(msg=str(e))
+
+ def create_port_group(self):
+ config = vim.dvs.DistributedVirtualPortgroup.ConfigSpec()
+
+ config.name = self.portgroup_name
+ config.numPorts = self.num_ports
+
+ # vim.VMwareDVSPortSetting() does not exist in the pyvmomi documentation
+ # but this is the correct managed object type.
+
+ config.defaultPortConfig = vim.VMwareDVSPortSetting()
+
+ # vim.VmwareDistributedVirtualSwitchVlanIdSpec() does not exist in the
+ # pyvmomi documentation but this is the correct managed object type
+ config.defaultPortConfig.vlan = vim.VmwareDistributedVirtualSwitchVlanIdSpec()
+ config.defaultPortConfig.vlan.inherited = False
+ config.defaultPortConfig.vlan.vlanId = self.vlan_id
+ config.type = self.portgroup_type
+
+ spec = [config]
+ task = self.dv_switch.AddDVPortgroup_Task(spec)
+ changed, result = wait_for_task(task)
+ return changed, result
+
+ def state_destroy_dvspg(self):
+ changed = True
+ result = None
+
+ if not self.module.check_mode:
+ task = self.dvs_portgroup.Destroy_Task()
+ changed, result = wait_for_task(task)
+ self.module.exit_json(changed=changed, result=str(result))
+
+ def state_exit_unchanged(self):
+ self.module.exit_json(changed=False)
+
+ def state_update_dvspg(self):
+ self.module.exit_json(changed=False, msg="Currently not implemented.")
+
+ def state_create_dvspg(self):
+ changed = True
+ result = None
+
+ if not self.module.check_mode:
+ changed, result = self.create_port_group()
+ self.module.exit_json(changed=changed, result=str(result))
+
+ def check_dvspg_state(self):
+ self.dv_switch = find_dvs_by_name(self.content, self.switch_name)
+
+ if self.dv_switch is None:
+ raise Exception("A distributed virtual switch with name %s does not exist" % self.switch_name)
+ self.dvs_portgroup = find_dvspg_by_name(self.dv_switch, self.portgroup_name)
+
+ if self.dvs_portgroup is None:
+ return 'absent'
+ else:
+ return 'present'
+
+
+def main():
+ argument_spec = vmware_argument_spec()
+ argument_spec.update(dict(portgroup_name=dict(required=True, type='str'),
+ switch_name=dict(required=True, type='str'),
+ vlan_id=dict(required=True, type='int'),
+ num_ports=dict(required=True, type='int'),
+ portgroup_type=dict(required=True, choices=['earlyBinding', 'lateBinding', 'ephemeral'], type='str'),
+ state=dict(default='present', choices=['present', 'absent'], type='str')))
+
+ module = AnsibleModule(argument_spec=argument_spec, supports_check_mode=True)
+
+ if not HAS_PYVMOMI:
+ module.fail_json(msg='pyvmomi is required for this module')
+
+ vmware_dvs_portgroup = VMwareDvsPortgroup(module)
+ vmware_dvs_portgroup.process_state()
+
+from ansible.module_utils.vmware import *
+from ansible.module_utils.basic import *
+
+if __name__ == '__main__':
+ main()
diff --git a/lib/ansible/modules/extras/cloud/vmware/vmware_dvswitch.py b/lib/ansible/modules/extras/cloud/vmware/vmware_dvswitch.py
new file mode 100644
index 0000000000..fb9d530605
--- /dev/null
+++ b/lib/ansible/modules/extras/cloud/vmware/vmware_dvswitch.py
@@ -0,0 +1,209 @@
+#!/usr/bin/python
+# -*- coding: utf-8 -*-
+
+# (c) 2015, Joseph Callen <jcallen () csc.com>
+#
+# This file is part of Ansible
+#
+# Ansible is free software: you can redistribute it and/or modify
+# it under the terms of the GNU General Public License as published by
+# the Free Software Foundation, either version 3 of the License, or
+# (at your option) any later version.
+#
+# Ansible is distributed in the hope that it will be useful,
+# but WITHOUT ANY WARRANTY; without even the implied warranty of
+# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+# GNU General Public License for more details.
+#
+# You should have received a copy of the GNU General Public License
+# along with Ansible. If not, see <http://www.gnu.org/licenses/>.
+
+DOCUMENTATION = '''
+---
+module: vmware_dvswitch
+short_description: Create or remove a distributed vSwitch
+description:
+ - Create or remove a distributed vSwitch
+version_added: 2.0
+author: "Joseph Callen (@jcpowermac)"
+notes:
+ - Tested on vSphere 5.5
+requirements:
+ - "python >= 2.6"
+ - PyVmomi
+options:
+ datacenter_name:
+ description:
+ - The name of the datacenter that will contain the dvSwitch
+ required: True
+ switch_name:
+ description:
+ - The name of the switch to create or remove
+ required: True
+ mtu:
+ description:
+ - The switch maximum transmission unit
+ required: True
+ uplink_quantity:
+ description:
+ - Quantity of uplinks per ESXi host added to the switch
+ required: True
+ discovery_proto:
+ description:
+ - Link discovery protocol to use, either Cisco Discovery Protocol (cdp) or Link Layer Discovery Protocol (lldp)
+ choices:
+ - 'cdp'
+ - 'lldp'
+ required: True
+ discovery_operation:
+ description:
+ - Select the discovery operation
+ choices:
+ - 'both'
+ - 'none'
+ - 'advertise'
+ - 'listen'
+ state:
+ description:
+ - Create or remove dvSwitch
+ default: 'present'
+ choices:
+ - 'present'
+ - 'absent'
+ required: False
+extends_documentation_fragment: vmware.documentation
+'''
+EXAMPLES = '''
+- name: Create dvswitch
+ local_action:
+ module: vmware_dvswitch
+ hostname: vcenter_ip_or_hostname
+ username: vcenter_username
+ password: vcenter_password
+ datacenter_name: datacenter
+ switch_name: dvSwitch
+ mtu: 9000
+ uplink_quantity: 2
+ discovery_proto: lldp
+ discovery_operation: both
+ state: present
+'''
+
+try:
+ from pyVmomi import vim, vmodl
+ HAS_PYVMOMI = True
+except ImportError:
+ HAS_PYVMOMI = False
+
+class VMwareDVSwitch(object):
+ def __init__(self, module):
+ self.module = module
+ self.dvs = None
+ self.switch_name = self.module.params['switch_name']
+ self.datacenter_name = self.module.params['datacenter_name']
+ self.mtu = self.module.params['mtu']
+ self.uplink_quantity = self.module.params['uplink_quantity']
+ self.discovery_proto = self.module.params['discovery_proto']
+ self.discovery_operation = self.module.params['discovery_operation']
+ self.state = self.module.params['state']
+ self.content = connect_to_api(module)
+
+ def process_state(self):
+ try:
+ dvs_states = {
+ 'absent': {
+ 'present': self.state_destroy_dvs,
+ 'absent': self.state_exit_unchanged,
+ },
+ 'present': {
+ 'update': self.state_update_dvs,
+ 'present': self.state_exit_unchanged,
+ 'absent': self.state_create_dvs,
+ }
+ }
+ dvs_states[self.state][self.check_dvs_configuration()]()
+ except vmodl.RuntimeFault as runtime_fault:
+ self.module.fail_json(msg=runtime_fault.msg)
+ except vmodl.MethodFault as method_fault:
+ self.module.fail_json(msg=method_fault.msg)
+ except Exception as e:
+ self.module.fail_json(msg=str(e))
+
+
+ def create_dvswitch(self, network_folder):
+ result = None
+ changed = False
+
+ spec = vim.DistributedVirtualSwitch.CreateSpec()
+ spec.configSpec = vim.dvs.VmwareDistributedVirtualSwitch.ConfigSpec()
+ spec.configSpec.uplinkPortPolicy = vim.DistributedVirtualSwitch.NameArrayUplinkPortPolicy()
+ spec.configSpec.linkDiscoveryProtocolConfig = vim.host.LinkDiscoveryProtocolConfig()
+
+ spec.configSpec.name = self.switch_name
+ spec.configSpec.maxMtu = self.mtu
+ spec.configSpec.linkDiscoveryProtocolConfig.protocol = self.discovery_proto
+ spec.configSpec.linkDiscoveryProtocolConfig.operation = self.discovery_operation
+ spec.productInfo = vim.dvs.ProductSpec()
+ spec.productInfo.name = "DVS"
+ spec.productInfo.vendor = "VMware"
+
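+ # Uplink ports are named uplink1..uplinkN to match the requested
+ # uplink quantity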
+ for count in range(1, self.uplink_quantity+1):
+ spec.configSpec.uplinkPortPolicy.uplinkPortName.append("uplink%d" % count)
+
+ task = network_folder.CreateDVS_Task(spec)
+ changed, result = wait_for_task(task)
+ return changed, result
+
+ def state_exit_unchanged(self):
+ self.module.exit_json(changed=False)
+
+ def state_destroy_dvs(self):
+ task = self.dvs.Destroy_Task()
+ changed, result = wait_for_task(task)
+ self.module.exit_json(changed=changed, result=str(result))
+
+ def state_update_dvs(self):
+ self.module.exit_json(changed=False, msg="Currently not implemented.")
+
+ def state_create_dvs(self):
+ changed = True
+ result = None
+
+ if not self.module.check_mode:
+ dc = find_datacenter_by_name(self.content, self.datacenter_name)
+ changed, result = self.create_dvswitch(dc.networkFolder)
+
+ self.module.exit_json(changed=changed, result=str(result))
+
+ def check_dvs_configuration(self):
+ self.dvs = find_dvs_by_name(self.content, self.switch_name)
+ if self.dvs is None:
+ return 'absent'
+ else:
+ return 'present'
+
+
+def main():
+ argument_spec = vmware_argument_spec()
+ argument_spec.update(dict(datacenter_name=dict(required=True, type='str'),
+ switch_name=dict(required=True, type='str'),
+ mtu=dict(required=True, type='int'),
+ uplink_quantity=dict(required=True, type='int'),
+ discovery_proto=dict(required=True, choices=['cdp', 'lldp'], type='str'),
+ discovery_operation=dict(required=True, choices=['both', 'none', 'advertise', 'listen'], type='str'),
+ state=dict(default='present', choices=['present', 'absent'], type='str')))
+
+ module = AnsibleModule(argument_spec=argument_spec, supports_check_mode=True)
+
+ if not HAS_PYVMOMI:
+ module.fail_json(msg='pyvmomi is required for this module')
+
+ vmware_dvswitch = VMwareDVSwitch(module)
+ vmware_dvswitch.process_state()
+
+from ansible.module_utils.vmware import *
+from ansible.module_utils.basic import *
+
+if __name__ == '__main__':
+ main()
diff --git a/lib/ansible/modules/extras/cloud/vmware/vmware_guest.py b/lib/ansible/modules/extras/cloud/vmware/vmware_guest.py
new file mode 100644
index 0000000000..3d7ab028e8
--- /dev/null
+++ b/lib/ansible/modules/extras/cloud/vmware/vmware_guest.py
@@ -0,0 +1,959 @@
+#!/usr/bin/python
+# -*- coding: utf-8 -*-
+# This file is part of Ansible
+#
+# Ansible is free software: you can redistribute it and/or modify
+# it under the terms of the GNU General Public License as published by
+# the Free Software Foundation, either version 3 of the License, or
+# (at your option) any later version.
+#
+# Ansible is distributed in the hope that it will be useful,
+# but WITHOUT ANY WARRANTY; without even the implied warranty of
+# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+# GNU General Public License for more details.
+#
+# You should have received a copy of the GNU General Public License
+# along with Ansible. If not, see <http://www.gnu.org/licenses/>.
+
+DOCUMENTATION = '''
+---
+module: vmware_guest
+short_description: Manages virtual machines in vCenter
+description:
+ - Uses pyvmomi to ...
+ - copy a template to a new virtual machine
+ - power on/power off/restart a virtual machine
+ - remove a virtual machine
+version_added: 2.2
+author: James Tanner (@jctanner) <tanner.jc@gmail.com>
+notes:
+ - Tested on vSphere 6.0
+requirements:
+ - "python >= 2.6"
+ - PyVmomi
+options:
+ state:
+ description:
+ - What state should the virtual machine be in?
+ required: True
+ choices: ['present', 'absent', 'poweredon', 'poweredoff', 'restarted', 'suspended']
+ name:
+ description:
+ - Name of the newly deployed guest
+ required: True
+ name_match:
+ description:
+ - If multiple VMs match the name, use the first or last one found
+ required: False
+ default: 'first'
+ choices: ['first', 'last']
+ uuid:
+ description:
+ - UUID of the instance to manage if known; this is VMware's unique identifier.
+ - This is required if name is not supplied.
+ required: False
+ template:
+ description:
+ - Name of the template to deploy, if needed to create the guest (state=present).
+ - If the guest exists already this setting will be ignored.
+ required: False
+ folder:
+ description:
+ - Destination folder path for the new guest
+ required: False
+ hardware:
+ description:
+ - Attributes such as cpus, memory, osid, and disk controller
+ required: False
+ disk:
+ description:
+ - A list of disks to add
+ required: False
+ nic:
+ description:
+ - A list of nics to add
+ required: False
+ wait_for_ip_address:
+ description:
+ - Wait until vcenter detects an IP address for the guest
+ required: False
+ force:
+ description:
+ - Ignore warnings and complete the actions
+ required: False
+ datacenter:
+ description:
+ - Destination datacenter for the deploy operation
+ - Required when deploying a new VM (state=present)
+ required: False
+ esxi_hostname:
+ description:
+ - The ESXi hostname where the VM will run.
+ - Required when deploying a new VM (state=present)
+ required: False
+extends_documentation_fragment: vmware.documentation
+'''
+
+EXAMPLES = '''
+# Example from Ansible playbook
+#
+# Create a VM from a template
+#
+ - name: create the VM
+ vmware_guest:
+ validate_certs: False
+ hostname: 192.0.2.44
+ username: administrator@vsphere.local
+ password: vmware
+ name: testvm_2
+ state: poweredon
+ folder: testvms
+ disk:
+ - size_gb: 10
+ type: thin
+ datastore: g73_datastore
+ nic:
+ - type: vmxnet3
+ network: VM Network
+ network_type: standard
+ hardware:
+ memory_mb: 512
+ num_cpus: 1
+ osid: centos64guest
+ scsi: paravirtual
+ datacenter: datacenter1
+ esxi_hostname: 192.0.2.117
+ template: template_el7
+ wait_for_ip_address: yes
+ register: deploy
+
+#
+# Gather facts only
+#
+ - name: gather the VM facts
+ vmware_guest:
+ validate_certs: False
+ hostname: 192.168.1.209
+ username: administrator@vsphere.local
+ password: vmware
+ name: testvm_2
+ esxi_hostname: 192.168.1.117
+ register: facts
+'''
+
+RETURN = """
+instance:
+ description: metadata about the new virtual machine
+ returned: always
+ type: dict
+ sample: None
+"""
+
+try:
+ import json
+except ImportError:
+ import simplejson as json
+
+HAS_PYVMOMI = False
+try:
+ import pyVmomi
+ from pyVmomi import vim
+ HAS_PYVMOMI = True
+except ImportError:
+ pass
+
+import os
+import string
+import time
+
+from ansible.module_utils.urls import fetch_url
+
+class PyVmomiHelper(object):
+
+ def __init__(self, module):
+
+ if not HAS_PYVMOMI:
+ module.fail_json(msg='pyvmomi module required')
+
+ self.module = module
+ self.params = module.params
+ self.si = None
+ self.smartconnect()
+ self.datacenter = None
+ self.folders = None
+ self.foldermap = None
+
+ def smartconnect(self):
+ self.content = connect_to_api(self.module)
+
+ def _build_folder_tree(self, folder):
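+ # Shape sketch of the returned tree (illustrative):
+ # {'name': folder.name, 'vimobj': folder,
+ # 'virtualmachines': [vim.VirtualMachine, ...],
+ # 'subfolders': {vim.Folder: <same shape>, ...}}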
+
+ tree = {'virtualmachines': [],
+ 'subfolders': {},
+ 'vimobj': folder,
+ 'name': folder.name}
+
+ children = None
+ if hasattr(folder, 'childEntity'):
+ children = folder.childEntity
+
+ if children:
+ for child in children:
+ if child == folder or child in tree:
+ continue
+ if type(child) == vim.Folder:
+ ctree = self._build_folder_tree(child)
+ tree['subfolders'][child] = dict.copy(ctree)
+ elif type(child) == vim.VirtualMachine:
+ tree['virtualmachines'].append(child)
+ else:
+ if type(folder) == vim.VirtualMachine:
+ return folder
+ return tree
+
+
+ def _build_folder_map(self, folder, vmap=None, inpath='/'):
+
+ ''' Build a searchable index for vms+uuids+folders '''
+
+ # A mutable default argument would be shared across calls, so the
+ # index is created fresh on the first (non-recursive) invocation.
+ if vmap is None:
+ vmap = {}
+
+ if type(folder) == tuple:
+ folder = folder[1]
+
+ if 'names' not in vmap:
+ vmap['names'] = {}
+ if 'uuids' not in vmap:
+ vmap['uuids'] = {}
+ if 'paths' not in vmap:
+ vmap['paths'] = {}
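+ # Index shape sketch (illustrative):
+ # vmap['names'][vm_name] -> [uuid, ...]
+ # vmap['uuids'][uuid] -> vm_name
+ # vmap['paths']['/vm/<folder>'] -> [uuid, ...]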
+
+ if inpath == '/':
+ thispath = '/vm'
+ else:
+ thispath = os.path.join(inpath, folder['name'])
+
+ if thispath not in vmap['paths']:
+ vmap['paths'][thispath] = []
+
+ # helpful for isolating folder objects later on
+ if 'path_by_fvim' not in vmap:
+ vmap['path_by_fvim'] = {}
+ if 'fvim_by_path' not in vmap:
+ vmap['fvim_by_path'] = {}
+ # store object by path and store path by object
+ vmap['fvim_by_path'][thispath] = folder['vimobj']
+ vmap['path_by_fvim'][folder['vimobj']] = thispath
+
+ # helpful for isolating vm objects later on
+ if 'path_by_vvim' not in vmap:
+ vmap['path_by_vvim'] = {}
+ if 'vvim_by_path' not in vmap:
+ vmap['vvim_by_path'] = {}
+ if thispath not in vmap['vvim_by_path']:
+ vmap['vvim_by_path'][thispath] = []
+
+
+ for item in folder.items():
+ k = item[0]
+ v = item[1]
+
+ if k == 'name':
+ pass
+ elif k == 'subfolders':
+ for x in v.items():
+ vmap = self._build_folder_map(x, vmap=vmap, inpath=thispath)
+ elif k == 'virtualmachines':
+ for x in v:
+ if x.config.name not in vmap['names']:
+ vmap['names'][x.config.name] = []
+ vmap['names'][x.config.name].append(x.config.uuid)
+ vmap['uuids'][x.config.uuid] = x.config.name
+ vmap['paths'][thispath].append(x.config.uuid)
+
+ if x not in vmap['vvim_by_path'][thispath]:
+ vmap['vvim_by_path'][thispath].append(x)
+ if x not in vmap['path_by_vvim']:
+ vmap['path_by_vvim'][x] = thispath
+ return vmap
+
+ def getfolders(self):
+
+ if not self.datacenter:
+ self.get_datacenter()
+ self.folders = self._build_folder_tree(self.datacenter.vmFolder)
+ self.folder_map = self._build_folder_map(self.folders)
+ return (self.folders, self.folder_map)
+
+ def get_datacenter(self):
+ self.datacenter = get_obj(self.content, [vim.Datacenter],
+ self.params['datacenter'])
+
+ def getvm(self, name=None, uuid=None, folder=None, name_match=None):
+
+ # https://www.vmware.com/support/developer/vc-sdk/visdk2xpubs/ReferenceGuide/vim.SearchIndex.html
+ # self.si.content.searchIndex.FindByInventoryPath('DC1/vm/test_folder')
+
+ vm = None
+ folder_path = None
+
+ if uuid:
+ vm = self.content.searchIndex.FindByUuid(uuid=uuid, vmSearch=True)
+
+ elif folder:
+
+ if self.params['folder'].endswith('/'):
+ self.params['folder'] = self.params['folder'][0:-1]
+
+ # Build the absolute folder path to pass into the search method
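+ # e.g. datacenter='DC1' with folder='/vm/testvms' or folder='/testvms'
+ # both yield the inventory path 'DC1/vm/testvms'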
+ searchpath = None
+ if self.params['folder'].startswith('/vm'):
+ searchpath = '%s' % self.params['datacenter']
+ searchpath += self.params['folder']
+ elif self.params['folder'].startswith('/'):
+ searchpath = '%s' % self.params['datacenter']
+ searchpath += '/vm' + self.params['folder']
+ else:
+ # need to look for matching absolute path
+ if not self.folders:
+ self.getfolders()
+ paths = self.folder_map['paths'].keys()
+ paths = [x for x in paths if x.endswith(self.params['folder'])]
+ if len(paths) > 1:
+ self.module.fail_json(msg='%s matches more than one folder. Please use the absolute path starting with /vm/' % self.params['folder'])
+ elif paths:
+ searchpath = paths[0]
+
+ if searchpath:
+ # get all objects for this path ...
+ fObj = self.content.searchIndex.FindByInventoryPath(searchpath)
+ if fObj:
+ if isinstance(fObj, vim.Datacenter):
+ fObj = fObj.vmFolder
+ for cObj in fObj.childEntity:
+ if not type(cObj) == vim.VirtualMachine:
+ continue
+ if cObj.name == name:
+ vm = cObj
+ break
+
+ else:
+ # FIXME - this is unused if folder has a default value
+ vmList = get_all_objs(self.content, [vim.VirtualMachine])
+ if name_match:
+ if name_match == 'first':
+ vm = get_obj(self.content, [vim.VirtualMachine], name)
+ elif name_match == 'last':
+ matches = []
+ vmList = get_all_objs(self.content, [vim.VirtualMachine])
+ for thisvm in vmList:
+ if thisvm.config.name == name:
+ matches.append(thisvm)
+ if matches:
+ vm = matches[-1]
+ else:
+ matches = []
+ vmList = get_all_objs(self.content, [vim.VirtualMachine])
+ for thisvm in vmList:
+ if thisvm.config.name == name:
+ matches.append(thisvm)
+ if len(matches) > 1:
+ self.module.fail_json(msg='more than 1 vm exists by the name %s. Please specify a uuid, or a folder, or a datacenter or name_match' % name)
+ if matches:
+ vm = matches[0]
+
+ return vm
+
+
+ def set_powerstate(self, vm, state, force):
+ """
+ Set the power status for a VM determined by the current and
+ requested states. When force is set, the transition is attempted
+ even from intermediate power states.
+ """
+ facts = self.gather_facts(vm)
+ expected_state = state.replace('_', '').lower()
+ current_state = facts['hw_power_status'].lower()
+ result = {}
+
+ # Need Force
+ if not force and current_state not in ['poweredon', 'poweredoff']:
+ return "VM is in %s power state. Force is required!" % current_state
+
+ # State is already true
+ if current_state == expected_state:
+ result['changed'] = False
+ result['failed'] = False
+ else:
+ task = None
+ try:
+ if expected_state == 'poweredoff':
+ task = vm.PowerOff()
+
+ elif expected_state == 'poweredon':
+ task = vm.PowerOn()
+
+ elif expected_state == 'restarted':
+ if current_state in ('poweredon', 'poweringon', 'resetting'):
+ task = vm.Reset()
+ else:
+ result = {'changed': False, 'failed': True,
+ 'msg': "Cannot restart VM in the current state %s" % current_state}
+
+ except Exception:
+ result = {'changed': False, 'failed': True,
+ 'msg': get_exception()}
+
+ if task:
+ self.wait_for_task(task)
+ if task.info.state == 'error':
+ result = {'changed': False, 'failed': True, 'msg': task.info.error.msg}
+ else:
+ result = {'changed': True, 'failed': False}
+
+ # need to get new metadata if changed
+ if result['changed']:
+ newvm = self.getvm(uuid=vm.config.uuid)
+ facts = self.gather_facts(newvm)
+ result['instance'] = facts
+ return result
+
+
+ def gather_facts(self, vm):
+
+ ''' Gather facts from vim.VirtualMachine object. '''
+
+ facts = {
+ 'module_hw': True,
+ 'hw_name': vm.config.name,
+ 'hw_power_status': vm.summary.runtime.powerState,
+ 'hw_guest_full_name': vm.summary.guest.guestFullName,
+ 'hw_guest_id': vm.summary.guest.guestId,
+ 'hw_product_uuid': vm.config.uuid,
+ 'hw_processor_count': vm.config.hardware.numCPU,
+ 'hw_memtotal_mb': vm.config.hardware.memoryMB,
+ 'hw_interfaces':[],
+ 'ipv4': None,
+ 'ipv6': None,
+ }
+
+ netDict = {}
+ for device in vm.guest.net:
+ mac = device.macAddress
+ ips = list(device.ipAddress)
+ netDict[mac] = ips
+ for k,v in netDict.iteritems():
+ for ipaddress in v:
+ if ipaddress:
+ if '::' in ipaddress:
+ facts['ipv6'] = ipaddress
+ else:
+ facts['ipv4'] = ipaddress
+
+ for idx,entry in enumerate(vm.config.hardware.device):
+ if not hasattr(entry, 'macAddress'):
+ continue
+
+ factname = 'hw_eth' + str(idx)
+ facts[factname] = {
+ 'addresstype': entry.addressType,
+ 'label': entry.deviceInfo.label,
+ 'macaddress': entry.macAddress,
+ 'ipaddresses': netDict.get(entry.macAddress, None),
+ 'macaddress_dash': entry.macAddress.replace(':', '-'),
+ 'summary': entry.deviceInfo.summary,
+ }
+ facts['hw_interfaces'].append('eth'+str(idx))
+
+ return facts
+
+
+ def remove_vm(self, vm):
+ # https://www.vmware.com/support/developer/converter-sdk/conv60_apireference/vim.ManagedEntity.html#destroy
+ task = vm.Destroy()
+ self.wait_for_task(task)
+
+ if task.info.state == 'error':
+ return ({'changed': False, 'failed': True, 'msg': task.info.error.msg})
+ else:
+ return ({'changed': True, 'failed': False})
+
+
+ def deploy_template(self, poweron=False, wait_for_ip=False):
+
+ # https://github.com/vmware/pyvmomi-community-samples/blob/master/samples/clone_vm.py
+ # https://www.vmware.com/support/developer/vc-sdk/visdk25pubs/ReferenceGuide/vim.vm.CloneSpec.html
+ # https://www.vmware.com/support/developer/vc-sdk/visdk25pubs/ReferenceGuide/vim.vm.ConfigSpec.html
+ # https://www.vmware.com/support/developer/vc-sdk/visdk41pubs/ApiReference/vim.vm.RelocateSpec.html
+
+ # FIXME:
+ # - clusters
+ # - multiple datacenters
+ # - resource pools
+ # - multiple templates by the same name
+ # - multiple disks
+ # - changing the esx host is ignored?
+ # - static IPs
+
+ # FIXME: need to search for this in the same way as guests to ensure accuracy
+ template = get_obj(self.content, [vim.VirtualMachine], self.params['template'])
+ if not template:
+ self.module.fail_json(msg="Could not find a template named %s" % self.params['template'])
+
+ datacenter = get_obj(self.content, [vim.Datacenter],
+ self.params['datacenter'])
+ if not datacenter:
+ self.module.fail_json(msg='No datacenter named %s was found' % self.params['datacenter'])
+
+ if not self.foldermap:
+ self.folders, self.foldermap = self.getfolders()
+
+ # find matching folders
+ if self.params['folder'].startswith('/'):
+ folders = [x for x in self.foldermap['fvim_by_path'].items() if x[0] == self.params['folder']]
+ else:
+ folders = [x for x in self.foldermap['fvim_by_path'].items() if x[0].endswith(self.params['folder'])]
+
+ # throw error if more than one match or no matches
+ if len(folders) == 0:
+ self.module.fail_json(msg='no folder matched the path: %s' % self.params['folder'])
+ elif len(folders) > 1:
+ self.module.fail_json(msg='too many folders matched "%s", please give the full path starting with /vm/' % self.params['folder'])
+
+ # grab the folder vim object
+ destfolder = folders[0][1]
+
+ # FIXME: cluster or hostsystem ... ?
+ #cluster = get_obj(self.content, [vim.ClusterComputeResource], self.params['esxi']['hostname'])
+ hostsystem = get_obj(self.content, [vim.HostSystem], self.params['esxi_hostname'])
+
+ # set the destination datastore in the relocation spec
+ datastore_name = None
+ datastore = None
+ if self.params['disk']:
+ if 'datastore' in self.params['disk'][0]:
+ datastore_name = self.params['disk'][0]['datastore']
+ datastore = get_obj(self.content, [vim.Datastore], datastore_name)
+ if not datastore:
+ # use the template's existing DS
+ disks = [x for x in template.config.hardware.device if isinstance(x, vim.vm.device.VirtualDisk)]
+ datastore = disks[0].backing.datastore
+ datastore_name = datastore.name
+ if not datastore:
+ self.module.fail_json(msg="Failed to find a matching datastore")
+
+ # create the relocation spec
+ relospec = vim.vm.RelocateSpec()
+ relospec.host = hostsystem
+ relospec.datastore = datastore
+
+ # Find the associated resourcepool for the host system
+ # * FIXME: find resourcepool for clusters too
+ resource_pool = None
+ resource_pools = get_all_objs(self.content, [vim.ResourcePool])
+ for rp in resource_pools.items():
+ if rp[0].parent == hostsystem.parent:
+ resource_pool = rp[0]
+ break
+ if resource_pool:
+ relospec.pool = resource_pool
+ else:
+ self.module.fail_json(msg="Failed to find a resource group for %s" \
+ % hostsystem.name)
+
+ clonespec_kwargs = {}
+ clonespec_kwargs['location'] = relospec
+
+ # create disk spec if not default
+ if self.params['disk']:
+ # grab the template's first disk and modify it for this customization
+ disks = [x for x in template.config.hardware.device if isinstance(x, vim.vm.device.VirtualDisk)]
+ diskspec = vim.vm.device.VirtualDeviceSpec()
+ # set the operation to edit so that it knows to keep other settings
+ diskspec.operation = vim.vm.device.VirtualDeviceSpec.Operation.edit
+ diskspec.device = disks[0]
+
+ # get the first disk attributes
+ pspec = self.params.get('disk')[0]
+
+ # is it thin?
+ if pspec.get('type', '').lower() == 'thin':
+ diskspec.device.backing.thinProvisioned = True
+
+ # which datastore?
+ if pspec.get('datastore'):
+ # This is already handled by the relocation spec,
+ # but it needs to eventually be handled for all the
+ # other disks defined
+ pass
+
+ # what size is it?
+ if [x for x in pspec.keys() if x.startswith('size_') or x == 'size']:
+ # size_tb, size_gb, size_mb, size_kb, size_b ...?
+ if 'size' in pspec:
+ # http://stackoverflow.com/a/1451407
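+ # strip the unit suffix from the digits, e.g. size='40gb' yields
+ # expected=40 and unit='gb'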
+ trans = string.maketrans('', '')
+ chars = trans.translate(trans, string.digits)
+ expected = pspec['size'].translate(trans, chars)
+ unit = pspec['size'].replace(expected, '').lower()
+ expected = int(expected)
+ else:
+ param = [x for x in pspec.keys() if x.startswith('size_')][0]
+ unit = param.split('_')[-1].lower()
+ expected = [x[1] for x in pspec.items() if x[0].startswith('size_')][0]
+ expected = int(expected)
+
+ kb = None
+ if unit == 'tb':
+ kb = expected * 1024 * 1024 * 1024
+ elif unit == 'gb':
+ kb = expected * 1024 * 1024
+ elif unit == 'mb':
+ kb = expected * 1024
+ elif unit == 'kb':
+ kb = expected
+ else:
+ self.module.fail_json(msg='%s is not a supported unit for disk size' % unit)
+ diskspec.device.capacityInKB = kb
+
+ # tell the configspec that the disk device needs to change
+ configspec = vim.vm.ConfigSpec(deviceChange=[diskspec])
+ clonespec_kwargs['config'] = configspec
+
+ # set cpu/memory/etc
+ if 'hardware' in self.params:
+ if not 'config' in clonespec_kwargs:
+ clonespec_kwargs['config'] = vim.vm.ConfigSpec()
+ if 'num_cpus' in self.params['hardware']:
+ clonespec_kwargs['config'].numCPUs = \
+ int(self.params['hardware']['num_cpus'])
+ if 'memory_mb' in self.params['hardware']:
+ clonespec_kwargs['config'].memoryMB = \
+ int(self.params['hardware']['memory_mb'])
+
+ clonespec = vim.vm.CloneSpec(**clonespec_kwargs)
+ task = template.Clone(folder=destfolder, name=self.params['name'], spec=clonespec)
+ self.wait_for_task(task)
+
+ if task.info.state == 'error':
+ # https://kb.vmware.com/selfservice/microsites/search.do?language=en_US&cmd=displayKC&externalId=2021361
+ # https://kb.vmware.com/selfservice/microsites/search.do?language=en_US&cmd=displayKC&externalId=2173
+ return ({'changed': False, 'failed': True, 'msg': task.info.error.msg})
+ else:
+
+ vm = task.info.result
+ # honor the requested power state; an IP address can only be
+ # awaited on a powered-on guest
+ if poweron:
+ self.set_powerstate(vm, 'poweredon', force=False)
+ if poweron and wait_for_ip:
+ self.wait_for_vm_ip(vm)
+ vm_facts = self.gather_facts(vm)
+ return ({'changed': True, 'failed': False, 'instance': vm_facts})
+
+
+ def wait_for_task(self, task):
+ # https://www.vmware.com/support/developer/vc-sdk/visdk25pubs/ReferenceGuide/vim.Task.html
+ # https://www.vmware.com/support/developer/vc-sdk/visdk25pubs/ReferenceGuide/vim.TaskInfo.html
+ # https://github.com/virtdevninja/pyvmomi-community-samples/blob/master/samples/tools/tasks.py
+ while task.info.state not in ['success', 'error']:
+ time.sleep(1)
+
+ def wait_for_vm_ip(self, vm, poll=100, sleep=5):
+ ips = None
+ facts = {}
+ thispoll = 0
+ while not ips and thispoll <= poll:
+ newvm = self.getvm(uuid=vm.config.uuid)
+ facts = self.gather_facts(newvm)
+ if facts['ipv4'] or facts['ipv6']:
+ ips = True
+ else:
+ time.sleep(sleep)
+ thispoll += 1
+
+ return facts
+
+
+ def fetch_file_from_guest(self, vm, username, password, src, dest):
+
+ ''' Use VMWare's filemanager api to fetch a file over http '''
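+ # Usage sketch (hypothetical guest credentials and paths):
+ # helper.fetch_file_from_guest(vm, 'root', 'secret',
+ # '/etc/hostname', '/tmp/hostname')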
+
+ result = {'failed': False}
+
+ tools_status = vm.guest.toolsStatus
+ if (tools_status == 'toolsNotInstalled' or
+ tools_status == 'toolsNotRunning'):
+ result['failed'] = True
+ result['msg'] = "VMwareTools is not installed or is not running in the guest"
+ return result
+
+ # https://github.com/vmware/pyvmomi/blob/master/docs/vim/vm/guest/NamePasswordAuthentication.rst
+ creds = vim.vm.guest.NamePasswordAuthentication(
+ username=username, password=password
+ )
+
+ # https://github.com/vmware/pyvmomi/blob/master/docs/vim/vm/guest/FileManager/FileTransferInformation.rst
+ fti = self.content.guestOperationsManager.fileManager. \
+ InitiateFileTransferFromGuest(vm, creds, src)
+
+ result['size'] = fti.size
+ result['url'] = fti.url
+
+ # Use module_utils to fetch the remote url returned from the api
+ rsp, info = fetch_url(self.module, fti.url, use_proxy=False,
+ force=True, last_mod_time=None,
+ timeout=10, headers=None)
+
+ # save all of the transfer data
+ for k,v in info.iteritems():
+ result[k] = v
+
+ # exit early if xfer failed
+ if info['status'] != 200:
+ result['failed'] = True
+ return result
+
+ # attempt to read the content and write it
+ try:
+ with open(dest, 'wb') as f:
+ f.write(rsp.read())
+ except Exception as e:
+ result['failed'] = True
+ result['msg'] = str(e)
+
+ return result
+
+
+ def push_file_to_guest(self, vm, username, password, src, dest, overwrite=True):
+
+ ''' Use VMWare's filemanager api to push a file over http '''
+
+ result = {'failed': False}
+
+ tools_status = vm.guest.toolsStatus
+ if (tools_status == 'toolsNotInstalled' or
+ tools_status == 'toolsNotRunning'):
+ result['failed'] = True
+ result['msg'] = "VMwareTools is not installed or is not running in the guest"
+ return result
+
+ # https://github.com/vmware/pyvmomi/blob/master/docs/vim/vm/guest/NamePasswordAuthentication.rst
+ creds = vim.vm.guest.NamePasswordAuthentication(
+ username=username, password=password
+ )
+
+ # the api requires a filesize in bytes
+ filesize = None
+ fdata = None
+ try:
+ filesize = os.stat(src).st_size
+ with open(src, 'rb') as f:
+ fdata = f.read()
+ result['local_filesize'] = filesize
+ except Exception as e:
+ result['failed'] = True
+ result['msg'] = "Unable to read src file: %s" % str(e)
+ return result
+
+ # https://www.vmware.com/support/developer/converter-sdk/conv60_apireference/vim.vm.guest.FileManager.html#initiateFileTransferToGuest
+ file_attribute = vim.vm.guest.FileManager.FileAttributes()
+ url = self.content.guestOperationsManager.fileManager. \
+ InitiateFileTransferToGuest(vm, creds, dest, file_attribute,
+ filesize, overwrite)
+
+ # PUT the filedata to the url ...
+ rsp, info = fetch_url(self.module, url, method="put", data=fdata,
+ use_proxy=False, force=True, last_mod_time=None,
+ timeout=10, headers=None)
+
+ result['msg'] = str(rsp.read())
+
+ # save all of the transfer data
+ for k,v in info.iteritems():
+ result[k] = v
+
+ return result
+
+
+ def run_command_in_guest(self, vm, username, password, program_path, program_args, program_cwd, program_env):
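+ # Usage sketch (hypothetical values); runs /bin/ls in the guest and
+ # blocks until VMware Tools reports the process has finished:
+ # helper.run_command_in_guest(vm, 'root', 'secret',
+ # '/bin/ls', '-la /tmp', '/tmp', None)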
+
+ result = {'failed': False}
+
+ tools_status = vm.guest.toolsStatus
+ if (tools_status == 'toolsNotInstalled' or
+ tools_status == 'toolsNotRunning'):
+ result['failed'] = True
+ result['msg'] = "VMwareTools is not installed or is not running in the guest"
+ return result
+
+ # https://github.com/vmware/pyvmomi/blob/master/docs/vim/vm/guest/NamePasswordAuthentication.rst
+ creds = vim.vm.guest.NamePasswordAuthentication(
+ username=username, password=password
+ )
+
+ res = None
+ pdata = None
+ try:
+ # https://github.com/vmware/pyvmomi/blob/master/docs/vim/vm/guest/ProcessManager.rst
+ pm = self.content.guestOperationsManager.processManager
+ # https://www.vmware.com/support/developer/converter-sdk/conv51_apireference/vim.vm.guest.ProcessManager.ProgramSpec.html
+ ps = vim.vm.guest.ProcessManager.ProgramSpec(
+ programPath=program_path,
+ arguments=program_args,
+ workingDirectory=program_cwd,
+ )
+ res = pm.StartProgramInGuest(vm, creds, ps)
+ result['pid'] = res
+ pdata = pm.ListProcessesInGuest(vm, creds, [res])
+
+ # wait for pid to finish
+ while not pdata[0].endTime:
+ time.sleep(1)
+ pdata = pm.ListProcessesInGuest(vm, creds, [res])
+ result['owner'] = pdata[0].owner
+ result['startTime'] = pdata[0].startTime.isoformat()
+ result['endTime'] = pdata[0].endTime.isoformat()
+ result['exitCode'] = pdata[0].exitCode
+ if result['exitCode'] != 0:
+ result['failed'] = True
+ result['msg'] = "program exited non-zero"
+ else:
+ result['msg'] = "program completed successfully"
+
+ except Exception as e:
+ result['msg'] = str(e)
+ result['failed'] = True
+
+ return result
+
+def get_obj(content, vimtype, name):
+ """
+ Return an object by name, if name is None the
+ first found object is returned
+ """
+ obj = None
+ container = content.viewManager.CreateContainerView(
+ content.rootFolder, vimtype, True)
+ for c in container.view:
+ if name:
+ if c.name == name:
+ obj = c
+ break
+ else:
+ obj = c
+ break
+
+ container.Destroy()
+ return obj
+
+
+def main():
+
+ vm = None
+
+ module = AnsibleModule(
+ argument_spec=dict(
+ hostname=dict(
+ type='str',
+ default=os.environ.get('VMWARE_HOST')
+ ),
+ username=dict(
+ type='str',
+ default=os.environ.get('VMWARE_USER')
+ ),
+ password=dict(
+ type='str', no_log=True,
+ default=os.environ.get('VMWARE_PASSWORD')
+ ),
+ state=dict(
+ required=False,
+ choices=[
+ 'poweredon',
+ 'poweredoff',
+ 'present',
+ 'absent',
+ 'restarted',
+ 'reconfigured'
+ ],
+ default='present'),
+ validate_certs=dict(required=False, type='bool', default=True),
+ template_src=dict(required=False, type='str', aliases=['template']),
+ name=dict(required=True, type='str'),
+ name_match=dict(required=False, type='str', default='first', choices=['first', 'last']),
+ uuid=dict(required=False, type='str'),
+ folder=dict(required=False, type='str', default='/vm'),
+ disk=dict(required=False, type='list'),
+ nic=dict(required=False, type='list'),
+ hardware=dict(required=False, type='dict', default={}),
+ force=dict(required=False, type='bool', default=False),
+ datacenter=dict(required=False, type='str', default=None),
+ esxi_hostname=dict(required=False, type='str', default=None),
+ wait_for_ip_address=dict(required=False, type='bool', default=True)
+ ),
+ supports_check_mode=True,
+ mutually_exclusive=[],
+ required_together=[
+ ['state', 'force'],
+ ],
+ )
+
+ pyv = PyVmomiHelper(module)
+
+ # Check if the VM exists before continuing
+ vm = pyv.getvm(name=module.params['name'],
+ folder=module.params['folder'],
+ uuid=module.params['uuid'],
+ name_match=module.params['name_match'])
+
+ # VM already exists
+ if vm:
+
+ if module.params['state'] == 'absent':
+ # destroy it
+ if module.params['force']:
+ # has to be poweredoff first
+ result = pyv.set_powerstate(vm, 'poweredoff', module.params['force'])
+ result = pyv.remove_vm(vm)
+ elif module.params['state'] in ['poweredon', 'poweredoff', 'restarted']:
+ # set powerstate
+ result = pyv.set_powerstate(vm, module.params['state'], module.params['force'])
+ else:
+ # Run for facts only
+ try:
+ module.exit_json(instance=pyv.gather_facts(vm))
+ except Exception:
+ e = get_exception()
+ module.fail_json(
+ msg="Fact gather failed with exception %s" % e)
+
+ # VM doesn't exist
+ else:
+ create_states = ['poweredon', 'poweredoff', 'present', 'restarted']
+ if module.params['state'] in create_states:
+ poweron = (module.params['state'] != 'poweredoff')
+ # Create it ...
+ result = pyv.deploy_template(
+ poweron=poweron,
+ wait_for_ip=module.params['wait_for_ip_address']
+ )
+ result['changed'] = True
+ else:
+ # 'absent' (or any other state) on a nonexistent VM is a no-op
+ result = {'changed': False, 'failed': False}
+
+ # FIXME
+ if not 'failed' in result:
+ result['failed'] = False
+
+ if result['failed']:
+ module.fail_json(**result)
+ else:
+ module.exit_json(**result)
+
+
+from ansible.module_utils.vmware import *
+from ansible.module_utils.basic import *
+
+if __name__ == '__main__':
+ main()
diff --git a/lib/ansible/modules/extras/cloud/vmware/vmware_host.py b/lib/ansible/modules/extras/cloud/vmware/vmware_host.py
new file mode 100644
index 0000000000..dd8e2f9eed
--- /dev/null
+++ b/lib/ansible/modules/extras/cloud/vmware/vmware_host.py
@@ -0,0 +1,225 @@
+#!/usr/bin/python
+# -*- coding: utf-8 -*-
+
+# (c) 2015, Joseph Callen <jcallen () csc.com>
+#
+# This file is part of Ansible
+#
+# Ansible is free software: you can redistribute it and/or modify
+# it under the terms of the GNU General Public License as published by
+# the Free Software Foundation, either version 3 of the License, or
+# (at your option) any later version.
+#
+# Ansible is distributed in the hope that it will be useful,
+# but WITHOUT ANY WARRANTY; without even the implied warranty of
+# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+# GNU General Public License for more details.
+#
+# You should have received a copy of the GNU General Public License
+# along with Ansible. If not, see <http://www.gnu.org/licenses/>.
+
+DOCUMENTATION = '''
+---
+module: vmware_host
+short_description: Add/remove ESXi host to/from vCenter
+description:
+ - This module can be used to add/remove an ESXi host to/from vCenter
+version_added: 2.0
+author: "Joseph Callen (@jcpowermac), Russell Teague (@mtnbikenc)"
+notes:
+ - Tested on vSphere 5.5
+requirements:
+ - "python >= 2.6"
+ - PyVmomi
+options:
+ datacenter_name:
+ description:
+ - Name of the datacenter to add the host
+ required: True
+ cluster_name:
+ description:
+ - Name of the cluster to add the host
+ required: True
+ esxi_hostname:
+ description:
+ - ESXi hostname to manage
+ required: True
+ esxi_username:
+ description:
+ - ESXi username
+ required: True
+ esxi_password:
+ description:
+ - ESXi password
+ required: True
+ state:
+ description:
+ - Add or remove the host
+ default: 'present'
+ choices:
+ - 'present'
+ - 'absent'
+ required: False
+extends_documentation_fragment: vmware.documentation
+'''
+
+EXAMPLES = '''
+# Example from Ansible playbook
+
+ - name: Add ESXi Host to VCSA
+ local_action:
+ module: vmware_host
+ hostname: vcsa_host
+ username: vcsa_user
+ password: vcsa_pass
+ datacenter_name: datacenter_name
+ cluster_name: cluster_name
+ esxi_hostname: esxi_hostname
+ esxi_username: esxi_username
+ esxi_password: esxi_password
+ state: present
+'''
+
+try:
+ from pyVmomi import vim, vmodl
+ HAS_PYVMOMI = True
+except ImportError:
+ HAS_PYVMOMI = False
+
+
+class VMwareHost(object):
+ def __init__(self, module):
+ self.module = module
+ self.datacenter_name = module.params['datacenter_name']
+ self.cluster_name = module.params['cluster_name']
+ self.esxi_hostname = module.params['esxi_hostname']
+ self.esxi_username = module.params['esxi_username']
+ self.esxi_password = module.params['esxi_password']
+ self.state = module.params['state']
+ self.dc = None
+ self.cluster = None
+ self.host = None
+ self.content = connect_to_api(module)
+
+ def process_state(self):
+ try:
+ # Currently state_update_dvs is not implemented.
+ host_states = {
+ 'absent': {
+ 'present': self.state_remove_host,
+ 'absent': self.state_exit_unchanged,
+ },
+ 'present': {
+ 'present': self.state_exit_unchanged,
+ 'absent': self.state_add_host,
+ }
+ }
+
+ host_states[self.state][self.check_host_state()]()
+
+ except vmodl.RuntimeFault as runtime_fault:
+ self.module.fail_json(msg=runtime_fault.msg)
+ except vmodl.MethodFault as method_fault:
+ self.module.fail_json(msg=method_fault.msg)
+ except Exception as e:
+ self.module.fail_json(msg=str(e))
+
+ def find_host_by_cluster_datacenter(self):
+ self.dc = find_datacenter_by_name(self.content, self.datacenter_name)
+ self.cluster = find_cluster_by_name_datacenter(self.dc, self.cluster_name)
+
+ for host in self.cluster.host:
+ if host.name == self.esxi_hostname:
+ return host, self.cluster
+
+ return None, self.cluster
+
+ def add_host_to_vcenter(self):
+ host_connect_spec = vim.host.ConnectSpec()
+ host_connect_spec.hostName = self.esxi_hostname
+ host_connect_spec.userName = self.esxi_username
+ host_connect_spec.password = self.esxi_password
+ host_connect_spec.force = True
+ host_connect_spec.sslThumbprint = ""
+ as_connected = True
+ esxi_license = None
+ resource_pool = None
+
+ try:
+ task = self.cluster.AddHost_Task(host_connect_spec, as_connected, resource_pool, esxi_license)
+ success, result = wait_for_task(task)
+ return success, result
+ except TaskError as add_task_error:
+ # This is almost certain to fail the first time.
+ # In order to get the sslThumbprint we first connect
+ # get the vim.fault.SSLVerifyFault then grab the sslThumbprint
+ # from that object.
+ #
+ # args is a tuple, selecting the first tuple
+ ssl_verify_fault = add_task_error.args[0]
+ host_connect_spec.sslThumbprint = ssl_verify_fault.thumbprint
+
+ task = self.cluster.AddHost_Task(host_connect_spec, as_connected, resource_pool, esxi_license)
+ success, result = wait_for_task(task)
+ return success, result
+
+ def state_exit_unchanged(self):
+ self.module.exit_json(changed=False)
+
+ def state_remove_host(self):
+ changed = True
+ result = None
+ if not self.module.check_mode:
+ if not self.host.runtime.inMaintenanceMode:
+ maintenance_mode_task = self.host.EnterMaintenanceMode_Task(300, True, None)
+ changed, result = wait_for_task(maintenance_mode_task)
+
+ if changed:
+ task = self.host.Destroy_Task()
+ changed, result = wait_for_task(task)
+ else:
+ raise Exception(result)
+ self.module.exit_json(changed=changed, result=str(result))
+
+ def state_update_host(self):
+ self.module.exit_json(changed=False, msg="Currently not implemented.")
+
+ def state_add_host(self):
+ changed = True
+ result = None
+
+ if not self.module.check_mode:
+ changed, result = self.add_host_to_vcenter()
+ self.module.exit_json(changed=changed, result=str(result))
+
+ def check_host_state(self):
+ self.host, self.cluster = self.find_host_by_cluster_datacenter()
+
+ if self.host is None:
+ return 'absent'
+ else:
+ return 'present'
+
+
+def main():
+ argument_spec = vmware_argument_spec()
+ argument_spec.update(dict(datacenter_name=dict(required=True, type='str'),
+ cluster_name=dict(required=True, type='str'),
+ esxi_hostname=dict(required=True, type='str'),
+ esxi_username=dict(required=True, type='str'),
+ esxi_password=dict(required=True, type='str', no_log=True),
+ state=dict(default='present', choices=['present', 'absent'], type='str')))
+
+ module = AnsibleModule(argument_spec=argument_spec, supports_check_mode=True)
+
+ if not HAS_PYVMOMI:
+ module.fail_json(msg='pyvmomi is required for this module')
+
+ vmware_host = VMwareHost(module)
+ vmware_host.process_state()
+
+from ansible.module_utils.vmware import *
+from ansible.module_utils.basic import *
+
+if __name__ == '__main__':
+ main()
diff --git a/lib/ansible/modules/extras/cloud/vmware/vmware_local_user_manager.py b/lib/ansible/modules/extras/cloud/vmware/vmware_local_user_manager.py
new file mode 100644
index 0000000000..ff7736fe88
--- /dev/null
+++ b/lib/ansible/modules/extras/cloud/vmware/vmware_local_user_manager.py
@@ -0,0 +1,191 @@
+#!/usr/bin/python
+# -*- coding: utf-8 -*-
+
+# Copyright IBM Corp. 2016
+# Author(s): Andreas Nafpliotis <nafpliot@de.ibm.com>
+
+# This file is part of Ansible
+#
+# Ansible is free software: you can redistribute it and/or modify
+# it under the terms of the GNU General Public License as published by
+# the Free Software Foundation, either version 3 of the License, or
+# (at your option) any later version.
+#
+# Ansible is distributed in the hope that it will be useful,
+# but WITHOUT ANY WARRANTY; without even the implied warranty of
+# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+# GNU General Public License for more details.
+#
+# You should have received a copy of the GNU General Public License
+# along with Ansible. If not, see <http://www.gnu.org/licenses/
+
+DOCUMENTATION = '''
+---
+module: vmware_local_user_manager
+short_description: Manage local users on an ESXi host
+description:
+ - Manage local users on an ESXi host
+version_added: "2.2"
+author: Andreas Nafpliotis
+notes:
+ - Tested on ESXi 6.0
+ - Be sure that the ESXi user used for login has the appropriate rights to create/delete/edit users
+requirements:
+ - "python >= 2.6"
+ - PyVmomi installed
+options:
+ local_user_name:
+ description:
+ - The local user name to be changed
+ required: True
+ local_user_password:
+ description:
+ - The password to be set
+ required: False
+ local_user_description:
+ description:
+ - Description for the user
+ required: False
+ state:
+ description:
+ - Indicate desired state of the user. If the user already exists when C(state=present), the user info is updated
+ choices: ['present', 'absent']
+ default: present
+extends_documentation_fragment: vmware.documentation
+'''
+
+EXAMPLES = '''
+# Example vmware_local_user_manager command from Ansible Playbooks
+- name: Add local user to ESXi
+ local_action:
+ module: vmware_local_user_manager
+ hostname: esxi_hostname
+ username: root
+ password: vmware
+ local_user_name: foo
+'''
+
+RETURN = '''# '''
+
+try:
+ from pyVmomi import vim, vmodl
+ HAS_PYVMOMI = True
+except ImportError:
+ HAS_PYVMOMI = False
+
+
+class VMwareLocalUserManager(object):
+ def __init__(self, module):
+ self.module = module
+ self.content = connect_to_api(self.module)
+ self.local_user_name = self.module.params['local_user_name']
+ self.local_user_password = self.module.params['local_user_password']
+ self.local_user_description = self.module.params['local_user_description']
+ self.state = self.module.params['state']
+
+ def process_state(self):
+ try:
+ local_account_manager_states = {
+ 'absent': {
+ 'present': self.state_remove_user,
+ 'absent': self.state_exit_unchanged,
+ },
+ 'present': {
+ 'present': self.state_update_user,
+ 'absent': self.state_create_user,
+ }
+ }
+
+ local_account_manager_states[self.state][self.check_local_user_manager_state()]()
+ except vmodl.RuntimeFault as runtime_fault:
+ self.module.fail_json(msg=runtime_fault.msg)
+ except vmodl.MethodFault as method_fault:
+ self.module.fail_json(msg=method_fault.msg)
+ except Exception as e:
+ self.module.fail_json(msg=str(e))
+
+
+ def check_local_user_manager_state(self):
+ user_account = self.find_user_account()
+ if not user_account:
+ return 'absent'
+ else:
+ return 'present'
+
+
+ def find_user_account(self):
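+ # Exact-match search for local users only (findGroups=False); the
+ # result is falsy when no matching account exists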
+ searchStr = self.local_user_name
+ exactMatch = True
+ findUsers = True
+ findGroups = False
+ user_account = self.content.userDirectory.RetrieveUserGroups(None, searchStr, None, None, exactMatch, findUsers, findGroups)
+ return user_account
+
+
+ def create_account_spec(self):
+ account_spec = vim.host.LocalAccountManager.AccountSpecification()
+ account_spec.id = self.local_user_name
+ account_spec.password = self.local_user_password
+ account_spec.description = self.local_user_description
+ return account_spec
+
+
+ def state_create_user(self):
+ account_spec = self.create_account_spec()
+
+ try:
+ task = self.content.accountManager.CreateUser(account_spec)
+ self.module.exit_json(changed=True)
+ except vmodl.RuntimeFault as runtime_fault:
+ self.module.fail_json(msg=runtime_fault.msg)
+ except vmodl.MethodFault as method_fault:
+ self.module.fail_json(msg=method_fault.msg)
+
+ def state_update_user(self):
+ account_spec = self.create_account_spec()
+
+ try:
+ task = self.content.accountManager.UpdateUser(account_spec)
+ self.module.exit_json(changed=True)
+ except vmodl.RuntimeFault as runtime_fault:
+ self.module.fail_json(msg=runtime_fault.msg)
+ except vmodl.MethodFault as method_fault:
+ self.module.fail_json(msg=method_fault.msg)
+
+
+ def state_remove_user(self):
+ try:
+ task = self.content.accountManager.RemoveUser(self.local_user_name)
+ self.module.exit_json(changed=True)
+ except vmodl.RuntimeFault as runtime_fault:
+ self.module.fail_json(msg=runtime_fault.msg)
+ except vmodl.MethodFault as method_fault:
+ self.module.fail_json(msg=method_fault.msg)
+
+
+ def state_exit_unchanged(self):
+ self.module.exit_json(changed=False)
+
+
+
+def main():
+
+ argument_spec = vmware_argument_spec()
+ argument_spec.update(dict(local_user_name=dict(required=True, type='str'),
+ local_user_password=dict(required=False, type='str', no_log=True),
+ local_user_description=dict(required=False, type='str'),
+ state=dict(default='present', choices=['present', 'absent'], type='str')))
+
+ module = AnsibleModule(argument_spec=argument_spec, supports_check_mode=False)
+
+ if not HAS_PYVMOMI:
+ module.fail_json(msg='pyvmomi is required for this module')
+
+ vmware_local_user_manager = VMwareLocalUserManager(module)
+ vmware_local_user_manager.process_state()
+
+from ansible.module_utils.vmware import *
+from ansible.module_utils.basic import *
+
+if __name__ == '__main__':
+ main()
diff --git a/lib/ansible/modules/extras/cloud/vmware/vmware_maintenancemode.py b/lib/ansible/modules/extras/cloud/vmware/vmware_maintenancemode.py
new file mode 100644
index 0000000000..84d774ec76
--- /dev/null
+++ b/lib/ansible/modules/extras/cloud/vmware/vmware_maintenancemode.py
@@ -0,0 +1,212 @@
+#!/usr/bin/python
+# -*- coding: utf-8 -*-
+
+# (c) 2015, VMware, Inc.
+#
+# This file is part of Ansible
+#
+# Ansible is free software: you can redistribute it and/or modify
+# it under the terms of the GNU General Public License as published by
+# the Free Software Foundation, either version 3 of the License, or
+# (at your option) any later version.
+#
+# Ansible is distributed in the hope that it will be useful,
+# but WITHOUT ANY WARRANTY; without even the implied warranty of
+# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+# GNU General Public License for more details.
+#
+# You should have received a copy of the GNU General Public License
+# along with Ansible. If not, see <http://www.gnu.org/licenses/>.
+
+
+DOCUMENTATION = '''
+---
+module: vmware_maintenancemode
+short_description: Place a host into maintenance mode
+description:
+ - Place an ESXi host into maintenance mode
+ - Supports VSAN-compliant maintenance mode when selected
+author: "Jay Jahns <jjahns@vmware.com>"
+version_added: "2.1"
+notes:
+ - Tested on vSphere 5.5 and 6.0
+requirements:
+ - "python >= 2.6"
+ - PyVmomi
+options:
+ esxi_hostname:
+ description:
+ - Name of the host as defined in vCenter
+ required: True
+ vsan:
+ description:
+ - Specify which VSAN compliant mode to enter
+ choices:
+ - 'ensureObjectAccessibility'
+ - 'evacuateAllData'
+ - 'noAction'
+ required: False
+ evacuate:
+ description:
+ - If True, evacuate all powered off VMs
+ choices:
+ - True
+ - False
+ default: False
+ required: False
+ timeout:
+ description:
+ - Specify a timeout for the operation
+ required: False
+ default: 0
+ state:
+ description:
+ - Enter or exit maintenance mode
+ choices:
+ - present
+ - absent
+ default: present
+ required: False
+extends_documentation_fragment: vmware.documentation
+'''
+
+EXAMPLES = '''
+- name: Enter VSAN-Compliant Maintenance Mode
+ local_action:
+ module: vmware_maintenancemode
+ hostname: vc_host
+ username: vc_user
+ password: vc_pass
+ esxi_hostname: esxi.host.example
+ vsan: ensureObjectAccessibility
+ evacuate: yes
+ timeout: 3600
+ state: present
+'''
+RETURN = '''
+hostsystem:
+ description: Name of vim reference
+ returned: always
+ type: string
+ sample: "'vim.HostSystem:host-236'"
+hostname:
+ description: Name of host in vCenter
+ returned: always
+ type: string
+ sample: "esxi.local.domain"
+status:
+ description: Action taken
+ returned: always
+ type: string
+ sample: "ENTER"
+'''
+
+try:
+ from pyVmomi import vim
+ HAS_PYVMOMI = True
+
+except ImportError:
+ HAS_PYVMOMI = False
+
+
+def EnterMaintenanceMode(module, host):
+
+ if host.runtime.inMaintenanceMode:
+ module.exit_json(
+ changed=False,
+ hostsystem=str(host),
+ hostname=module.params['esxi_hostname'],
+ status='NO_ACTION',
+ msg='Host already in maintenance mode')
+
+ spec = vim.host.MaintenanceSpec()
+
+ if module.params['vsan']:
+ spec.vsanMode = vim.vsan.host.DecommissionMode()
+ spec.vsanMode.objectAction = module.params['vsan']
+
+ try:
+ task = host.EnterMaintenanceMode_Task(
+ module.params['timeout'],
+ module.params['evacuate'],
+ spec)
+
+ success, result = wait_for_task(task)
+
+ return dict(changed=success,
+ hostsystem=str(host),
+ hostname=module.params['esxi_hostname'],
+ status='ENTER',
+ msg='Host entered maintenance mode')
+
+ except TaskError:
+ module.fail_json(
+ msg='Host failed to enter maintenance mode')
+
+
+def ExitMaintenanceMode(module, host):
+ if not host.runtime.inMaintenanceMode:
+ module.exit_json(
+ changed=False,
+ hostsystem=str(host),
+ hostname=module.params['esxi_hostname'],
+ status='NO_ACTION',
+ msg='Host not in maintenance mode')
+
+ try:
+ task = host.ExitMaintenanceMode_Task(
+ module.params['timeout'])
+
+ success, result = wait_for_task(task)
+
+ return dict(changed=success,
+ hostsystem=str(host),
+ hostname=module.params['esxi_hostname'],
+ status='EXIT',
+ msg='Host exited maintenance mode')
+
+ except TaskError:
+ module.fail_json(
+ msg='Host failed to exit maintenance mode')
+
+
+def main():
+ spec = vmware_argument_spec()
+ spec.update(dict(
+ esxi_hostname=dict(required=True),
+ vsan=dict(required=False, choices=['ensureObjectAccessibility',
+ 'evacuateAllData',
+ 'noAction']),
+ evacuate=dict(required=False, type='bool', default=False),
+ timeout=dict(required=False, default=0, type='int'),
+ state=dict(required=False,
+ default='present',
+ choices=['present', 'absent'])))
+
+ module = AnsibleModule(argument_spec=spec)
+
+ if not HAS_PYVMOMI:
+ module.fail_json(msg='pyvmomi is required for this module')
+
+ content = connect_to_api(module)
+ host = find_hostsystem_by_name(content, module.params['esxi_hostname'])
+
+ if not host:
+ module.fail_json(
+ msg='Host not found in vCenter')
+
+ if module.params['state'] == 'present':
+ result = EnterMaintenanceMode(module, host)
+
+ elif module.params['state'] == 'absent':
+ result = ExitMaintenanceMode(module, host)
+
+ module.exit_json(**result)
+
+
+from ansible.module_utils.basic import *
+from ansible.module_utils.vmware import *
+
+
+if __name__ == '__main__':
+ main()
diff --git a/lib/ansible/modules/extras/cloud/vmware/vmware_migrate_vmk.py b/lib/ansible/modules/extras/cloud/vmware/vmware_migrate_vmk.py
new file mode 100644
index 0000000000..a18dcc4a88
--- /dev/null
+++ b/lib/ansible/modules/extras/cloud/vmware/vmware_migrate_vmk.py
@@ -0,0 +1,196 @@
+#!/usr/bin/python
+# -*- coding: utf-8 -*-
+
+# (c) 2015, Joseph Callen <jcallen () csc.com>
+#
+# This file is part of Ansible
+#
+# Ansible is free software: you can redistribute it and/or modify
+# it under the terms of the GNU General Public License as published by
+# the Free Software Foundation, either version 3 of the License, or
+# (at your option) any later version.
+#
+# Ansible is distributed in the hope that it will be useful,
+# but WITHOUT ANY WARRANTY; without even the implied warranty of
+# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+# GNU General Public License for more details.
+#
+# You should have received a copy of the GNU General Public License
+# along with Ansible. If not, see <http://www.gnu.org/licenses/>.
+
+DOCUMENTATION = '''
+---
+module: vmware_migrate_vmk
+short_description: Migrate a VMK interface from VSS to VDS
+description:
+ - Migrate a VMK interface from VSS to VDS
+version_added: 2.0
+author: "Joseph Callen (@jcpowermac), Russell Teague (@mtnbikenc)"
+notes:
+ - Tested on vSphere 5.5
+requirements:
+ - "python >= 2.6"
+ - PyVmomi
+options:
+ esxi_hostname:
+ description:
+ - ESXi hostname to be managed
+ required: True
+ device:
+ description:
+ - VMK interface name
+ required: True
+ current_switch_name:
+ description:
+ - Switch VMK interface is currently on
+ required: True
+ current_portgroup_name:
+ description:
+ - Portgroup name VMK interface is currently on
+ required: True
+ migrate_switch_name:
+ description:
+ - Switch name to migrate VMK interface to
+ required: True
+ migrate_portgroup_name:
+ description:
+ - Portgroup name to migrate VMK interface to
+ required: True
+extends_documentation_fragment: vmware.documentation
+'''
+
+EXAMPLES = '''
+# Example from Ansible playbook
+
+ - name: Migrate Management vmk
+ local_action:
+ module: vmware_migrate_vmk
+ hostname: vcsa_host
+ username: vcsa_user
+ password: vcsa_pass
+ esxi_hostname: esxi_hostname
+ device: vmk1
+ current_switch_name: temp_vswitch
+ current_portgroup_name: esx-mgmt
+ migrate_switch_name: dvSwitch
+ migrate_portgroup_name: Management
+'''
+try:
+ from pyVmomi import vim, vmodl
+ HAS_PYVMOMI = True
+except ImportError:
+ HAS_PYVMOMI = False
+
+
+class VMwareMigrateVmk(object):
+ def __init__(self, module):
+ self.module = module
+ self.host_system = None
+ self.migrate_switch_name = self.module.params['migrate_switch_name']
+ self.migrate_portgroup_name = self.module.params['migrate_portgroup_name']
+ self.device = self.module.params['device']
+ self.esxi_hostname = self.module.params['esxi_hostname']
+ self.current_portgroup_name = self.module.params['current_portgroup_name']
+ self.current_switch_name = self.module.params['current_switch_name']
+ self.content = connect_to_api(self.module)
+
+ def process_state(self):
+ try:
+ vmk_migration_states = {
+ 'migrate_vss_vds': self.state_migrate_vss_vds,
+ 'migrate_vds_vss': self.state_migrate_vds_vss,
+ 'migrated': self.state_exit_unchanged
+ }
+
+ vmk_migration_states[self.check_vmk_current_state()]()
+
+ except vmodl.RuntimeFault as runtime_fault:
+ self.module.fail_json(msg=runtime_fault.msg)
+ except vmodl.MethodFault as method_fault:
+ self.module.fail_json(msg=method_fault.msg)
+ except Exception as e:
+ self.module.fail_json(msg=str(e))
+
+ def state_exit_unchanged(self):
+ self.module.exit_json(changed=False)
+
+ def state_migrate_vds_vss(self):
+ self.module.exit_json(changed=False, msg="Currently Not Implemented")
+
+ def create_host_vnic_config(self, dv_switch_uuid, portgroup_key):
+ host_vnic_config = vim.host.VirtualNic.Config()
+ host_vnic_config.spec = vim.host.VirtualNic.Specification()
+
+ host_vnic_config.changeOperation = "edit"
+ host_vnic_config.device = self.device
+ host_vnic_config.portgroup = ""
+ host_vnic_config.spec.distributedVirtualPort = vim.dvs.PortConnection()
+ host_vnic_config.spec.distributedVirtualPort.switchUuid = dv_switch_uuid
+ host_vnic_config.spec.distributedVirtualPort.portgroupKey = portgroup_key
+
+ return host_vnic_config
+
+ def create_port_group_config(self):
+ port_group_config = vim.host.PortGroup.Config()
+ port_group_config.spec = vim.host.PortGroup.Specification()
+
+ port_group_config.changeOperation = "remove"
+ port_group_config.spec.name = self.current_portgroup_name
+ port_group_config.spec.vlanId = -1
+ port_group_config.spec.vswitchName = self.current_switch_name
+ port_group_config.spec.policy = vim.host.NetworkPolicy()
+
+ return port_group_config
+
+ def state_migrate_vss_vds(self):
+ host_network_system = self.host_system.configManager.networkSystem
+
+ dv_switch = find_dvs_by_name(self.content, self.migrate_switch_name)
+ pg = find_dvspg_by_name(dv_switch, self.migrate_portgroup_name)
+
+ config = vim.host.NetworkConfig()
+ config.portgroup = [self.create_port_group_config()]
+ config.vnic = [self.create_host_vnic_config(dv_switch.uuid, pg.key)]
+ host_network_system.UpdateNetworkConfig(config, "modify")
+ self.module.exit_json(changed=True)
+
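+ # A short summary of the detection logic below: a vnic without a
+ # distributedVirtualPort that sits on the expected standard portgroup still
+ # needs migrating to the VDS ('migrate_vss_vds'); a vnic already on a
+ # distributed port is 'migrate_vds_vss' when current_switch_name resolves
+ # to the DVS it is attached to, and 'migrated' otherwise.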
+ def check_vmk_current_state(self):
+ self.host_system = find_hostsystem_by_name(self.content, self.esxi_hostname)
+
+ for vnic in self.host_system.configManager.networkSystem.networkInfo.vnic:
+ if vnic.device == self.device:
+ if vnic.spec.distributedVirtualPort is None:
+ if vnic.portgroup == self.current_portgroup_name:
+ return "migrate_vss_vds"
+ else:
+ dvs = find_dvs_by_name(self.content, self.current_switch_name)
+ if dvs is None:
+ return "migrated"
+ if vnic.spec.distributedVirtualPort.switchUuid == dvs.uuid:
+ return "migrate_vds_vss"
+
+
+def main():
+
+ argument_spec = vmware_argument_spec()
+ argument_spec.update(dict(esxi_hostname=dict(required=True, type='str'),
+ device=dict(required=True, type='str'),
+ current_switch_name=dict(required=True, type='str'),
+ current_portgroup_name=dict(required=True, type='str'),
+ migrate_switch_name=dict(required=True, type='str'),
+ migrate_portgroup_name=dict(required=True, type='str')))
+
+ module = AnsibleModule(argument_spec=argument_spec, supports_check_mode=False)
+
+ if not HAS_PYVMOMI:
+ module.fail_json(msg='pyvmomi is required for this module')
+
+ vmware_migrate_vmk = VMwareMigrateVmk(module)
+ vmware_migrate_vmk.process_state()
+
+from ansible.module_utils.vmware import *
+from ansible.module_utils.basic import *
+
+if __name__ == '__main__':
+ main()
diff --git a/lib/ansible/modules/extras/cloud/vmware/vmware_portgroup.py b/lib/ansible/modules/extras/cloud/vmware/vmware_portgroup.py
new file mode 100644
index 0000000000..c367a976f2
--- /dev/null
+++ b/lib/ansible/modules/extras/cloud/vmware/vmware_portgroup.py
@@ -0,0 +1,163 @@
+#!/usr/bin/python
+# -*- coding: utf-8 -*-
+
+# (c) 2015, Joseph Callen <jcallen () csc.com>
+#
+# This file is part of Ansible
+#
+# Ansible is free software: you can redistribute it and/or modify
+# it under the terms of the GNU General Public License as published by
+# the Free Software Foundation, either version 3 of the License, or
+# (at your option) any later version.
+#
+# Ansible is distributed in the hope that it will be useful,
+# but WITHOUT ANY WARRANTY; without even the implied warranty of
+# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+# GNU General Public License for more details.
+#
+# You should have received a copy of the GNU General Public License
+# along with Ansible. If not, see <http://www.gnu.org/licenses/>.
+
+DOCUMENTATION = '''
+---
+module: vmware_portgroup
+short_description: Create a VMware portgroup
+description:
+ - Create a VMware portgroup
+version_added: 2.0
+author: "Joseph Callen (@jcpowermac), Russell Teague (@mtnbikenc)"
+notes:
+ - Tested on vSphere 5.5
+requirements:
+ - "python >= 2.6"
+ - PyVmomi
+options:
+ switch_name:
+ description:
+ - vSwitch to modify
+ required: True
+ portgroup_name:
+ description:
+ - Portgroup name to add
+ required: True
+ vlan_id:
+ description:
+ - VLAN ID to assign to portgroup
+ required: True
+ network_policy:
+ description:
+ - Network policy specifies layer 2 security settings for a
+ portgroup such as promiscuous mode, where the guest adapter listens
+ to all packets, MAC address changes and forged transmits.
+ Settings are promiscuous_mode, forged_transmits, mac_changes
+ required: False
+ version_added: "2.2"
+extends_documentation_fragment: vmware.documentation
+'''
+
+EXAMPLES = '''
+Example from Ansible playbook
+
+ - name: Add Management Network VM Portgroup
+ local_action:
+ module: vmware_portgroup
+ hostname: esxi_hostname
+ username: esxi_username
+ password: esxi_password
+ switch_name: vswitch_name
+ portgroup_name: portgroup_name
+ vlan_id: vlan_id
+
+ - name: Add Portgroup with Promiscuous Mode Enabled
+ local_action:
+ module: vmware_portgroup
+ hostname: esxi_hostname
+ username: esxi_username
+ password: esxi_password
+ switch_name: vswitch_name
+ portgroup_name: portgroup_name
+ network_policy:
+ promiscuous_mode: True
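+
+ # Illustrative only (not part of the original module docs): all three
+ # network_policy settings can be supplied together.
+ - name: Add Portgroup with an explicit security policy (illustrative)
+ local_action:
+ module: vmware_portgroup
+ hostname: esxi_hostname
+ username: esxi_username
+ password: esxi_password
+ switch_name: vswitch_name
+ portgroup_name: portgroup_name
+ vlan_id: vlan_id
+ network_policy:
+ promiscuous_mode: True
+ forged_transmits: True
+ mac_changes: True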
+'''
+
+try:
+ from pyVmomi import vim, vmodl
+ HAS_PYVMOMI = True
+except ImportError:
+ HAS_PYVMOMI = False
+
+
+def create_network_policy(promiscuous_mode, forged_transmits, mac_changes):
+
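+ # Note: these truthiness checks can only enable settings; passing an
+ # explicit False is ignored rather than applied.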
+ security_policy = vim.host.NetworkPolicy.SecurityPolicy()
+ if promiscuous_mode:
+ security_policy.allowPromiscuous = promiscuous_mode
+ if forged_transmits:
+ security_policy.forgedTransmits = forged_transmits
+ if mac_changes:
+ security_policy.macChanges = mac_changes
+ network_policy = vim.host.NetworkPolicy(security=security_policy)
+ return network_policy
+
+
+def create_port_group(host_system, portgroup_name, vlan_id, vswitch_name, network_policy):
+
+ config = vim.host.NetworkConfig()
+ config.portgroup = [vim.host.PortGroup.Config()]
+ config.portgroup[0].changeOperation = "add"
+ config.portgroup[0].spec = vim.host.PortGroup.Specification()
+ config.portgroup[0].spec.name = portgroup_name
+ config.portgroup[0].spec.vlanId = vlan_id
+ config.portgroup[0].spec.vswitchName = vswitch_name
+ config.portgroup[0].spec.policy = network_policy
+
+ host_network_config_result = host_system.configManager.networkSystem.UpdateNetworkConfig(config, "modify")
+ return True
+
+
+def main():
+
+ argument_spec = vmware_argument_spec()
+ argument_spec.update(dict(portgroup_name=dict(required=True, type='str'),
+ switch_name=dict(required=True, type='str'),
+ vlan_id=dict(required=True, type='int'),
+ network_policy=dict(required=False, type='dict', default={})))
+
+ module = AnsibleModule(argument_spec=argument_spec, supports_check_mode=False)
+
+ if not HAS_PYVMOMI:
+ module.fail_json(msg='pyvmomi is required for this module')
+
+ portgroup_name = module.params['portgroup_name']
+ switch_name = module.params['switch_name']
+ vlan_id = module.params['vlan_id']
+ promiscuous_mode = module.params['network_policy'].get('promiscuous_mode', None)
+ forged_transmits = module.params['network_policy'].get('forged_transmits', None)
+ mac_changes = module.params['network_policy'].get('mac_changes', None)
+
+ try:
+ content = connect_to_api(module)
+ host = get_all_objs(content, [vim.HostSystem])
+ if not host:
+ module.fail_json(msg="Unable to locate Physical Host.")
+ host_system = host.keys()[0]
+
+ if find_host_portgroup_by_name(host_system, portgroup_name):
+ module.exit_json(changed=False)
+
+ network_policy = create_network_policy(promiscuous_mode, forged_transmits, mac_changes)
+ changed = create_port_group(host_system, portgroup_name, vlan_id, switch_name, network_policy)
+
+ module.exit_json(changed=changed)
+ except vmodl.RuntimeFault as runtime_fault:
+ module.fail_json(msg=runtime_fault.msg)
+ except vmodl.MethodFault as method_fault:
+ module.fail_json(msg=method_fault.msg)
+ except Exception as e:
+ module.fail_json(msg=str(e))
+
+from ansible.module_utils.vmware import *
+from ansible.module_utils.basic import *
+
+if __name__ == '__main__':
+ main()
diff --git a/lib/ansible/modules/extras/cloud/vmware/vmware_target_canonical_facts.py b/lib/ansible/modules/extras/cloud/vmware/vmware_target_canonical_facts.py
new file mode 100644
index 0000000000..cbf9d3edaa
--- /dev/null
+++ b/lib/ansible/modules/extras/cloud/vmware/vmware_target_canonical_facts.py
@@ -0,0 +1,95 @@
+#!/usr/bin/python
+# -*- coding: utf-8 -*-
+
+# (c) 2015, Joseph Callen <jcallen () csc.com>
+#
+# This file is part of Ansible
+#
+# Ansible is free software: you can redistribute it and/or modify
+# it under the terms of the GNU General Public License as published by
+# the Free Software Foundation, either version 3 of the License, or
+# (at your option) any later version.
+#
+# Ansible is distributed in the hope that it will be useful,
+# but WITHOUT ANY WARRANTY; without even the implied warranty of
+# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+# GNU General Public License for more details.
+#
+# You should have received a copy of the GNU General Public License
+# along with Ansible. If not, see <http://www.gnu.org/licenses/>.
+
+DOCUMENTATION = '''
+---
+module: vmware_target_canonical_facts
+short_description: Return canonical (NAA) from an ESXi host
+description:
+ - Return canonical (NAA) from an ESXi host based on SCSI target ID
+version_added: "2.0"
+author: Joseph Callen
+notes:
+ - Tested on vSphere 5.5
+requirements:
+ - PyVmomi
+options:
+ target_id:
+ description:
+ - The target id based on order of scsi device
+ required: True
+extends_documentation_fragment: vmware.documentation
+'''
+
+EXAMPLES = '''
+# Example vmware_target_canonical_facts command from Ansible Playbooks
+- name: Get Canonical name
+ local_action: >
+ vmware_target_canonical_facts
+ hostname="{{ ansible_ssh_host }}" username=root password=vmware
+ target_id=7
+'''
+
+try:
+ from pyVmomi import vim, vmodl
+ HAS_PYVMOMI = True
+except ImportError:
+ HAS_PYVMOMI = False
+
+
+def find_hostsystem(content):
+ host_system = get_all_objs(content, [vim.HostSystem])
+ for host in host_system:
+ return host
+ return None
+
+
+def main():
+
+ argument_spec = vmware_argument_spec()
+ argument_spec.update(dict(target_id=dict(required=True, type='int')))
+ module = AnsibleModule(argument_spec=argument_spec, supports_check_mode=False)
+
+ if not HAS_PYVMOMI:
+ module.fail_json(msg='pyvmomi is required for this module')
+
+ content = connect_to_api(module)
+ host = find_hostsystem(content)
+
+ target_lun_uuid = {}
+ scsilun_canonical = {}
+
+ # Associate the scsiLun key with the canonicalName (NAA)
+ for scsilun in host.config.storageDevice.scsiLun:
+ scsilun_canonical[scsilun.key] = scsilun.canonicalName
+
+ # Associate target number with LUN uuid
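+ # (only the first storage adapter, adapter[0], is inspected here)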
+ for target in host.config.storageDevice.scsiTopology.adapter[0].target:
+ for lun in target.lun:
+ target_lun_uuid[target.target] = lun.scsiLun
+
+ module.exit_json(changed=False, canonical=scsilun_canonical[target_lun_uuid[module.params['target_id']]])
+
+from ansible.module_utils.basic import *
+from ansible.module_utils.vmware import *
+
+if __name__ == '__main__':
+ main()
+
diff --git a/lib/ansible/modules/extras/cloud/vmware/vmware_vm_facts.py b/lib/ansible/modules/extras/cloud/vmware/vmware_vm_facts.py
new file mode 100644
index 0000000000..6238184914
--- /dev/null
+++ b/lib/ansible/modules/extras/cloud/vmware/vmware_vm_facts.py
@@ -0,0 +1,101 @@
+#!/usr/bin/python
+# -*- coding: utf-8 -*-
+
+# (c) 2015, Joseph Callen <jcallen () csc.com>
+#
+# This file is part of Ansible
+#
+# Ansible is free software: you can redistribute it and/or modify
+# it under the terms of the GNU General Public License as published by
+# the Free Software Foundation, either version 3 of the License, or
+# (at your option) any later version.
+#
+# Ansible is distributed in the hope that it will be useful,
+# but WITHOUT ANY WARRANTY; without even the implied warranty of
+# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+# GNU General Public License for more details.
+#
+# You should have received a copy of the GNU General Public License
+# along with Ansible. If not, see <http://www.gnu.org/licenses/>.
+
+DOCUMENTATION = '''
+---
+module: vmware_vm_facts
+short_description: Return basic facts pertaining to a vSphere virtual machine guest
+description:
+ - Return basic facts pertaining to a vSphere virtual machine guest
+version_added: 2.0
+author: "Joseph Callen (@jcpowermac)"
+notes:
+ - Tested on vSphere 5.5
+requirements:
+ - "python >= 2.6"
+ - PyVmomi
+extends_documentation_fragment: vmware.documentation
+'''
+
+EXAMPLES = '''
+- name: Gather all registered virtual machines
+ local_action:
+ module: vmware_vm_facts
+ hostname: esxi_or_vcenter_ip_or_hostname
+ username: username
+ password: password
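+
+# Illustrative only (not part of the original docs): register the result and
+# read the returned dictionary, which is keyed by VM name.
+- name: Gather all registered virtual machines
+ local_action:
+ module: vmware_vm_facts
+ hostname: esxi_or_vcenter_ip_or_hostname
+ username: username
+ password: password
+ register: vm_facts
+
+- debug: msg="{{ vm_facts.virtual_machines['some_vm_name'].power_state }}"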
+'''
+
+try:
+ from pyVmomi import vim, vmodl
+ HAS_PYVMOMI = True
+except ImportError:
+ HAS_PYVMOMI = False
+
+
+# https://github.com/vmware/pyvmomi-community-samples/blob/master/samples/getallvms.py
+def get_all_virtual_machines(content):
+ virtual_machines = get_all_objs(content, [vim.VirtualMachine])
+ _virtual_machines = {}
+
+ for vm in virtual_machines:
+ _ip_address = ""
+ summary = vm.summary
+ if summary.guest is not None:
+ _ip_address = summary.guest.ipAddress
+ if _ip_address is None:
+ _ip_address = ""
+
+ virtual_machine = {
+ summary.config.name: {
+ "guest_fullname": summary.config.guestFullName,
+ "power_state": summary.runtime.powerState,
+ "ip_address": _ip_address
+ }
+ }
+
+ _virtual_machines.update(virtual_machine)
+ return _virtual_machines
+
+
+def main():
+
+ argument_spec = vmware_argument_spec()
+ module = AnsibleModule(argument_spec=argument_spec, supports_check_mode=False)
+
+ if not HAS_PYVMOMI:
+ module.fail_json(msg='pyvmomi is required for this module')
+
+ try:
+ content = connect_to_api(module)
+ _virtual_machines = get_all_virtual_machines(content)
+ module.exit_json(changed=False, virtual_machines=_virtual_machines)
+ except vmodl.RuntimeFault as runtime_fault:
+ module.fail_json(msg=runtime_fault.msg)
+ except vmodl.MethodFault as method_fault:
+ module.fail_json(msg=method_fault.msg)
+ except Exception as e:
+ module.fail_json(msg=str(e))
+
+from ansible.module_utils.vmware import *
+from ansible.module_utils.basic import *
+
+if __name__ == '__main__':
+ main()
diff --git a/lib/ansible/modules/extras/cloud/vmware/vmware_vm_shell.py b/lib/ansible/modules/extras/cloud/vmware/vmware_vm_shell.py
new file mode 100644
index 0000000000..80b4df192b
--- /dev/null
+++ b/lib/ansible/modules/extras/cloud/vmware/vmware_vm_shell.py
@@ -0,0 +1,186 @@
+#!/usr/bin/python
+# -*- coding: utf-8 -*-
+
+# (c) 2015, 2016 Ritesh Khadgaray <khadgaray () gmail.com>
+#
+# This file is part of Ansible
+#
+# Ansible is free software: you can redistribute it and/or modify
+# it under the terms of the GNU General Public License as published by
+# the Free Software Foundation, either version 3 of the License, or
+# (at your option) any later version.
+#
+# Ansible is distributed in the hope that it will be useful,
+# but WITHOUT ANY WARRANTY; without even the implied warranty of
+# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+# GNU General Public License for more details.
+#
+# You should have received a copy of the GNU General Public License
+# along with Ansible. If not, see <http://www.gnu.org/licenses/>.
+
+DOCUMENTATION = '''
+---
+module: vmware_vm_shell
+short_description: Execute a process in a VM
+description:
+ - Start a program in a VM without the need for a network connection
+version_added: 2.1
+author: "Ritesh Khadgaray (@ritzk)"
+notes:
+ - Tested on vSphere 5.5
+ - Only the first match against vm_id is used, even if there are multiple matches
+requirements:
+ - "python >= 2.6"
+ - PyVmomi
+options:
+ datacenter:
+ description:
+ - The datacenter hosting the VM
+ - Will help speed up search
+ required: False
+ default: None
+ cluster:
+ description:
+ - The cluster hosting the VM
+ - Will help speed up search
+ required: False
+ default: None
+ vm_id:
+ description:
+ - The identification for the VM
+ required: True
+ vm_id_type:
+ description:
+ - The identification tag for the VM
+ default: vm_name
+ choices:
+ - 'uuid'
+ - 'dns_name'
+ - 'inventory_path'
+ - 'vm_name'
+ required: False
+ vm_username:
+ description:
+ - The user to connect to the VM.
+ required: False
+ default: None
+ vm_password:
+ description:
+ - The password used to login to the VM.
+ required: False
+ default: None
+ vm_shell:
+ description:
+ - The absolute path to the program to start. On Linux this is executed via bash.
+ required: True
+ vm_shell_args:
+ description:
+ - The argument to the program.
+ required: False
+ default: None
+ vm_shell_env:
+ description:
+ - Comma-separated list of environment variables, specified in the guest OS notation
+ required: False
+ default: None
+ vm_shell_cwd:
+ description:
+ - The current working directory of the application from which it will be run
+ required: False
+ default: None
+extends_documentation_fragment: vmware.documentation
+'''
+
+EXAMPLES = '''
+ - name: shell execution
+ local_action:
+ module: vmware_vm_shell
+ hostname: myVSphere
+ username: myUsername
+ password: mySecret
+ datacenter: myDatacenter
+ vm_id: NameOfVM
+ vm_username: root
+ vm_password: superSecret
+ vm_shell: /bin/echo
+ vm_shell_args: " $var >> myFile "
+ vm_shell_env:
+ - "PATH=/bin"
+ - "VAR=test"
+ vm_shell_cwd: "/tmp"
+
+'''
+
+try:
+ from pyVmomi import vim, vmodl
+ HAS_PYVMOMI = True
+except ImportError:
+ HAS_PYVMOMI = False
+
+# https://github.com/vmware/pyvmomi-community-samples/blob/master/samples/execute_program_in_vm.py
+def execute_command(content, vm, vm_username, vm_password, program_path, args="", env=None, cwd=None):
+
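+ # StartProgramInGuest only launches the process and returns its PID inside
+ # the guest; it does not wait for completion or capture output.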
+ creds = vim.vm.guest.NamePasswordAuthentication(username=vm_username, password=vm_password)
+ cmdspec = vim.vm.guest.ProcessManager.ProgramSpec(arguments=args, envVariables=env, programPath=program_path, workingDirectory=cwd)
+ cmdpid = content.guestOperationsManager.processManager.StartProgramInGuest(vm=vm, auth=creds, spec=cmdspec)
+
+ return cmdpid
+
+def main():
+
+ argument_spec = vmware_argument_spec()
+ argument_spec.update(dict(datacenter=dict(default=None, type='str'),
+ cluster=dict(default=None, type='str'),
+ vm_id=dict(required=True, type='str'),
+ vm_id_type=dict(default='vm_name', type='str', choices=['inventory_path', 'uuid', 'dns_name', 'vm_name']),
+ vm_username=dict(required=False, type='str'),
+ vm_password=dict(required=False, type='str', no_log=True),
+ vm_shell=dict(required=True, type='str'),
+ vm_shell_args=dict(default=" ", type='str'),
+ vm_shell_env=dict(default=None, type='list'),
+ vm_shell_cwd=dict(default=None, type='str')))
+
+ module = AnsibleModule(argument_spec=argument_spec, supports_check_mode=False)
+
+ if not HAS_PYVMOMI:
+ module.fail_json(changed=False, msg='pyvmomi is required for this module')
+
+ try:
+ p = module.params
+ datacenter_name = p['datacenter']
+ cluster_name = p['cluster']
+ content = connect_to_api(module)
+
+ datacenter = None
+ if datacenter_name:
+ datacenter = find_datacenter_by_name(content, datacenter_name)
+ if not datacenter:
+ module.fail_json(changed=False, msg="datacenter not found")
+
+ cluster = None
+ if cluster_name:
+ cluster = find_cluster_by_name(content, cluster_name, datacenter)
+ if not cluster:
+ module.fail_json(changed=False, msg="cluster not found")
+
+ vm = find_vm_by_id(content, p['vm_id'], p['vm_id_type'], datacenter, cluster)
+ if not vm:
+ module.fail_json(msg='VM not found')
+
+ msg = execute_command(content, vm, p['vm_username'], p['vm_password'],
+ p['vm_shell'], p['vm_shell_args'], p['vm_shell_env'], p['vm_shell_cwd'])
+
+ module.exit_json(changed=True, uuid=vm.summary.config.uuid, msg=msg)
+ except vmodl.RuntimeFault as runtime_fault:
+ module.fail_json(changed=False, msg=runtime_fault.msg)
+ except vmodl.MethodFault as method_fault:
+ module.fail_json(changed=False, msg=method_fault.msg)
+ except Exception as e:
+ module.fail_json(changed=False, msg=str(e))
+
+from ansible.module_utils.vmware import *
+from ansible.module_utils.basic import *
+
+if __name__ == '__main__':
+ main()
diff --git a/lib/ansible/modules/extras/cloud/vmware/vmware_vm_vss_dvs_migrate.py b/lib/ansible/modules/extras/cloud/vmware/vmware_vm_vss_dvs_migrate.py
new file mode 100644
index 0000000000..00d98a3200
--- /dev/null
+++ b/lib/ansible/modules/extras/cloud/vmware/vmware_vm_vss_dvs_migrate.py
@@ -0,0 +1,158 @@
+#!/usr/bin/python
+# -*- coding: utf-8 -*-
+
+# (c) 2015, Joseph Callen <jcallen () csc.com>
+#
+# This file is part of Ansible
+#
+# Ansible is free software: you can redistribute it and/or modify
+# it under the terms of the GNU General Public License as published by
+# the Free Software Foundation, either version 3 of the License, or
+# (at your option) any later version.
+#
+# Ansible is distributed in the hope that it will be useful,
+# but WITHOUT ANY WARRANTY; without even the implied warranty of
+# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+# GNU General Public License for more details.
+#
+# You should have received a copy of the GNU General Public License
+# along with Ansible. If not, see <http://www.gnu.org/licenses/>.
+
+DOCUMENTATION = '''
+---
+module: vmware_vm_vss_dvs_migrate
+short_description: Migrates a virtual machine from a standard vswitch to a distributed vswitch
+description:
+ - Migrates a virtual machine from a standard vswitch to a distributed vswitch
+version_added: 2.0
+author: "Joseph Callen (@jcpowermac)"
+notes:
+ - Tested on vSphere 5.5
+requirements:
+ - "python >= 2.6"
+ - PyVmomi
+options:
+ vm_name:
+ description:
+ - Name of the virtual machine to migrate to a dvSwitch
+ required: True
+ dvportgroup_name:
+ description:
+ - Name of the portgroup to migrate the virtual machine to
+ required: True
+extends_documentation_fragment: vmware.documentation
+'''
+
+EXAMPLES = '''
+- name: Migrate VCSA to vDS
+ local_action:
+ module: vmware_vm_vss_dvs_migrate
+ hostname: vcenter_ip_or_hostname
+ username: vcenter_username
+ password: vcenter_password
+ vm_name: virtual_machine_name
+ dvportgroup_name: distributed_portgroup_name
+'''
+
+try:
+ from pyVmomi import vim, vmodl
+ HAS_PYVMOMI = True
+except ImportError:
+ HAS_PYVMOMI = False
+
+
+class VMwareVmVssDvsMigrate(object):
+ def __init__(self, module):
+ self.module = module
+ self.content = connect_to_api(module)
+ self.vm = None
+ self.vm_name = module.params['vm_name']
+ self.dvportgroup_name = module.params['dvportgroup_name']
+
+ def process_state(self):
+ vm_nic_states = {
+ 'absent': self.migrate_network_adapter_vds,
+ 'present': self.state_exit_unchanged,
+ }
+
+ vm_nic_states[self.check_vm_network_state()]()
+
+ def find_dvspg_by_name(self):
+ vmware_distributed_port_group = get_all_objs(self.content, [vim.dvs.DistributedVirtualPortgroup])
+ for dvspg in vmware_distributed_port_group:
+ if dvspg.name == self.dvportgroup_name:
+ return dvspg
+ return None
+
+ def find_vm_by_name(self):
+ virtual_machines = get_all_objs(self.content, [vim.VirtualMachine])
+ for vm in virtual_machines:
+ if vm.name == self.vm_name:
+ return vm
+ return None
+
+ def migrate_network_adapter_vds(self):
+ vm_configspec = vim.vm.ConfigSpec()
+ nic = vim.vm.device.VirtualEthernetCard.DistributedVirtualPortBackingInfo()
+ port = vim.dvs.PortConnection()
+ devicespec = vim.vm.device.VirtualDeviceSpec()
+
+ pg = self.find_dvspg_by_name()
+
+ if pg is None:
+ self.module.fail_json(msg="The standard portgroup was not found")
+
+ dvswitch = pg.config.distributedVirtualSwitch
+ port.switchUuid = dvswitch.uuid
+ port.portgroupKey = pg.key
+ nic.port = port
+
+ for device in self.vm.config.hardware.device:
+ if isinstance(device, vim.vm.device.VirtualEthernetCard):
+ devicespec.device = device
+ devicespec.operation = vim.vm.device.VirtualDeviceSpec.Operation.edit
+ devicespec.device.backing = nic
+ vm_configspec.deviceChange.append(devicespec)
+
+ task = self.vm.ReconfigVM_Task(vm_configspec)
+ changed, result = wait_for_task(task)
+ self.module.exit_json(changed=changed, result=result)
+
+ def state_exit_unchanged(self):
+ self.module.exit_json(changed=False)
+
+ def check_vm_network_state(self):
+ try:
+ self.vm = self.find_vm_by_name()
+
+ if self.vm is None:
+ self.module.fail_json(msg="A virtual machine with name %s does not exist" % self.vm_name)
+ for device in self.vm.config.hardware.device:
+ if isinstance(device, vim.vm.device.VirtualEthernetCard):
+ if isinstance(device.backing, vim.vm.device.VirtualEthernetCard.DistributedVirtualPortBackingInfo):
+ return 'present'
+ return 'absent'
+ except vmodl.RuntimeFault as runtime_fault:
+ self.module.fail_json(msg=runtime_fault.msg)
+ except vmodl.MethodFault as method_fault:
+ self.module.fail_json(msg=method_fault.msg)
+
+
+def main():
+
+ argument_spec = vmware_argument_spec()
+ argument_spec.update(dict(vm_name=dict(required=True, type='str'),
+ dvportgroup_name=dict(required=True, type='str')))
+
+ module = AnsibleModule(argument_spec=argument_spec, supports_check_mode=False)
+ if not HAS_PYVMOMI:
+ module.fail_json(msg='pyvmomi is required for this module')
+
+ vmware_vmnic_migrate = VMwareVmVssDvsMigrate(module)
+ vmware_vmnic_migrate.process_state()
+
+from ansible.module_utils.vmware import *
+from ansible.module_utils.basic import *
+
+if __name__ == '__main__':
+ main() \ No newline at end of file
diff --git a/lib/ansible/modules/extras/cloud/vmware/vmware_vmkernel.py b/lib/ansible/modules/extras/cloud/vmware/vmware_vmkernel.py
new file mode 100644
index 0000000000..863a41226a
--- /dev/null
+++ b/lib/ansible/modules/extras/cloud/vmware/vmware_vmkernel.py
@@ -0,0 +1,208 @@
+#!/usr/bin/python
+# -*- coding: utf-8 -*-
+
+# (c) 2015, Joseph Callen <jcallen () csc.com>
+#
+# This file is part of Ansible
+#
+# Ansible is free software: you can redistribute it and/or modify
+# it under the terms of the GNU General Public License as published by
+# the Free Software Foundation, either version 3 of the License, or
+# (at your option) any later version.
+#
+# Ansible is distributed in the hope that it will be useful,
+# but WITHOUT ANY WARRANTY; without even the implied warranty of
+# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+# GNU General Public License for more details.
+#
+# You should have received a copy of the GNU General Public License
+# along with Ansible. If not, see <http://www.gnu.org/licenses/>.
+
+DOCUMENTATION = '''
+---
+module: vmware_vmkernel
+short_description: Create a VMware VMkernel Interface
+description:
+ - Create a VMware VMkernel Interface
+version_added: 2.0
+author: "Joseph Callen (@jcpowermac), Russell Teague (@mtnbikenc)"
+notes:
+ - Tested on vSphere 5.5
+requirements:
+ - "python >= 2.6"
+ - PyVmomi
+options:
+ vswitch_name:
+ description:
+ - The name of the vswitch where to add the VMK interface
+ required: True
+ portgroup_name:
+ description:
+ - The name of the portgroup for the VMK interface
+ required: True
+ ip_address:
+ description:
+ - The IP Address for the VMK interface
+ required: True
+ subnet_mask:
+ description:
+ - The Subnet Mask for the VMK interface
+ required: True
+ vlan_id:
+ description:
+ - The VLAN ID for the VMK interface
+ required: True
+ mtu:
+ description:
+ - The MTU for the VMK interface
+ required: False
+ enable_vsan:
+ description:
+ - Enable the VMK interface for VSAN traffic
+ required: False
+ enable_vmotion:
+ description:
+ - Enable the VMK interface for vMotion traffic
+ required: False
+ enable_mgmt:
+ description:
+ - Enable the VMK interface for Management traffic
+ required: False
+ enable_ft:
+ description:
+ - Enable the VMK interface for Fault Tolerance traffic
+ required: False
+extends_documentation_fragment: vmware.documentation
+'''
+
+EXAMPLES = '''
+# Example command from Ansible Playbook
+
+- name: Add Management vmkernel port (vmk1)
+ local_action:
+ module: vmware_vmkernel
+ hostname: esxi_hostname
+ username: esxi_username
+ password: esxi_password
+ vswitch_name: vswitch_name
+ portgroup_name: portgroup_name
+ vlan_id: vlan_id
+ ip_address: ip_address
+ subnet_mask: subnet_mask
+ enable_mgmt: True
+'''
+
+try:
+ from pyVmomi import vim, vmodl
+ HAS_PYVMOMI = True
+except ImportError:
+ HAS_PYVMOMI = False
+
+
+def create_vmkernel_adapter(host_system, port_group_name,
+ vlan_id, vswitch_name,
+ ip_address, subnet_mask,
+ mtu, enable_vsan, enable_vmotion, enable_mgmt, enable_ft):
+
+ host_config_manager = host_system.configManager
+ host_network_system = host_config_manager.networkSystem
+ host_virtual_vic_manager = host_config_manager.virtualNicManager
+ config = vim.host.NetworkConfig()
+
+ config.portgroup = [vim.host.PortGroup.Config()]
+ config.portgroup[0].changeOperation = "add"
+ config.portgroup[0].spec = vim.host.PortGroup.Specification()
+ config.portgroup[0].spec.name = port_group_name
+ config.portgroup[0].spec.vlanId = vlan_id
+ config.portgroup[0].spec.vswitchName = vswitch_name
+ config.portgroup[0].spec.policy = vim.host.NetworkPolicy()
+
+ config.vnic = [vim.host.VirtualNic.Config()]
+ config.vnic[0].changeOperation = "add"
+ config.vnic[0].portgroup = port_group_name
+ config.vnic[0].spec = vim.host.VirtualNic.Specification()
+ config.vnic[0].spec.ip = vim.host.IpConfig()
+ config.vnic[0].spec.ip.dhcp = False
+ config.vnic[0].spec.ip.ipAddress = ip_address
+ config.vnic[0].spec.ip.subnetMask = subnet_mask
+ if mtu:
+ config.vnic[0].spec.mtu = mtu
+
+ host_network_config_result = host_network_system.UpdateNetworkConfig(config, "modify")
+
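+ # UpdateNetworkConfig reports the vnic devices it created; enable the
+ # requested traffic types on each new device.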
+ for vnic_device in host_network_config_result.vnicDevice:
+ if enable_vsan:
+ vsan_system = host_config_manager.vsanSystem
+ vsan_config = vim.vsan.host.ConfigInfo()
+ vsan_config.networkInfo = vim.vsan.host.ConfigInfo.NetworkInfo()
+
+ vsan_config.networkInfo.port = [vim.vsan.host.ConfigInfo.NetworkInfo.PortConfig()]
+
+ vsan_config.networkInfo.port[0].device = vnic_device
+ host_vsan_config_result = vsan_system.UpdateVsan_Task(vsan_config)
+
+ if enable_vmotion:
+ host_virtual_vic_manager.SelectVnicForNicType("vmotion", vnic_device)
+
+ if enable_mgmt:
+ host_virtual_vic_manager.SelectVnicForNicType("management", vnic_device)
+
+ if enable_ft:
+ host_virtual_vic_manager.SelectVnicForNicType("faultToleranceLogging", vnic_device)
+ return True
+
+
+def main():
+
+ argument_spec = vmware_argument_spec()
+ argument_spec.update(dict(portgroup_name=dict(required=True, type='str'),
+ ip_address=dict(required=True, type='str'),
+ subnet_mask=dict(required=True, type='str'),
+ mtu=dict(required=False, type='int'),
+ enable_vsan=dict(required=False, type='bool'),
+ enable_vmotion=dict(required=False, type='bool'),
+ enable_mgmt=dict(required=False, type='bool'),
+ enable_ft=dict(required=False, type='bool'),
+ vswitch_name=dict(required=True, type='str'),
+ vlan_id=dict(required=True, type='int')))
+
+ module = AnsibleModule(argument_spec=argument_spec, supports_check_mode=False)
+
+ if not HAS_PYVMOMI:
+ module.fail_json(msg='pyvmomi is required for this module')
+
+ port_group_name = module.params['portgroup_name']
+ ip_address = module.params['ip_address']
+ subnet_mask = module.params['subnet_mask']
+ mtu = module.params['mtu']
+ enable_vsan = module.params['enable_vsan']
+ enable_vmotion = module.params['enable_vmotion']
+ enable_mgmt = module.params['enable_mgmt']
+ enable_ft = module.params['enable_ft']
+ vswitch_name = module.params['vswitch_name']
+ vlan_id = module.params['vlan_id']
+
+ try:
+ content = connect_to_api(module)
+ host = get_all_objs(content, [vim.HostSystem])
+ if not host:
+ module.fail_json(msg="Unable to locate Physical Host.")
+ host_system = host.keys()[0]
+ changed = create_vmkernel_adapter(host_system, port_group_name,
+ vlan_id, vswitch_name,
+ ip_address, subnet_mask,
+ mtu, enable_vsan, enable_vmotion, enable_mgmt, enable_ft)
+ module.exit_json(changed=changed)
+ except vmodl.RuntimeFault as runtime_fault:
+ module.fail_json(msg=runtime_fault.msg)
+ except vmodl.MethodFault as method_fault:
+ module.fail_json(msg=method_fault.msg)
+ except Exception as e:
+ module.fail_json(msg=str(e))
+
+
+from ansible.module_utils.vmware import *
+from ansible.module_utils.basic import *
+
+if __name__ == '__main__':
+ main()
diff --git a/lib/ansible/modules/extras/cloud/vmware/vmware_vmkernel_ip_config.py b/lib/ansible/modules/extras/cloud/vmware/vmware_vmkernel_ip_config.py
new file mode 100644
index 0000000000..31c50e6c68
--- /dev/null
+++ b/lib/ansible/modules/extras/cloud/vmware/vmware_vmkernel_ip_config.py
@@ -0,0 +1,123 @@
+#!/usr/bin/python
+# -*- coding: utf-8 -*-
+
+# (c) 2015, Joseph Callen <jcallen () csc.com>
+#
+# This file is part of Ansible
+#
+# Ansible is free software: you can redistribute it and/or modify
+# it under the terms of the GNU General Public License as published by
+# the Free Software Foundation, either version 3 of the License, or
+# (at your option) any later version.
+#
+# Ansible is distributed in the hope that it will be useful,
+# but WITHOUT ANY WARRANTY; without even the implied warranty of
+# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+# GNU General Public License for more details.
+#
+# You should have received a copy of the GNU General Public License
+# along with Ansible. If not, see <http://www.gnu.org/licenses/>.
+
+DOCUMENTATION = '''
+---
+module: vmware_vmkernel_ip_config
+short_description: Configure the VMkernel IP Address
+description:
+ - Configure the VMkernel IP Address
+version_added: 2.0
+author: "Joseph Callen (@jcpowermac), Russell Teague (@mtnbikenc)"
+notes:
+ - Tested on vSphere 5.5
+requirements:
+ - "python >= 2.6"
+ - PyVmomi
+options:
+ vmk_name:
+ description:
+ - VMkernel interface name
+ required: True
+ ip_address:
+ description:
+ - IP address to assign to VMkernel interface
+ required: True
+ subnet_mask:
+ description:
+ - Subnet Mask to assign to VMkernel interface
+ required: True
+extends_documentation_fragment: vmware.documentation
+'''
+
+EXAMPLES = '''
+# Example command from Ansible Playbook
+
+- name: Configure IP address on ESX host
+ local_action:
+ module: vmware_vmkernel_ip_config
+ hostname: esxi_hostname
+ username: esxi_username
+ password: esxi_password
+ vmk_name: vmk0
+ ip_address: 10.0.0.10
+ subnet_mask: 255.255.255.0
+'''
+
+try:
+ from pyVmomi import vim, vmodl
+ HAS_PYVMOMI = True
+except ImportError:
+ HAS_PYVMOMI = False
+
+
+def configure_vmkernel_ip_address(host_system, vmk_name, ip_address, subnet_mask):
+
+ host_config_manager = host_system.configManager
+ host_network_system = host_config_manager.networkSystem
+
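+ # Idempotence note: the spec is only updated (and True returned) when the
+ # vnic's configured address differs from the requested one.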
+ for vnic in host_network_system.networkConfig.vnic:
+ if vnic.device == vmk_name:
+ spec = vnic.spec
+ if spec.ip.ipAddress != ip_address:
+ spec.ip.dhcp = False
+ spec.ip.ipAddress = ip_address
+ spec.ip.subnetMask = subnet_mask
+ host_network_system.UpdateVirtualNic(vmk_name, spec)
+ return True
+ return False
+
+
+def main():
+
+ argument_spec = vmware_argument_spec()
+ argument_spec.update(dict(vmk_name=dict(required=True, type='str'),
+ ip_address=dict(required=True, type='str'),
+ subnet_mask=dict(required=True, type='str')))
+
+ module = AnsibleModule(argument_spec=argument_spec, supports_check_mode=False)
+
+ if not HAS_PYVMOMI:
+ module.fail_json(msg='pyvmomi is required for this module')
+
+ vmk_name = module.params['vmk_name']
+ ip_address = module.params['ip_address']
+ subnet_mask = module.params['subnet_mask']
+
+ try:
+ content = connect_to_api(module, False)
+ host = get_all_objs(content, [vim.HostSystem])
+ if not host:
+ module.fail_json(msg="Unable to locate Physical Host.")
+ host_system = host.keys()[0]
+ changed = configure_vmkernel_ip_address(host_system, vmk_name, ip_address, subnet_mask)
+ module.exit_json(changed=changed)
+ except vmodl.RuntimeFault as runtime_fault:
+ module.fail_json(msg=runtime_fault.msg)
+ except vmodl.MethodFault as method_fault:
+ module.fail_json(msg=method_fault.msg)
+ except Exception as e:
+ module.fail_json(msg=str(e))
+
+from ansible.module_utils.vmware import *
+from ansible.module_utils.basic import *
+
+if __name__ == '__main__':
+ main()
diff --git a/lib/ansible/modules/extras/cloud/vmware/vmware_vmotion.py b/lib/ansible/modules/extras/cloud/vmware/vmware_vmotion.py
new file mode 100644
index 0000000000..43e8a5d5d0
--- /dev/null
+++ b/lib/ansible/modules/extras/cloud/vmware/vmware_vmotion.py
@@ -0,0 +1,150 @@
+#!/usr/bin/python
+# -*- coding: utf-8 -*-
+
+# (c) 2015, Bede Carroll <bc+github () bedecarroll.com>
+#
+# This file is part of Ansible
+#
+# Ansible is free software: you can redistribute it and/or modify
+# it under the terms of the GNU General Public License as published by
+# the Free Software Foundation, either version 3 of the License, or
+# (at your option) any later version.
+#
+# Ansible is distributed in the hope that it will be useful,
+# but WITHOUT ANY WARRANTY; without even the implied warranty of
+# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+# GNU General Public License for more details.
+#
+# You should have received a copy of the GNU General Public License
+# along with Ansible. If not, see <http://www.gnu.org/licenses/>.
+
+DOCUMENTATION = '''
+---
+module: vmware_vmotion
+short_description: Move a virtual machine using vMotion
+description:
+ - Using VMware vCenter, move a virtual machine using vMotion to a different
+ host.
+version_added: 2.2
+author: "Bede Carroll (@bedecarroll)"
+notes:
+ - Tested on vSphere 6.0
+requirements:
+ - "python >= 2.6"
+ - pyVmomi
+options:
+ vm_name:
+ description:
+ - Name of the VM to perform a vMotion on
+ required: True
+ aliases: ['vm']
+ destination_host:
+ description:
+ - Name of the end host the VM should be running on
+ required: True
+ aliases: ['destination']
+extends_documentation_fragment: vmware.documentation
+'''
+
+EXAMPLES = '''
+Example from Ansible playbook
+
+ - name: Perform vMotion of VM
+ local_action:
+ module: vmware_vmotion
+ hostname: 'vcenter_hostname'
+ username: 'vcenter_username'
+ password: 'vcenter_password'
+ validate_certs: False
+ vm_name: 'vm_name_as_per_vcenter'
+ destination_host: 'destination_host_as_per_vcenter'
+'''
+
+RETURN = '''
+running_host:
+ description: The host the virtual machine is registered to
+ returned:
+ - changed
+ - success
+ type: string
+ sample: 'host1.example.com'
+'''
+
+try:
+ from pyVmomi import vim
+ HAS_PYVMOMI = True
+except ImportError:
+ HAS_PYVMOMI = False
+
+
+def migrate_vm(vm_object, host_object):
+ """
+ Migrate virtual machine and return the task.
+ """
+ relocate_spec = vim.vm.RelocateSpec(host=host_object)
+ task_object = vm_object.Relocate(relocate_spec)
+ return task_object
+
+def main():
+
+ argument_spec = vmware_argument_spec()
+ argument_spec.update(
+ dict(
+ vm_name=dict(required=True, aliases=['vm'], type='str'),
+ destination_host=dict(required=True, aliases=['destination'], type='str'),
+ )
+ )
+ module = AnsibleModule(argument_spec=argument_spec, supports_check_mode=True)
+
+ if not HAS_PYVMOMI:
+ module.fail_json(msg='pyVmomi is required for this module')
+
+ content = connect_to_api(module=module)
+
+ vm_object = find_vm_by_name(content=content, vm_name=module.params['vm_name'])
+ host_object = find_hostsystem_by_name(content=content, hostname=module.params['destination_host'])
+
+ # Setup result
+ result = {
+ 'changed': False
+ }
+
+ # Check if we could find the VM or Host
+ if not vm_object:
+ module.fail_json(msg='Cannot find virtual machine')
+ if not host_object:
+ module.fail_json(msg='Cannot find host')
+
+ # Make sure VM isn't already at the destination
+ if vm_object.runtime.host.name == module.params['destination_host']:
+ module.exit_json(**result)
+
+ if not module.check_mode:
+ # Migrate VM and get Task object back
+ task_object = migrate_vm(vm_object=vm_object, host_object=host_object)
+
+ # Wait for task to complete
+ wait_for_task(task_object)
+
+ # If task was a success the VM has moved, update running_host and complete module
+ if task_object.info.state == vim.TaskInfo.State.success:
+ vm_object = find_vm_by_name(content=content, vm_name=module.params['vm_name'])
+ result['running_host'] = vm_object.runtime.host.name
+ result['changed'] = True
+ module.exit_json(**result)
+ else:
+ if task_object.info.error is None:
+ module.fail_json(msg='Unable to migrate VM due to an error, please check vCenter')
+ else:
+ module.fail_json(msg='Unable to migrate VM due to an error: %s' % task_object.info.error)
+ else:
+ # If we are in check mode return a result as if move was performed
+ result['running_host'] = module.params['destination_host']
+ result['changed'] = True
+ module.exit_json(**result)
+
+from ansible.module_utils.basic import AnsibleModule
+from ansible.module_utils.vmware import *
+
+if __name__ == '__main__':
+ main()
diff --git a/lib/ansible/modules/extras/cloud/vmware/vmware_vsan_cluster.py b/lib/ansible/modules/extras/cloud/vmware/vmware_vsan_cluster.py
new file mode 100644
index 0000000000..015386d906
--- /dev/null
+++ b/lib/ansible/modules/extras/cloud/vmware/vmware_vsan_cluster.py
@@ -0,0 +1,130 @@
+#!/usr/bin/python
+# -*- coding: utf-8 -*-
+
+# (c) 2015, Russell Teague <rteague2 () csc.com>
+#
+# This file is part of Ansible
+#
+# Ansible is free software: you can redistribute it and/or modify
+# it under the terms of the GNU General Public License as published by
+# the Free Software Foundation, either version 3 of the License, or
+# (at your option) any later version.
+#
+# Ansible is distributed in the hope that it will be useful,
+# but WITHOUT ANY WARRANTY; without even the implied warranty of
+# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+# GNU General Public License for more details.
+#
+# You should have received a copy of the GNU General Public License
+# along with Ansible. If not, see <http://www.gnu.org/licenses/>.
+
+DOCUMENTATION = '''
+---
+module: vmware_vsan_cluster
+short_description: Configure VSAN clustering on an ESXi host
+description:
+ - This module can be used to configure VSAN clustering on an ESXi host
+version_added: 2.0
+author: "Russell Teague (@mtnbikenc)"
+notes:
+ - Tested on vSphere 5.5
+requirements:
+ - "python >= 2.6"
+ - PyVmomi
+options:
+ cluster_uuid:
+ description:
+ - Desired cluster UUID
+ required: False
+extends_documentation_fragment: vmware.documentation
+'''
+
+EXAMPLES = '''
+# Example command from Ansible Playbook
+
+- name: Configure VMware VSAN Cluster
+ hosts: deploy_node
+ gather_facts: False
+ tags:
+ - vsan
+ tasks:
+ - name: Configure VSAN on first host
+ vmware_vsan_cluster:
+ hostname: "{{ groups['esxi'][0] }}"
+ username: "{{ esxi_username }}"
+ password: "{{ site_password }}"
+ register: vsan_cluster
+
+ - name: Configure VSAN on remaining hosts
+ vmware_vsan_cluster:
+ hostname: "{{ item }}"
+ username: "{{ esxi_username }}"
+ password: "{{ site_password }}"
+ cluster_uuid: "{{ vsan_cluster.cluster_uuid }}"
+ with_items: groups['esxi'][1:]
+
+'''
+
+try:
+ from pyVmomi import vim, vmodl
+ HAS_PYVMOMI = True
+except ImportError:
+ HAS_PYVMOMI = False
+
+
+def create_vsan_cluster(host_system, new_cluster_uuid):
+ host_config_manager = host_system.configManager
+ vsan_system = host_config_manager.vsanSystem
+
+ vsan_config = vim.vsan.host.ConfigInfo()
+ vsan_config.enabled = True
+
+ if new_cluster_uuid is not None:
+ vsan_config.clusterInfo = vim.vsan.host.ConfigInfo.ClusterInfo()
+ vsan_config.clusterInfo.uuid = new_cluster_uuid
+
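+ # autoClaimStorage lets the host automatically claim eligible local disks
+ # for VSAN instead of requiring an explicit disk mapping.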
+ vsan_config.storageInfo = vim.vsan.host.ConfigInfo.StorageInfo()
+ vsan_config.storageInfo.autoClaimStorage = True
+
+ task = vsan_system.UpdateVsan_Task(vsan_config)
+ changed, result = wait_for_task(task)
+
+ host_status = vsan_system.QueryHostStatus()
+ cluster_uuid = host_status.uuid
+
+ return changed, result, cluster_uuid
+
+
+def main():
+
+ argument_spec = vmware_argument_spec()
+ argument_spec.update(dict(cluster_uuid=dict(required=False, type='str')))
+
+ module = AnsibleModule(argument_spec=argument_spec, supports_check_mode=False)
+
+ if not HAS_PYVMOMI:
+ module.fail_json(msg='pyvmomi is required for this module')
+
+ new_cluster_uuid = module.params['cluster_uuid']
+
+ try:
+ content = connect_to_api(module, False)
+ host = get_all_objs(content, [vim.HostSystem])
+ if not host:
+ module.fail_json(msg="Unable to locate Physical Host.")
+ host_system = host.keys()[0]
+ changed, result, cluster_uuid = create_vsan_cluster(host_system, new_cluster_uuid)
+ module.exit_json(changed=changed, result=result, cluster_uuid=cluster_uuid)
+
+ except vmodl.RuntimeFault as runtime_fault:
+ module.fail_json(msg=runtime_fault.msg)
+ except vmodl.MethodFault as method_fault:
+ module.fail_json(msg=method_fault.msg)
+ except Exception as e:
+ module.fail_json(msg=str(e))
+
+from ansible.module_utils.vmware import *
+from ansible.module_utils.basic import *
+
+if __name__ == '__main__':
+ main()
diff --git a/lib/ansible/modules/extras/cloud/vmware/vmware_vswitch.py b/lib/ansible/modules/extras/cloud/vmware/vmware_vswitch.py
new file mode 100644
index 0000000000..7b115056ef
--- /dev/null
+++ b/lib/ansible/modules/extras/cloud/vmware/vmware_vswitch.py
@@ -0,0 +1,199 @@
+#!/usr/bin/python
+# -*- coding: utf-8 -*-
+
+# (c) 2015, Joseph Callen <jcallen () csc.com>
+#
+# This file is part of Ansible
+#
+# Ansible is free software: you can redistribute it and/or modify
+# it under the terms of the GNU General Public License as published by
+# the Free Software Foundation, either version 3 of the License, or
+# (at your option) any later version.
+#
+# Ansible is distributed in the hope that it will be useful,
+# but WITHOUT ANY WARRANTY; without even the implied warranty of
+# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+# GNU General Public License for more details.
+#
+# You should have received a copy of the GNU General Public License
+# along with Ansible. If not, see <http://www.gnu.org/licenses/>.
+
+DOCUMENTATION = '''
+---
+module: vmware_vswitch
+short_description: Add a VMware Standard Switch to an ESXi host
+description:
+ - Add a VMware Standard Switch to an ESXi host
+version_added: 2.0
+author: "Joseph Callen (@jcpowermac), Russell Teague (@mtnbikenc)"
+notes:
+ - Tested on vSphere 5.5
+requirements:
+ - "python >= 2.6"
+ - PyVmomi
+options:
+ switch_name:
+ description:
+ - vSwitch name to add
+ required: True
+ nic_name:
+ description:
+ - vmnic name to attach to vswitch
+ required: True
+ number_of_ports:
+ description:
+ - Number of port to configure on vswitch
+ default: 128
+ required: False
+ mtu:
+ description:
+ - MTU to configure on vswitch
+ required: False
+ state:
+ description:
+ - Add or remove the switch
+ default: 'present'
+ choices:
+ - 'present'
+ - 'absent'
+ required: False
+extends_documentation_fragment: vmware.documentation
+'''
+
+EXAMPLES = '''
+Example from Ansible playbook
+
+ - name: Add a VMware vSwitch
+ local_action:
+ module: vmware_vswitch
+ hostname: esxi_hostname
+ username: esxi_username
+ password: esxi_password
+ switch_name: vswitch_name
+ nic_name: vmnic_name
+ mtu: 9000
+'''
+
+try:
+ from pyVmomi import vim, vmodl
+ HAS_PYVMOMI = True
+except ImportError:
+ HAS_PYVMOMI = False
+
+
+def find_vswitch_by_name(host, vswitch_name):
+ for vss in host.config.network.vswitch:
+ if vss.name == vswitch_name:
+ return vss
+ return None
+
+
+class VMwareHostVirtualSwitch(object):
+
+ def __init__(self, module):
+ self.host_system = None
+ self.content = None
+ self.vss = None
+ self.module = module
+ self.switch_name = module.params['switch_name']
+ self.number_of_ports = module.params['number_of_ports']
+ self.nic_name = module.params['nic_name']
+ self.mtu = module.params['mtu']
+ self.state = module.params['state']
+ self.content = connect_to_api(self.module)
+
+ def process_state(self):
+ try:
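+ # Two-level dispatch: the desired state from the module arguments is
+ # crossed with the detected vswitch state to pick the handler to run.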
+ vswitch_states = {
+ 'absent': {
+ 'present': self.state_destroy_vswitch,
+ 'absent': self.state_exit_unchanged,
+ },
+ 'present': {
+ 'update': self.state_update_vswitch,
+ 'present': self.state_exit_unchanged,
+ 'absent': self.state_create_vswitch,
+ }
+ }
+
+ vswitch_states[self.state][self.check_vswitch_configuration()]()
+
+ except vmodl.RuntimeFault as runtime_fault:
+ self.module.fail_json(msg=runtime_fault.msg)
+ except vmodl.MethodFault as method_fault:
+ self.module.fail_json(msg=method_fault.msg)
+ except Exception as e:
+ self.module.fail_json(msg=str(e))
+
+
+ # Source from
+ # https://github.com/rreubenur/pyvmomi-community-samples/blob/patch-1/samples/create_vswitch.py
+
+ def state_create_vswitch(self):
+ vss_spec = vim.host.VirtualSwitch.Specification()
+ vss_spec.numPorts = self.number_of_ports
+ vss_spec.mtu = self.mtu
+ vss_spec.bridge = vim.host.VirtualSwitch.BondBridge(nicDevice=[self.nic_name])
+ self.host_system.configManager.networkSystem.AddVirtualSwitch(vswitchName=self.switch_name, spec=vss_spec)
+ self.module.exit_json(changed=True)
+
+ def state_exit_unchanged(self):
+ self.module.exit_json(changed=False)
+
+ def state_destroy_vswitch(self):
+ config = vim.host.NetworkConfig()
+
+ for portgroup in self.host_system.configManager.networkSystem.networkInfo.portgroup:
+ if portgroup.spec.vswitchName == self.vss.name:
+ portgroup_config = vim.host.PortGroup.Config()
+ portgroup_config.changeOperation = "remove"
+ portgroup_config.spec = vim.host.PortGroup.Specification()
+ portgroup_config.spec.name = portgroup.spec.name
+ portgroup_config.spec.vlanId = portgroup.spec.vlanId
+ portgroup_config.spec.vswitchName = portgroup.spec.vswitchName
+ portgroup_config.spec.policy = vim.host.NetworkPolicy()
+ config.portgroup.append(portgroup_config)
+
+ self.host_system.configManager.networkSystem.UpdateNetworkConfig(config, "modify")
+ self.host_system.configManager.networkSystem.RemoveVirtualSwitch(self.vss.name)
+ self.module.exit_json(changed=True)
+
+ def state_update_vswitch(self):
+ self.module.exit_json(changed=False, msg="Currently not implemented.")
+
+ def check_vswitch_configuration(self):
+ host = get_all_objs(self.content, [vim.HostSystem])
+ if not host:
+ self.module.fail_json(msg="Unable to find host")
+
+ self.host_system = host.keys()[0]
+ self.vss = find_vswitch_by_name(self.host_system, self.switch_name)
+
+ if self.vss is None:
+ return 'absent'
+ else:
+ return 'present'
+
+
+def main():
+ argument_spec = vmware_argument_spec()
+ argument_spec.update(dict(switch_name=dict(required=True, type='str'),
+ nic_name=dict(required=True, type='str'),
+ number_of_ports=dict(required=False, type='int', default=128),
+ mtu=dict(required=False, type='int', default=1500),
+ state=dict(default='present', choices=['present', 'absent'], type='str')))
+
+ module = AnsibleModule(argument_spec=argument_spec, supports_check_mode=False)
+
+ if not HAS_PYVMOMI:
+ module.fail_json(msg='pyvmomi is required for this module')
+
+ host_virtual_switch = VMwareHostVirtualSwitch(module)
+ host_virtual_switch.process_state()
+
+from ansible.module_utils.vmware import *
+from ansible.module_utils.basic import *
+
+if __name__ == '__main__':
+ main()
diff --git a/lib/ansible/modules/extras/cloud/vmware/vsphere_copy.py b/lib/ansible/modules/extras/cloud/vmware/vsphere_copy.py
new file mode 100644
index 0000000000..41971fa977
--- /dev/null
+++ b/lib/ansible/modules/extras/cloud/vmware/vsphere_copy.py
@@ -0,0 +1,177 @@
+#!/usr/bin/python
+# -*- coding: utf-8 -*-
+
+# Copyright 2015 Dag Wieers <dag@wieers.com>
+#
+# This file is part of Ansible
+#
+# Ansible is free software: you can redistribute it and/or modify
+# it under the terms of the GNU General Public License as published by
+# the Free Software Foundation, either version 3 of the License, or
+# (at your option) any later version.
+#
+# Ansible is distributed in the hope that it will be useful,
+# but WITHOUT ANY WARRANTY; without even the implied warranty of
+# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+# GNU General Public License for more details.
+#
+# You should have received a copy of the GNU General Public License
+# along with Ansible. If not, see <http://www.gnu.org/licenses/>.
+
+DOCUMENTATION = '''
+---
+module: vsphere_copy
+short_description: Copy a file to a vCenter datastore
+description:
+ - Upload files to a vCenter datastore
+version_added: 2.0
+author: Dag Wieers (@dagwieers) <dag@wieers.com>
+options:
+ host:
+ description:
+ - The vCenter server on which the datastore is available.
+ required: true
+ login:
+ description:
+ - The login name to authenticate on the vCenter server.
+ required: true
+ password:
+ description:
+ - The password to authenticate on the vCenter server.
+ required: true
+ src:
+ description:
+ - The file to push to vCenter
+ required: true
+ datacenter:
+ description:
+ - The datacenter on the vCenter server that holds the datastore.
+ required: true
+ datastore:
+ description:
+ - The datastore on the vCenter server to push files to.
+ required: true
+ path:
+ description:
+ - The file to push to the datastore on the vCenter server.
+ required: true
+ validate_certs:
+ description:
+ - If C(no), SSL certificates will not be validated. This should only be
+ set to C(no) when no other option exists.
+ required: false
+ default: 'yes'
+ choices: ['yes', 'no']
+
+notes:
+ - "This module ought to be run from a system that can access vCenter directly and has the file to transfer.
+ It can be the normal remote target or you can change it either by using C(transport: local) or using C(delegate_to)."
+ - Tested on vSphere 5.5
+'''
+
+EXAMPLES = '''
+- vsphere_copy: host=vhost login=vuser password=vpass src=/some/local/file datacenter='DC1 Someplace' datastore=datastore1 path=some/remote/file
+ transport: local
+- vsphere_copy: host=vhost login=vuser password=vpass src=/other/local/file datacenter='DC2 Someplace' datastore=datastore2 path=other/remote/file
+ delegate_to: other_system
+'''
+
+import atexit
+import urllib
+import mmap
+import errno
+import socket
+
+from ansible.module_utils.basic import AnsibleModule
+from ansible.module_utils.pycompat24 import get_exception
+from ansible.module_utils.urls import open_url
+
+def vmware_path(datastore, datacenter, path):
+ ''' Constructs a URL path that VSphere accepts reliably '''
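+ # Illustrative result (assumed; query parameter order may vary):
+ # vmware_path("datastore1", "DC1 Someplace", "some/remote/file")
+ # -> "/folder/some/remote/file?dsName=datastore1&dcPath=DC1+Someplace"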
+ path = "/folder/%s" % path.lstrip("/")
+ # Due to a software bug in vSphere, it fails to handle ampersands in datacenter names.
+ # The workaround is to do what vSphere does (when browsing) and double-encode ampersands; other characters may need the same treatment.
+ datacenter = datacenter.replace('&', '%26')
+ if not path.startswith("/"):
+ path = "/" + path
+ params = dict( dsName = datastore )
+ if datacenter:
+ params["dcPath"] = datacenter
+ params = urllib.urlencode(params)
+ return "%s?%s" % (path, params)
+
+def main():
+
+ module = AnsibleModule(
+ argument_spec = dict(
+ host = dict(required=True, aliases=[ 'hostname' ]),
+ login = dict(required=True, aliases=[ 'username' ]),
+ password = dict(required=True, no_log=True),
+ src = dict(required=True, aliases=[ 'name' ]),
+ datacenter = dict(required=True),
+ datastore = dict(required=True),
+ dest = dict(required=True, aliases=[ 'path' ]),
+ validate_certs = dict(required=False, default=True, type='bool'),
+ ),
+ # Implementing check-mode using HEAD is impossible, since size/date is not 100% reliable
+ supports_check_mode = False,
+ )
+
+ host = module.params.get('host')
+ login = module.params.get('login')
+ password = module.params.get('password')
+ src = module.params.get('src')
+ datacenter = module.params.get('datacenter')
+ datastore = module.params.get('datastore')
+ dest = module.params.get('dest')
+ validate_certs = module.params.get('validate_certs')
+
+ fd = open(src, "rb")
+ atexit.register(fd.close)
+
+ data = mmap.mmap(fd.fileno(), 0, access=mmap.ACCESS_READ)
+ atexit.register(data.close)
+
+ remote_path = vmware_path(datastore, datacenter, dest)
+ url = 'https://%s%s' % (host, remote_path)
+
+ headers = {
+ "Content-Type": "application/octet-stream",
+ "Content-Length": str(len(data)),
+ }
+
+ try:
+ r = open_url(url, data=data, headers=headers, method='PUT',
+ url_username=login, url_password=password, validate_certs=validate_certs,
+ force_basic_auth=True)
+    except socket.error:
+        e = get_exception()
+        if isinstance(e.args, tuple) and e.args[0] == errno.ECONNRESET:
+            # vSphere resets the connection if the file is in use and cannot be replaced
+            module.fail_json(msg='Failed to upload, image probably in use', status=None, errno=e.args[0], reason=str(e), url=url)
+        else:
+            module.fail_json(msg=str(e), status=None, errno=getattr(e, 'errno', None), reason=str(e), url=url)
+    except Exception:
+        e = get_exception()
+        error_code = -1
+        try:
+            if isinstance(e.args[0], int):
+                error_code = e.args[0]
+        except (IndexError, TypeError):
+            pass
+        module.fail_json(msg=str(e), status=None, errno=error_code, reason=str(e), url=url)
+
+ status = r.getcode()
+ if 200 <= status < 300:
+ module.exit_json(changed=True, status=status, reason=r.msg, url=url)
+ else:
+ length = r.headers.get('content-length', None)
+ if r.headers.get('transfer-encoding', '').lower() == 'chunked':
+ chunked = 1
+ else:
+ chunked = 0
+
+ module.fail_json(msg='Failed to upload', errno=None, status=status, reason=r.msg, length=length, headers=dict(r.headers), chunked=chunked, url=url)
+
+if __name__ == '__main__':
+ main()
diff --git a/lib/ansible/modules/extras/cloud/webfaction/__init__.py b/lib/ansible/modules/extras/cloud/webfaction/__init__.py
new file mode 100644
index 0000000000..e69de29bb2
--- /dev/null
+++ b/lib/ansible/modules/extras/cloud/webfaction/__init__.py
diff --git a/lib/ansible/modules/extras/cloud/webfaction/webfaction_app.py b/lib/ansible/modules/extras/cloud/webfaction/webfaction_app.py
new file mode 100644
index 0000000000..8f40a9ab85
--- /dev/null
+++ b/lib/ansible/modules/extras/cloud/webfaction/webfaction_app.py
@@ -0,0 +1,199 @@
+#!/usr/bin/python
+#
+# Create a Webfaction application using Ansible and the Webfaction API
+#
+# Valid application types can be found by looking here:
+# http://docs.webfaction.com/xmlrpc-api/apps.html#application-types
+#
+# ------------------------------------------
+#
+# (c) Quentin Stafford-Fraser 2015, with contributions gratefully acknowledged from:
+# * Andy Baker
+# * Federico Tarantini
+#
+# This file is part of Ansible
+#
+# Ansible is free software: you can redistribute it and/or modify
+# it under the terms of the GNU General Public License as published by
+# the Free Software Foundation, either version 3 of the License, or
+# (at your option) any later version.
+#
+# Ansible is distributed in the hope that it will be useful,
+# but WITHOUT ANY WARRANTY; without even the implied warranty of
+# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+# GNU General Public License for more details.
+#
+# You should have received a copy of the GNU General Public License
+# along with Ansible. If not, see <http://www.gnu.org/licenses/>.
+#
+
+DOCUMENTATION = '''
+---
+module: webfaction_app
+short_description: Add or remove applications on a Webfaction host
+description:
+ - Add or remove applications on a Webfaction host. Further documentation at http://github.com/quentinsf/ansible-webfaction.
+author: Quentin Stafford-Fraser (@quentinsf)
+version_added: "2.0"
+notes:
+ - "You can run playbooks that use this on a local machine, or on a Webfaction host, or elsewhere, since the scripts use the remote webfaction API - the location is not important. However, running them on multiple hosts I(simultaneously) is best avoided. If you don't specify I(localhost) as your host, you may want to add C(serial: 1) to the plays."
+ - See `the webfaction API <http://docs.webfaction.com/xmlrpc-api/>`_ for more info.
+
+options:
+ name:
+ description:
+ - The name of the application
+ required: true
+
+ state:
+ description:
+ - Whether the application should exist
+ required: false
+ choices: ['present', 'absent']
+ default: "present"
+
+ type:
+ description:
+ - The type of application to create. See the Webfaction docs at http://docs.webfaction.com/xmlrpc-api/apps.html for a list.
+ required: true
+
+ autostart:
+ description:
+ - Whether the app should restart with an autostart.cgi script
+ required: false
+ default: "no"
+
+ extra_info:
+ description:
+ - Any extra parameters required by the app
+ required: false
+ default: null
+
+ port_open:
+ description:
+      - Whether the port should be opened
+ required: false
+ default: false
+
+ login_name:
+ description:
+ - The webfaction account to use
+ required: true
+
+ login_password:
+ description:
+ - The webfaction password to use
+ required: true
+
+ machine:
+ description:
+ - The machine name to use (optional for accounts with only one machine)
+ required: false
+
+'''
+
+EXAMPLES = '''
+  - name: Create a test app
+    webfaction_app:
+      name: my_wsgi_app1
+      state: present
+      type: mod_wsgi35-python27
+      login_name: "{{webfaction_user}}"
+      login_password: "{{webfaction_passwd}}"
+      machine: "{{webfaction_machine}}"
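+
+  # Removal sketch (assumes the app created above; type is still required
+  # by the module's argument spec):
+  - name: Remove the test app
+    webfaction_app:
+      name: my_wsgi_app1
+      state: absent
+      type: mod_wsgi35-python27
+      login_name: "{{webfaction_user}}"
+      login_password: "{{webfaction_passwd}}"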
+'''
+
+import xmlrpclib
+
+webfaction = xmlrpclib.ServerProxy('https://api.webfaction.com/')
+
+def main():
+
+ module = AnsibleModule(
+ argument_spec = dict(
+            name = dict(required=True),
+            state = dict(required=False, choices=['present', 'absent'], default='present'),
+            type = dict(required=True),
+            autostart = dict(required=False, type='bool', default=False),
+            extra_info = dict(required=False, default=""),
+            port_open = dict(required=False, type='bool', default=False),
+            login_name = dict(required=True),
+            login_password = dict(required=True, no_log=True),
+            machine = dict(required=False, default=None),
+ ),
+ supports_check_mode=True
+ )
+ app_name = module.params['name']
+ app_type = module.params['type']
+ app_state = module.params['state']
+
+ if module.params['machine']:
+ session_id, account = webfaction.login(
+ module.params['login_name'],
+ module.params['login_password'],
+ module.params['machine']
+ )
+ else:
+ session_id, account = webfaction.login(
+ module.params['login_name'],
+ module.params['login_password']
+ )
+
+ app_list = webfaction.list_apps(session_id)
+ app_map = dict([(i['name'], i) for i in app_list])
+ existing_app = app_map.get(app_name)
+
+ result = {}
+
+ # Here's where the real stuff happens
+
+ if app_state == 'present':
+
+ # Does an app with this name already exist?
+ if existing_app:
+ if existing_app['type'] != app_type:
+ module.fail_json(msg="App already exists with different type. Please fix by hand.")
+
+ # If it exists with the right type, we don't change it
+ # Should check other parameters.
+ module.exit_json(
+ changed = False,
+ )
+
+ if not module.check_mode:
+ # If this isn't a dry run, create the app
+ result.update(
+ webfaction.create_app(
+ session_id, app_name, app_type,
+ module.boolean(module.params['autostart']),
+ module.params['extra_info'],
+ module.boolean(module.params['port_open'])
+ )
+ )
+
+ elif app_state == 'absent':
+
+ # If the app's already not there, nothing changed.
+ if not existing_app:
+ module.exit_json(
+ changed = False,
+ )
+
+ if not module.check_mode:
+ # If this isn't a dry run, delete the app
+ result.update(
+ webfaction.delete_app(session_id, app_name)
+ )
+
+ else:
+ module.fail_json(msg="Unknown state specified: {}".format(app_state))
+
+
+ module.exit_json(
+ changed = True,
+ result = result
+ )
+
+from ansible.module_utils.basic import *
+
+if __name__ == '__main__':
+    main()
+
diff --git a/lib/ansible/modules/extras/cloud/webfaction/webfaction_db.py b/lib/ansible/modules/extras/cloud/webfaction/webfaction_db.py
new file mode 100644
index 0000000000..6c45e700e9
--- /dev/null
+++ b/lib/ansible/modules/extras/cloud/webfaction/webfaction_db.py
@@ -0,0 +1,200 @@
+#!/usr/bin/python
+#
+# Create a webfaction database using Ansible and the Webfaction API
+#
+# ------------------------------------------
+#
+# (c) Quentin Stafford-Fraser 2015, with contributions gratefully acknowledged from:
+# * Andy Baker
+# * Federico Tarantini
+#
+# This file is part of Ansible
+#
+# Ansible is free software: you can redistribute it and/or modify
+# it under the terms of the GNU General Public License as published by
+# the Free Software Foundation, either version 3 of the License, or
+# (at your option) any later version.
+#
+# Ansible is distributed in the hope that it will be useful,
+# but WITHOUT ANY WARRANTY; without even the implied warranty of
+# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+# GNU General Public License for more details.
+#
+# You should have received a copy of the GNU General Public License
+# along with Ansible. If not, see <http://www.gnu.org/licenses/>.
+#
+
+DOCUMENTATION = '''
+---
+module: webfaction_db
+short_description: Add or remove a database on Webfaction
+description:
+ - Add or remove a database on a Webfaction host. Further documentation at http://github.com/quentinsf/ansible-webfaction.
+author: Quentin Stafford-Fraser (@quentinsf)
+version_added: "2.0"
+notes:
+ - "You can run playbooks that use this on a local machine, or on a Webfaction host, or elsewhere, since the scripts use the remote webfaction API - the location is not important. However, running them on multiple hosts I(simultaneously) is best avoided. If you don't specify I(localhost) as your host, you may want to add C(serial: 1) to the plays."
+ - See `the webfaction API <http://docs.webfaction.com/xmlrpc-api/>`_ for more info.
+options:
+
+ name:
+ description:
+ - The name of the database
+ required: true
+
+ state:
+ description:
+ - Whether the database should exist
+ required: false
+ choices: ['present', 'absent']
+ default: "present"
+
+ type:
+ description:
+ - The type of database to create.
+ required: true
+ choices: ['mysql', 'postgresql']
+
+ password:
+ description:
+ - The password for the new database user.
+ required: false
+ default: None
+
+ login_name:
+ description:
+ - The webfaction account to use
+ required: true
+
+ login_password:
+ description:
+ - The webfaction password to use
+ required: true
+
+ machine:
+ description:
+ - The machine name to use (optional for accounts with only one machine)
+ required: false
+'''
+
+EXAMPLES = '''
+ # This will also create a default DB user with the same
+ # name as the database, and the specified password.
+
+ - name: Create a database
+ webfaction_db:
+ name: "{{webfaction_user}}_db1"
+ password: mytestsql
+ type: mysql
+ login_name: "{{webfaction_user}}"
+ login_password: "{{webfaction_passwd}}"
+ machine: "{{webfaction_machine}}"
+
+ # Note that, for symmetry's sake, deleting a database using
+ # 'state: absent' will also delete the matching user.
+
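+  # Removal sketch (assumes the database created above); per the note,
+  # the matching default user is deleted too.
+  - name: Remove the database
+    webfaction_db:
+      name: "{{webfaction_user}}_db1"
+      type: mysql
+      state: absent
+      login_name: "{{webfaction_user}}"
+      login_password: "{{webfaction_passwd}}"
+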
+'''
+
+import socket
+import xmlrpclib
+
+webfaction = xmlrpclib.ServerProxy('https://api.webfaction.com/')
+
+def main():
+
+ module = AnsibleModule(
+ argument_spec = dict(
+            name = dict(required=True),
+            state = dict(required=False, choices=['present', 'absent'], default='present'),
+            type = dict(required=True, choices=['mysql', 'postgresql']),
+            password = dict(required=False, default=None, no_log=True),
+            login_name = dict(required=True),
+            login_password = dict(required=True, no_log=True),
+            machine = dict(required=False, default=None),
+ ),
+ supports_check_mode=True
+ )
+ db_name = module.params['name']
+ db_state = module.params['state']
+ db_type = module.params['type']
+ db_passwd = module.params['password']
+
+ if module.params['machine']:
+ session_id, account = webfaction.login(
+ module.params['login_name'],
+ module.params['login_password'],
+ module.params['machine']
+ )
+ else:
+ session_id, account = webfaction.login(
+ module.params['login_name'],
+ module.params['login_password']
+ )
+
+ db_list = webfaction.list_dbs(session_id)
+ db_map = dict([(i['name'], i) for i in db_list])
+ existing_db = db_map.get(db_name)
+
+ user_list = webfaction.list_db_users(session_id)
+ user_map = dict([(i['username'], i) for i in user_list])
+ existing_user = user_map.get(db_name)
+
+ result = {}
+
+ # Here's where the real stuff happens
+
+ if db_state == 'present':
+
+ # Does a database with this name already exist?
+ if existing_db:
+ # Yes, but of a different type - fail
+ if existing_db['db_type'] != db_type:
+ module.fail_json(msg="Database already exists but is a different type. Please fix by hand.")
+
+ # If it exists with the right type, we don't change anything.
+ module.exit_json(
+ changed = False,
+ )
+
+
+ if not module.check_mode:
+ # If this isn't a dry run, create the db
+ # and default user.
+ result.update(
+ webfaction.create_db(
+ session_id, db_name, db_type, db_passwd
+ )
+ )
+
+ elif db_state == 'absent':
+
+        # If the db and its default user are already absent, nothing changes.
+        # (This is checked outside of check mode too, so a dry run reports
+        # the right 'changed' status.)
+        if not (existing_db or existing_user):
+            module.exit_json(changed = False,)
+
+        if not module.check_mode:
+
+            if existing_db:
+                # Delete the db if it exists
+                result.update(
+                    webfaction.delete_db(session_id, db_name, db_type)
+                )
+
+            if existing_user:
+                # Delete the default db user if it exists
+                result.update(
+                    webfaction.delete_db_user(session_id, db_name, db_type)
+                )
+
+ else:
+ module.fail_json(msg="Unknown state specified: {}".format(db_state))
+
+ module.exit_json(
+ changed = True,
+ result = result
+ )
+
+from ansible.module_utils.basic import *
+
+if __name__ == '__main__':
+    main()
+
diff --git a/lib/ansible/modules/extras/cloud/webfaction/webfaction_domain.py b/lib/ansible/modules/extras/cloud/webfaction/webfaction_domain.py
new file mode 100644
index 0000000000..c809dd6beb
--- /dev/null
+++ b/lib/ansible/modules/extras/cloud/webfaction/webfaction_domain.py
@@ -0,0 +1,171 @@
+#!/usr/bin/python
+#
+# Create Webfaction domains and subdomains using Ansible and the Webfaction API
+#
+# ------------------------------------------
+#
+# (c) Quentin Stafford-Fraser 2015
+#
+# This file is part of Ansible
+#
+# Ansible is free software: you can redistribute it and/or modify
+# it under the terms of the GNU General Public License as published by
+# the Free Software Foundation, either version 3 of the License, or
+# (at your option) any later version.
+#
+# Ansible is distributed in the hope that it will be useful,
+# but WITHOUT ANY WARRANTY; without even the implied warranty of
+# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+# GNU General Public License for more details.
+#
+# You should have received a copy of the GNU General Public License
+# along with Ansible. If not, see <http://www.gnu.org/licenses/>.
+#
+
+DOCUMENTATION = '''
+---
+module: webfaction_domain
+short_description: Add or remove domains and subdomains on Webfaction
+description:
+ - Add or remove domains or subdomains on a Webfaction host. Further documentation at http://github.com/quentinsf/ansible-webfaction.
+author: Quentin Stafford-Fraser (@quentinsf)
+version_added: "2.0"
+notes:
+ - If you are I(deleting) domains by using C(state=absent), then note that if you specify subdomains, just those particular subdomains will be deleted. If you don't specify subdomains, the domain will be deleted.
+ - "You can run playbooks that use this on a local machine, or on a Webfaction host, or elsewhere, since the scripts use the remote webfaction API - the location is not important. However, running them on multiple hosts I(simultaneously) is best avoided. If you don't specify I(localhost) as your host, you may want to add C(serial: 1) to the plays."
+ - See `the webfaction API <http://docs.webfaction.com/xmlrpc-api/>`_ for more info.
+
+options:
+
+ name:
+ description:
+ - The name of the domain
+ required: true
+
+ state:
+ description:
+ - Whether the domain should exist
+ required: false
+ choices: ['present', 'absent']
+ default: "present"
+
+ subdomains:
+ description:
+ - Any subdomains to create.
+ required: false
+ default: null
+
+ login_name:
+ description:
+ - The webfaction account to use
+ required: true
+
+ login_password:
+ description:
+ - The webfaction password to use
+ required: true
+'''
+
+EXAMPLES = '''
+ - name: Create a test domain
+ webfaction_domain:
+ name: mydomain.com
+ state: present
+ subdomains:
+ - www
+ - blog
+ login_name: "{{webfaction_user}}"
+ login_password: "{{webfaction_passwd}}"
+
+ - name: Delete test domain and any subdomains
+ webfaction_domain:
+ name: mydomain.com
+ state: absent
+ login_name: "{{webfaction_user}}"
+ login_password: "{{webfaction_passwd}}"
+
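+  # Hedged sketch: per the notes, listing subdomains with 'state: absent'
+  # deletes just those subdomains, not the whole domain.
+  - name: Delete only the blog subdomain
+    webfaction_domain:
+      name: mydomain.com
+      state: absent
+      subdomains:
+        - blog
+      login_name: "{{webfaction_user}}"
+      login_password: "{{webfaction_passwd}}"
+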
+'''
+
+import socket
+import xmlrpclib
+
+webfaction = xmlrpclib.ServerProxy('https://api.webfaction.com/')
+
+def main():
+
+ module = AnsibleModule(
+ argument_spec = dict(
+            name = dict(required=True),
+            state = dict(required=False, choices=['present', 'absent'], default='present'),
+            subdomains = dict(required=False, type='list', default=[]),
+            login_name = dict(required=True),
+            login_password = dict(required=True, no_log=True),
+ ),
+ supports_check_mode=True
+ )
+ domain_name = module.params['name']
+ domain_state = module.params['state']
+ domain_subdomains = module.params['subdomains']
+
+ session_id, account = webfaction.login(
+ module.params['login_name'],
+ module.params['login_password']
+ )
+
+ domain_list = webfaction.list_domains(session_id)
+ domain_map = dict([(i['domain'], i) for i in domain_list])
+ existing_domain = domain_map.get(domain_name)
+
+ result = {}
+
+ # Here's where the real stuff happens
+
+ if domain_state == 'present':
+
+        # Does a domain with this name already exist?
+ if existing_domain:
+
+ if set(existing_domain['subdomains']) >= set(domain_subdomains):
+ # If it exists with the right subdomains, we don't change anything.
+ module.exit_json(
+ changed = False,
+ )
+
+ positional_args = [session_id, domain_name] + domain_subdomains
+
+        if not module.check_mode:
+            # If this isn't a dry run, create the domain
+ result.update(
+ webfaction.create_domain(
+ *positional_args
+ )
+ )
+
+ elif domain_state == 'absent':
+
+        # If the domain's already not there, nothing changed.
+ if not existing_domain:
+ module.exit_json(
+ changed = False,
+ )
+
+ positional_args = [session_id, domain_name] + domain_subdomains
+
+ if not module.check_mode:
+            # If this isn't a dry run, delete the domain (or just the listed subdomains)
+ result.update(
+ webfaction.delete_domain(*positional_args)
+ )
+
+ else:
+ module.fail_json(msg="Unknown state specified: {}".format(domain_state))
+
+ module.exit_json(
+ changed = True,
+ result = result
+ )
+
+from ansible.module_utils.basic import *
+
+if __name__ == '__main__':
+    main()
+
diff --git a/lib/ansible/modules/extras/cloud/webfaction/webfaction_mailbox.py b/lib/ansible/modules/extras/cloud/webfaction/webfaction_mailbox.py
new file mode 100644
index 0000000000..bcb355c963
--- /dev/null
+++ b/lib/ansible/modules/extras/cloud/webfaction/webfaction_mailbox.py
@@ -0,0 +1,139 @@
+#!/usr/bin/python
+#
+# Create webfaction mailbox using Ansible and the Webfaction API
+#
+# ------------------------------------------
+# (c) Quentin Stafford-Fraser and Andy Baker 2015
+#
+# This file is part of Ansible
+#
+# Ansible is free software: you can redistribute it and/or modify
+# it under the terms of the GNU General Public License as published by
+# the Free Software Foundation, either version 3 of the License, or
+# (at your option) any later version.
+#
+# Ansible is distributed in the hope that it will be useful,
+# but WITHOUT ANY WARRANTY; without even the implied warranty of
+# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+# GNU General Public License for more details.
+#
+# You should have received a copy of the GNU General Public License
+# along with Ansible. If not, see <http://www.gnu.org/licenses/>.
+#
+
+DOCUMENTATION = '''
+---
+module: webfaction_mailbox
+short_description: Add or remove mailboxes on Webfaction
+description:
+ - Add or remove mailboxes on a Webfaction account. Further documentation at http://github.com/quentinsf/ansible-webfaction.
+author: Quentin Stafford-Fraser (@quentinsf)
+version_added: "2.0"
+notes:
+ - "You can run playbooks that use this on a local machine, or on a Webfaction host, or elsewhere, since the scripts use the remote webfaction API - the location is not important. However, running them on multiple hosts I(simultaneously) is best avoided. If you don't specify I(localhost) as your host, you may want to add C(serial: 1) to the plays."
+ - See `the webfaction API <http://docs.webfaction.com/xmlrpc-api/>`_ for more info.
+options:
+
+ mailbox_name:
+ description:
+ - The name of the mailbox
+ required: true
+
+ mailbox_password:
+ description:
+ - The password for the mailbox
+    required: true
+
+ state:
+ description:
+ - Whether the mailbox should exist
+ required: false
+ choices: ['present', 'absent']
+ default: "present"
+
+ login_name:
+ description:
+ - The webfaction account to use
+ required: true
+
+ login_password:
+ description:
+ - The webfaction password to use
+ required: true
+'''
+
+EXAMPLES = '''
+  - name: Create a mailbox
+    webfaction_mailbox:
+      mailbox_name: mybox
+      mailbox_password: myboxpw
+      state: present
+      login_name: "{{webfaction_user}}"
+      login_password: "{{webfaction_passwd}}"
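+
+  # Removal sketch (assumes the mailbox created above; the password is still
+  # required by the module's argument spec):
+  - name: Delete the mailbox
+    webfaction_mailbox:
+      mailbox_name: mybox
+      mailbox_password: myboxpw
+      state: absent
+      login_name: "{{webfaction_user}}"
+      login_password: "{{webfaction_passwd}}"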
+'''
+
+import socket
+import xmlrpclib
+
+webfaction = xmlrpclib.ServerProxy('https://api.webfaction.com/')
+
+def main():
+
+ module = AnsibleModule(
+ argument_spec=dict(
+            mailbox_name=dict(required=True),
+            mailbox_password=dict(required=True, no_log=True),
+            state=dict(required=False, choices=['present', 'absent'], default='present'),
+            login_name=dict(required=True),
+            login_password=dict(required=True, no_log=True),
+ ),
+ supports_check_mode=True
+ )
+
+ mailbox_name = module.params['mailbox_name']
+ site_state = module.params['state']
+
+ session_id, account = webfaction.login(
+ module.params['login_name'],
+ module.params['login_password']
+ )
+
+ mailbox_list = [x['mailbox'] for x in webfaction.list_mailboxes(session_id)]
+ existing_mailbox = mailbox_name in mailbox_list
+
+ result = {}
+
+ # Here's where the real stuff happens
+
+ if site_state == 'present':
+
+ # Does a mailbox with this name already exist?
+ if existing_mailbox:
+ module.exit_json(changed=False,)
+
+ positional_args = [session_id, mailbox_name]
+
+ if not module.check_mode:
+ # If this isn't a dry run, create the mailbox
+ result.update(webfaction.create_mailbox(*positional_args))
+
+ elif site_state == 'absent':
+
+ # If the mailbox is already not there, nothing changed.
+ if not existing_mailbox:
+ module.exit_json(changed=False)
+
+ if not module.check_mode:
+ # If this isn't a dry run, delete the mailbox
+ result.update(webfaction.delete_mailbox(session_id, mailbox_name))
+
+ else:
+ module.fail_json(msg="Unknown state specified: {}".format(site_state))
+
+ module.exit_json(changed=True, result=result)
+
+
+from ansible.module_utils.basic import *
+
+if __name__ == '__main__':
+    main()
+
diff --git a/lib/ansible/modules/extras/cloud/webfaction/webfaction_site.py b/lib/ansible/modules/extras/cloud/webfaction/webfaction_site.py
new file mode 100644
index 0000000000..bd5504b6b4
--- /dev/null
+++ b/lib/ansible/modules/extras/cloud/webfaction/webfaction_site.py
@@ -0,0 +1,210 @@
+#!/usr/bin/python
+#
+# Create Webfaction website using Ansible and the Webfaction API
+#
+# ------------------------------------------
+#
+# (c) Quentin Stafford-Fraser 2015
+#
+# This file is part of Ansible
+#
+# Ansible is free software: you can redistribute it and/or modify
+# it under the terms of the GNU General Public License as published by
+# the Free Software Foundation, either version 3 of the License, or
+# (at your option) any later version.
+#
+# Ansible is distributed in the hope that it will be useful,
+# but WITHOUT ANY WARRANTY; without even the implied warranty of
+# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+# GNU General Public License for more details.
+#
+# You should have received a copy of the GNU General Public License
+# along with Ansible. If not, see <http://www.gnu.org/licenses/>.
+#
+
+DOCUMENTATION = '''
+---
+module: webfaction_site
+short_description: Add or remove a website on a Webfaction host
+description:
+ - Add or remove a website on a Webfaction host. Further documentation at http://github.com/quentinsf/ansible-webfaction.
+author: Quentin Stafford-Fraser (@quentinsf)
+version_added: "2.0"
+notes:
+ - Sadly, you I(do) need to know your webfaction hostname for the C(host) parameter. But at least, unlike the API, you don't need to know the IP address - you can use a DNS name.
+ - If a site of the same name exists in the account but on a different host, the operation will exit.
+ - "You can run playbooks that use this on a local machine, or on a Webfaction host, or elsewhere, since the scripts use the remote webfaction API - the location is not important. However, running them on multiple hosts I(simultaneously) is best avoided. If you don't specify I(localhost) as your host, you may want to add C(serial: 1) to the plays."
+ - See `the webfaction API <http://docs.webfaction.com/xmlrpc-api/>`_ for more info.
+
+options:
+
+ name:
+ description:
+ - The name of the website
+ required: true
+
+ state:
+ description:
+ - Whether the website should exist
+ required: false
+ choices: ['present', 'absent']
+ default: "present"
+
+ host:
+ description:
+ - The webfaction host on which the site should be created.
+ required: true
+
+ https:
+ description:
+ - Whether or not to use HTTPS
+ required: false
+ choices:
+ - true
+ - false
+ default: 'false'
+
+ site_apps:
+ description:
+ - A mapping of URLs to apps
+ required: false
+
+ subdomains:
+ description:
+ - A list of subdomains associated with this site.
+ required: false
+ default: null
+
+ login_name:
+ description:
+ - The webfaction account to use
+ required: true
+
+ login_password:
+ description:
+ - The webfaction password to use
+ required: true
+'''
+
+EXAMPLES = '''
+ - name: create website
+ webfaction_site:
+ name: testsite1
+ state: present
+ host: myhost.webfaction.com
+ subdomains:
+ - 'testsite1.my_domain.org'
+ site_apps:
+ - ['testapp1', '/']
+ https: no
+ login_name: "{{webfaction_user}}"
+ login_password: "{{webfaction_passwd}}"
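+
+  # Removal sketch (same made-up names as above; host is still required so
+  # the module can resolve the site's IP address):
+  - name: delete website
+    webfaction_site:
+      name: testsite1
+      state: absent
+      host: myhost.webfaction.com
+      login_name: "{{webfaction_user}}"
+      login_password: "{{webfaction_passwd}}"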
+'''
+
+import socket
+import xmlrpclib
+
+webfaction = xmlrpclib.ServerProxy('https://api.webfaction.com/')
+
+def main():
+
+ module = AnsibleModule(
+ argument_spec = dict(
+ name = dict(required=True),
+ state = dict(required=False, choices=['present', 'absent'], default='present'),
+ # You can specify an IP address or hostname.
+ host = dict(required=True),
+ https = dict(required=False, type='bool', default=False),
+ subdomains = dict(required=False, type='list', default=[]),
+ site_apps = dict(required=False, type='list', default=[]),
+            login_name = dict(required=True),
+            login_password = dict(required=True, no_log=True),
+ ),
+ supports_check_mode=True
+ )
+ site_name = module.params['name']
+ site_state = module.params['state']
+ site_host = module.params['host']
+ site_ip = socket.gethostbyname(site_host)
+
+ session_id, account = webfaction.login(
+ module.params['login_name'],
+ module.params['login_password']
+ )
+
+ site_list = webfaction.list_websites(session_id)
+ site_map = dict([(i['name'], i) for i in site_list])
+ existing_site = site_map.get(site_name)
+
+ result = {}
+
+ # Here's where the real stuff happens
+
+ if site_state == 'present':
+
+ # Does a site with this name already exist?
+ if existing_site:
+
+ # If yes, but it's on a different IP address, then fail.
+ # If we wanted to allow relocation, we could add a 'relocate=true' option
+ # which would get the existing IP address, delete the site there, and create it
+ # at the new address. A bit dangerous, perhaps, so for now we'll require manual
+ # deletion if it's on another host.
+
+ if existing_site['ip'] != site_ip:
+ module.fail_json(msg="Website already exists with a different IP address. Please fix by hand.")
+
+ # If it's on this host and the key parameters are the same, nothing needs to be done.
+
+ if (existing_site['https'] == module.boolean(module.params['https'])) and \
+ (set(existing_site['subdomains']) == set(module.params['subdomains'])) and \
+ (dict(existing_site['website_apps']) == dict(module.params['site_apps'])):
+ module.exit_json(
+ changed = False
+ )
+
+ positional_args = [
+ session_id, site_name, site_ip,
+ module.boolean(module.params['https']),
+ module.params['subdomains'],
+ ]
+ for a in module.params['site_apps']:
+ positional_args.append( (a[0], a[1]) )
+
+ if not module.check_mode:
+ # If this isn't a dry run, create or modify the site
+ result.update(
+ webfaction.create_website(
+ *positional_args
+                ) if not existing_site else webfaction.update_website(
+ *positional_args
+ )
+ )
+
+ elif site_state == 'absent':
+
+ # If the site's already not there, nothing changed.
+ if not existing_site:
+ module.exit_json(
+ changed = False,
+ )
+
+ if not module.check_mode:
+ # If this isn't a dry run, delete the site
+ result.update(
+ webfaction.delete_website(session_id, site_name, site_ip)
+ )
+
+ else:
+ module.fail_json(msg="Unknown state specified: {}".format(site_state))
+
+ module.exit_json(
+ changed = True,
+ result = result
+ )
+
+
+
+from ansible.module_utils.basic import *
+
+if __name__ == '__main__':
+    main()
+
diff --git a/lib/ansible/modules/extras/cloud/xenserver_facts.py b/lib/ansible/modules/extras/cloud/xenserver_facts.py
new file mode 100644
index 0000000000..fdefee9f2e
--- /dev/null
+++ b/lib/ansible/modules/extras/cloud/xenserver_facts.py
@@ -0,0 +1,204 @@
+#!/usr/bin/python -tt
+# This file is part of Ansible
+#
+# Ansible is free software: you can redistribute it and/or modify
+# it under the terms of the GNU General Public License as published by
+# the Free Software Foundation, either version 3 of the License, or
+# (at your option) any later version.
+#
+# Ansible is distributed in the hope that it will be useful,
+# but WITHOUT ANY WARRANTY; without even the implied warranty of
+# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+# GNU General Public License for more details.
+#
+# You should have received a copy of the GNU General Public License
+# along with Ansible. If not, see <http://www.gnu.org/licenses/>.
+
+
+DOCUMENTATION = '''
+---
+module: xenserver_facts
+version_added: "2.0"
+short_description: get facts reported on xenserver
+description:
+ - Reads data out of XenAPI, can be used instead of multiple xe commands.
+author:
+ - Andy Hill (@andyhky)
+ - Tim Rupp
+'''
+
+import platform
+
+HAVE_XENAPI = False
+try:
+ import XenAPI
+ HAVE_XENAPI = True
+except ImportError:
+ pass
+
+EXAMPLES = '''
+- name: Gather facts from xenserver
+  xenserver_facts:
+
+- name: Print running VMs
+ debug: msg="{{ item }}"
+ with_items: xs_vms.keys()
+ when: xs_vms[item]['power_state'] == "Running"
+
+# Typical output from the 'Print running VMs' task:
+#
+# TASK: [Print running VMs] ***********************************************************
+# skipping: [10.13.0.22] => (item=CentOS 4.7 (32-bit))
+# ok: [10.13.0.22] => (item=Control domain on host: 10.0.13.22) => {
+#     "item": "Control domain on host: 10.0.13.22",
+#     "msg": "Control domain on host: 10.0.13.22"
+# }
+'''
+
+class XenServerFacts:
+ def __init__(self):
+ self.codes = {
+ '5.5.0': 'george',
+ '5.6.100': 'oxford',
+ '6.0.0': 'boston',
+ '6.1.0': 'tampa',
+ '6.2.0': 'clearwater'
+ }
+
+ @property
+ def version(self):
+        # Note: platform.dist() has been deprecated since Python 2.6
+ result = platform.dist()[1]
+ return result
+
+ @property
+ def codename(self):
+ if self.version in self.codes:
+ result = self.codes[self.version]
+ else:
+ result = None
+
+ return result
+
+
+def get_xenapi_session():
+ session = XenAPI.xapi_local()
+ session.xenapi.login_with_password('', '')
+ return session
+
+
+def get_networks(session):
+ recs = session.xenapi.network.get_all_records()
+ xs_networks = {}
+ networks = change_keys(recs, key='uuid')
+ for network in networks.itervalues():
+ xs_networks[network['name_label']] = network
+ return xs_networks
+
+
+def get_pifs(session):
+ recs = session.xenapi.PIF.get_all_records()
+ pifs = change_keys(recs, key='uuid')
+ xs_pifs = {}
+ devicenums = range(0, 7)
+ for pif in pifs.itervalues():
+ for eth in devicenums:
+ interface_name = "eth%s" % (eth)
+ bond_name = interface_name.replace('eth', 'bond')
+ if pif['device'] == interface_name:
+ xs_pifs[interface_name] = pif
+ elif pif['device'] == bond_name:
+ xs_pifs[bond_name] = pif
+ return xs_pifs
+
+
+def get_vlans(session):
+ recs = session.xenapi.VLAN.get_all_records()
+ return change_keys(recs, key='tag')
+
+
+def change_keys(recs, key='uuid', filter_func=None):
+ """
+ Take a xapi dict, and make the keys the value of recs[ref][key].
+
+ Preserves the ref in rec['ref']
+
+ """
+ new_recs = {}
+
+ for ref, rec in recs.iteritems():
+ if filter_func is not None and not filter_func(rec):
+ continue
+
+ new_recs[rec[key]] = rec
+ new_recs[rec[key]]['ref'] = ref
+
+ return new_recs
+
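+# A minimal sketch of what change_keys() does (ref/record values made up):
+#
+#   recs = {'OpaqueRef:1': {'uuid': 'abc-123', 'name_label': 'net0'}}
+#   change_keys(recs, key='uuid')
+#   -> {'abc-123': {'uuid': 'abc-123', 'name_label': 'net0', 'ref': 'OpaqueRef:1'}}
+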
+def get_host(session):
+ """Get the host"""
+ host_recs = session.xenapi.host.get_all()
+ # We only have one host, so just return its entry
+ return session.xenapi.host.get_record(host_recs[0])
+
+def get_vms(session):
+ xs_vms = {}
+    recs = session.xenapi.VM.get_all_records()
+ if not recs:
+ return None
+
+ vms = change_keys(recs, key='uuid')
+ for vm in vms.itervalues():
+ xs_vms[vm['name_label']] = vm
+ return xs_vms
+
+
+def get_srs(session):
+ xs_srs = {}
+    recs = session.xenapi.SR.get_all_records()
+ if not recs:
+ return None
+ srs = change_keys(recs, key='uuid')
+ for sr in srs.itervalues():
+ xs_srs[sr['name_label']] = sr
+ return xs_srs
+
+def main():
+ module = AnsibleModule({})
+
+ if not HAVE_XENAPI:
+ module.fail_json(changed=False, msg="python xen api required for this module")
+
+ obj = XenServerFacts()
+ try:
+ session = get_xenapi_session()
+ except XenAPI.Failure, e:
+ module.fail_json(msg='%s' % e)
+
+ data = {
+ 'xenserver_version': obj.version,
+ 'xenserver_codename': obj.codename
+ }
+
+ xs_networks = get_networks(session)
+ xs_pifs = get_pifs(session)
+ xs_vlans = get_vlans(session)
+ xs_vms = get_vms(session)
+ xs_srs = get_srs(session)
+
+ if xs_vlans:
+ data['xs_vlans'] = xs_vlans
+ if xs_pifs:
+ data['xs_pifs'] = xs_pifs
+ if xs_networks:
+ data['xs_networks'] = xs_networks
+
+ if xs_vms:
+ data['xs_vms'] = xs_vms
+
+ if xs_srs:
+ data['xs_srs'] = xs_srs
+
+    # return the collected data as host facts, so e.g. xs_vms is usable in plays
+    module.exit_json(ansible_facts=data)
+
+from ansible.module_utils.basic import *
+
+if __name__ == '__main__':
+    main()
diff --git a/lib/ansible/modules/extras/clustering/__init__.py b/lib/ansible/modules/extras/clustering/__init__.py
new file mode 100644
index 0000000000..e69de29bb2
--- /dev/null
+++ b/lib/ansible/modules/extras/clustering/__init__.py
diff --git a/lib/ansible/modules/extras/clustering/consul.py b/lib/ansible/modules/extras/clustering/consul.py
new file mode 100644
index 0000000000..b9cdfb09d8
--- /dev/null
+++ b/lib/ansible/modules/extras/clustering/consul.py
@@ -0,0 +1,572 @@
+#!/usr/bin/python
+#
+# (c) 2015, Steve Gargan <steve.gargan@gmail.com>
+#
+# This file is part of Ansible
+#
+# Ansible is free software: you can redistribute it and/or modify
+# it under the terms of the GNU General Public License as published by
+# the Free Software Foundation, either version 3 of the License, or
+# (at your option) any later version.
+#
+# Ansible is distributed in the hope that it will be useful,
+# but WITHOUT ANY WARRANTY; without even the implied warranty of
+# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+# GNU General Public License for more details.
+#
+# You should have received a copy of the GNU General Public License
+# along with Ansible. If not, see <http://www.gnu.org/licenses/>.
+
+DOCUMENTATION = """
+module: consul
+short_description: "Add, modify & delete services within a consul cluster."
+description:
+ - Registers services and checks for an agent with a consul cluster.
+ A service is some process running on the agent node that should be advertised by
+ consul's discovery mechanism. It may optionally supply a check definition,
+ a periodic service test to notify the consul cluster of service's health.
+  - "Checks may also be registered per node, e.g. disk usage or cpu usage, to
+    notify the health of the entire node to the cluster.
+    Service level checks do not require a check name or id as these are derived
+    by Consul from the Service name and id respectively by appending 'service:'.
+    Node level checks require a check_name and optionally a check_id."
+  - Currently, there is no complete way to retrieve the script, interval or ttl
+    metadata for a registered check. Without this metadata it is not possible to
+    tell if the data supplied with ansible represents a change to a check. As a
+    result this module does not attempt to determine changes and will always report
+    that a change occurred. An api method is planned to supply this metadata, so
+    change management will be added at that stage.
+ - "See http://consul.io for more details."
+requirements:
+ - "python >= 2.6"
+ - python-consul
+ - requests
+version_added: "2.0"
+author: "Steve Gargan (@sgargan)"
+options:
+  state:
+    description:
+      - register or deregister the consul service
+    required: false
+    choices: ['present', 'absent']
+    default: present
+ service_name:
+ description:
+ - Unique name for the service on a node, must be unique per node,
+        required if registering a service. May be omitted if registering
+ a node level check
+ required: false
+ service_id:
+ description:
+ - the ID for the service, must be unique per node, defaults to the
+ service name if the service name is supplied
+ required: false
+ default: service_name if supplied
+ host:
+ description:
+ - host of the consul agent defaults to localhost
+ required: false
+ default: localhost
+ port:
+ description:
+ - the port on which the consul agent is running
+ required: false
+ default: 8500
+ scheme:
+ description:
+ - the protocol scheme on which the consul agent is running
+ required: false
+ default: http
+ version_added: "2.1"
+ validate_certs:
+ description:
+ - whether to verify the tls certificate of the consul agent
+ required: false
+ default: True
+ version_added: "2.1"
+ notes:
+ description:
+ - Notes to attach to check when registering it.
+ required: false
+ default: None
+ service_port:
+ description:
+ - the port on which the service is listening required for
+ registration of a service, i.e. if service_name or service_id is set
+ required: false
+ service_address:
+ description:
+ - the address to advertise that the service will be listening on.
+ This value will be passed as the I(Address) parameter to Consul's
+ U(/v1/agent/service/register) API method, so refer to the Consul API
+ documentation for further details.
+ required: false
+ default: None
+ version_added: "2.1"
+ tags:
+ description:
+ - a list of tags that will be attached to the service registration.
+ required: false
+ default: None
+ script:
+ description:
+ - the script/command that will be run periodically to check the health
+        of the service. Scripts require an interval and vice versa.
+ required: false
+ default: None
+ interval:
+ description:
+ - the interval at which the service check will be run. This is a number
+ with a s or m suffix to signify the units of seconds or minutes e.g
+        15s or 1m. If no suffix is supplied, s will be used by default, e.g.
+        1 will be 1s. Required if the script param is specified.
+ required: false
+ default: None
+ check_id:
+ description:
+ - an ID for the service check, defaults to the check name, ignored if
+ part of a service definition.
+ required: false
+ default: None
+ check_name:
+ description:
+ - a name for the service check, defaults to the check id. required if
+ standalone, ignored if part of service definition.
+ required: false
+ default: None
+ ttl:
+ description:
+      - checks can be registered with a ttl instead of a script and interval.
+        This means that the service will check in with the agent before the
+        ttl expires. If it doesn't, the check will be considered failed.
+        Required if registering a check and the script and interval are missing.
+        Similar to the interval, this is a number with a s or m suffix to
+        signify the units of seconds or minutes, e.g. 15s or 1m. If no suffix
+        is supplied, s will be used by default, e.g. 1 will be 1s.
+ required: false
+ default: None
+ http:
+ description:
+ - checks can be registered with an http endpoint. This means that consul
+ will check that the http endpoint returns a successful http status.
+ Interval must also be provided with this option.
+ required: false
+ default: None
+ version_added: "2.0"
+ timeout:
+ description:
+ - A custom HTTP check timeout. The consul default is 10 seconds.
+ Similar to the interval this is a number with a s or m suffix to
+ signify the units of seconds or minutes, e.g. 15s or 1m.
+ required: false
+ default: None
+ version_added: "2.0"
+ token:
+ description:
+      - the token key identifying an ACL rule set. May be required to register services.
+ required: false
+ default: None
+"""
+
+EXAMPLES = '''
+ - name: register nginx service with the local consul agent
+ consul:
+ service_name: nginx
+ service_port: 80
+
+ - name: register nginx service with curl check
+ consul:
+ service_name: nginx
+ service_port: 80
+ script: "curl http://localhost"
+ interval: 60s
+
+ - name: register nginx with an http check
+ consul:
+ service_name: nginx
+ service_port: 80
+ interval: 60s
+ http: /status
+
+ - name: register external service nginx available at 10.1.5.23
+ consul:
+ service_name: nginx
+ service_port: 80
+ service_address: 10.1.5.23
+
+ - name: register nginx with some service tags
+ consul:
+ service_name: nginx
+ service_port: 80
+ tags:
+ - prod
+ - webservers
+
+ - name: remove nginx service
+ consul:
+ service_name: nginx
+ state: absent
+
+ - name: create a node level check to test disk usage
+ consul:
+ check_name: Disk usage
+ check_id: disk_usage
+ script: "/opt/disk_usage.py"
+ interval: 5m
+
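+  # A hedged sketch of a ttl-based check (values made up): the service itself
+  # must report in before the ttl expires or the check is considered failed.
+  - name: register nginx with a ttl check
+    consul:
+      service_name: nginx
+      service_port: 80
+      ttl: 30s
+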
+'''
+
+try:
+ import consul
+ from requests.exceptions import ConnectionError
+ python_consul_installed = True
+except ImportError, e:
+ python_consul_installed = False
+
+def register_with_consul(module):
+
+ state = module.params.get('state')
+
+ if state == 'present':
+ add(module)
+ else:
+ remove(module)
+
+
+def add(module):
+ ''' adds a service or a check depending on supplied configuration'''
+ check = parse_check(module)
+ service = parse_service(module)
+
+ if not service and not check:
+ module.fail_json(msg='a name and port are required to register a service')
+
+ if service:
+ if check:
+ service.add_check(check)
+ add_service(module, service)
+ elif check:
+ add_check(module, check)
+
+
+def remove(module):
+ ''' removes a service or a check '''
+ service_id = module.params.get('service_id') or module.params.get('service_name')
+ check_id = module.params.get('check_id') or module.params.get('check_name')
+ if not (service_id or check_id):
+ module.fail_json(msg='services and checks are removed by id or name. please supply a service id/name or a check id/name')
+ if service_id:
+ remove_service(module, service_id)
+ else:
+ remove_check(module, check_id)
+
+
+def add_check(module, check):
+    ''' registers a check with the given agent. currently there is no way to
+        retrieve the full metadata of an existing check through the consul api.
+        Without this we can't compare to the supplied check and so we must assume
+        a change. '''
+ if not check.name:
+ module.fail_json(msg='a check name is required for a node level check, one not attached to a service')
+
+ consul_api = get_consul_api(module)
+ check.register(consul_api)
+
+ module.exit_json(changed=True,
+ check_id=check.check_id,
+ check_name=check.name,
+ script=check.script,
+ interval=check.interval,
+ ttl=check.ttl,
+ http=check.http,
+ timeout=check.timeout)
+
+
+def remove_check(module, check_id):
+ ''' removes a check using its id '''
+ consul_api = get_consul_api(module)
+
+ if check_id in consul_api.agent.checks():
+ consul_api.agent.check.deregister(check_id)
+ module.exit_json(changed=True, id=check_id)
+
+ module.exit_json(changed=False, id=check_id)
+
+
+def add_service(module, service):
+    ''' registers a service with the current agent '''
+ result = service
+ changed = False
+
+ consul_api = get_consul_api(module)
+ existing = get_service_by_id(consul_api, service.id)
+
+ # there is no way to retrieve the details of checks so if a check is present
+ # in the service it must be re-registered
+ if service.has_checks() or not existing or not existing == service:
+
+ service.register(consul_api)
+ # check that it registered correctly
+ registered = get_service_by_id(consul_api, service.id)
+ if registered:
+ result = registered
+ changed = True
+
+ module.exit_json(changed=changed,
+ service_id=result.id,
+ service_name=result.name,
+ service_port=result.port,
+ checks=[check.to_dict() for check in service.checks],
+ tags=result.tags)
+
+
+def remove_service(module, service_id):
+ ''' deregister a service from the given agent using its service id '''
+ consul_api = get_consul_api(module)
+ service = get_service_by_id(consul_api, service_id)
+ if service:
+ consul_api.agent.service.deregister(service_id)
+ module.exit_json(changed=True, id=service_id)
+
+ module.exit_json(changed=False, id=service_id)
+
+
+def get_consul_api(module, token=None):
+    # fall back to the module's token parameter when none is passed explicitly
+    if not token:
+        token = module.params.get('token')
+    return consul.Consul(host=module.params.get('host'),
+                         port=module.params.get('port'),
+                         scheme=module.params.get('scheme'),
+                         verify=module.params.get('validate_certs'),
+                         token=token)
+
+
+def get_service_by_id(consul_api, service_id):
+ ''' iterate the registered services and find one with the given id '''
+ for name, service in consul_api.agent.services().iteritems():
+ if service['ID'] == service_id:
+ return ConsulService(loaded=service)
+
+
+def parse_check(module):
+
+ if len(filter(None, [module.params.get('script'), module.params.get('ttl'), module.params.get('http')])) > 1:
+ module.fail_json(
+            msg='checks are either script, http or ttl driven, supplying more than one does not make sense')
+
+ if module.params.get('check_id') or module.params.get('script') or module.params.get('ttl') or module.params.get('http'):
+
+ return ConsulCheck(
+ module.params.get('check_id'),
+ module.params.get('check_name'),
+ module.params.get('check_node'),
+ module.params.get('check_host'),
+ module.params.get('script'),
+ module.params.get('interval'),
+ module.params.get('ttl'),
+ module.params.get('notes'),
+ module.params.get('http'),
+ module.params.get('timeout')
+ )
+
+
+def parse_service(module):
+
+ if module.params.get('service_name') and module.params.get('service_port'):
+ return ConsulService(
+ module.params.get('service_id'),
+ module.params.get('service_name'),
+ module.params.get('service_address'),
+ module.params.get('service_port'),
+ module.params.get('tags'),
+ )
+ elif module.params.get('service_name') and not module.params.get('service_port'):
+
+ module.fail_json( msg="service_name supplied but no service_port, a port is required to configure a service. Did you configure the 'port' argument meaning 'service_port'?")
+
+
+class ConsulService():
+
+ def __init__(self, service_id=None, name=None, address=None, port=-1,
+ tags=None, loaded=None):
+ self.id = self.name = name
+ if service_id:
+ self.id = service_id
+ self.address = address
+ self.port = port
+ self.tags = tags
+ self.checks = []
+ if loaded:
+ self.id = loaded['ID']
+ self.name = loaded['Service']
+ self.port = loaded['Port']
+ self.tags = loaded['Tags']
+
+ def register(self, consul_api):
+ if len(self.checks) > 0:
+ check = self.checks[0]
+
+ consul_api.agent.service.register(
+ self.name,
+ service_id=self.id,
+ address=self.address,
+ port=self.port,
+ tags=self.tags,
+ check=check.check)
+ else:
+ consul_api.agent.service.register(
+ self.name,
+ service_id=self.id,
+ address=self.address,
+ port=self.port,
+ tags=self.tags)
+
+ def add_check(self, check):
+ self.checks.append(check)
+
+ def has_checks(self):
+ return len(self.checks) > 0
+
+ def __eq__(self, other):
+ return (isinstance(other, self.__class__)
+ and self.id == other.id
+ and self.name == other.name
+ and self.port == other.port
+ and self.tags == other.tags)
+
+ def __ne__(self, other):
+ return not self.__eq__(other)
+
+ def to_dict(self):
+ data = {'id': self.id, "name": self.name}
+ if self.port:
+ data['port'] = self.port
+ if self.tags and len(self.tags) > 0:
+ data['tags'] = self.tags
+ if len(self.checks) > 0:
+ data['check'] = self.checks[0].to_dict()
+ return data
+
+
+class ConsulCheck():
+
+ def __init__(self, check_id, name, node=None, host='localhost',
+ script=None, interval=None, ttl=None, notes=None, http=None, timeout=None):
+ self.check_id = self.name = name
+ if check_id:
+ self.check_id = check_id
+ self.notes = notes
+ self.node = node
+ self.host = host
+
+ self.interval = self.validate_duration('interval', interval)
+ self.ttl = self.validate_duration('ttl', ttl)
+ self.script = script
+ self.http = http
+ self.timeout = self.validate_duration('timeout', timeout)
+
+ self.check = None
+
+ if script:
+ self.check = consul.Check.script(script, self.interval)
+
+ if ttl:
+ self.check = consul.Check.ttl(self.ttl)
+
+ if http:
+ if interval is None:
+ raise Exception('http check must specify interval')
+
+ self.check = consul.Check.http(http, self.interval, self.timeout)
+
+
+ def validate_duration(self, name, duration):
+ if duration:
+ duration_units = ['ns', 'us', 'ms', 's', 'm', 'h']
+ if not any((duration.endswith(suffix) for suffix in duration_units)):
+ duration = "{}s".format(duration)
+ return duration
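+        # e.g. validate_duration('interval', '10') returns '10s', while values
+        # that already carry a unit suffix ('15s', '1m', '500ms') pass through.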
+
+ def register(self, consul_api):
+ consul_api.agent.check.register(self.name, check_id=self.check_id,
+ notes=self.notes,
+ check=self.check)
+
+    def __eq__(self, other):
+        return (isinstance(other, self.__class__)
+                and self.check_id == other.check_id
+                and self.name == other.name
+                and self.script == other.script
+                and self.interval == other.interval)
+
+ def __ne__(self, other):
+ return not self.__eq__(other)
+
+ def to_dict(self):
+ data = {}
+ self._add(data, 'id', attr='check_id')
+        self._add(data, 'name')
+ self._add(data, 'script')
+ self._add(data, 'node')
+ self._add(data, 'notes')
+ self._add(data, 'host')
+ self._add(data, 'interval')
+ self._add(data, 'ttl')
+ self._add(data, 'http')
+ self._add(data, 'timeout')
+ return data
+
+    def _add(self, data, key, attr=None):
+        try:
+            if attr is None:
+                attr = key
+            data[key] = getattr(self, attr)
+        except AttributeError:
+            pass
+
+def test_dependencies(module):
+ if not python_consul_installed:
+ module.fail_json(msg="python-consul required for this module. see http://python-consul.readthedocs.org/en/latest/#installation")
+
+def main():
+ module = AnsibleModule(
+ argument_spec=dict(
+ host=dict(default='localhost'),
+ port=dict(default=8500, type='int'),
+ scheme=dict(required=False, default='http'),
+ validate_certs=dict(required=False, default=True, type='bool'),
+ check_id=dict(required=False),
+ check_name=dict(required=False),
+ check_node=dict(required=False),
+ check_host=dict(required=False),
+ notes=dict(required=False),
+ script=dict(required=False),
+ service_id=dict(required=False),
+ service_name=dict(required=False),
+ service_address=dict(required=False, type='str', default=None),
+ service_port=dict(required=False, type='int'),
+ state=dict(default='present', choices=['present', 'absent']),
+ interval=dict(required=False, type='str'),
+ ttl=dict(required=False, type='str'),
+ http=dict(required=False, type='str'),
+ timeout=dict(required=False, type='str'),
+ tags=dict(required=False, type='list'),
+ token=dict(required=False, no_log=True)
+ ),
+ supports_check_mode=False,
+ )
+
+ test_dependencies(module)
+
+ try:
+ register_with_consul(module)
+ except ConnectionError, e:
+ module.fail_json(msg='Could not connect to consul agent at %s:%s, error was %s' % (
+ module.params.get('host'), module.params.get('port'), str(e)))
+ except Exception, e:
+ module.fail_json(msg=str(e))
+
+# import module snippets
+from ansible.module_utils.basic import *
+if __name__ == '__main__':
+ main()
diff --git a/lib/ansible/modules/extras/clustering/consul_acl.py b/lib/ansible/modules/extras/clustering/consul_acl.py
new file mode 100644
index 0000000000..a30ba8ab4b
--- /dev/null
+++ b/lib/ansible/modules/extras/clustering/consul_acl.py
@@ -0,0 +1,363 @@
+#!/usr/bin/python
+#
+# (c) 2015, Steve Gargan <steve.gargan@gmail.com>
+#
+# This file is part of Ansible
+#
+# Ansible is free software: you can redistribute it and/or modify
+# it under the terms of the GNU General Public License as published by
+# the Free Software Foundation, either version 3 of the License, or
+# (at your option) any later version.
+#
+# Ansible is distributed in the hope that it will be useful,
+# but WITHOUT ANY WARRANTY; without even the implied warranty of
+# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+# GNU General Public License for more details.
+#
+# You should have received a copy of the GNU General Public License
+# along with Ansible. If not, see <http://www.gnu.org/licenses/>.
+
+DOCUMENTATION = """
+module: consul_acl
+short_description: "manipulate consul acl keys and rules"
+description:
+ - allows the addition, modification and deletion of ACL keys and associated
+ rules in a consul cluster via the agent. For more details on using and
+ configuring ACLs, see https://www.consul.io/docs/internals/acl.html.
+requirements:
+ - "python >= 2.6"
+ - python-consul
+ - pyhcl
+ - requests
+version_added: "2.0"
+author: "Steve Gargan (@sgargan)"
+options:
+ mgmt_token:
+ description:
+ - a management token is required to manipulate the acl lists
+ state:
+ description:
+ - whether the ACL pair should be present or absent
+ required: false
+ choices: ['present', 'absent']
+ default: present
+ token_type:
+ description:
+ - the type of token that should be created, either management or
+ client
+ choices: ['client', 'management']
+ default: client
+ name:
+ description:
+ - the name that should be associated with the acl key, this is opaque
+ to Consul
+ required: false
+ token:
+ description:
+      - the token key identifying an ACL rule set. If generated by consul
+ this will be a UUID.
+ required: false
+ rules:
+ description:
+      - a list of the rules that should be associated with a given token.
+ required: false
+ host:
+ description:
+ - host of the consul agent defaults to localhost
+ required: false
+ default: localhost
+ port:
+ description:
+ - the port on which the consul agent is running
+ required: false
+ default: 8500
+ scheme:
+ description:
+ - the protocol scheme on which the consul agent is running
+ required: false
+ default: http
+ version_added: "2.1"
+ validate_certs:
+ description:
+ - whether to verify the tls certificate of the consul agent
+ required: false
+ default: True
+ version_added: "2.1"
+"""
+
+EXAMPLES = '''
+ - name: create an acl token with rules
+ consul_acl:
+ mgmt_token: 'some_management_acl'
+ host: 'consul1.mycluster.io'
+ name: 'Foo access'
+ rules:
+ - key: 'foo'
+ policy: read
+ - key: 'private/foo'
+ policy: deny
+
+  - name: create an acl with specific token with both key and service rules
+ consul_acl:
+ mgmt_token: 'some_management_acl'
+ name: 'Foo access'
+ token: 'some_client_token'
+ rules:
+ - key: 'foo'
+ policy: read
+ - service: ''
+ policy: write
+ - service: 'secret-'
+ policy: deny
+
+ - name: remove a token
+ consul_acl:
+ mgmt_token: 'some_management_acl'
+ host: 'consul1.mycluster.io'
+ token: '172bd5c8-9fe9-11e4-b1b0-3c15c2c9fd5e'
+ state: absent
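+
+  # A hedged sketch of the other rule types yml_to_rules() below understands
+  # (event and query; the patterns are made up):
+  - name: create an acl token with event and query rules
+    consul_acl:
+      mgmt_token: 'some_management_acl'
+      name: 'Deploy events'
+      rules:
+        - event: 'deploy-'
+          policy: write
+        - query: ''
+          policy: read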
+'''
+
+import sys
+
+try:
+ import consul
+ from requests.exceptions import ConnectionError
+ python_consul_installed = True
+except ImportError, e:
+ python_consul_installed = False
+
+try:
+ import hcl
+ pyhcl_installed = True
+except ImportError:
+ pyhcl_installed = False
+
+
+def execute(module):
+
+ state = module.params.get('state')
+
+ if state == 'present':
+ update_acl(module)
+ else:
+ remove_acl(module)
+
+
+def update_acl(module):
+
+ rules = module.params.get('rules')
+ state = module.params.get('state')
+ token = module.params.get('token')
+ token_type = module.params.get('token_type')
+ mgmt = module.params.get('mgmt_token')
+ name = module.params.get('name')
+ consul = get_consul_api(module, mgmt)
+ changed = False
+
+ try:
+
+ if token:
+ existing_rules = load_rules_for_token(module, consul, token)
+ supplied_rules = yml_to_rules(module, rules)
+            changed = existing_rules != supplied_rules
+            if changed:
+                token = consul.acl.update(
+                    token,
+                    name=name,
+                    type=token_type,
+                    rules=supplied_rules.to_hcl())
+ else:
+ try:
+ rules = yml_to_rules(module, rules)
+ if rules.are_rules():
+ rules = rules.to_hcl()
+ else:
+ rules = None
+
+ token = consul.acl.create(
+ name=name, type=token_type, rules=rules)
+ changed = True
+            except Exception, e:
+                module.fail_json(
+                    msg="No token returned, check your management key and that \
+                         the host is in the acl datacenter %s" % e)
+ except Exception, e:
+ module.fail_json(msg="Could not create/update acl %s" % e)
+
+ module.exit_json(changed=changed,
+ token=token,
+ rules=rules,
+ name=name,
+ type=token_type)
+
+
+def remove_acl(module):
+ state = module.params.get('state')
+ token = module.params.get('token')
+ mgmt = module.params.get('mgmt_token')
+
+ consul = get_consul_api(module, token=mgmt)
+    changed = bool(token and consul.acl.info(token))
+ if changed:
+ token = consul.acl.destroy(token)
+
+ module.exit_json(changed=changed, token=token)
+
+def load_rules_for_token(module, consul_api, token):
+ try:
+ rules = Rules()
+ info = consul_api.acl.info(token)
+ if info and info['Rules']:
+ rule_set = hcl.loads(to_ascii(info['Rules']))
+ for rule_type in rule_set:
+ for pattern, policy in rule_set[rule_type].iteritems():
+ rules.add_rule(rule_type, Rule(pattern, policy['policy']))
+ return rules
+ except Exception, e:
+ module.fail_json(
+ msg="Could not load rule list from retrieved rule data %s, %s" % (
+ token, e))
+
+
+def to_ascii(unicode_string):
+ if isinstance(unicode_string, unicode):
+ return unicode_string.encode('ascii', 'ignore')
+ return unicode_string
+
+def yml_to_rules(module, yml_rules):
+ rules = Rules()
+ if yml_rules:
+ for rule in yml_rules:
+ if ('key' in rule and 'policy' in rule):
+ rules.add_rule('key', Rule(rule['key'], rule['policy']))
+ elif ('service' in rule and 'policy' in rule):
+ rules.add_rule('service', Rule(rule['service'], rule['policy']))
+ elif ('event' in rule and 'policy' in rule):
+ rules.add_rule('event', Rule(rule['event'], rule['policy']))
+ elif ('query' in rule and 'policy' in rule):
+ rules.add_rule('query', Rule(rule['query'], rule['policy']))
+ else:
+ module.fail_json(msg="a rule requires a key/service/event or query and a policy.")
+ return rules
+
+template = '''%s "%s" {
+ policy = "%s"
+}
+'''
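+# For illustration, a Rules instance holding the key rules 'foo' -> read and
+# 'private/foo' -> deny renders via to_hcl() to:
+#
+#   key "foo" {
+#     policy = "read"
+#   }
+#   key "private/foo" {
+#     policy = "deny"
+#   }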
+
+RULE_TYPES = ['key', 'service', 'event', 'query']
+
+class Rules:
+
+ def __init__(self):
+ self.rules = {}
+ for rule_type in RULE_TYPES:
+ self.rules[rule_type] = {}
+
+ def add_rule(self, rule_type, rule):
+ self.rules[rule_type][rule.pattern] = rule
+
+ def are_rules(self):
+ return len(self) > 0
+
+ def to_hcl(self):
+
+ rules = ""
+ for rule_type in RULE_TYPES:
+ for pattern, rule in self.rules[rule_type].iteritems():
+ rules += template % (rule_type, pattern, rule.policy)
+ return to_ascii(rules)
+
+ def __len__(self):
+ count = 0
+ for rule_type in RULE_TYPES:
+ count += len(self.rules[rule_type])
+ return count
+
+ def __eq__(self, other):
+        if not isinstance(other, self.__class__):
+            return False
+        if len(other) != len(self):
+            return False
+
+ for rule_type in RULE_TYPES:
+ for name, other_rule in other.rules[rule_type].iteritems():
+ if not name in self.rules[rule_type]:
+ return False
+ rule = self.rules[rule_type][name]
+
+ if not (rule and rule == other_rule):
+ return False
+ return True
+
+ def __str__(self):
+ return self.to_hcl()
+
+class Rule:
+
+ def __init__(self, pattern, policy):
+ self.pattern = pattern
+ self.policy = policy
+
+ def __eq__(self, other):
+ return (isinstance(other, self.__class__)
+ and self.pattern == other.pattern
+ and self.policy == other.policy)
+
+ def __hash__(self):
+ return hash(self.pattern) ^ hash(self.policy)
+
+ def __str__(self):
+ return '%s %s' % (self.pattern, self.policy)
+
+def get_consul_api(module, token=None):
+ if not token:
+ token = module.params.get('token')
+ return consul.Consul(host=module.params.get('host'),
+ port=module.params.get('port'),
+ scheme=module.params.get('scheme'),
+ verify=module.params.get('validate_certs'),
+ token=token)
+
+def test_dependencies(module):
+ if not python_consul_installed:
+ module.fail_json(msg="python-consul required for this module. "\
+ "see http://python-consul.readthedocs.org/en/latest/#installation")
+
+ if not pyhcl_installed:
+ module.fail_json( msg="pyhcl required for this module."\
+ " see https://pypi.python.org/pypi/pyhcl")
+
+def main():
+ argument_spec = dict(
+ mgmt_token=dict(required=True, no_log=True),
+ host=dict(default='localhost'),
+ scheme=dict(required=False, default='http'),
+ validate_certs=dict(required=False, default=True),
+ name=dict(required=False),
+ port=dict(default=8500, type='int'),
+ rules=dict(default=None, required=False, type='list'),
+ state=dict(default='present', choices=['present', 'absent']),
+ token=dict(required=False, no_log=True),
+ token_type=dict(
+ required=False, choices=['client', 'management'], default='client')
+ )
+ module = AnsibleModule(argument_spec, supports_check_mode=False)
+
+ test_dependencies(module)
+
+ try:
+ execute(module)
+ except ConnectionError, e:
+ module.fail_json(msg='Could not connect to consul agent at %s:%s, error was %s' % (
+ module.params.get('host'), module.params.get('port'), str(e)))
+ except Exception, e:
+ module.fail_json(msg=str(e))
+
+# import module snippets
+from ansible.module_utils.basic import *
+if __name__ == '__main__':
+ main()
diff --git a/lib/ansible/modules/extras/clustering/consul_kv.py b/lib/ansible/modules/extras/clustering/consul_kv.py
new file mode 100644
index 0000000000..8163cbd986
--- /dev/null
+++ b/lib/ansible/modules/extras/clustering/consul_kv.py
@@ -0,0 +1,289 @@
+#!/usr/bin/python
+#
+# (c) 2015, Steve Gargan <steve.gargan@gmail.com>
+#
+# This file is part of Ansible
+#
+# Ansible is free software: you can redistribute it and/or modify
+# it under the terms of the GNU General Public License as published by
+# the Free Software Foundation, either version 3 of the License, or
+# (at your option) any later version.
+#
+# Ansible is distributed in the hope that it will be useful,
+# but WITHOUT ANY WARRANTY; without even the implied warranty of
+# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+# GNU General Public License for more details.
+#
+# You should have received a copy of the GNU General Public License
+# along with Ansible. If not, see <http://www.gnu.org/licenses/>.
+
+DOCUMENTATION = """
+module: consul_kv
+short_description: Manipulate entries in the key/value store of a consul cluster.
+description:
+ - Allows the addition, modification and deletion of key/value entries in a
+ consul cluster via the agent. The entire contents of the record, including
+ the indices, flags and session are returned as 'value'.
+  - If the key represents a prefix and the recurse option is set, every entry
+    under that prefix is affected. Note that when a value is removed, the
+    existing value, if any, is returned as part of the results.
+ - "See http://www.consul.io/docs/agent/http.html#kv for more details."
+requirements:
+ - "python >= 2.6"
+ - python-consul
+ - requests
+version_added: "2.0"
+author: "Steve Gargan (@sgargan)"
+options:
+ state:
+ description:
+      - the action to take with the supplied key and value. If the state is
+        'present', the key contents will be set to the value supplied and
+        'changed' will be set to true only if the value was different from the
+        current contents. The state 'absent' will remove the key/value pair;
+        again, 'changed' will be set to true only if the key actually existed
+        prior to the removal. An attempt can be made to obtain or free the
+        lock associated with a key/value pair with the states 'acquire' or
+        'release' respectively. A valid session must be supplied to make the
+        attempt; 'changed' will be true if the attempt is successful, false
+        otherwise.
+ required: false
+ choices: ['present', 'absent', 'acquire', 'release']
+ default: present
+ key:
+ description:
+ - the key at which the value should be stored.
+ required: true
+ value:
+ description:
+      - the value to be associated with the given key; required if state
+        is 'present'
+ required: true
+ recurse:
+ description:
+ - if the key represents a prefix, each entry with the prefix can be
+ retrieved by setting this to true.
+ required: false
+ default: false
+ session:
+ description:
+ - the session that should be used to acquire or release a lock
+ associated with a key/value pair
+ required: false
+ default: None
+ token:
+ description:
+      - the token key identifying an ACL rule set that controls access to
+        the key value pair
+ required: false
+ default: None
+ cas:
+ description:
+ - used when acquiring a lock with a session. If the cas is 0, then
+ Consul will only put the key if it does not already exist. If the
+ cas value is non-zero, then the key is only set if the index matches
+ the ModifyIndex of that key.
+ required: false
+ default: None
+ flags:
+ description:
+ - opaque integer value that can be passed when setting a value.
+ required: false
+ default: None
+ host:
+ description:
+      - host of the consul agent; defaults to localhost
+ required: false
+ default: localhost
+ port:
+ description:
+ - the port on which the consul agent is running
+ required: false
+ default: 8500
+ scheme:
+ description:
+ - the protocol scheme on which the consul agent is running
+ required: false
+ default: http
+ version_added: "2.1"
+ validate_certs:
+ description:
+ - whether to verify the tls certificate of the consul agent
+ required: false
+ default: True
+ version_added: "2.1"
+"""
+
+
+EXAMPLES = '''
+
+ - name: add or update the value associated with a key in the key/value store
+ consul_kv:
+ key: somekey
+ value: somevalue
+
+ - name: remove a key from the store
+ consul_kv:
+ key: somekey
+ state: absent
+
+ - name: add a node to an arbitrary group via consul inventory (see consul.ini)
+ consul_kv:
+ key: ansible/groups/dc1/somenode
+ value: 'top_secret'
+
+ - name: Register a key/value pair with an associated session
+ consul_kv:
+ key: stg/node/server_birthday
+ value: 20160509
+ session: "{{ sessionid }}"
+ state: acquire
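+
+  # Illustrative counterpart to the acquire example above: release the lock
+  # held by the same session.
+  - name: Release the lock on a key/value pair with an associated session
+    consul_kv:
+      key: stg/node/server_birthday
+      value: 20160509
+      session: "{{ sessionid }}"
+      state: release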
+'''
+
+import sys
+
+try:
+ import consul
+ from requests.exceptions import ConnectionError
+ python_consul_installed = True
+except ImportError, e:
+ python_consul_installed = False
+
+from requests.exceptions import ConnectionError
+
+def execute(module):
+
+ state = module.params.get('state')
+
+    if state == 'acquire' or state == 'release':
+        lock(module, state)
+    elif state == 'present':
+        add_value(module)
+    else:
+        remove_value(module)
+
+
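+# acquire or release the lock on a key for the supplied session, by way of
+# the acquire/release arguments to the consul KV put endpoint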
+def lock(module, state):
+
+ consul_api = get_consul_api(module)
+
+ session = module.params.get('session')
+ key = module.params.get('key')
+ value = module.params.get('value')
+
+ if not session:
+ module.fail(
+ msg='%s of lock for %s requested but no session supplied' %
+ (state, key))
+
+ index, existing = consul_api.kv.get(key)
+
+ changed = not existing or (existing and existing['Value'] != value)
+ if changed and not module.check_mode:
+ if state == 'acquire':
+ changed = consul_api.kv.put(key, value,
+ cas=module.params.get('cas'),
+ acquire=session,
+ flags=module.params.get('flags'))
+ else:
+ changed = consul_api.kv.put(key, value,
+ cas=module.params.get('cas'),
+ release=session,
+ flags=module.params.get('flags'))
+
+ module.exit_json(changed=changed,
+ index=index,
+ key=key)
+
+
+def add_value(module):
+
+ consul_api = get_consul_api(module)
+
+ key = module.params.get('key')
+ value = module.params.get('value')
+
+ index, existing = consul_api.kv.get(key)
+
+ changed = not existing or (existing and existing['Value'] != value)
+ if changed and not module.check_mode:
+ changed = consul_api.kv.put(key, value,
+ cas=module.params.get('cas'),
+ flags=module.params.get('flags'))
+
+    stored = None
+    if module.params.get('retrieve'):
+        index, stored = consul_api.kv.get(key)
+
+ module.exit_json(changed=changed,
+ index=index,
+ key=key,
+ data=stored)
+
+
+def remove_value(module):
+ ''' remove the value associated with the given key. if the recurse parameter
+ is set then any key prefixed with the given key will be removed. '''
+ consul_api = get_consul_api(module)
+
+ key = module.params.get('key')
+ value = module.params.get('value')
+
+ index, existing = consul_api.kv.get(
+ key, recurse=module.params.get('recurse'))
+
+    changed = existing is not None
+ if changed and not module.check_mode:
+ consul_api.kv.delete(key, module.params.get('recurse'))
+
+ module.exit_json(changed=changed,
+ index=index,
+ key=key,
+ data=existing)
+
+
+def get_consul_api(module):
+ return consul.Consul(host=module.params.get('host'),
+ port=module.params.get('port'),
+ scheme=module.params.get('scheme'),
+ verify=module.params.get('validate_certs'),
+ token=module.params.get('token'))
+
+def test_dependencies(module):
+ if not python_consul_installed:
+ module.fail_json(msg="python-consul required for this module. "\
+ "see http://python-consul.readthedocs.org/en/latest/#installation")
+
+def main():
+
+ argument_spec = dict(
+ cas=dict(required=False),
+ flags=dict(required=False),
+ key=dict(required=True),
+ host=dict(default='localhost'),
+ scheme=dict(required=False, default='http'),
+ validate_certs=dict(required=False, default=True),
+ port=dict(default=8500, type='int'),
+ recurse=dict(required=False, type='bool'),
+        retrieve=dict(required=False, default=True, type='bool'),
+ state=dict(default='present', choices=['present', 'absent', 'acquire', 'release']),
+ token=dict(required=False, default='anonymous', no_log=True),
+ value=dict(required=False),
+ session=dict(required=False)
+ )
+
+ module = AnsibleModule(argument_spec, supports_check_mode=False)
+
+ test_dependencies(module)
+
+ try:
+ execute(module)
+ except ConnectionError, e:
+ module.fail_json(msg='Could not connect to consul agent at %s:%s, error was %s' % (
+ module.params.get('host'), module.params.get('port'), str(e)))
+ except Exception, e:
+ module.fail_json(msg=str(e))
+
+
+# import module snippets
+from ansible.module_utils.basic import *
+if __name__ == '__main__':
+ main()
diff --git a/lib/ansible/modules/extras/clustering/consul_session.py b/lib/ansible/modules/extras/clustering/consul_session.py
new file mode 100644
index 0000000000..4d73356139
--- /dev/null
+++ b/lib/ansible/modules/extras/clustering/consul_session.py
@@ -0,0 +1,282 @@
+#!/usr/bin/python
+# -*- coding: utf-8 -*-
+#
+# (c) 2015, Steve Gargan <steve.gargan@gmail.com>
+#
+# This file is part of Ansible
+#
+# Ansible is free software: you can redistribute it and/or modify
+# it under the terms of the GNU General Public License as published by
+# the Free Software Foundation, either version 3 of the License, or
+# (at your option) any later version.
+#
+# Ansible is distributed in the hope that it will be useful,
+# but WITHOUT ANY WARRANTY; without even the implied warranty of
+# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+# GNU General Public License for more details.
+#
+# You should have received a copy of the GNU General Public License
+# along with Ansible. If not, see <http://www.gnu.org/licenses/>.
+
+DOCUMENTATION = """
+module: consul_session
+short_description: "manipulate consul sessions"
+description:
+ - allows the addition, modification and deletion of sessions in a consul
+ cluster. These sessions can then be used in conjunction with key value pairs
+    to implement distributed locks. In-depth documentation for working with
+    sessions can be found at http://www.consul.io/docs/internals/sessions.html
+requirements:
+ - "python >= 2.6"
+ - python-consul
+ - requests
+version_added: "2.0"
+author: "Steve Gargan @sgargan"
+options:
+ state:
+ description:
+      - whether the session should be present, i.e. created if it doesn't
+        exist, or absent, i.e. removed if present. If created, the ID for the
+        session is returned in the output. If absent, the name or ID is
+        required to remove the session. Info for a single session, all the
+        sessions for a node or all available sessions can be retrieved by
+        specifying info, node or list for the state; for node or info, the
+        node name or session id is required as a parameter.
+ required: false
+ choices: ['present', 'absent', 'info', 'node', 'list']
+ default: present
+ name:
+ description:
+ - the name that should be associated with the session. This is opaque
+ to Consul and not required.
+ required: false
+ default: None
+ delay:
+ description:
+      - the optional lock delay that can be attached to the session when it
+        is created. Locks for invalidated sessions are blocked from being
+        acquired until this delay has expired. Durations are in seconds.
+ default: 15
+ required: false
+ node:
+ description:
+      - the name of the node with which the session will be associated;
+        by default this is the name of the agent.
+ required: false
+ default: None
+ datacenter:
+ description:
+ - name of the datacenter in which the session exists or should be
+ created.
+ required: false
+ default: None
+ checks:
+ description:
+ - a list of checks that will be used to verify the session health. If
+ all the checks fail, the session will be invalidated and any locks
+        associated with the session will be released and can be acquired once
+ the associated lock delay has expired.
+ required: false
+ default: None
+ host:
+ description:
+      - host of the consul agent; defaults to localhost
+ required: false
+ default: localhost
+ port:
+ description:
+ - the port on which the consul agent is running
+ required: false
+ default: 8500
+ scheme:
+ description:
+ - the protocol scheme on which the consul agent is running
+ required: false
+ default: http
+ version_added: "2.1"
+ validate_certs:
+ description:
+ - whether to verify the tls certificate of the consul agent
+ required: false
+ default: True
+ version_added: "2.1"
+ behavior:
+ description:
+ - the optional behavior that can be attached to the session when it
+        is created. This can be set to either 'release' or 'delete'. This
+ controls the behavior when a session is invalidated.
+ default: release
+ required: false
+ version_added: "2.2"
+"""
+
+EXAMPLES = '''
+- name: register basic session with consul
+ consul_session:
+ name: session1
+
+- name: register a session with an existing check
+ consul_session:
+ name: session_with_check
+ checks:
+ - existing_check_name
+
+- name: register a session with lock_delay
+ consul_session:
+ name: session_with_delay
+    delay: 20
+
+- name: retrieve info about session by id
+ consul_session: id=session_id state=info
+
+- name: retrieve active sessions
+ consul_session: state=list
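+
+# Illustrative: remove a previously created session by id
+- name: remove a session
+  consul_session: id=session_id state=absent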
+'''
+
+try:
+ import consul
+ from requests.exceptions import ConnectionError
+ python_consul_installed = True
+except ImportError, e:
+ python_consul_installed = False
+
+def execute(module):
+
+ state = module.params.get('state')
+
+ if state in ['info', 'list', 'node']:
+ lookup_sessions(module)
+ elif state == 'present':
+ update_session(module)
+ else:
+ remove_session(module)
+
+def lookup_sessions(module):
+
+ datacenter = module.params.get('datacenter')
+
+ state = module.params.get('state')
+ consul_client = get_consul_api(module)
+ try:
+ if state == 'list':
+ sessions_list = consul_client.session.list(dc=datacenter)
+ #ditch the index, this can be grabbed from the results
+ if sessions_list and sessions_list[1]:
+ sessions_list = sessions_list[1]
+ module.exit_json(changed=True,
+ sessions=sessions_list)
+ elif state == 'node':
+ node = module.params.get('node')
+ if not node:
+ module.fail_json(
+ msg="node name is required to retrieve sessions for node")
+ sessions = consul_client.session.node(node, dc=datacenter)
+ module.exit_json(changed=True,
+ node=node,
+ sessions=sessions)
+ elif state == 'info':
+ session_id = module.params.get('id')
+ if not session_id:
+ module.fail_json(
+ msg="session_id is required to retrieve indvidual session info")
+
+ session_by_id = consul_client.session.info(session_id, dc=datacenter)
+ module.exit_json(changed=True,
+ session_id=session_id,
+ sessions=session_by_id)
+
+ except Exception, e:
+ module.fail_json(msg="Could not retrieve session info %s" % e)
+
+
+def update_session(module):
+
+ name = module.params.get('name')
+ delay = module.params.get('delay')
+ checks = module.params.get('checks')
+ datacenter = module.params.get('datacenter')
+ node = module.params.get('node')
+ behavior = module.params.get('behavior')
+
+ consul_client = get_consul_api(module)
+
+ try:
+ session = consul_client.session.create(
+ name=name,
+ behavior=behavior,
+ node=node,
+ lock_delay=delay,
+ dc=datacenter,
+ checks=checks
+ )
+ module.exit_json(changed=True,
+ session_id=session,
+ name=name,
+ behavior=behavior,
+ delay=delay,
+ checks=checks,
+ node=node)
+ except Exception, e:
+ module.fail_json(msg="Could not create/update session %s" % e)
+
+
+def remove_session(module):
+ session_id = module.params.get('id')
+ if not session_id:
+ module.fail_json(msg="""A session id must be supplied in order to
+ remove a session.""")
+
+ consul_client = get_consul_api(module)
+
+ try:
+ consul_client.session.destroy(session_id)
+
+ module.exit_json(changed=True,
+ session_id=session_id)
+ except Exception, e:
+ module.fail_json(msg="Could not remove session with id '%s' %s" % (
+ session_id, e))
+
+def get_consul_api(module):
+    return consul.Consul(host=module.params.get('host'),
+                         port=module.params.get('port'),
+                         scheme=module.params.get('scheme'),
+                         verify=module.params.get('validate_certs'))
+
+def test_dependencies(module):
+ if not python_consul_installed:
+ module.fail_json(msg="python-consul required for this module. "\
+ "see http://python-consul.readthedocs.org/en/latest/#installation")
+
+def main():
+ argument_spec = dict(
+ checks=dict(default=None, required=False, type='list'),
+        delay=dict(required=False, type='int', default=15),
+        behavior=dict(required=False, type='str', default='release',
+ choices=['release', 'delete']),
+ host=dict(default='localhost'),
+ port=dict(default=8500, type='int'),
+ scheme=dict(required=False, default='http'),
+ validate_certs=dict(required=False, default=True),
+ id=dict(required=False),
+ name=dict(required=False),
+ node=dict(required=False),
+ state=dict(default='present',
+ choices=['present', 'absent', 'info', 'node', 'list']),
+ datacenter=dict(required=False)
+ )
+
+ module = AnsibleModule(argument_spec, supports_check_mode=False)
+
+ test_dependencies(module)
+
+ try:
+ execute(module)
+ except ConnectionError, e:
+ module.fail_json(msg='Could not connect to consul agent at %s:%s, error was %s' % (
+ module.params.get('host'), module.params.get('port'), str(e)))
+ except Exception, e:
+ module.fail_json(msg=str(e))
+
+# import module snippets
+from ansible.module_utils.basic import *
+if __name__ == '__main__':
+ main()
diff --git a/lib/ansible/modules/extras/clustering/kubernetes.py b/lib/ansible/modules/extras/clustering/kubernetes.py
new file mode 100644
index 0000000000..18372cb62d
--- /dev/null
+++ b/lib/ansible/modules/extras/clustering/kubernetes.py
@@ -0,0 +1,399 @@
+#!/usr/bin/python
+# Copyright 2015 Google Inc. All Rights Reserved.
+#
+# This file is part of Ansible
+#
+# Ansible is free software: you can redistribute it and/or modify
+# it under the terms of the GNU General Public License as published by
+# the Free Software Foundation, either version 3 of the License, or
+# (at your option) any later version.
+#
+# Ansible is distributed in the hope that it will be useful,
+# but WITHOUT ANY WARRANTY; without even the implied warranty of
+# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+# GNU General Public License for more details.
+#
+# You should have received a copy of the GNU General Public License
+# along with Ansible. If not, see <http://www.gnu.org/licenses/>
+
+DOCUMENTATION = '''
+---
+module: kubernetes
+version_added: "2.1"
+short_description: Manage Kubernetes resources.
+description:
+  - This module can manage Kubernetes resources on an existing cluster using
+    the Kubernetes server API. Users can specify in-line API data, or
+    specify an existing Kubernetes YAML file.
+  - Currently, this module only supports HTTP Basic Auth, and only supports
+    'strategic merge' for update (http://goo.gl/fCPYxT).
+  - SSL certs are not working; use 'validate_certs=off' to disable.
+options:
+ api_endpoint:
+ description:
+ - The IPv4 API endpoint of the Kubernetes cluster.
+ required: true
+ default: null
+ aliases: ["endpoint"]
+ inline_data:
+ description:
+ - The Kubernetes YAML data to send to the API I(endpoint). This option is
+        mutually exclusive with C(file_reference).
+ required: true
+ default: null
+ file_reference:
+ description:
+      - Specify full path to a Kubernetes YAML file to send to the API I(endpoint).
+        This option is mutually exclusive with C(inline_data).
+ required: false
+ default: null
+ certificate_authority_data:
+ description:
+ - Certificate Authority data for Kubernetes server. Should be in either
+ standard PEM format or base64 encoded PEM data. Note that certificate
+ verification is broken until ansible supports a version of
+ 'match_hostname' that can match the IP address against the CA data.
+ required: false
+ default: null
+ state:
+ description:
+ - The desired action to take on the Kubernetes data.
+ required: true
+ default: "present"
+ choices: ["present", "absent", "update", "replace"]
+ url_password:
+ description:
+ - The HTTP Basic Auth password for the API I(endpoint). This should be set
+        unless using the C(insecure) option.
+ default: null
+ aliases: ["password"]
+ url_username:
+ description:
+ - The HTTP Basic Auth username for the API I(endpoint). This should be set
+        unless using the C(insecure) option.
+ default: "admin"
+ aliases: ["username"]
+ insecure:
+ description:
+ - "Reverts the connection to using HTTP instead of HTTPS. This option should
+        only be used when executing the M(kubernetes) module local to the Kubernetes
+        cluster using the insecure local port (localhost:8080 by default)."
+ validate_certs:
+ description:
+ - Enable/disable certificate validation. Note that this is set to
+ C(false) until Ansible can support IP address based certificate
+ hostname matching (exists in >= python3.5.0).
+ required: false
+ default: false
+
+author: "Eric Johnson (@erjohnso) <erjohnso@google.com>"
+'''
+
+EXAMPLES = '''
+# Create a new namespace with in-line YAML.
+- name: Create a kubernetes namespace
+ kubernetes:
+ api_endpoint: 123.45.67.89
+ url_username: admin
+ url_password: redacted
+ inline_data:
+ kind: Namespace
+ apiVersion: v1
+ metadata:
+ name: ansible-test
+ labels:
+ label_env: production
+ label_ver: latest
+ annotations:
+ a1: value1
+ a2: value2
+ state: present
+
+# Create a new namespace from a YAML file.
+- name: Create a kubernetes namespace
+ kubernetes:
+ api_endpoint: 123.45.67.89
+ url_username: admin
+ url_password: redacted
+ file_reference: /path/to/create_namespace.yaml
+ state: present
+
+# Do the same thing, but using the insecure localhost port
+- name: Create a kubernetes namespace
+ kubernetes:
+ api_endpoint: 123.45.67.89
+ insecure: true
+ file_reference: /path/to/create_namespace.yaml
+ state: present
+
+'''
+
+RETURN = '''
+# Example response from creating a Kubernetes Namespace.
+api_response:
+ description: Raw response from Kubernetes API, content varies with API.
+ returned: success
+ type: dictionary
+ contains:
+ apiVersion: "v1"
+ kind: "Namespace"
+ metadata:
+ creationTimestamp: "2016-01-04T21:16:32Z"
+ name: "test-namespace"
+ resourceVersion: "509635"
+ selfLink: "/api/v1/namespaces/test-namespace"
+ uid: "6dbd394e-b328-11e5-9a02-42010af0013a"
+ spec:
+ finalizers:
+ - kubernetes
+ status:
+ phase: "Active"
+'''
+
+import yaml
+import base64
+
+############################################################################
+############################################################################
+# For API coverage, this Ansible module provides capability to operate on
+# all Kubernetes objects that support a "create" call (except for 'Events').
+# In order to obtain a valid list of Kubernetes objects, the v1 spec file
+# was referenced and the below python script was used to parse the JSON
+# spec file, extract only the objects with a description starting with
+# 'create a'. The script then iterates over all of these base objects
+# to get the endpoint URL and was used to generate the KIND_URL map.
+#
+# import json
+# from urllib2 import urlopen
+#
+# r = urlopen("https://raw.githubusercontent.com/kubernetes"
+# "/kubernetes/master/api/swagger-spec/v1.json")
+# v1 = json.load(r)
+#
+# apis = {}
+# for a in v1['apis']:
+# p = a['path']
+# for o in a['operations']:
+# if o["summary"].startswith("create a") and o["type"] != "v1.Event":
+# apis[o["type"]] = p
+#
+# def print_kind_url_map():
+# results = []
+# for a in apis.keys():
+# results.append('"%s": "%s"' % (a[3:].lower(), apis[a]))
+# results.sort()
+# print "KIND_URL = {"
+# print ",\n".join(results)
+# print "}"
+#
+# if __name__ == '__main__':
+# print_kind_url_map()
+############################################################################
+############################################################################
+
+KIND_URL = {
+ "binding": "/api/v1/namespaces/{namespace}/bindings",
+ "endpoints": "/api/v1/namespaces/{namespace}/endpoints",
+ "limitrange": "/api/v1/namespaces/{namespace}/limitranges",
+ "namespace": "/api/v1/namespaces",
+ "node": "/api/v1/nodes",
+ "persistentvolume": "/api/v1/persistentvolumes",
+ "persistentvolumeclaim": "/api/v1/namespaces/{namespace}/persistentvolumeclaims", # NOQA
+ "pod": "/api/v1/namespaces/{namespace}/pods",
+ "podtemplate": "/api/v1/namespaces/{namespace}/podtemplates",
+ "replicationcontroller": "/api/v1/namespaces/{namespace}/replicationcontrollers", # NOQA
+ "resourcequota": "/api/v1/namespaces/{namespace}/resourcequotas",
+ "secret": "/api/v1/namespaces/{namespace}/secrets",
+ "service": "/api/v1/namespaces/{namespace}/services",
+ "serviceaccount": "/api/v1/namespaces/{namespace}/serviceaccounts"
+}
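+# For illustration: given api_endpoint 1.2.3.4 and a Pod manifest whose
+# metadata places it in namespace 'ansible-test', main() below builds
+#   https://1.2.3.4/api/v1/namespaces/ansible-test/pods
+# (or http:// when 'insecure' is set) before issuing the request.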
+USER_AGENT = "ansible-k8s-module/0.0.1"
+
+
+# TODO(erjohnso): SSL Certificate validation is currently unsupported.
+# It can be made to work when the following are true:
+# - Ansible consistently uses a "match_hostname" that supports IP Address
+# matching. This is now true in >= python3.5.0. Currently, this feature
+# is not yet available in backports.ssl_match_hostname (still 3.4).
+# - Ansible allows passing in the self-signed CA cert that is created with
+# a kubernetes master. The lib/ansible/module_utils/urls.py method,
+# SSLValidationHandler.get_ca_certs() needs a way for the Kubernetes
+# CA cert to be passed in and included in the generated bundle file.
+# When this is fixed, the following changes can be made to this module,
+# - Remove the 'return' statement in line 254 below
+# - Set 'required=true' for certificate_authority_data and ensure that
+# ansible's SSLValidationHandler.get_ca_certs() can pick up this CA cert
+# - Set 'required=true' for the validate_certs param.
+
+def decode_cert_data(module):
+ return
+ d = module.params.get("certificate_authority_data")
+ if d and not d.startswith("-----BEGIN"):
+ module.params["certificate_authority_data"] = base64.b64decode(d)
+
+
+def api_request(module, url, method="GET", headers=None, data=None):
+ body = None
+ if data:
+ data = json.dumps(data)
+ response, info = fetch_url(module, url, method=method, headers=headers, data=data)
+ if int(info['status']) == -1:
+ module.fail_json(msg="Failed to execute the API request: %s" % info['msg'], url=url, method=method, headers=headers)
+ if response is not None:
+ body = json.loads(response.read())
+ return info, body
+
+
+def k8s_create_resource(module, url, data):
+ info, body = api_request(module, url, method="POST", data=data, headers={"Content-Type": "application/json"})
+ if info['status'] == 409:
+ name = data["metadata"].get("name", None)
+ info, body = api_request(module, url + "/" + name)
+ return False, body
+ elif info['status'] >= 400:
+ module.fail_json(msg="failed to create the resource: %s" % info['msg'], url=url)
+ return True, body
+
+
+def k8s_delete_resource(module, url, data):
+ name = data.get('metadata', {}).get('name')
+ if name is None:
+ module.fail_json(msg="Missing a named resource in object metadata when trying to remove a resource")
+
+ url = url + '/' + name
+ info, body = api_request(module, url, method="DELETE")
+ if info['status'] == 404:
+ return False, "Resource name '%s' already absent" % name
+ elif info['status'] >= 400:
+ module.fail_json(msg="failed to delete the resource '%s': %s" % (name, info['msg']), url=url)
+ return True, "Successfully deleted resource name '%s'" % name
+
+
+def k8s_replace_resource(module, url, data):
+ name = data.get('metadata', {}).get('name')
+ if name is None:
+ module.fail_json(msg="Missing a named resource in object metadata when trying to replace a resource")
+
+ headers = {"Content-Type": "application/json"}
+ url = url + '/' + name
+ info, body = api_request(module, url, method="PUT", data=data, headers=headers)
+ if info['status'] == 409:
+ name = data["metadata"].get("name", None)
+ info, body = api_request(module, url + "/" + name)
+ return False, body
+ elif info['status'] >= 400:
+ module.fail_json(msg="failed to replace the resource '%s': %s" % (name, info['msg']), url=url)
+ return True, body
+
+
+def k8s_update_resource(module, url, data):
+ name = data.get('metadata', {}).get('name')
+ if name is None:
+ module.fail_json(msg="Missing a named resource in object metadata when trying to update a resource")
+
+ headers = {"Content-Type": "application/strategic-merge-patch+json"}
+ url = url + '/' + name
+ info, body = api_request(module, url, method="PATCH", data=data, headers=headers)
+ if info['status'] == 409:
+ name = data["metadata"].get("name", None)
+ info, body = api_request(module, url + "/" + name)
+ return False, body
+ elif info['status'] >= 400:
+ module.fail_json(msg="failed to update the resource '%s': %s" % (name, info['msg']), url=url)
+ return True, body
+
+
+def main():
+ module = AnsibleModule(
+ argument_spec=dict(
+ http_agent=dict(default=USER_AGENT),
+
+ url_username=dict(default="admin", aliases=["username"]),
+ url_password=dict(default="", no_log=True, aliases=["password"]),
+ force_basic_auth=dict(default="yes"),
+ validate_certs=dict(default=False, type='bool'),
+ certificate_authority_data=dict(required=False),
+ insecure=dict(default=False, type='bool'),
+ api_endpoint=dict(required=True),
+ file_reference=dict(required=False),
+ inline_data=dict(required=False),
+ state=dict(default="present", choices=["present", "absent", "update", "replace"])
+ ),
+ mutually_exclusive = (('file_reference', 'inline_data'),
+ ('url_username', 'insecure'),
+ ('url_password', 'insecure')),
+ required_one_of = (('file_reference', 'inline_data'),),
+ )
+
+ decode_cert_data(module)
+
+ api_endpoint = module.params.get('api_endpoint')
+ state = module.params.get('state')
+ insecure = module.params.get('insecure')
+ inline_data = module.params.get('inline_data')
+ file_reference = module.params.get('file_reference')
+
+ if inline_data:
+ if not isinstance(inline_data, dict) and not isinstance(inline_data, list):
+ data = yaml.load(inline_data)
+ else:
+ data = inline_data
+ else:
+ try:
+ f = open(file_reference, "r")
+ data = [x for x in yaml.load_all(f)]
+ f.close()
+ if not data:
+ module.fail_json(msg="No valid data could be found.")
+ except:
+ module.fail_json(msg="The file '%s' was not found or contained invalid YAML/JSON data" % file_reference)
+
+ # set the transport type and build the target endpoint url
+ transport = 'https'
+ if insecure:
+ transport = 'http'
+
+ target_endpoint = "%s://%s" % (transport, api_endpoint)
+
+ body = []
+ changed = False
+
+ # make sure the data is a list
+ if not isinstance(data, list):
+ data = [ data ]
+
+ for item in data:
+ namespace = "default"
+ if item and 'metadata' in item:
+ namespace = item.get('metadata', {}).get('namespace', "default")
+ kind = item.get('kind', '').lower()
+ try:
+ url = target_endpoint + KIND_URL[kind]
+ except KeyError:
+ module.fail_json(msg="invalid resource kind specified in the data: '%s'" % kind)
+ url = url.replace("{namespace}", namespace)
+ else:
+ url = target_endpoint
+
+ if state == 'present':
+ item_changed, item_body = k8s_create_resource(module, url, item)
+ elif state == 'absent':
+ item_changed, item_body = k8s_delete_resource(module, url, item)
+ elif state == 'replace':
+ item_changed, item_body = k8s_replace_resource(module, url, item)
+ elif state == 'update':
+ item_changed, item_body = k8s_update_resource(module, url, item)
+
+ changed |= item_changed
+ body.append(item_body)
+
+ module.exit_json(changed=changed, api_response=body)
+
+
+# import module snippets
+from ansible.module_utils.basic import * # NOQA
+from ansible.module_utils.urls import * # NOQA
+
+
+if __name__ == '__main__':
+ main()
diff --git a/lib/ansible/modules/extras/clustering/znode.py b/lib/ansible/modules/extras/clustering/znode.py
new file mode 100644
index 0000000000..aff1cd1d22
--- /dev/null
+++ b/lib/ansible/modules/extras/clustering/znode.py
@@ -0,0 +1,234 @@
+#!/usr/bin/python
+# Copyright 2015 WP Engine, Inc. All rights reserved.
+#
+# This file is part of Ansible
+#
+# Ansible is free software: you can redistribute it and/or modify
+# it under the terms of the GNU General Public License as published by
+# the Free Software Foundation, either version 3 of the License, or
+# (at your option) any later version.
+#
+# Ansible is distributed in the hope that it will be useful,
+# but WITHOUT ANY WARRANTY; without even the implied warranty of
+# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+# GNU General Public License for more details.
+#
+# You should have received a copy of the GNU General Public License
+# along with Ansible. If not, see <http://www.gnu.org/licenses/>.
+
+DOCUMENTATION = """
+---
+module: znode
+version_added: "2.0"
+short_description: Create, delete, retrieve, and update znodes using ZooKeeper.
+options:
+ hosts:
+ description:
+ - A list of ZooKeeper servers (format '[server]:[port]').
+ required: true
+ name:
+ description:
+ - The path of the znode.
+ required: true
+ value:
+ description:
+ - The value assigned to the znode.
+ default: None
+ required: false
+ op:
+ description:
+ - An operation to perform. Mutually exclusive with state.
+ default: None
+ required: false
+ state:
+ description:
+ - The state to enforce. Mutually exclusive with op.
+ default: None
+ required: false
+ timeout:
+ description:
+ - The amount of time to wait for a node to appear.
+ default: 300
+ required: false
+ recursive:
+ description:
+ - Recursively delete node and all its children.
+ default: False
+ required: false
+ version_added: "2.1"
+requirements:
+ - kazoo >= 2.1
+ - python >= 2.6
+author: "Trey Perry (@treyperry)"
+"""
+
+EXAMPLES = """
+# Creating or updating a znode with a given value
+- action: znode hosts=localhost:2181 name=/mypath value=myvalue state=present
+
+# Getting the value and stat structure for a znode
+- action: znode hosts=localhost:2181 name=/mypath op=get
+
+# Listing a particular znode's children
+- action: znode hosts=localhost:2181 name=/zookeeper op=list
+
+# Waiting 20 seconds for a znode to appear at path /mypath
+- action: znode hosts=localhost:2181 name=/mypath op=wait timeout=20
+
+# Deleting a znode at path /mypath
+- action: znode hosts=localhost:2181 name=/mypath state=absent
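+
+# Deleting a znode and all of its children (recursive was added in 2.1)
+- action: znode hosts=localhost:2181 name=/mypath state=absent recursive=true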
+"""
+
+try:
+ from kazoo.client import KazooClient
+ from kazoo.exceptions import NoNodeError, ZookeeperError
+ from kazoo.handlers.threading import KazooTimeoutError
+ KAZOO_INSTALLED = True
+except ImportError:
+ KAZOO_INSTALLED = False
+
+
+def main():
+ module = AnsibleModule(
+ argument_spec=dict(
+ hosts=dict(required=True, type='str'),
+ name=dict(required=True, type='str'),
+ value=dict(required=False, default=None, type='str'),
+ op=dict(required=False, default=None, choices=['get', 'wait', 'list']),
+ state=dict(choices=['present', 'absent']),
+ timeout=dict(required=False, default=300, type='int'),
+ recursive=dict(required=False, default=False, type='bool')
+ ),
+ supports_check_mode=False
+ )
+
+ if not KAZOO_INSTALLED:
+ module.fail_json(msg='kazoo >= 2.1 is required to use this module. Use pip to install it.')
+
+ check = check_params(module.params)
+ if not check['success']:
+ module.fail_json(msg=check['msg'])
+
+ zoo = KazooCommandProxy(module)
+ try:
+ zoo.start()
+ except KazooTimeoutError:
+ module.fail_json(msg='The connection to the ZooKeeper ensemble timed out.')
+
+ command_dict = {
+ 'op': {
+ 'get': zoo.get,
+ 'list': zoo.list,
+ 'wait': zoo.wait
+ },
+ 'state': {
+ 'present': zoo.present,
+ 'absent': zoo.absent
+ }
+ }
+
+ command_type = 'op' if 'op' in module.params and module.params['op'] is not None else 'state'
+ method = module.params[command_type]
+ result, result_dict = command_dict[command_type][method]()
+ zoo.shutdown()
+
+ if result:
+ module.exit_json(**result_dict)
+ else:
+ module.fail_json(**result_dict)
+
+
+def check_params(params):
+ if not params['state'] and not params['op']:
+ return {'success': False, 'msg': 'Please define an operation (op) or a state.'}
+
+ if params['state'] and params['op']:
+ return {'success': False, 'msg': 'Please choose an operation (op) or a state, but not both.'}
+
+ return {'success': True}
+
+
+class KazooCommandProxy():
+ def __init__(self, module):
+ self.module = module
+ self.zk = KazooClient(module.params['hosts'])
+
+ def absent(self):
+ return self._absent(self.module.params['name'])
+
+ def exists(self, znode):
+ return self.zk.exists(znode)
+
+ def list(self):
+ children = self.zk.get_children(self.module.params['name'])
+ return True, {'count': len(children), 'items': children, 'msg': 'Retrieved znodes in path.',
+ 'znode': self.module.params['name']}
+
+ def present(self):
+ return self._present(self.module.params['name'], self.module.params['value'])
+
+ def get(self):
+ return self._get(self.module.params['name'])
+
+ def shutdown(self):
+ self.zk.stop()
+ self.zk.close()
+
+ def start(self):
+ self.zk.start()
+
+ def wait(self):
+ return self._wait(self.module.params['name'], self.module.params['timeout'])
+
+ def _absent(self, znode):
+ if self.exists(znode):
+ self.zk.delete(znode, recursive=self.module.params['recursive'])
+ return True, {'changed': True, 'msg': 'The znode was deleted.'}
+ else:
+ return True, {'changed': False, 'msg': 'The znode does not exist.'}
+
+ def _get(self, path):
+ if self.exists(path):
+ value, zstat = self.zk.get(path)
+ stat_dict = {}
+ for i in dir(zstat):
+ if not i.startswith('_'):
+ attr = getattr(zstat, i)
+ if type(attr) in (int, str):
+ stat_dict[i] = attr
+ result = True, {'msg': 'The node was retrieved.', 'znode': path, 'value': value,
+ 'stat': stat_dict}
+ else:
+ result = False, {'msg': 'The requested node does not exist.'}
+
+ return result
+
+ def _present(self, path, value):
+ if self.exists(path):
+ (current_value, zstat) = self.zk.get(path)
+ if value != current_value:
+ self.zk.set(path, value)
+ return True, {'changed': True, 'msg': 'Updated the znode value.', 'znode': path,
+ 'value': value}
+ else:
+ return True, {'changed': False, 'msg': 'No changes were necessary.', 'znode': path, 'value': value}
+ else:
+ self.zk.create(path, value, makepath=True)
+ return True, {'changed': True, 'msg': 'Created a new znode.', 'znode': path, 'value': value}
+
+ def _wait(self, path, timeout, interval=5):
+ lim = time.time() + timeout
+
+ while time.time() < lim:
+ if self.exists(path):
+ return True, {'msg': 'The node appeared before the configured timeout.',
+ 'znode': path, 'timeout': timeout}
+ else:
+ time.sleep(interval)
+
+ return False, {'msg': 'The node did not appear before the operation timed out.', 'timeout': timeout,
+ 'znode': path}
+
+from ansible.module_utils.basic import *
+
+main()
diff --git a/lib/ansible/modules/extras/commands/__init__.py b/lib/ansible/modules/extras/commands/__init__.py
new file mode 100644
index 0000000000..e69de29bb2
--- /dev/null
+++ b/lib/ansible/modules/extras/commands/__init__.py
diff --git a/lib/ansible/modules/extras/commands/expect.py b/lib/ansible/modules/extras/commands/expect.py
new file mode 100644
index 0000000000..355f2cff48
--- /dev/null
+++ b/lib/ansible/modules/extras/commands/expect.py
@@ -0,0 +1,234 @@
+#!/usr/bin/python
+# -*- coding: utf-8 -*-
+
+# (c) 2015, Matt Martz <matt@sivel.net>
+#
+# This file is part of Ansible
+#
+# Ansible is free software: you can redistribute it and/or modify
+# it under the terms of the GNU General Public License as published by
+# the Free Software Foundation, either version 3 of the License, or
+# (at your option) any later version.
+#
+# Ansible is distributed in the hope that it will be useful,
+# but WITHOUT ANY WARRANTY; without even the implied warranty of
+# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+# GNU General Public License for more details.
+#
+# You should have received a copy of the GNU General Public License
+# along with Ansible. If not, see <http://www.gnu.org/licenses/>.
+
+import datetime
+
+try:
+ import pexpect
+ HAS_PEXPECT = True
+except ImportError:
+ HAS_PEXPECT = False
+
+
+DOCUMENTATION = '''
+---
+module: expect
+version_added: 2.0
+short_description: Executes a command and responds to prompts
+description:
+ - The M(expect) module executes a command and responds to prompts
+ - The given command will be executed on all selected nodes. It will not be
+ processed through the shell, so variables like C($HOME) and operations
+ like C("<"), C(">"), C("|"), and C("&") will not work
+options:
+ command:
+ description:
+ - the command module takes command to run.
+ required: true
+ creates:
+ description:
+      - a filename; when it already exists, this step will B(not) be run.
+ required: false
+ removes:
+ description:
+      - a filename; when it does not exist, this step will B(not) be run.
+ required: false
+ chdir:
+ description:
+ - cd into this directory before running the command
+ required: false
+ responses:
+ description:
+ - Mapping of expected string/regex and string to respond with. If the
+ response is a list, successive matches return successive
+ responses. List functionality is new in 2.1.
+ required: true
+ timeout:
+ description:
+ - Amount of time in seconds to wait for the expected strings
+ default: 30
+ echo:
+ description:
+ - Whether or not to echo out your response strings
+ default: false
+requirements:
+ - python >= 2.6
+ - pexpect >= 3.3
+notes:
+ - If you want to run a command through the shell (say you are using C(<),
+ C(>), C(|), etc), you must specify a shell in the command such as
+ C(/bin/bash -c "/path/to/something | grep else")
+ - The question, or key, under I(responses) is a python regex match. Case
+ insensitive searches are indicated with a prefix of C(?i)
+  - By default, if a question is encountered multiple times, its string
+    response will be repeated. If you need different responses for successive
+    question matches, instead of a string response, use a list of strings as
+    the response. The list functionality is new in 2.1.
+author: "Matt Martz (@sivel)"
+'''
+
+EXAMPLES = '''
+# Case insensitive password string match
+- expect:
+ command: passwd username
+ responses:
+ (?i)password: "MySekretPa$$word"
+
+# Generic question with multiple different responses
+- expect:
+ command: /path/to/custom/command
+ responses:
+ Question:
+ - response1
+ - response2
+ - response3
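+
+# Running a command through a shell, as described in the notes above
+# (command and prompt are illustrative)
+- expect:
+    command: /bin/bash -c "/path/to/custom/command | grep else"
+    responses:
+      Question: response1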
+'''
+
+
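+# build a callback that replays the configured responses one at a time,
+# failing the module if a question matches more times than responses remain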
+def response_closure(module, question, responses):
+ resp_gen = (u'%s\n' % r.rstrip('\n').decode() for r in responses)
+
+ def wrapped(info):
+ try:
+ return resp_gen.next()
+ except StopIteration:
+ module.fail_json(msg="No remaining responses for '%s', "
+ "output was '%s'" %
+ (question,
+ info['child_result_list'][-1]))
+
+ return wrapped
+
+
+def main():
+ module = AnsibleModule(
+ argument_spec=dict(
+ command=dict(required=True),
+ chdir=dict(),
+ creates=dict(),
+ removes=dict(),
+ responses=dict(type='dict', required=True),
+ timeout=dict(type='int', default=30),
+ echo=dict(type='bool', default=False),
+ )
+ )
+
+ if not HAS_PEXPECT:
+ module.fail_json(msg='The pexpect python module is required')
+
+ chdir = module.params['chdir']
+ args = module.params['command']
+ creates = module.params['creates']
+ removes = module.params['removes']
+ responses = module.params['responses']
+ timeout = module.params['timeout']
+ echo = module.params['echo']
+
+ events = dict()
+ for key, value in responses.iteritems():
+ if isinstance(value, list):
+ response = response_closure(module, key, value)
+ else:
+ response = u'%s\n' % value.rstrip('\n').decode()
+
+ events[key.decode()] = response
+
+ if args.strip() == '':
+ module.fail_json(rc=256, msg="no command given")
+
+ if chdir:
+ chdir = os.path.abspath(os.path.expanduser(chdir))
+ os.chdir(chdir)
+
+ if creates:
+ # do not run the command if the line contains creates=filename
+ # and the filename already exists. This allows idempotence
+ # of command executions.
+ v = os.path.expanduser(creates)
+ if os.path.exists(v):
+ module.exit_json(
+ cmd=args,
+ stdout="skipped, since %s exists" % v,
+ changed=False,
+ rc=0
+ )
+
+ if removes:
+ # do not run the command if the line contains removes=filename
+ # and the filename does not exist. This allows idempotence
+ # of command executions.
+ v = os.path.expanduser(removes)
+ if not os.path.exists(v):
+ module.exit_json(
+ cmd=args,
+ stdout="skipped, since %s does not exist" % v,
+ changed=False,
+ rc=0
+ )
+
+ startd = datetime.datetime.now()
+
+ try:
+ try:
+ # Prefer pexpect.run from pexpect>=4
+ out, rc = pexpect.run(args, timeout=timeout, withexitstatus=True,
+ events=events, cwd=chdir, echo=echo,
+ encoding='utf-8')
+ except TypeError:
+ # Use pexpect.runu in pexpect>=3.3,<4
+ out, rc = pexpect.runu(args, timeout=timeout, withexitstatus=True,
+ events=events, cwd=chdir, echo=echo)
+ except (TypeError, AttributeError), e:
+ # This should catch all insufficient versions of pexpect
+ # We deem them insufficient for their lack of ability to specify
+ # to not echo responses via the run/runu functions, which would
+        # potentially leak sensitive information
+ module.fail_json(msg='Insufficient version of pexpect installed '
+ '(%s), this module requires pexpect>=3.3. '
+ 'Error was %s' % (pexpect.__version__, e))
+ except pexpect.ExceptionPexpect, e:
+ module.fail_json(msg='%s' % e)
+
+ endd = datetime.datetime.now()
+ delta = endd - startd
+
+ if out is None:
+ out = ''
+
+ ret = dict(
+ cmd=args,
+ stdout=out.rstrip('\r\n'),
+ rc=rc,
+ start=str(startd),
+ end=str(endd),
+ delta=str(delta),
+ changed=True,
+ )
+
+ if rc is not None:
+ module.exit_json(**ret)
+ else:
+ ret['msg'] = 'command exceeded timeout'
+ module.fail_json(**ret)
+
+# import module snippets
+from ansible.module_utils.basic import *
+
+main()
diff --git a/lib/ansible/modules/extras/database/__init__.py b/lib/ansible/modules/extras/database/__init__.py
new file mode 100644
index 0000000000..e69de29bb2
--- /dev/null
+++ b/lib/ansible/modules/extras/database/__init__.py
diff --git a/lib/ansible/modules/extras/database/influxdb/__init__.py b/lib/ansible/modules/extras/database/influxdb/__init__.py
new file mode 100644
index 0000000000..e69de29bb2
--- /dev/null
+++ b/lib/ansible/modules/extras/database/influxdb/__init__.py
diff --git a/lib/ansible/modules/extras/database/influxdb/influxdb_database.py b/lib/ansible/modules/extras/database/influxdb/influxdb_database.py
new file mode 100644
index 0000000000..7cedc44d4d
--- /dev/null
+++ b/lib/ansible/modules/extras/database/influxdb/influxdb_database.py
@@ -0,0 +1,194 @@
+#!/usr/bin/python
+
+# (c) 2016, Kamil Szczygiel <kamil.szczygiel () intel.com>
+#
+# This file is part of Ansible
+#
+# Ansible is free software: you can redistribute it and/or modify
+# it under the terms of the GNU General Public License as published by
+# the Free Software Foundation, either version 3 of the License, or
+# (at your option) any later version.
+#
+# Ansible is distributed in the hope that it will be useful,
+# but WITHOUT ANY WARRANTY; without even the implied warranty of
+# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+# GNU General Public License for more details.
+#
+# You should have received a copy of the GNU General Public License
+# along with Ansible. If not, see <http://www.gnu.org/licenses/>.
+
+DOCUMENTATION = '''
+---
+module: influxdb_database
+short_description: Manage InfluxDB databases
+description:
+ - Manage InfluxDB databases
+version_added: 2.1
+author: "Kamil Szczygiel (@kamsz)"
+requirements:
+ - "python >= 2.6"
+ - "influxdb >= 0.9"
+options:
+ hostname:
+ description:
+ - The hostname or IP address on which InfluxDB server is listening
+ required: true
+ username:
+ description:
+ - Username that will be used to authenticate against InfluxDB server
+ default: root
+ required: false
+ password:
+ description:
+ - Password that will be used to authenticate against InfluxDB server
+ default: root
+ required: false
+ port:
+ description:
+ - The port on which InfluxDB server is listening
+ default: 8086
+ required: false
+ database_name:
+ description:
+ - Name of the database that will be created/destroyed
+ required: true
+ state:
+ description:
+ - Determines if the database should be created or destroyed
+ choices: ['present', 'absent']
+ default: present
+ required: false
+'''
+
+EXAMPLES = '''
+# Example influxdb_database command from Ansible Playbooks
+- name: Create database
+ influxdb_database:
+ hostname: "{{influxdb_ip_address}}"
+ database_name: "{{influxdb_database_name}}"
+ state: present
+
+- name: Destroy database
+ influxdb_database:
+ hostname: "{{influxdb_ip_address}}"
+ database_name: "{{influxdb_database_name}}"
+ state: absent
+
+- name: Create database using custom credentials
+ influxdb_database:
+ hostname: "{{influxdb_ip_address}}"
+ username: "{{influxdb_username}}"
+ password: "{{influxdb_password}}"
+ database_name: "{{influxdb_database_name}}"
+ state: present
+'''
+
+RETURN = '''
+#only defaults
+'''
+
+try:
+ import requests.exceptions
+ from influxdb import InfluxDBClient
+ from influxdb import exceptions
+ HAS_INFLUXDB = True
+except ImportError:
+ HAS_INFLUXDB = False
+
+
+def influxdb_argument_spec():
+ return dict(
+ hostname=dict(required=True, type='str'),
+ port=dict(default=8086, type='int'),
+ username=dict(default='root', type='str'),
+ password=dict(default='root', type='str', no_log=True),
+ database_name=dict(required=True, type='str')
+ )
+
+
+def connect_to_influxdb(module):
+ hostname = module.params['hostname']
+ port = module.params['port']
+ username = module.params['username']
+ password = module.params['password']
+ database_name = module.params['database_name']
+
+ client = InfluxDBClient(
+ host=hostname,
+ port=port,
+ username=username,
+ password=password,
+ database=database_name
+ )
+ return client
+
+
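+# find_database returns the matching entry from client.get_list_database(),
+# e.g. {'name': 'mydb'}, or None when the database does not exist.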
+def find_database(module, client, database_name):
+ database = None
+
+ try:
+ databases = client.get_list_database()
+ for db in databases:
+ if db['name'] == database_name:
+ database = db
+ break
+ except requests.exceptions.ConnectionError as e:
+ module.fail_json(msg=str(e))
+ return database
+
+
+def create_database(module, client, database_name):
+ if not module.check_mode:
+ try:
+ client.create_database(database_name)
+ except requests.exceptions.ConnectionError as e:
+ module.fail_json(msg=str(e))
+
+ module.exit_json(changed=True)
+
+
+def drop_database(module, client, database_name):
+ if not module.check_mode:
+ try:
+ client.drop_database(database_name)
+ except exceptions.InfluxDBClientError as e:
+ module.fail_json(msg=e.content)
+
+ module.exit_json(changed=True)
+
+
+def main():
+ argument_spec = influxdb_argument_spec()
+ argument_spec.update(
+ state=dict(default='present', type='str', choices=['present', 'absent'])
+ )
+ module = AnsibleModule(
+ argument_spec=argument_spec,
+ supports_check_mode=True
+ )
+
+ if not HAS_INFLUXDB:
+ module.fail_json(msg='influxdb python package is required for this module')
+
+ state = module.params['state']
+ database_name = module.params['database_name']
+
+ client = connect_to_influxdb(module)
+ database = find_database(module, client, database_name)
+
+ if state == 'present':
+ if database:
+ module.exit_json(changed=False)
+ else:
+ create_database(module, client, database_name)
+
+ if state == 'absent':
+ if database:
+ drop_database(module, client, database_name)
+ else:
+ module.exit_json(changed=False)
+
+from ansible.module_utils.basic import *
+
+if __name__ == '__main__':
+ main()
diff --git a/lib/ansible/modules/extras/database/influxdb/influxdb_retention_policy.py b/lib/ansible/modules/extras/database/influxdb/influxdb_retention_policy.py
new file mode 100644
index 0000000000..ec4c32da21
--- /dev/null
+++ b/lib/ansible/modules/extras/database/influxdb/influxdb_retention_policy.py
@@ -0,0 +1,237 @@
+#!/usr/bin/python
+
+# (c) 2016, Kamil Szczygiel <kamil.szczygiel () intel.com>
+#
+# This file is part of Ansible
+#
+# Ansible is free software: you can redistribute it and/or modify
+# it under the terms of the GNU General Public License as published by
+# the Free Software Foundation, either version 3 of the License, or
+# (at your option) any later version.
+#
+# Ansible is distributed in the hope that it will be useful,
+# but WITHOUT ANY WARRANTY; without even the implied warranty of
+# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+# GNU General Public License for more details.
+#
+# You should have received a copy of the GNU General Public License
+# along with Ansible. If not, see <http://www.gnu.org/licenses/>.
+
+DOCUMENTATION = '''
+---
+module: influxdb_retention_policy
+short_description: Manage InfluxDB retention policies
+description:
+ - Manage InfluxDB retention policies
+version_added: 2.1
+author: "Kamil Szczygiel (@kamsz)"
+requirements:
+ - "python >= 2.6"
+ - "influxdb >= 0.9"
+options:
+ hostname:
+ description:
+ - The hostname or IP address on which InfluxDB server is listening
+ required: true
+ username:
+ description:
+ - Username that will be used to authenticate against InfluxDB server
+ default: root
+ required: false
+ password:
+ description:
+ - Password that will be used to authenticate against InfluxDB server
+ default: root
+ required: false
+ port:
+ description:
+ - The port on which InfluxDB server is listening
+ default: 8086
+ required: false
+ database_name:
+ description:
+ - Name of the database where retention policy will be created
+ required: true
+ policy_name:
+ description:
+ - Name of the retention policy
+ required: true
+ duration:
+ description:
+ - Determines how long InfluxDB should keep the data
+ required: true
+ replication:
+ description:
+ - Determines how many independent copies of each point are stored in the cluster
+ required: true
+  default:
+    description:
+      - Sets the retention policy as the default retention policy
+    required: false
+    default: false
+'''
+
+EXAMPLES = '''
+# Example influxdb_retention_policy command from Ansible Playbooks
+- name: create 1 hour retention policy
+ influxdb_retention_policy:
+ hostname: "{{influxdb_ip_address}}"
+ database_name: "{{influxdb_database_name}}"
+ policy_name: test
+ duration: 1h
+ replication: 1
+
+- name: create 1 day retention policy
+ influxdb_retention_policy:
+ hostname: "{{influxdb_ip_address}}"
+ database_name: "{{influxdb_database_name}}"
+ policy_name: test
+ duration: 1d
+ replication: 1
+
+- name: create 1 week retention policy
+ influxdb_retention_policy:
+ hostname: "{{influxdb_ip_address}}"
+ database_name: "{{influxdb_database_name}}"
+ policy_name: test
+ duration: 1w
+ replication: 1
+
+- name: create infinite retention policy
+ influxdb_retention_policy:
+ hostname: "{{influxdb_ip_address}}"
+ database_name: "{{influxdb_database_name}}"
+ policy_name: test
+ duration: INF
+ replication: 1
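+
+# Illustrative only (same hypothetical variables as above): also mark the
+# policy as the database's default retention policy via the default parameter
+- name: create 1 day retention policy as the default policy
+  influxdb_retention_policy:
+    hostname: "{{influxdb_ip_address}}"
+    database_name: "{{influxdb_database_name}}"
+    policy_name: test
+    duration: 1d
+    replication: 1
+    default: yes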
+'''
+
+RETURN = '''
+#only defaults
+'''
+
+import re
+try:
+ import requests.exceptions
+ from influxdb import InfluxDBClient
+ from influxdb import exceptions
+ HAS_INFLUXDB = True
+except ImportError:
+ HAS_INFLUXDB = False
+
+
+def influxdb_argument_spec():
+ return dict(
+ hostname=dict(required=True, type='str'),
+ port=dict(default=8086, type='int'),
+ username=dict(default='root', type='str'),
+ password=dict(default='root', type='str', no_log=True),
+ database_name=dict(required=True, type='str')
+ )
+
+
+def connect_to_influxdb(module):
+ hostname = module.params['hostname']
+ port = module.params['port']
+ username = module.params['username']
+ password = module.params['password']
+ database_name = module.params['database_name']
+
+ client = InfluxDBClient(
+ host=hostname,
+ port=port,
+ username=username,
+ password=password,
+ database=database_name
+ )
+ return client
+
+
+def find_retention_policy(module, client):
+ database_name = module.params['database_name']
+ policy_name = module.params['policy_name']
+ retention_policy = None
+
+ try:
+ retention_policies = client.get_list_retention_policies(database=database_name)
+ for policy in retention_policies:
+ if policy['name'] == policy_name:
+ retention_policy = policy
+ break
+ except requests.exceptions.ConnectionError as e:
+ module.fail_json(msg=str(e))
+ return retention_policy
+
+
+def create_retention_policy(module, client):
+ database_name = module.params['database_name']
+ policy_name = module.params['policy_name']
+ duration = module.params['duration']
+ replication = module.params['replication']
+ default = module.params['default']
+
+ if not module.check_mode:
+ try:
+ client.create_retention_policy(policy_name, duration, replication, database_name, default)
+ except exceptions.InfluxDBClientError as e:
+ module.fail_json(msg=e.content)
+ module.exit_json(changed=True)
+
+
+def alter_retention_policy(module, client, retention_policy):
+ database_name = module.params['database_name']
+ policy_name = module.params['policy_name']
+ duration = module.params['duration']
+ replication = module.params['replication']
+ default = module.params['default']
+    duration_regexp = re.compile(r'(\d+)([hdw])|(^INF$)')
+    changed = False
+
+    duration_lookup = duration_regexp.search(duration)
+    if duration_lookup is None:
+        module.fail_json(msg="duration must be a number followed by h, d or w, or INF")
+
+    # Normalize the requested duration to the "XhXmXs" form InfluxDB reports,
+    # so it can be compared with the existing policy's duration.
+    if duration_lookup.group(2) == 'h':
+        influxdb_duration_format = '%s0m0s' % duration
+    elif duration_lookup.group(2) == 'd':
+        influxdb_duration_format = '%sh0m0s' % (int(duration_lookup.group(1)) * 24)
+    elif duration_lookup.group(2) == 'w':
+        influxdb_duration_format = '%sh0m0s' % (int(duration_lookup.group(1)) * 24 * 7)
+    elif duration == 'INF':
+        influxdb_duration_format = '0'
+
+    if (retention_policy['duration'] != influxdb_duration_format or
+            retention_policy['replicaN'] != int(replication) or
+            retention_policy['default'] != default):
+ if not module.check_mode:
+ try:
+ client.alter_retention_policy(policy_name, database_name, duration, replication, default)
+ except exceptions.InfluxDBClientError as e:
+ module.fail_json(msg=e.content)
+ changed = True
+ module.exit_json(changed=changed)
+
+
+def main():
+ argument_spec = influxdb_argument_spec()
+ argument_spec.update(
+ policy_name=dict(required=True, type='str'),
+ duration=dict(required=True, type='str'),
+ replication=dict(required=True, type='int'),
+ default=dict(default=False, type='bool')
+ )
+ module = AnsibleModule(
+ argument_spec=argument_spec,
+ supports_check_mode=True
+ )
+
+ if not HAS_INFLUXDB:
+ module.fail_json(msg='influxdb python package is required for this module')
+
+ client = connect_to_influxdb(module)
+ retention_policy = find_retention_policy(module, client)
+
+ if retention_policy:
+ alter_retention_policy(module, client, retention_policy)
+ else:
+ create_retention_policy(module, client)
+
+from ansible.module_utils.basic import *
+
+if __name__ == '__main__':
+ main()
diff --git a/lib/ansible/modules/extras/database/misc/__init__.py b/lib/ansible/modules/extras/database/misc/__init__.py
new file mode 100644
index 0000000000..e69de29bb2
--- /dev/null
+++ b/lib/ansible/modules/extras/database/misc/__init__.py
diff --git a/lib/ansible/modules/extras/database/misc/mongodb_parameter.py b/lib/ansible/modules/extras/database/misc/mongodb_parameter.py
new file mode 100644
index 0000000000..4904be3db3
--- /dev/null
+++ b/lib/ansible/modules/extras/database/misc/mongodb_parameter.py
@@ -0,0 +1,228 @@
+#!/usr/bin/python
+# -*- coding: utf-8 -*-
+
+"""
+(c) 2016, Loic Blot <loic.blot@unix-experience.fr>
+Sponsored by Infopro Digital. http://www.infopro-digital.com/
+Sponsored by E.T.A.I. http://www.etai.fr/
+
+This file is part of Ansible
+
+Ansible is free software: you can redistribute it and/or modify
+it under the terms of the GNU General Public License as published by
+the Free Software Foundation, either version 3 of the License, or
+(at your option) any later version.
+
+Ansible is distributed in the hope that it will be useful,
+but WITHOUT ANY WARRANTY; without even the implied warranty of
+MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+GNU General Public License for more details.
+
+You should have received a copy of the GNU General Public License
+along with Ansible. If not, see <http://www.gnu.org/licenses/>.
+"""
+
+DOCUMENTATION = '''
+---
+module: mongodb_parameter
+short_description: Change an administrative parameter on a MongoDB server.
+description:
+ - Change an administrative parameter on a MongoDB server.
+version_added: "2.1"
+options:
+ login_user:
+ description:
+ - The username used to authenticate with
+ required: false
+ default: null
+ login_password:
+ description:
+ - The password used to authenticate with
+ required: false
+ default: null
+ login_host:
+ description:
+ - The host running the database
+ required: false
+ default: localhost
+ login_port:
+ description:
+ - The port to connect to
+ required: false
+ default: 27017
+ login_database:
+ description:
+ - The database where login credentials are stored
+ required: false
+ default: null
+ replica_set:
+ description:
+ - Replica set to connect to (automatically connects to primary for writes)
+ required: false
+ default: null
+ ssl:
+ description:
+ - Whether to use an SSL connection when connecting to the database
+ required: false
+ default: false
+ param:
+ description:
+ - MongoDB administrative parameter to modify
+ required: true
+ value:
+ description:
+ - MongoDB administrative parameter value to set
+ required: true
+ param_type:
+ description:
+      - The type of the parameter value (C(str) or C(int))
+ required: false
+ default: str
+
+notes:
+ - Requires the pymongo Python package on the remote host, version 2.4.2+. This
+ can be installed using pip or the OS package manager. @see http://api.mongodb.org/python/current/installation.html
+requirements: [ "pymongo" ]
+author: "Loic Blot (@nerzhul)"
+'''
+
+EXAMPLES = '''
+# Set MongoDB syncdelay to 60 (this is an int)
+- mongodb_parameter: param="syncdelay" value=60 param_type="int"
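+
+# The same change against a remote, authenticated server; the host name and
+# credentials below are hypothetical placeholders
+- mongodb_parameter:
+    login_host: mongo1.example.com
+    login_user: admin
+    login_password: secret
+    param: syncdelay
+    value: 60
+    param_type: int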
+'''
+
+RETURN = '''
+before:
+ description: value before modification
+ returned: success
+ type: string
+after:
+ description: value after modification
+ returned: success
+ type: string
+'''
+
+import ConfigParser
+
+try:
+ from pymongo.errors import ConnectionFailure
+ from pymongo.errors import OperationFailure
+ from pymongo import version as PyMongoVersion
+ from pymongo import MongoClient
+except ImportError:
+ try: # for older PyMongo 2.2
+ from pymongo import Connection as MongoClient
+ except ImportError:
+ pymongo_found = False
+ else:
+ pymongo_found = True
+else:
+ pymongo_found = True
+
+
+# =========================================
+# MongoDB module specific support methods.
+#
+
+def load_mongocnf():
+ config = ConfigParser.RawConfigParser()
+ mongocnf = os.path.expanduser('~/.mongodb.cnf')
+
+ try:
+ config.readfp(open(mongocnf))
+ creds = dict(
+ user=config.get('client', 'user'),
+ password=config.get('client', 'pass')
+ )
+ except (ConfigParser.NoOptionError, IOError):
+ return False
+
+ return creds
+
+
+# =========================================
+# Module execution.
+#
+
+def main():
+ module = AnsibleModule(
+ argument_spec=dict(
+ login_user=dict(default=None),
+ login_password=dict(default=None, no_log=True),
+ login_host=dict(default='localhost'),
+ login_port=dict(default=27017, type='int'),
+ login_database=dict(default=None),
+ replica_set=dict(default=None),
+ param=dict(default=None, required=True),
+ value=dict(default=None, required=True),
+ param_type=dict(default="str", choices=['str', 'int']),
+ ssl=dict(default=False, type='bool'),
+ )
+ )
+
+ if not pymongo_found:
+ module.fail_json(msg='the python pymongo module is required')
+
+ login_user = module.params['login_user']
+ login_password = module.params['login_password']
+ login_host = module.params['login_host']
+ login_port = module.params['login_port']
+ login_database = module.params['login_database']
+
+ replica_set = module.params['replica_set']
+ ssl = module.params['ssl']
+
+ param = module.params['param']
+ param_type = module.params['param_type']
+ value = module.params['value']
+
+ # Verify parameter is coherent with specified type
+ try:
+ if param_type == 'int':
+ value = int(value)
+    except ValueError:
+        module.fail_json(msg="value '%s' is not a valid %s" % (value, param_type))
+
+ try:
+ if replica_set:
+ client = MongoClient(login_host, int(login_port), replicaset=replica_set, ssl=ssl)
+ else:
+ client = MongoClient(login_host, int(login_port), ssl=ssl)
+
+ if login_user is None and login_password is None:
+ mongocnf_creds = load_mongocnf()
+ if mongocnf_creds is not False:
+ login_user = mongocnf_creds['user']
+ login_password = mongocnf_creds['password']
+ elif login_password is None or login_user is None:
+ module.fail_json(msg='when supplying login arguments, both login_user and login_password must be provided')
+
+ if login_user is not None and login_password is not None:
+ client.admin.authenticate(login_user, login_password, source=login_database)
+
+    except ConnectionFailure as e:
+ module.fail_json(msg='unable to connect to database: %s' % str(e))
+
+ db = client.admin
+
+    try:
+        # value has already been cast according to param_type above; casting to
+        # int here unconditionally would break string parameters
+        after_value = db.command("setParameter", **{param: value})
+    except OperationFailure as e:
+        module.fail_json(msg="unable to change parameter: %s" % str(e))
+
+ if "was" not in after_value:
+ module.exit_json(changed=True, msg="Unable to determine old value, assume it changed.")
+ else:
+ module.exit_json(changed=(value != after_value["was"]), before=after_value["was"],
+ after=value)
+
+
+# import module snippets
+from ansible.module_utils.basic import *
+
+if __name__ == '__main__':
+ main()
diff --git a/lib/ansible/modules/extras/database/misc/mongodb_user.py b/lib/ansible/modules/extras/database/misc/mongodb_user.py
new file mode 100644
index 0000000000..33187b35b9
--- /dev/null
+++ b/lib/ansible/modules/extras/database/misc/mongodb_user.py
@@ -0,0 +1,410 @@
+#!/usr/bin/python
+
+# (c) 2012, Elliott Foster <elliott@fourkitchens.com>
+# Sponsored by Four Kitchens http://fourkitchens.com.
+# (c) 2014, Epic Games, Inc.
+#
+# This file is part of Ansible
+#
+# Ansible is free software: you can redistribute it and/or modify
+# it under the terms of the GNU General Public License as published by
+# the Free Software Foundation, either version 3 of the License, or
+# (at your option) any later version.
+#
+# Ansible is distributed in the hope that it will be useful,
+# but WITHOUT ANY WARRANTY; without even the implied warranty of
+# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+# GNU General Public License for more details.
+#
+# You should have received a copy of the GNU General Public License
+# along with Ansible. If not, see <http://www.gnu.org/licenses/>.
+
+DOCUMENTATION = '''
+---
+module: mongodb_user
+short_description: Adds or removes a user from a MongoDB database.
+description:
+ - Adds or removes a user from a MongoDB database.
+version_added: "1.1"
+options:
+ login_user:
+ description:
+ - The username used to authenticate with
+ required: false
+ default: null
+ login_password:
+ description:
+ - The password used to authenticate with
+ required: false
+ default: null
+ login_host:
+ description:
+ - The host running the database
+ required: false
+ default: localhost
+ login_port:
+ description:
+ - The port to connect to
+ required: false
+ default: 27017
+ login_database:
+ version_added: "2.0"
+ description:
+ - The database where login credentials are stored
+ required: false
+ default: null
+ replica_set:
+ version_added: "1.6"
+ description:
+ - Replica set to connect to (automatically connects to primary for writes)
+ required: false
+ default: null
+ database:
+ description:
+ - The name of the database to add/remove the user from
+ required: true
+ name:
+ description:
+ - The name of the user to add or remove
+ required: true
+ default: null
+ aliases: [ 'user' ]
+ password:
+ description:
+ - The password to use for the user
+ required: false
+ default: null
+ ssl:
+ version_added: "1.8"
+ description:
+ - Whether to use an SSL connection when connecting to the database
+ default: False
+ ssl_cert_reqs:
+ version_added: "2.2"
+ description:
+ - Specifies whether a certificate is required from the other side of the connection, and whether it will be validated if provided.
+ required: false
+ default: "CERT_REQUIRED"
+ choices: ["CERT_REQUIRED", "CERT_OPTIONAL", "CERT_NONE"]
+ roles:
+ version_added: "1.3"
+ description:
+ - "The database user roles valid values could either be one or more of the following strings: 'read', 'readWrite', 'dbAdmin', 'userAdmin', 'clusterAdmin', 'readAnyDatabase', 'readWriteAnyDatabase', 'userAdminAnyDatabase', 'dbAdminAnyDatabase'"
+ - "Or the following dictionary '{ db: DATABASE_NAME, role: ROLE_NAME }'."
+ - "This param requires pymongo 2.5+. If it is a string, mongodb 2.4+ is also required. If it is a dictionary, mongo 2.6+ is required."
+ required: false
+ default: "readWrite"
+  state:
+ description:
+ - The database user state
+ required: false
+ default: present
+ choices: [ "present", "absent" ]
+ update_password:
+ required: false
+ default: always
+ choices: ['always', 'on_create']
+ version_added: "2.1"
+ description:
+ - C(always) will update passwords if they differ. C(on_create) will only set the password for newly created users.
+
+notes:
+ - Requires the pymongo Python package on the remote host, version 2.4.2+. This
+ can be installed using pip or the OS package manager. @see http://api.mongodb.org/python/current/installation.html
+requirements: [ "pymongo" ]
+author: "Elliott Foster (@elliotttf)"
+'''
+
+EXAMPLES = '''
+# Create 'burgers' database user with name 'bob' and password '12345'.
+- mongodb_user: database=burgers name=bob password=12345 state=present
+
+# Create a database user via SSL (MongoDB must be compiled with the SSL option and configured properly)
+- mongodb_user: database=burgers name=bob password=12345 state=present ssl=True
+
+# Delete 'burgers' database user with name 'bob'.
+- mongodb_user: database=burgers name=bob state=absent
+
+# Define more users with various specific roles (if not defined, no roles are assigned, and the user will be added via the pre-MongoDB-2.2 style)
+- mongodb_user: database=burgers name=ben password=12345 roles='read' state=present
+- mongodb_user: database=burgers name=jim password=12345 roles='readWrite,dbAdmin,userAdmin' state=present
+- mongodb_user: database=burgers name=joe password=12345 roles='readWriteAnyDatabase' state=present
+
+# add a user to database in a replica set, the primary server is automatically discovered and written to
+- mongodb_user: database=burgers name=bob replica_set=belcher password=12345 roles='readWriteAnyDatabase' state=present
+
+# add a user 'oplog_reader' with read-only access to the 'local' database on the replica_set 'belcher'. This is useful for oplog access (MONGO_OPLOG_URL).
+# note that the credentials must be added to the 'admin' database because the 'local' database is not synchronized and can't receive user credentials
+# To log in with such a user, the connection string should be MONGO_OPLOG_URL="mongodb://oplog_reader:oplog_reader_password@server1,server2/local?authSource=admin"
+# This syntax requires mongodb 2.6+ and pymongo 2.5+
+- mongodb_user:
+ login_user: root
+ login_password: root_password
+ database: admin
+ user: oplog_reader
+ password: oplog_reader_password
+ state: present
+ replica_set: belcher
+ roles:
+ - { db: "local" , role: "read" }
+
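+# Only set the password when the user is first created, so repeated runs stay
+# idempotent (MongoDB provides no query to check whether the stored password
+# differs; values below are placeholders)
+- mongodb_user:
+    database: burgers
+    name: bob
+    password: 12345
+    update_password: on_create
+    state: present
+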
+'''
+
+import ssl as ssl_lib
+import ConfigParser
+from distutils.version import LooseVersion
+try:
+ from pymongo.errors import ConnectionFailure
+ from pymongo.errors import OperationFailure
+ from pymongo import version as PyMongoVersion
+ from pymongo import MongoClient
+except ImportError:
+ try: # for older PyMongo 2.2
+ from pymongo import Connection as MongoClient
+ except ImportError:
+ pymongo_found = False
+ else:
+ pymongo_found = True
+else:
+ pymongo_found = True
+
+# =========================================
+# MongoDB module specific support methods.
+#
+
+def check_compatibility(module, client):
+ """Check the compatibility between the driver and the database.
+
+ See: https://docs.mongodb.com/ecosystem/drivers/driver-compatibility-reference/#python-driver-compatibility
+
+ Args:
+ module: Ansible module.
+ client (cursor): Mongodb cursor on admin database.
+ """
+ loose_srv_version = LooseVersion(client.server_info()['version'])
+ loose_driver_version = LooseVersion(PyMongoVersion)
+
+    if loose_srv_version >= LooseVersion('3.2') and loose_driver_version < LooseVersion('3.2'):
+        module.fail_json(msg=' (Note: you must use pymongo 3.2+ with MongoDB >= 3.2)')
+
+    elif loose_srv_version >= LooseVersion('3.0') and loose_driver_version < LooseVersion('2.8'):
+        module.fail_json(msg=' (Note: you must use pymongo 2.8+ with MongoDB 3.0)')
+
+    elif loose_srv_version >= LooseVersion('2.6') and loose_driver_version < LooseVersion('2.7'):
+        module.fail_json(msg=' (Note: you must use pymongo 2.7+ with MongoDB 2.6)')
+
+    elif loose_driver_version < LooseVersion('2.5'):
+        module.fail_json(msg=' (Note: you must be on mongodb 2.4+ and pymongo 2.5+ to use the roles param)')
+
+
+def user_find(client, user, db_name):
+ """Check if the user exists.
+
+ Args:
+ client (cursor): Mongodb cursor on admin database.
+ user (str): User to check.
+ db_name (str): User's database.
+
+ Returns:
+ dict: when user exists, False otherwise.
+ """
+ for mongo_user in client["admin"].system.users.find():
+ if mongo_user['user'] == user:
+ # NOTE: there is no 'db' field in mongo 2.4.
+ if 'db' not in mongo_user:
+ return mongo_user
+
+ if mongo_user["db"] == db_name:
+ return mongo_user
+ return False
+
+
+def user_add(module, client, db_name, user, password, roles):
+ #pymongo's user_add is a _create_or_update_user so we won't know if it was changed or updated
+ #without reproducing a lot of the logic in database.py of pymongo
+ db = client[db_name]
+
+ if roles is None:
+ db.add_user(user, password, False)
+ else:
+ db.add_user(user, password, None, roles=roles)
+
+def user_remove(module, client, db_name, user):
+ exists = user_find(client, user, db_name)
+ if exists:
+ if module.check_mode:
+ module.exit_json(changed=True, user=user)
+ db = client[db_name]
+ db.remove_user(user)
+ else:
+ module.exit_json(changed=False, user=user)
+
+def load_mongocnf():
+ config = ConfigParser.RawConfigParser()
+ mongocnf = os.path.expanduser('~/.mongodb.cnf')
+
+ try:
+ config.readfp(open(mongocnf))
+ creds = dict(
+ user=config.get('client', 'user'),
+ password=config.get('client', 'pass')
+ )
+ except (ConfigParser.NoOptionError, IOError):
+ return False
+
+ return creds
+
+
+
+def check_if_roles_changed(uinfo, roles, db_name):
+# We must be aware of users which can read the oplog on a replicaset
+# Such users must have access to the local DB, but since this DB does not store users credentials
+# and is not synchronized among replica sets, the user must be stored on the admin db
+# Therefore their structure is the following :
+# {
+# "_id" : "admin.oplog_reader",
+# "user" : "oplog_reader",
+# "db" : "admin", # <-- admin DB
+# "roles" : [
+# {
+# "role" : "read",
+# "db" : "local" # <-- local DB
+# }
+# ]
+# }
+
+ def make_sure_roles_are_a_list_of_dict(roles, db_name):
+ output = list()
+ for role in roles:
+ if isinstance(role, basestring):
+ new_role = { "role": role, "db": db_name }
+ output.append(new_role)
+ else:
+ output.append(role)
+ return output
+
+ roles_as_list_of_dict = make_sure_roles_are_a_list_of_dict(roles, db_name)
+ uinfo_roles = uinfo.get('roles', [])
+
+ if sorted(roles_as_list_of_dict) == sorted(uinfo_roles):
+ return False
+ return True
+
+
+
+# =========================================
+# Module execution.
+#
+
+def main():
+ module = AnsibleModule(
+ argument_spec = dict(
+ login_user=dict(default=None),
+            login_password=dict(default=None, no_log=True),
+ login_host=dict(default='localhost'),
+ login_port=dict(default='27017'),
+ login_database=dict(default=None),
+ replica_set=dict(default=None),
+ database=dict(required=True, aliases=['db']),
+ name=dict(required=True, aliases=['user']),
+            password=dict(aliases=['pass'], no_log=True),
+ ssl=dict(default=False, type='bool'),
+ roles=dict(default=None, type='list'),
+ state=dict(default='present', choices=['absent', 'present']),
+ update_password=dict(default="always", choices=["always", "on_create"]),
+ ssl_cert_reqs=dict(default='CERT_REQUIRED', choices=['CERT_NONE', 'CERT_OPTIONAL', 'CERT_REQUIRED']),
+ ),
+ supports_check_mode=True
+ )
+
+ if not pymongo_found:
+ module.fail_json(msg='the python pymongo module is required')
+
+ login_user = module.params['login_user']
+ login_password = module.params['login_password']
+ login_host = module.params['login_host']
+ login_port = module.params['login_port']
+ login_database = module.params['login_database']
+
+ replica_set = module.params['replica_set']
+ db_name = module.params['database']
+ user = module.params['name']
+ password = module.params['password']
+ ssl = module.params['ssl']
+ ssl_cert_reqs = None
+ roles = module.params['roles'] or []
+ state = module.params['state']
+ update_password = module.params['update_password']
+
+ try:
+ connection_params = {
+ "host": login_host,
+ "port": int(login_port),
+ }
+
+ if replica_set:
+ connection_params["replicaset"] = replica_set
+
+ if ssl:
+ connection_params["ssl"] = ssl
+ connection_params["ssl_cert_reqs"] = getattr(ssl_lib, module.params['ssl_cert_reqs'])
+
+ client = MongoClient(**connection_params)
+
+        # NOTE: this check must be done as early as possible.
+        # We don't need to be authenticated for it.
+ check_compatibility(module, client)
+
+ if login_user is None and login_password is None:
+ mongocnf_creds = load_mongocnf()
+ if mongocnf_creds is not False:
+ login_user = mongocnf_creds['user']
+ login_password = mongocnf_creds['password']
+ elif login_password is None or login_user is None:
+ module.fail_json(msg='when supplying login arguments, both login_user and login_password must be provided')
+
+ if login_user is not None and login_password is not None:
+ client.admin.authenticate(login_user, login_password, source=login_database)
+ elif LooseVersion(PyMongoVersion) >= LooseVersion('3.0'):
+ if db_name != "admin":
+ module.fail_json(msg='The localhost login exception only allows the first admin account to be created')
+ #else: this has to be the first admin user added
+
+    except Exception as e:
+ module.fail_json(msg='unable to connect to database: %s' % str(e))
+
+ if state == 'present':
+ if password is None and update_password == 'always':
+ module.fail_json(msg='password parameter required when adding a user unless update_password is set to on_create')
+
+ try:
+ uinfo = user_find(client, user, db_name)
+ if update_password != 'always' and uinfo:
+ password = None
+ if not check_if_roles_changed(uinfo, roles, db_name):
+ module.exit_json(changed=False, user=user)
+
+ if module.check_mode:
+ module.exit_json(changed=True, user=user)
+
+ user_add(module, client, db_name, user, password, roles)
+        except Exception as e:
+ module.fail_json(msg='Unable to add or update user: %s' % str(e))
+
+ # Here we can check password change if mongo provide a query for that : https://jira.mongodb.org/browse/SERVER-22848
+ #newuinfo = user_find(client, user, db_name)
+ #if uinfo['role'] == newuinfo['role'] and CheckPasswordHere:
+ # module.exit_json(changed=False, user=user)
+
+ elif state == 'absent':
+ try:
+ user_remove(module, client, db_name, user)
+        except Exception as e:
+ module.fail_json(msg='Unable to remove user: %s' % str(e))
+
+ module.exit_json(changed=True, user=user)
+
+# import module snippets
+from ansible.module_utils.basic import *
+if __name__ == '__main__':
+    main()
diff --git a/lib/ansible/modules/extras/database/misc/redis.py b/lib/ansible/modules/extras/database/misc/redis.py
new file mode 100644
index 0000000000..be012e6d79
--- /dev/null
+++ b/lib/ansible/modules/extras/database/misc/redis.py
@@ -0,0 +1,322 @@
+#!/usr/bin/python
+# -*- coding: utf-8 -*-
+
+# This file is part of Ansible
+#
+# Ansible is free software: you can redistribute it and/or modify
+# it under the terms of the GNU General Public License as published by
+# the Free Software Foundation, either version 3 of the License, or
+# (at your option) any later version.
+#
+# Ansible is distributed in the hope that it will be useful,
+# but WITHOUT ANY WARRANTY; without even the implied warranty of
+# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+# GNU General Public License for more details.
+#
+# You should have received a copy of the GNU General Public License
+# along with Ansible. If not, see <http://www.gnu.org/licenses/>.
+
+DOCUMENTATION = '''
+---
+module: redis
+short_description: Various redis commands, slave and flush
+description:
+ - Unified utility to interact with redis instances.
+ 'slave' sets a redis instance in slave or master mode.
+    'flush' flushes the whole instance or a specified db.
+    'config' (new in 1.6) ensures a configuration setting on an instance.
+version_added: "1.3"
+options:
+ command:
+ description:
+ - The selected redis command
+ required: true
+ default: null
+ choices: [ "slave", "flush", "config" ]
+ login_password:
+ description:
+ - The password used to authenticate with (usually not used)
+ required: false
+ default: null
+ login_host:
+ description:
+ - The host running the database
+ required: false
+ default: localhost
+ login_port:
+ description:
+ - The port to connect to
+ required: false
+ default: 6379
+ master_host:
+ description:
+ - The host of the master instance [slave command]
+ required: false
+ default: null
+ master_port:
+ description:
+ - The port of the master instance [slave command]
+ required: false
+ default: null
+ slave_mode:
+ description:
+ - the mode of the redis instance [slave command]
+ required: false
+ default: slave
+ choices: [ "master", "slave" ]
+ db:
+ description:
+ - The database to flush (used in db mode) [flush command]
+ required: false
+ default: null
+ flush_mode:
+ description:
+ - Type of flush (all the dbs in a redis instance or a specific one)
+ [flush command]
+ required: false
+ default: all
+ choices: [ "all", "db" ]
+ name:
+ version_added: 1.6
+ description:
+ - A redis config key.
+ required: false
+ default: null
+ value:
+ version_added: 1.6
+ description:
+ - A redis config value.
+ required: false
+ default: null
+
+
+notes:
+ - Requires the redis-py Python package on the remote host. You can
+ install it with pip (pip install redis) or with a package manager.
+ https://github.com/andymccurdy/redis-py
+ - If the redis master instance we are making slave of is password protected
+ this needs to be in the redis.conf in the masterauth variable
+
+requirements: [ redis ]
+author: "Xabier Larrakoetxea (@slok)"
+'''
+
+EXAMPLES = '''
+# Set local redis instance to be slave of melee.island on port 6377
+- redis: command=slave master_host=melee.island master_port=6377
+
+# Deactivate slave mode
+- redis: command=slave slave_mode=master
+
+# Flush all the redis db
+- redis: command=flush flush_mode=all
+
+# Flush only one db in a redis instance
+- redis: command=flush db=1 flush_mode=db
+
+# Configure local redis to have 10000 max clients
+- redis: command=config name=maxclients value=10000
+
+# Configure local redis to have lua time limit of 100 ms
+- redis: command=config name=lua-time-limit value=100
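+
+# Flush a single db on a password-protected remote instance (host and password
+# here are illustrative placeholders)
+- redis: command=flush db=0 flush_mode=db login_host=redis1.example.com login_password=secret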
+'''
+
+try:
+ import redis
+except ImportError:
+ redis_found = False
+else:
+ redis_found = True
+
+
+# ===========================================
+# Redis module specific support methods.
+#
+
+def set_slave_mode(client, master_host, master_port):
+ try:
+ return client.slaveof(master_host, master_port)
+ except Exception:
+ return False
+
+
+def set_master_mode(client):
+ try:
+ return client.slaveof()
+ except Exception:
+ return False
+
+
+def flush(client, db=None):
+ try:
+ if type(db) != int:
+ return client.flushall()
+ else:
+ # The passed client has been connected to the database already
+ return client.flushdb()
+ except Exception:
+ return False
+
+
+# ===========================================
+# Module execution.
+#
+
+def main():
+ module = AnsibleModule(
+ argument_spec = dict(
+ command=dict(default=None, choices=['slave', 'flush', 'config']),
+ login_password=dict(default=None, no_log=True),
+ login_host=dict(default='localhost'),
+ login_port=dict(default=6379, type='int'),
+ master_host=dict(default=None),
+ master_port=dict(default=None, type='int'),
+ slave_mode=dict(default='slave', choices=['master', 'slave']),
+ db=dict(default=None, type='int'),
+ flush_mode=dict(default='all', choices=['all', 'db']),
+ name=dict(default=None),
+ value=dict(default=None)
+ ),
+ supports_check_mode = True
+ )
+
+ if not redis_found:
+ module.fail_json(msg="python redis module is required")
+
+ login_password = module.params['login_password']
+ login_host = module.params['login_host']
+ login_port = module.params['login_port']
+ command = module.params['command']
+
+ # Slave Command section -----------
+ if command == "slave":
+ master_host = module.params['master_host']
+ master_port = module.params['master_port']
+ mode = module.params['slave_mode']
+
+ #Check if we have all the data
+ if mode == "slave": # Only need data if we want to be slave
+ if not master_host:
+ module.fail_json(
+ msg='In slave mode master host must be provided')
+
+ if not master_port:
+ module.fail_json(
+ msg='In slave mode master port must be provided')
+
+ #Connect and check
+ r = redis.StrictRedis(host=login_host,
+ port=login_port,
+ password=login_password)
+ try:
+ r.ping()
+        except Exception as e:
+ module.fail_json(msg="unable to connect to database: %s" % e)
+
+ #Check if we are already in the mode that we want
+ info = r.info()
+ if mode == "master" and info["role"] == "master":
+ module.exit_json(changed=False, mode=mode)
+
+ elif mode == "slave" and\
+ info["role"] == "slave" and\
+ info["master_host"] == master_host and\
+ info["master_port"] == master_port:
+ status = {
+ 'status': mode,
+ 'master_host': master_host,
+ 'master_port': master_port,
+ }
+ module.exit_json(changed=False, mode=status)
+ else:
+            # Apply the requested mode
+            # (check check_mode before running commands so they aren't executed
+            # unnecessarily)
+ if mode == "slave":
+ if module.check_mode or\
+ set_slave_mode(r, master_host, master_port):
+ info = r.info()
+ status = {
+ 'status': mode,
+ 'master_host': master_host,
+ 'master_port': master_port,
+ }
+ module.exit_json(changed=True, mode=status)
+ else:
+ module.fail_json(msg='Unable to set slave mode')
+
+ else:
+ if module.check_mode or set_master_mode(r):
+ module.exit_json(changed=True, mode=mode)
+ else:
+ module.fail_json(msg='Unable to set master mode')
+
+ # flush Command section -----------
+ elif command == "flush":
+ db = module.params['db']
+ mode = module.params['flush_mode']
+
+ #Check if we have all the data
+ if mode == "db":
+ if db is None:
+ module.fail_json(
+ msg="In db mode the db number must be provided")
+
+ #Connect and check
+ r = redis.StrictRedis(host=login_host,
+ port=login_port,
+ password=login_password,
+ db=db)
+ try:
+ r.ping()
+        except Exception as e:
+ module.fail_json(msg="unable to connect to database: %s" % e)
+
+        # Apply the requested flush
+        # (check check_mode before running commands so they aren't executed
+        # unnecessarily)
+ if mode == "all":
+ if module.check_mode or flush(r):
+ module.exit_json(changed=True, flushed=True)
+ else: # Flush never fails :)
+ module.fail_json(msg="Unable to flush all databases")
+
+ else:
+ if module.check_mode or flush(r, db):
+ module.exit_json(changed=True, flushed=True, db=db)
+ else: # Flush never fails :)
+ module.fail_json(msg="Unable to flush '%d' database" % db)
+ elif command == 'config':
+ name = module.params['name']
+ value = module.params['value']
+
+ r = redis.StrictRedis(host=login_host,
+ port=login_port,
+ password=login_password)
+
+ try:
+ r.ping()
+        except Exception as e:
+ module.fail_json(msg="unable to connect to database: %s" % e)
+
+ try:
+ old_value = r.config_get(name)[name]
+        except Exception as e:
+ module.fail_json(msg="unable to read config: %s" % e)
+ changed = old_value != value
+
+ if module.check_mode or not changed:
+ module.exit_json(changed=changed, name=name, value=value)
+ else:
+ try:
+ r.config_set(name, value)
+            except Exception as e:
+ module.fail_json(msg="unable to write config: %s" % e)
+ module.exit_json(changed=changed, name=name, value=value)
+ else:
+ module.fail_json(msg='A valid command must be provided')
+
+# import module snippets
+from ansible.module_utils.basic import *
+if __name__ == '__main__':
+    main()
diff --git a/lib/ansible/modules/extras/database/misc/riak.py b/lib/ansible/modules/extras/database/misc/riak.py
new file mode 100644
index 0000000000..ccdec82f0a
--- /dev/null
+++ b/lib/ansible/modules/extras/database/misc/riak.py
@@ -0,0 +1,262 @@
+#!/usr/bin/python
+# -*- coding: utf-8 -*-
+
+# (c) 2013, James Martin <jmartin@basho.com>, Drew Kerrigan <dkerrigan@basho.com>
+#
+# This file is part of Ansible
+#
+# Ansible is free software: you can redistribute it and/or modify
+# it under the terms of the GNU General Public License as published by
+# the Free Software Foundation, either version 3 of the License, or
+# (at your option) any later version.
+#
+# Ansible is distributed in the hope that it will be useful,
+# but WITHOUT ANY WARRANTY; without even the implied warranty of
+# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+# GNU General Public License for more details.
+#
+# You should have received a copy of the GNU General Public License
+# along with Ansible. If not, see <http://www.gnu.org/licenses/>.
+#
+DOCUMENTATION = '''
+---
+module: riak
+short_description: This module handles some common Riak operations
+description:
+  - This module can be used to join nodes to a cluster and check
+    the status of the cluster.
+version_added: "1.2"
+author:
+ - "James Martin (@jsmartin)"
+ - "Drew Kerrigan (@drewkerrigan)"
+options:
+ command:
+ description:
+ - The command you would like to perform against the cluster.
+ required: false
+ default: null
+ aliases: []
+ choices: ['ping', 'kv_test', 'join', 'plan', 'commit']
+ config_dir:
+ description:
+ - The path to the riak configuration directory
+ required: false
+ default: /etc/riak
+ aliases: []
+ http_conn:
+ description:
+ - The ip address and port that is listening for Riak HTTP queries
+ required: false
+ default: 127.0.0.1:8098
+ aliases: []
+ target_node:
+ description:
+ - The target node for certain operations (join, ping)
+ required: false
+ default: riak@127.0.0.1
+ aliases: []
+ wait_for_handoffs:
+ description:
+ - Number of seconds to wait for handoffs to complete.
+ required: false
+ default: null
+ aliases: []
+ type: 'int'
+ wait_for_ring:
+ description:
+ - Number of seconds to wait for all nodes to agree on the ring.
+ required: false
+ default: null
+ aliases: []
+ type: 'int'
+ wait_for_service:
+ description:
+ - Waits for a riak service to come online before continuing.
+ required: false
+    default: null
+ aliases: []
+ choices: ['kv']
+ validate_certs:
+ description:
+ - If C(no), SSL certificates will not be validated. This should only be used
+ on personally controlled sites using self-signed certificates.
+ required: false
+ default: 'yes'
+ choices: ['yes', 'no']
+ version_added: 1.5.1
+'''
+
+EXAMPLES = '''
+# Joins a Riak node to another node
+- riak: command=join target_node=riak@10.1.1.1
+
+# Wait for handoffs to finish. Use with async and poll.
+- riak: wait_for_handoffs=600
+
+# Wait for riak_kv service to startup
+- riak: wait_for_service=kv
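+
+# Review the staged cluster changes, then commit them (the usual follow-up to join)
+- riak: command=plan
+- riak: command=commit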
+'''
+
+import time
+import socket
+import sys
+
+try:
+ import json
+except ImportError:
+ try:
+ import simplejson as json
+ except ImportError:
+ # Let snippet from module_utils/basic.py return a proper error in this case
+ pass
+
+
+def ring_check(module, riak_admin_bin):
+ cmd = '%s ringready' % riak_admin_bin
+ rc, out, err = module.run_command(cmd)
+ if rc == 0 and 'TRUE All nodes agree on the ring' in out:
+ return True
+ else:
+ return False
+
+def main():
+
+ module = AnsibleModule(
+ argument_spec=dict(
+ command=dict(required=False, default=None, choices=[
+ 'ping', 'kv_test', 'join', 'plan', 'commit']),
+ config_dir=dict(default='/etc/riak', type='path'),
+ http_conn=dict(required=False, default='127.0.0.1:8098'),
+ target_node=dict(default='riak@127.0.0.1', required=False),
+            wait_for_handoffs=dict(default=None, type='int'),
+            wait_for_ring=dict(default=None, type='int'),
+ wait_for_service=dict(
+ required=False, default=None, choices=['kv']),
+            validate_certs=dict(default='yes', type='bool'))
+ )
+
+ command = module.params.get('command')
+ config_dir = module.params.get('config_dir')
+ http_conn = module.params.get('http_conn')
+ target_node = module.params.get('target_node')
+ wait_for_handoffs = module.params.get('wait_for_handoffs')
+ wait_for_ring = module.params.get('wait_for_ring')
+ wait_for_service = module.params.get('wait_for_service')
+ validate_certs = module.params.get('validate_certs')
+
+    # make sure riak commands are on the path; fail early if they are missing
+    riak_bin = module.get_bin_path('riak', required=True)
+    riak_admin_bin = module.get_bin_path('riak-admin', required=True)
+
+ timeout = time.time() + 120
+ while True:
+ if time.time() > timeout:
+ module.fail_json(msg='Timeout, could not fetch Riak stats.')
+ (response, info) = fetch_url(module, 'http://%s/stats' % (http_conn), force=True, timeout=5)
+ if info['status'] == 200:
+ stats_raw = response.read()
+ break
+ time.sleep(5)
+
+    # here we attempt to load those stats
+    try:
+        stats = json.loads(stats_raw)
+    except ValueError:
+        module.fail_json(msg='Could not parse Riak stats.')
+
+ node_name = stats['nodename']
+ nodes = stats['ring_members']
+ ring_size = stats['ring_creation_size']
+    rc, out, err = module.run_command([riak_bin, 'version'])
+ version = out.strip()
+
+ result = dict(node_name=node_name,
+ nodes=nodes,
+ ring_size=ring_size,
+ version=version)
+
+ if command == 'ping':
+ cmd = '%s ping %s' % ( riak_bin, target_node )
+ rc, out, err = module.run_command(cmd)
+ if rc == 0:
+ result['ping'] = out
+ else:
+ module.fail_json(msg=out)
+
+ elif command == 'kv_test':
+ cmd = '%s test' % riak_admin_bin
+ rc, out, err = module.run_command(cmd)
+ if rc == 0:
+ result['kv_test'] = out
+ else:
+ module.fail_json(msg=out)
+
+ elif command == 'join':
+ if nodes.count(node_name) == 1 and len(nodes) > 1:
+ result['join'] = 'Node is already in cluster or staged to be in cluster.'
+ else:
+ cmd = '%s cluster join %s' % (riak_admin_bin, target_node)
+ rc, out, err = module.run_command(cmd)
+ if rc == 0:
+ result['join'] = out
+ result['changed'] = True
+ else:
+ module.fail_json(msg=out)
+
+ elif command == 'plan':
+ cmd = '%s cluster plan' % riak_admin_bin
+ rc, out, err = module.run_command(cmd)
+ if rc == 0:
+ result['plan'] = out
+ if 'Staged Changes' in out:
+ result['changed'] = True
+ else:
+ module.fail_json(msg=out)
+
+ elif command == 'commit':
+ cmd = '%s cluster commit' % riak_admin_bin
+ rc, out, err = module.run_command(cmd)
+ if rc == 0:
+ result['commit'] = out
+ result['changed'] = True
+ else:
+ module.fail_json(msg=out)
+
+    # this could take a while; recommended to run in async mode
+ if wait_for_handoffs:
+ timeout = time.time() + wait_for_handoffs
+ while True:
+ cmd = '%s transfers' % riak_admin_bin
+ rc, out, err = module.run_command(cmd)
+ if 'No transfers active' in out:
+ result['handoffs'] = 'No transfers active.'
+ break
+ time.sleep(10)
+ if time.time() > timeout:
+ module.fail_json(msg='Timeout waiting for handoffs.')
+
+ if wait_for_service:
+ cmd = [riak_admin_bin, 'wait_for_service', 'riak_%s' % wait_for_service, node_name ]
+ rc, out, err = module.run_command(cmd)
+ result['service'] = out
+
+ if wait_for_ring:
+ timeout = time.time() + wait_for_ring
+ while True:
+ if ring_check(module, riak_admin_bin):
+ break
+ time.sleep(10)
+ if time.time() > timeout:
+ module.fail_json(msg='Timeout waiting for nodes to agree on ring.')
+
+ result['ring_ready'] = ring_check(module, riak_admin_bin)
+
+ module.exit_json(**result)
+
+# import module snippets
+from ansible.module_utils.basic import *
+from ansible.module_utils.urls import *
+if __name__ == '__main__':
+ main()
diff --git a/lib/ansible/modules/extras/database/mssql/__init__.py b/lib/ansible/modules/extras/database/mssql/__init__.py
new file mode 100644
index 0000000000..e69de29bb2
--- /dev/null
+++ b/lib/ansible/modules/extras/database/mssql/__init__.py
diff --git a/lib/ansible/modules/extras/database/mssql/mssql_db.py b/lib/ansible/modules/extras/database/mssql/mssql_db.py
new file mode 100644
index 0000000000..45642c579f
--- /dev/null
+++ b/lib/ansible/modules/extras/database/mssql/mssql_db.py
@@ -0,0 +1,232 @@
+#!/usr/bin/python
+# -*- coding: utf-8 -*-
+
+# Ansible module to manage mssql databases
+# (c) 2014, Vedit Firat Arig <firatarig@gmail.com>
+# Outline and parts are reused from Mark Theunissen's mysql_db module
+#
+# This file is part of Ansible
+#
+# Ansible is free software: you can redistribute it and/or modify
+# it under the terms of the GNU General Public License as published by
+# the Free Software Foundation, either version 3 of the License, or
+# (at your option) any later version.
+#
+# Ansible is distributed in the hope that it will be useful,
+# but WITHOUT ANY WARRANTY; without even the implied warranty of
+# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+# GNU General Public License for more details.
+#
+# You should have received a copy of the GNU General Public License
+# along with Ansible. If not, see <http://www.gnu.org/licenses/>.
+
+DOCUMENTATION = '''
+---
+module: mssql_db
+short_description: Add or remove MSSQL databases from a remote host.
+description:
+ - Add or remove MSSQL databases from a remote host.
+version_added: "2.2"
+options:
+ name:
+ description:
+ - name of the database to add or remove
+ required: true
+ default: null
+ aliases: [ db ]
+ login_user:
+ description:
+ - The username used to authenticate with
+ required: false
+ default: null
+ login_password:
+ description:
+ - The password used to authenticate with
+ required: false
+ default: null
+  login_host:
+    description:
+      - Host running the database
+    required: true
+  login_port:
+    description:
+      - Port of the MSSQL server. Requires login_host to be defined as something other than localhost if login_port is used
+    required: false
+    default: 1433
+ state:
+ description:
+ - The database state
+ required: false
+ default: present
+ choices: [ "present", "absent", "import" ]
+ target:
+ description:
+ - Location, on the remote host, of the dump file to read from or write to. Uncompressed SQL
+ files (C(.sql)) files are supported.
+ required: false
+ autocommit:
+ description:
+      - Automatically commit the change only if the import succeeds. Sometimes it is necessary to use autocommit=true, since some content can't be changed within a transaction.
+ required: false
+ default: false
+ choices: [ "false", "true" ]
+notes:
+ - Requires the pymssql Python package on the remote host. For Ubuntu, this
+ is as easy as pip install pymssql (See M(pip).)
+requirements:
+ - python >= 2.7
+ - pymssql
+author: Vedit Firat Arig
+'''
+
+EXAMPLES = '''
+# Create a new database with name 'jackdata'
+- mssql_db: name=jackdata state=present
+# Copy database dump file to remote host and restore it to database 'my_db'
+- copy: src=dump.sql dest=/tmp
+- mssql_db: name=my_db state=import target=/tmp/dump.sql
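+
+# Import with autocommit enabled against a remote server on a non-default port
+# (host and port are illustrative; login_host must then be something other than localhost)
+- mssql_db: name=my_db state=import target=/tmp/dump.sql autocommit=true login_host=mssql1.example.com login_port=14330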
+'''
+
+RETURN = '''
+#
+'''
+
+import os
+try:
+ import pymssql
+except ImportError:
+ mssql_found = False
+else:
+ mssql_found = True
+
+
+def db_exists(conn, cursor, db):
+ cursor.execute("SELECT name FROM master.sys.databases WHERE name = %s", db)
+ conn.commit()
+ return bool(cursor.rowcount)
+
+
+def db_create(conn, cursor, db):
+ cursor.execute("CREATE DATABASE [%s]" % db)
+ return db_exists(conn, cursor, db)
+
+
+def db_delete(conn, cursor, db):
+    try:
+        # best effort: force single-user mode so the drop is not blocked by open sessions
+        cursor.execute("ALTER DATABASE [%s] SET single_user WITH ROLLBACK IMMEDIATE" % db)
+    except Exception:
+        pass
+ cursor.execute("DROP DATABASE [%s]" % db)
+ return not db_exists(conn, cursor, db)
+
+def db_import(conn, cursor, module, db, target):
+ if os.path.isfile(target):
+ backup = open(target, 'r')
+ try:
+ sqlQuery = "USE [%s]\n" % db
+ for line in backup:
+ if line is None:
+ break
+ elif line.startswith('GO'):
+ cursor.execute(sqlQuery)
+ sqlQuery = "USE [%s]\n" % db
+ else:
+ sqlQuery += line
+ cursor.execute(sqlQuery)
+ conn.commit()
+ finally:
+ backup.close()
+ return 0, "import successful", ""
+ else:
+ return 1, "cannot find target file", "cannot find target file"
+
+
+def main():
+ module = AnsibleModule(
+ argument_spec=dict(
+ name=dict(required=True, aliases=['db']),
+ login_user=dict(default=''),
+            login_password=dict(default='', no_log=True),
+ login_host=dict(required=True),
+ login_port=dict(default='1433'),
+ target=dict(default=None),
+ autocommit=dict(type='bool', default=False),
+ state=dict(
+ default='present', choices=['present', 'absent', 'import'])
+ )
+ )
+
+ if not mssql_found:
+ module.fail_json(msg="pymssql python module is required")
+
+ db = module.params['name']
+ state = module.params['state']
+ autocommit = module.params['autocommit']
+ target = module.params["target"]
+
+ login_user = module.params['login_user']
+ login_password = module.params['login_password']
+ login_host = module.params['login_host']
+ login_port = module.params['login_port']
+
+ login_querystring = login_host
+ if login_port != "1433":
+ login_querystring = "%s:%s" % (login_host, login_port)
+
+ if login_user != "" and login_password == "":
+ module.fail_json(msg="when supplying login_user arguments login_password must be provided")
+
+ try:
+ conn = pymssql.connect(user=login_user, password=login_password, host=login_querystring, database='master')
+ cursor = conn.cursor()
+ except Exception as e:
+ if "Unknown database" in str(e):
+ errno, errstr = e.args
+ module.fail_json(msg="ERROR: %s %s" % (errno, errstr))
+ else:
+ module.fail_json(msg="unable to connect, check login_user and login_password are correct, or alternatively check your @sysconfdir@/freetds.conf / ${HOME}/.freetds.conf")
+
+ conn.autocommit(True)
+ changed = False
+
+ if db_exists(conn, cursor, db):
+ if state == "absent":
+ try:
+ changed = db_delete(conn, cursor, db)
+ except Exception as e:
+ module.fail_json(msg="error deleting database: " + str(e))
+ elif state == "import":
+ conn.autocommit(autocommit)
+ rc, stdout, stderr = db_import(conn, cursor, module, db, target)
+
+ if rc != 0:
+ module.fail_json(msg="%s" % stderr)
+ else:
+ module.exit_json(changed=True, db=db, msg=stdout)
+ else:
+ if state == "present":
+ try:
+ changed = db_create(conn, cursor, db)
+ except Exception as e:
+ module.fail_json(msg="error creating database: " + str(e))
+ elif state == "import":
+ try:
+ changed = db_create(conn, cursor, db)
+ except Exception as e:
+ module.fail_json(msg="error creating database: " + str(e))
+
+ conn.autocommit(autocommit)
+ rc, stdout, stderr = db_import(conn, cursor, module, db, target)
+
+ if rc != 0:
+ module.fail_json(msg="%s" % stderr)
+ else:
+ module.exit_json(changed=True, db=db, msg=stdout)
+
+ module.exit_json(changed=changed, db=db)
+
+# import module snippets
+from ansible.module_utils.basic import *
+if __name__ == '__main__':
+ main()
+
diff --git a/lib/ansible/modules/extras/database/mysql/__init__.py b/lib/ansible/modules/extras/database/mysql/__init__.py
new file mode 100644
index 0000000000..e69de29bb2
--- /dev/null
+++ b/lib/ansible/modules/extras/database/mysql/__init__.py
diff --git a/lib/ansible/modules/extras/database/mysql/mysql_replication.py b/lib/ansible/modules/extras/database/mysql/mysql_replication.py
new file mode 100644
index 0000000000..551875a0d5
--- /dev/null
+++ b/lib/ansible/modules/extras/database/mysql/mysql_replication.py
@@ -0,0 +1,362 @@
+#!/usr/bin/python
+# -*- coding: utf-8 -*-
+
+"""
+
+Ansible module to manage mysql replication
+(c) 2013, Balazs Pocze <banyek@gawker.com>
+Certain parts are taken from Mark Theunissen's mysqldb module
+
+This file is part of Ansible
+
+Ansible is free software: you can redistribute it and/or modify
+it under the terms of the GNU General Public License as published by
+the Free Software Foundation, either version 3 of the License, or
+(at your option) any later version.
+
+Ansible is distributed in the hope that it will be useful,
+but WITHOUT ANY WARRANTY; without even the implied warranty of
+MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+GNU General Public License for more details.
+You should have received a copy of the GNU General Public License
+along with Ansible. If not, see <http://www.gnu.org/licenses/>.
+"""
+
+DOCUMENTATION = '''
+---
+module: mysql_replication
+
+short_description: Manage MySQL replication
+description:
+  - Manages MySQL server replication: gets slave or master status, changes the master host, and starts, stops, or resets the slave.
+version_added: "1.3"
+author: "Balazs Pocze (@banyek)"
+options:
+ mode:
+ description:
+ - module operating mode. Could be getslave (SHOW SLAVE STATUS), getmaster (SHOW MASTER STATUS), changemaster (CHANGE MASTER TO), startslave (START SLAVE), stopslave (STOP SLAVE), resetslave (RESET SLAVE), resetslaveall (RESET SLAVE ALL)
+ required: False
+ choices:
+ - getslave
+ - getmaster
+ - changemaster
+ - stopslave
+ - startslave
+ - resetslave
+ - resetslaveall
+ default: getslave
+ master_host:
+ description:
+ - same as mysql variable
+ master_user:
+ description:
+ - same as mysql variable
+ master_password:
+ description:
+ - same as mysql variable
+ master_port:
+ description:
+ - same as mysql variable
+ master_connect_retry:
+ description:
+ - same as mysql variable
+ master_log_file:
+ description:
+ - same as mysql variable
+ master_log_pos:
+ description:
+ - same as mysql variable
+ relay_log_file:
+ description:
+ - same as mysql variable
+ relay_log_pos:
+ description:
+ - same as mysql variable
+ master_ssl:
+ description:
+ - same as mysql variable
+ choices: [ 0, 1 ]
+ master_ssl_ca:
+ description:
+ - same as mysql variable
+ master_ssl_capath:
+ description:
+ - same as mysql variable
+ master_ssl_cert:
+ description:
+ - same as mysql variable
+ master_ssl_key:
+ description:
+ - same as mysql variable
+ master_ssl_cipher:
+ description:
+ - same as mysql variable
+ master_auto_position:
+ description:
+      - whether the host uses GTID-based replication or not
+ required: false
+ default: null
+ version_added: "2.0"
+
+extends_documentation_fragment: mysql
+'''
+
+EXAMPLES = '''
+# Stop mysql slave thread
+- mysql_replication: mode=stopslave
+
+# Get master binlog file name and binlog position
+- mysql_replication: mode=getmaster
+
+# Change master to master server 192.0.2.1 and use binary log 'mysql-bin.000009' with position 4578
+- mysql_replication: mode=changemaster master_host=192.0.2.1 master_log_file=mysql-bin.000009 master_log_pos=4578
+
+# Check slave status using port 3308
+- mysql_replication: mode=getslave login_host=ansible.example.com login_port=3308
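+
+# Change master using GTID auto positioning instead of explicit binlog coordinates
+# (credentials are illustrative placeholders)
+- mysql_replication: mode=changemaster master_host=192.0.2.1 master_user=repl master_password=repl_pass master_auto_position=yes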
+'''
+
+import os
+import warnings
+
+try:
+ import MySQLdb
+except ImportError:
+ mysqldb_found = False
+else:
+ mysqldb_found = True
+
+
+def get_master_status(cursor):
+ cursor.execute("SHOW MASTER STATUS")
+ masterstatus = cursor.fetchone()
+ return masterstatus
+
+
+def get_slave_status(cursor):
+ cursor.execute("SHOW SLAVE STATUS")
+ slavestatus = cursor.fetchone()
+ return slavestatus
+
+
+def stop_slave(cursor):
+ try:
+ cursor.execute("STOP SLAVE")
+ stopped = True
+ except:
+ stopped = False
+ return stopped
+
+
+def reset_slave(cursor):
+ try:
+ cursor.execute("RESET SLAVE")
+ reset = True
+ except:
+ reset = False
+ return reset
+
+
+def reset_slave_all(cursor):
+ try:
+ cursor.execute("RESET SLAVE ALL")
+ reset = True
+ except:
+ reset = False
+ return reset
+
+
+def start_slave(cursor):
+ try:
+ cursor.execute("START SLAVE")
+ started = True
+ except:
+ started = False
+ return started
+
+
+def changemaster(cursor, chm, chm_params):
+ sql_param = ",".join(chm)
+ query = 'CHANGE MASTER TO %s' % sql_param
+ cursor.execute(query, chm_params)
+
+
+def main():
+ module = AnsibleModule(
+ argument_spec = dict(
+ login_user=dict(default=None),
+ login_password=dict(default=None, no_log=True),
+ login_host=dict(default="localhost"),
+ login_port=dict(default=3306, type='int'),
+ login_unix_socket=dict(default=None),
+ mode=dict(default="getslave", choices=["getmaster", "getslave", "changemaster", "stopslave", "startslave", "resetslave", "resetslaveall"]),
+ master_auto_position=dict(default=False, type='bool'),
+ master_host=dict(default=None),
+ master_user=dict(default=None),
+ master_password=dict(default=None, no_log=True),
+ master_port=dict(default=None, type='int'),
+ master_connect_retry=dict(default=None, type='int'),
+ master_log_file=dict(default=None),
+ master_log_pos=dict(default=None, type='int'),
+ relay_log_file=dict(default=None),
+ relay_log_pos=dict(default=None, type='int'),
+ master_ssl=dict(default=False, type='bool'),
+ master_ssl_ca=dict(default=None),
+ master_ssl_capath=dict(default=None),
+ master_ssl_cert=dict(default=None),
+ master_ssl_key=dict(default=None),
+ master_ssl_cipher=dict(default=None),
+ connect_timeout=dict(default=30, type='int'),
+ config_file=dict(default="~/.my.cnf", type='path'),
+ ssl_cert=dict(default=None),
+ ssl_key=dict(default=None),
+ ssl_ca=dict(default=None),
+ )
+ )
+ user = module.params["login_user"]
+ password = module.params["login_password"]
+ host = module.params["login_host"]
+ port = module.params["login_port"]
+ mode = module.params["mode"]
+ master_host = module.params["master_host"]
+ master_user = module.params["master_user"]
+ master_password = module.params["master_password"]
+ master_port = module.params["master_port"]
+ master_connect_retry = module.params["master_connect_retry"]
+ master_log_file = module.params["master_log_file"]
+ master_log_pos = module.params["master_log_pos"]
+ relay_log_file = module.params["relay_log_file"]
+ relay_log_pos = module.params["relay_log_pos"]
+ master_ssl = module.params["master_ssl"]
+ master_ssl_ca = module.params["master_ssl_ca"]
+ master_ssl_capath = module.params["master_ssl_capath"]
+ master_ssl_cert = module.params["master_ssl_cert"]
+ master_ssl_key = module.params["master_ssl_key"]
+ master_ssl_cipher = module.params["master_ssl_cipher"]
+ master_auto_position = module.params["master_auto_position"]
+ ssl_cert = module.params["ssl_cert"]
+ ssl_key = module.params["ssl_key"]
+ ssl_ca = module.params["ssl_ca"]
+ connect_timeout = module.params['connect_timeout']
+ config_file = module.params['config_file']
+
+ if not mysqldb_found:
+ module.fail_json(msg="the python mysqldb module is required")
+ else:
+ warnings.filterwarnings('error', category=MySQLdb.Warning)
+
+ login_password = module.params["login_password"]
+ login_user = module.params["login_user"]
+
+ try:
+ cursor = mysql_connect(module, login_user, login_password, config_file, ssl_cert, ssl_key, ssl_ca, None, 'MySQLdb.cursors.DictCursor',
+ connect_timeout=connect_timeout)
+ except Exception, e:
+ if os.path.exists(config_file):
+ module.fail_json(msg="unable to connect to database, check login_user and login_password are correct or %s has the credentials. Exception message: %s" % (config_file, e))
+ else:
+ module.fail_json(msg="unable to find %s. Exception message: %s" % (config_file, e))
+
+ if mode in "getmaster":
+ status = get_master_status(cursor)
+ if not isinstance(status, dict):
+ status = dict(Is_Master=False, msg="Server is not configured as mysql master")
+ else:
+ status['Is_Master'] = True
+ module.exit_json(**status)
+
+ elif mode in "getslave":
+ status = get_slave_status(cursor)
+ if not isinstance(status, dict):
+ status = dict(Is_Slave=False, msg="Server is not configured as mysql slave")
+ else:
+ status['Is_Slave'] = True
+ module.exit_json(**status)
+
+ elif mode in "changemaster":
+ chm=[]
+ chm_params = {}
+ result = {}
+ if master_host:
+ chm.append("MASTER_HOST=%(master_host)s")
+ chm_params['master_host'] = master_host
+ if master_user:
+ chm.append("MASTER_USER=%(master_user)s")
+ chm_params['master_user'] = master_user
+ if master_password:
+ chm.append("MASTER_PASSWORD=%(master_password)s")
+ chm_params['master_password'] = master_password
+ if master_port is not None:
+ chm.append("MASTER_PORT=%(master_port)s")
+ chm_params['master_port'] = master_port
+ if master_connect_retry is not None:
+ chm.append("MASTER_CONNECT_RETRY=%(master_connect_retry)s")
+ chm_params['master_connect_retry'] = master_connect_retry
+ if master_log_file:
+ chm.append("MASTER_LOG_FILE=%(master_log_file)s")
+ chm_params['master_log_file'] = master_log_file
+ if master_log_pos is not None:
+ chm.append("MASTER_LOG_POS=%(master_log_pos)s")
+ chm_params['master_log_pos'] = master_log_pos
+ if relay_log_file:
+ chm.append("RELAY_LOG_FILE=%(relay_log_file)s")
+ chm_params['relay_log_file'] = relay_log_file
+ if relay_log_pos is not None:
+ chm.append("RELAY_LOG_POS=%(relay_log_pos)s")
+ chm_params['relay_log_pos'] = relay_log_pos
+ if master_ssl:
+ chm.append("MASTER_SSL=1")
+ if master_ssl_ca:
+ chm.append("MASTER_SSL_CA=%(master_ssl_ca)s")
+ chm_params['master_ssl_ca'] = master_ssl_ca
+ if master_ssl_capath:
+ chm.append("MASTER_SSL_CAPATH=%(master_ssl_capath)s")
+ chm_params['master_ssl_capath'] = master_ssl_capath
+ if master_ssl_cert:
+ chm.append("MASTER_SSL_CERT=%(master_ssl_cert)s")
+ chm_params['master_ssl_cert'] = master_ssl_cert
+ if master_ssl_key:
+ chm.append("MASTER_SSL_KEY=%(master_ssl_key)s")
+ chm_params['master_ssl_key'] = master_ssl_key
+ if master_ssl_cipher:
+ chm.append("MASTER_SSL_CIPHER=%(master_ssl_cipher)s")
+ chm_params['master_ssl_cipher'] = master_ssl_cipher
+ if master_auto_position:
+ chm.append("MASTER_AUTO_POSITION = 1")
+ try:
+ changemaster(cursor, chm, chm_params)
+ except MySQLdb.Warning, e:
+ result['warning'] = str(e)
+ except Exception, e:
+ module.fail_json(msg='%s. Query == CHANGE MASTER TO %s' % (e, chm))
+ result['changed'] = True
+ module.exit_json(**result)
+ elif mode in "startslave":
+ started = start_slave(cursor)
+ if started is True:
+ module.exit_json(msg="Slave started ", changed=True)
+ else:
+ module.exit_json(msg="Slave already started (Or cannot be started)", changed=False)
+ elif mode in "stopslave":
+ stopped = stop_slave(cursor)
+ if stopped is True:
+ module.exit_json(msg="Slave stopped", changed=True)
+ else:
+ module.exit_json(msg="Slave already stopped", changed=False)
+ elif mode in "resetslave":
+ reset = reset_slave(cursor)
+ if reset is True:
+ module.exit_json(msg="Slave reset", changed=True)
+ else:
+ module.exit_json(msg="Slave already reset", changed=False)
+ elif mode in "resetslaveall":
+ reset = reset_slave_all(cursor)
+ if reset is True:
+ module.exit_json(msg="Slave reset", changed=True)
+ else:
+ module.exit_json(msg="Slave already reset", changed=False)
+
+# import module snippets
+from ansible.module_utils.basic import *
+from ansible.module_utils.mysql import *
+main()
+warnings.simplefilter("ignore")
diff --git a/lib/ansible/modules/extras/database/postgresql/__init__.py b/lib/ansible/modules/extras/database/postgresql/__init__.py
new file mode 100644
index 0000000000..e69de29bb2
--- /dev/null
+++ b/lib/ansible/modules/extras/database/postgresql/__init__.py
diff --git a/lib/ansible/modules/extras/database/postgresql/postgresql_ext.py b/lib/ansible/modules/extras/database/postgresql/postgresql_ext.py
new file mode 100644
index 0000000000..684f3b2c32
--- /dev/null
+++ b/lib/ansible/modules/extras/database/postgresql/postgresql_ext.py
@@ -0,0 +1,187 @@
+#!/usr/bin/python
+# -*- coding: utf-8 -*-
+
+# This file is part of Ansible
+#
+# Ansible is free software: you can redistribute it and/or modify
+# it under the terms of the GNU General Public License as published by
+# the Free Software Foundation, either version 3 of the License, or
+# (at your option) any later version.
+#
+# Ansible is distributed in the hope that it will be useful,
+# but WITHOUT ANY WARRANTY; without even the implied warranty of
+# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+# GNU General Public License for more details.
+#
+# You should have received a copy of the GNU General Public License
+# along with Ansible. If not, see <http://www.gnu.org/licenses/>.
+
+DOCUMENTATION = '''
+---
+module: postgresql_ext
+short_description: Add or remove PostgreSQL extensions from a database.
+description:
+ - Add or remove PostgreSQL extensions from a database.
+version_added: "1.9"
+options:
+ name:
+ description:
+ - name of the extension to add or remove
+ required: true
+ default: null
+ db:
+ description:
+ - name of the database to add or remove the extension to/from
+ required: true
+ default: null
+ login_user:
+ description:
+ - The username used to authenticate with
+ required: false
+ default: null
+ login_password:
+ description:
+ - The password used to authenticate with
+ required: false
+ default: null
+ login_host:
+ description:
+ - Host running the database
+ required: false
+ default: localhost
+ port:
+ description:
+ - Database port to connect to.
+ required: false
+ default: 5432
+ state:
+ description:
+ - The database extension state
+ required: false
+ default: present
+ choices: [ "present", "absent" ]
+notes:
+ - The default authentication assumes that you are either logging in as or sudo'ing to the C(postgres) account on the host.
+ - This module uses I(psycopg2), a Python PostgreSQL database adapter. You must ensure that psycopg2 is installed on
+ the host before using this module. If the remote host is the PostgreSQL server (which is the default case), then PostgreSQL must also be installed on the remote host. For Ubuntu-based systems, install the C(postgresql), C(libpq-dev), and C(python-psycopg2) packages on the remote host before using this module.
+requirements: [ psycopg2 ]
+author: "Daniel Schep (@dschep)"
+'''
+
+EXAMPLES = '''
+# Adds postgis to the database "acme"
+- postgresql_ext: name=postgis db=acme
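+
+# Illustrative (not among the original examples): removes postgis again using
+# the documented state=absent
+- postgresql_ext: name=postgis db=acme state=absent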
+'''
+
+try:
+ import psycopg2
+ import psycopg2.extras
+except ImportError:
+ postgresqldb_found = False
+else:
+ postgresqldb_found = True
+
+class NotSupportedError(Exception):
+ pass
+
+
+# ===========================================
+# PostgreSQL module specific support methods.
+#
+
+def ext_exists(cursor, ext):
+ query = "SELECT * FROM pg_extension WHERE extname=%(ext)s"
+ cursor.execute(query, {'ext': ext})
+ return cursor.rowcount == 1
+
+def ext_delete(cursor, ext):
+ if ext_exists(cursor, ext):
+ query = "DROP EXTENSION \"%s\"" % ext
+ cursor.execute(query)
+ return True
+ else:
+ return False
+
+def ext_create(cursor, ext):
+ if not ext_exists(cursor, ext):
+ query = 'CREATE EXTENSION "%s"' % ext
+ cursor.execute(query)
+ return True
+ else:
+ return False
+
+# ===========================================
+# Module execution.
+#
+
+def main():
+ module = AnsibleModule(
+ argument_spec=dict(
+ login_user=dict(default="postgres"),
+ login_password=dict(default=""),
+ login_host=dict(default=""),
+ port=dict(default="5432"),
+ db=dict(required=True),
+ ext=dict(required=True, aliases=['name']),
+ state=dict(default="present", choices=["absent", "present"]),
+ ),
+ supports_check_mode = True
+ )
+
+ if not postgresqldb_found:
+ module.fail_json(msg="the python psycopg2 module is required")
+
+ db = module.params["db"]
+ ext = module.params["ext"]
+ port = module.params["port"]
+ state = module.params["state"]
+ changed = False
+
+ # To use default values, keyword arguments must be absent, so
+ # check which values are empty and don't include in the **kw
+ # dictionary
+ params_map = {
+ "login_host":"host",
+ "login_user":"user",
+ "login_password":"password",
+ "port":"port"
+ }
+ kw = dict( (params_map[k], v) for (k, v) in module.params.iteritems()
+ if k in params_map and v != '' )
+ try:
+ db_connection = psycopg2.connect(database=db, **kw)
+ # Enable autocommit so we can create databases
+ if psycopg2.__version__ >= '2.4.2':
+ db_connection.autocommit = True
+ else:
+ db_connection.set_isolation_level(psycopg2
+ .extensions
+ .ISOLATION_LEVEL_AUTOCOMMIT)
+ cursor = db_connection.cursor(
+ cursor_factory=psycopg2.extras.DictCursor)
+ except Exception, e:
+ module.fail_json(msg="unable to connect to database: %s" % e)
+
+ try:
+ if module.check_mode:
+ if state == "present":
+ changed = not ext_exists(cursor, ext)
+ elif state == "absent":
+ changed = ext_exists(cursor, ext)
+ else:
+ if state == "absent":
+ changed = ext_delete(cursor, ext)
+
+ elif state == "present":
+ changed = ext_create(cursor, ext)
+ except NotSupportedError, e:
+ module.fail_json(msg=str(e))
+ except Exception, e:
+ module.fail_json(msg="Database query failed: %s" % e)
+
+ module.exit_json(changed=changed, db=db, ext=ext)
+
+# import module snippets
+from ansible.module_utils.basic import *
+main()
+
diff --git a/lib/ansible/modules/extras/database/postgresql/postgresql_lang.py b/lib/ansible/modules/extras/database/postgresql/postgresql_lang.py
new file mode 100644
index 0000000000..ccee93194e
--- /dev/null
+++ b/lib/ansible/modules/extras/database/postgresql/postgresql_lang.py
@@ -0,0 +1,270 @@
+#!/usr/bin/python
+# -*- coding: utf-8 -*-
+#
+# (c) 2014, Jens Depuydt <http://www.jensd.be>
+#
+# This file is part of Ansible
+#
+# Ansible is free software: you can redistribute it and/or modify
+# it under the terms of the GNU General Public License as published by
+# the Free Software Foundation, either version 3 of the License, or
+# (at your option) any later version.
+#
+# Ansible is distributed in the hope that it will be useful,
+# but WITHOUT ANY WARRANTY; without even the implied warranty of
+# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+# GNU General Public License for more details.
+#
+# You should have received a copy of the GNU General Public License
+# along with Ansible. If not, see <http://www.gnu.org/licenses/>.
+DOCUMENTATION = '''
+---
+module: postgresql_lang
+short_description: Adds, removes or changes procedural languages in a PostgreSQL database.
+description:
+ - Adds, removes or changes procedural languages in a PostgreSQL database.
+ - This module allows you to add a language, remove a language or change the trust
+ relationship with a PostgreSQL database. The module can be run on the machine
+ where it is executed or on a remote host.
+ - When removing a language from a database, it is possible that dependencies prevent
+ the language from being removed. In that case, you can specify C(cascade) to
+ automatically drop objects that depend on the language (such as functions in the
+ language). In case the language can't be deleted because it is required by the
+ database system, you can specify C(fail_on_drop=no) to ignore the error.
+ - Be careful when marking a language as trusted, since this could be a potential
+ security breach. Untrusted languages allow only users with the PostgreSQL superuser
+ privilege to use this language to create new functions.
+version_added: "1.7"
+options:
+ lang:
+ description:
+ - name of the procedural language to add, remove or change
+ required: true
+ default: null
+ trust:
+ description:
+ - make this language trusted for the selected db
+ required: false
+ default: no
+ choices: [ "yes", "no" ]
+ db:
+ description:
+ - name of database where the language will be added, removed or changed
+ required: false
+ default: null
+ force_trust:
+ description:
+ - marks the language as trusted, even if it's marked as untrusted in pg_pltemplate.
+ - use with care!
+ required: false
+ default: no
+ choices: [ "yes", "no" ]
+ fail_on_drop:
+ description:
+ - if C(yes), fail when removing a language. Otherwise just log and continue
+ - in some cases, it is not possible to remove a language (used by the db-system). When dependencies block the removal, consider using C(cascade).
+ required: false
+ default: 'yes'
+ choices: [ "yes", "no" ]
+ cascade:
+ description:
+ - when dropping a language, also delete objects that depend on this language.
+ - only used when C(state=absent).
+ required: false
+ default: no
+ choices: [ "yes", "no" ]
+ port:
+ description:
+ - Database port to connect to.
+ required: false
+ default: 5432
+ login_user:
+ description:
+ - User used to authenticate with PostgreSQL
+ required: false
+ default: postgres
+ login_password:
+ description:
+ - Password used to authenticate with PostgreSQL (must match C(login_user))
+ required: false
+ default: null
+ login_host:
+ description:
+ - Host running PostgreSQL where you want to execute the actions.
+ required: false
+ default: localhost
+ state:
+ description:
+ - The state of the language for the selected database
+ required: false
+ default: present
+ choices: [ "present", "absent" ]
+notes:
+ - The default authentication assumes that you are either logging in as or
+ sudo'ing to the postgres account on the host.
+ - This module uses psycopg2, a Python PostgreSQL database adapter. You must
+ ensure that psycopg2 is installed on the host before using this module. If
+ the remote host is the PostgreSQL server (which is the default case), then
+ PostgreSQL must also be installed on the remote host. For Ubuntu-based
+ systems, install the postgresql, libpq-dev, and python-psycopg2 packages
+ on the remote host before using this module.
+requirements: [ psycopg2 ]
+author: "Jens Depuydt (@jensdepuydt)"
+'''
+
+EXAMPLES = '''
+# Add language pltclu to database testdb if it doesn't exist:
+- postgresql_lang: db=testdb lang=pltclu state=present
+
+# Add language pltclu to database testdb if it doesn't exist and mark it as trusted:
+# Marks the language as trusted if it exists but isn't trusted yet
+# force_trust makes sure that the language will be marked as trusted
+- postgresql_lang: db=testdb lang=pltclu state=present trust=yes force_trust=yes
+
+# Remove language pltclu from database testdb:
+- postgresql_lang: db=testdb lang=pltclu state=absent
+
+# Remove language pltclu from database testdb and remove all dependencies:
+- postgresql_lang: db=testdb lang=pltclu state=absent cascade=yes
+
+# Remove language pltclu from database testdb but ignore errors if something prevents the removal:
+- postgresql_lang: db=testdb lang=pltclu state=absent fail_on_drop=no
+'''
+
+try:
+ import psycopg2
+except ImportError:
+ postgresqldb_found = False
+else:
+ postgresqldb_found = True
+
+def lang_exists(cursor, lang):
+ """Checks if language exists for db"""
+ query = "SELECT lanname FROM pg_language WHERE lanname='%s'" % lang
+ cursor.execute(query)
+ return cursor.rowcount > 0
+
+def lang_istrusted(cursor, lang):
+ """Checks if language is trusted for db"""
+ query = "SELECT lanpltrusted FROM pg_language WHERE lanname='%s'" % lang
+ cursor.execute(query)
+ return cursor.fetchone()[0]
+
+def lang_altertrust(cursor, lang, trust):
+ """Changes if language is trusted for db"""
+ query = "UPDATE pg_language SET lanpltrusted = %s WHERE lanname=%s"
+ cursor.execute(query, (trust, lang))
+ return True
+
+def lang_add(cursor, lang, trust):
+ """Adds language for db"""
+ if trust:
+ query = 'CREATE TRUSTED LANGUAGE "%s"' % lang
+ else:
+ query = 'CREATE LANGUAGE "%s"' % lang
+ cursor.execute(query)
+ return True
+
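+# Dropping is wrapped in a savepoint so a failed DROP LANGUAGE does not abort
+# the surrounding transaction; on error, roll back to the savepoint and report
+# failure to the caller.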
+def lang_drop(cursor, lang, cascade):
+ """Drops language for db"""
+ cursor.execute("SAVEPOINT ansible_pgsql_lang_drop")
+ try:
+ if cascade:
+ cursor.execute("DROP LANGUAGE \"%s\" CASCADE" % lang)
+ else:
+ cursor.execute("DROP LANGUAGE \"%s\"" % lang)
+ except:
+ cursor.execute("ROLLBACK TO SAVEPOINT ansible_pgsql_lang_drop")
+ cursor.execute("RELEASE SAVEPOINT ansible_pgsql_lang_drop")
+ return False
+ cursor.execute("RELEASE SAVEPOINT ansible_pgsql_lang_drop")
+ return True
+
+def main():
+ module = AnsibleModule(
+ argument_spec=dict(
+ login_user=dict(default="postgres"),
+ login_password=dict(default=""),
+ login_host=dict(default=""),
+ db=dict(required=True),
+ port=dict(default='5432'),
+ lang=dict(required=True),
+ state=dict(default="present", choices=["absent", "present"]),
+ trust=dict(type='bool', default='no'),
+ force_trust=dict(type='bool', default='no'),
+ cascade=dict(type='bool', default='no'),
+ fail_on_drop=dict(type='bool', default='yes'),
+ ),
+ supports_check_mode = True
+ )
+
+ db = module.params["db"]
+ port = module.params["port"]
+ lang = module.params["lang"]
+ state = module.params["state"]
+ trust = module.params["trust"]
+ force_trust = module.params["force_trust"]
+ cascade = module.params["cascade"]
+ fail_on_drop = module.params["fail_on_drop"]
+
+ if not postgresqldb_found:
+ module.fail_json(msg="the python psycopg2 module is required")
+
+ params_map = {
+ "login_host":"host",
+ "login_user":"user",
+ "login_password":"password",
+ "port":"port",
+ "db":"database"
+ }
+ kw = dict( (params_map[k], v) for (k, v) in module.params.iteritems()
+ if k in params_map and v != "" )
+ try:
+ db_connection = psycopg2.connect(**kw)
+ cursor = db_connection.cursor()
+ except Exception, e:
+ module.fail_json(msg="unable to connect to database: %s" % e)
+ changed = False
+ lang_dropped = False
+ kw = dict(db=db, lang=lang, trust=trust)
+
+ if state == "present":
+ if lang_exists(cursor, lang):
+ lang_trusted = lang_istrusted(cursor, lang)
+ if (lang_trusted and not trust) or (not lang_trusted and trust):
+ if module.check_mode:
+ changed = True
+ else:
+ changed = lang_altertrust(cursor, lang, trust)
+ else:
+ if module.check_mode:
+ changed = True
+ else:
+ changed = lang_add(cursor, lang, trust)
+ if force_trust:
+ changed = lang_altertrust(cursor, lang, trust)
+
+ else:
+ if lang_exists(cursor, lang):
+ if module.check_mode:
+ changed = True
+ kw['lang_dropped'] = True
+ else:
+ changed = lang_drop(cursor, lang, cascade)
+ if fail_on_drop and not changed:
+ msg = "unable to drop language, use cascade to delete dependencies or fail_on_drop=no to ignore"
+ module.fail_json(msg=msg)
+ kw['lang_dropped'] = changed
+
+ if changed:
+ if module.check_mode:
+ db_connection.rollback()
+ else:
+ db_connection.commit()
+
+ kw['changed'] = changed
+ module.exit_json(**kw)
+
+# import module snippets
+from ansible.module_utils.basic import *
+main()
diff --git a/lib/ansible/modules/extras/database/vertica/__init__.py b/lib/ansible/modules/extras/database/vertica/__init__.py
new file mode 100644
index 0000000000..e69de29bb2
--- /dev/null
+++ b/lib/ansible/modules/extras/database/vertica/__init__.py
diff --git a/lib/ansible/modules/extras/database/vertica/vertica_configuration.py b/lib/ansible/modules/extras/database/vertica/vertica_configuration.py
new file mode 100644
index 0000000000..ed75667b13
--- /dev/null
+++ b/lib/ansible/modules/extras/database/vertica/vertica_configuration.py
@@ -0,0 +1,194 @@
+#!/usr/bin/python
+# -*- coding: utf-8 -*-
+
+# This file is part of Ansible
+#
+# Ansible is free software: you can redistribute it and/or modify
+# it under the terms of the GNU General Public License as published by
+# the Free Software Foundation, either version 3 of the License, or
+# (at your option) any later version.
+#
+# Ansible is distributed in the hope that it will be useful,
+# but WITHOUT ANY WARRANTY; without even the implied warranty of
+# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+# GNU General Public License for more details.
+#
+# You should have received a copy of the GNU General Public License
+# along with Ansible. If not, see <http://www.gnu.org/licenses/>.
+
+DOCUMENTATION = """
+---
+module: vertica_configuration
+version_added: '2.0'
+short_description: Updates Vertica configuration parameters.
+description:
+ - Updates Vertica configuration parameters.
+options:
+ name:
+ description:
+ - Name of the parameter to update.
+ required: true
+ value:
+ description:
+ - Value of the parameter to be set.
+ required: true
+ db:
+ description:
+ - Name of the Vertica database.
+ required: false
+ default: null
+ cluster:
+ description:
+ - Name of the Vertica cluster.
+ required: false
+ default: localhost
+ port:
+ description:
+ - Vertica cluster port to connect to.
+ required: false
+ default: 5433
+ login_user:
+ description:
+ - The username used to authenticate with.
+ required: false
+ default: dbadmin
+ login_password:
+ description:
+ - The password used to authenticate with.
+ required: false
+ default: null
+notes:
+ - The default authentication assumes that you are either logging in as or sudo'ing
+ to the C(dbadmin) account on the host.
+ - This module uses C(pyodbc), a Python ODBC database adapter. You must ensure
+ that C(unixODBC) and C(pyodbc) are installed on the host and properly configured.
+ - Configuring C(unixODBC) for Vertica requires C(Driver = /opt/vertica/lib64/libverticaodbc.so)
+ to be added to the C(Vertica) section of either C(/etc/odbcinst.ini) or C($HOME/.odbcinst.ini)
+ and both C(ErrorMessagesPath = /opt/vertica/lib64) and C(DriverManagerEncoding = UTF-16)
+ to be added to the C(Driver) section of either C(/etc/vertica.ini) or C($HOME/.vertica.ini).
+requirements: [ 'unixODBC', 'pyodbc' ]
+author: "Dariusz Owczarek (@dareko)"
+"""
+
+EXAMPLES = """
+- name: updating failovertostandbyafter
+ vertica_configuration: name=failovertostandbyafter value='8 hours'
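+
+# Illustrative (not among the original examples): passing explicit connection
+# details; the db, cluster and port values are placeholders.
+- name: updating a parameter on a specific database and cluster
+ vertica_configuration: name=failovertostandbyafter value='8 hours' db=db_name cluster=cluster_name port=5433 login_user=dbadmin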
+"""
+
+try:
+ import pyodbc
+except ImportError:
+ pyodbc_found = False
+else:
+ pyodbc_found = True
+
+class NotSupportedError(Exception):
+ pass
+
+class CannotDropError(Exception):
+ pass
+
+# module specific functions
+
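+# Note: the "(? = '' or <column> ilike ?)" predicate used in the queries below
+# makes the filter optional; an empty string matches every row, while any other
+# value is applied as an ILIKE pattern.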
+def get_configuration_facts(cursor, parameter_name=''):
+ facts = {}
+ cursor.execute("""
+ select c.parameter_name, c.current_value, c.default_value
+ from configuration_parameters c
+ where c.node_name = 'ALL'
+ and (? = '' or c.parameter_name ilike ?)
+ """, parameter_name, parameter_name)
+ while True:
+ rows = cursor.fetchmany(100)
+ if not rows:
+ break
+ for row in rows:
+ facts[row.parameter_name.lower()] = {
+ 'parameter_name': row.parameter_name,
+ 'current_value': row.current_value,
+ 'default_value': row.default_value}
+ return facts
+
+def check(configuration_facts, parameter_name, current_value):
+ parameter_key = parameter_name.lower()
+ if current_value and current_value.lower() != configuration_facts[parameter_key]['current_value'].lower():
+ return False
+ return True
+
+def present(configuration_facts, cursor, parameter_name, current_value):
+ parameter_key = parameter_name.lower()
+ changed = False
+ if current_value and current_value.lower() != configuration_facts[parameter_key]['current_value'].lower():
+ cursor.execute("select set_config_parameter('{0}', '{1}')".format(parameter_name, current_value))
+ changed = True
+ if changed:
+ configuration_facts.update(get_configuration_facts(cursor, parameter_name))
+ return changed
+
+# module logic
+
+def main():
+
+ module = AnsibleModule(
+ argument_spec=dict(
+ parameter=dict(required=True, aliases=['name']),
+ value=dict(default=None),
+ db=dict(default=None),
+ cluster=dict(default='localhost'),
+ port=dict(default='5433'),
+ login_user=dict(default='dbadmin'),
+ login_password=dict(default=None),
+ ), supports_check_mode = True)
+
+ if not pyodbc_found:
+ module.fail_json(msg="The python pyodbc module is required.")
+
+ parameter_name = module.params['parameter']
+ current_value = module.params['value']
+ db = ''
+ if module.params['db']:
+ db = module.params['db']
+
+ changed = False
+
+ try:
+ dsn = (
+ "Driver=Vertica;"
+ "Server={0};"
+ "Port={1};"
+ "Database={2};"
+ "User={3};"
+ "Password={4};"
+ "ConnectionLoadBalance={5}"
+ ).format(module.params['cluster'], module.params['port'], db,
+ module.params['login_user'], module.params['login_password'], 'true')
+ db_conn = pyodbc.connect(dsn, autocommit=True)
+ cursor = db_conn.cursor()
+ except Exception, e:
+ module.fail_json(msg="Unable to connect to database: {0}.".format(e))
+
+ try:
+ configuration_facts = get_configuration_facts(cursor)
+ if module.check_mode:
+ changed = not check(configuration_facts, parameter_name, current_value)
+ else:
+ try:
+ changed = present(configuration_facts, cursor, parameter_name, current_value)
+ except pyodbc.Error, e:
+ module.fail_json(msg=str(e))
+ except NotSupportedError, e:
+ module.fail_json(msg=str(e), ansible_facts={'vertica_configuration': configuration_facts})
+ except CannotDropError, e:
+ module.fail_json(msg=str(e), ansible_facts={'vertica_configuration': configuration_facts})
+ except SystemExit:
+ # avoid catching this on python 2.4
+ raise
+ except Exception, e:
+ module.fail_json(msg=str(e))
+
+ module.exit_json(changed=changed, parameter=parameter_name, ansible_facts={'vertica_configuration': configuration_facts})
+
+# import ansible utilities
+from ansible.module_utils.basic import *
+if __name__ == '__main__':
+ main()
diff --git a/lib/ansible/modules/extras/database/vertica/vertica_facts.py b/lib/ansible/modules/extras/database/vertica/vertica_facts.py
new file mode 100644
index 0000000000..705b74a04f
--- /dev/null
+++ b/lib/ansible/modules/extras/database/vertica/vertica_facts.py
@@ -0,0 +1,276 @@
+#!/usr/bin/python
+# -*- coding: utf-8 -*-
+
+# This file is part of Ansible
+#
+# Ansible is free software: you can redistribute it and/or modify
+# it under the terms of the GNU General Public License as published by
+# the Free Software Foundation, either version 3 of the License, or
+# (at your option) any later version.
+#
+# Ansible is distributed in the hope that it will be useful,
+# but WITHOUT ANY WARRANTY; without even the implied warranty of
+# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+# GNU General Public License for more details.
+#
+# You should have received a copy of the GNU General Public License
+# along with Ansible. If not, see <http://www.gnu.org/licenses/>.
+
+DOCUMENTATION = """
+---
+module: vertica_facts
+version_added: '2.0'
+short_description: Gathers Vertica database facts.
+description:
+ - Gathers Vertica database facts.
+options:
+ cluster:
+ description:
+ - Name of the Vertica cluster.
+ required: false
+ default: localhost
+ port:
+ description:
+ - Database port to connect to.
+ required: false
+ default: 5433
+ db:
+ description:
+ - Name of the Vertica database.
+ required: false
+ default: null
+ login_user:
+ description:
+ - The username used to authenticate with.
+ required: false
+ default: dbadmin
+ login_password:
+ description:
+ - The password used to authenticate with.
+ required: false
+ default: null
+notes:
+ - The default authentication assumes that you are either logging in as or sudo'ing
+ to the C(dbadmin) account on the host.
+ - This module uses C(pyodbc), a Python ODBC database adapter. You must ensure
+ that C(unixODBC) and C(pyodbc) are installed on the host and properly configured.
+ - Configuring C(unixODBC) for Vertica requires C(Driver = /opt/vertica/lib64/libverticaodbc.so)
+ to be added to the C(Vertica) section of either C(/etc/odbcinst.ini) or C($HOME/.odbcinst.ini)
+ and both C(ErrorMessagesPath = /opt/vertica/lib64) and C(DriverManagerEncoding = UTF-16)
+ to be added to the C(Driver) section of either C(/etc/vertica.ini) or C($HOME/.vertica.ini).
+requirements: [ 'unixODBC', 'pyodbc' ]
+author: "Dariusz Owczarek (@dareko)"
+"""
+
+EXAMPLES = """
+- name: gathering vertica facts
+ vertica_facts: db=db_name
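+
+# Illustrative (not among the original examples): connecting to a specific
+# cluster; the host name is a placeholder.
+- name: gathering facts from a remote cluster
+ vertica_facts: db=db_name cluster=vertica01.example.com login_user=dbadmin
+
+- name: showing the gathered node facts
+ debug: var=vertica_nodes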
+"""
+
+try:
+ import pyodbc
+except ImportError:
+ pyodbc_found = False
+else:
+ pyodbc_found = True
+
+class NotSupportedError(Exception):
+ pass
+
+# module specific functions
+
+def get_schema_facts(cursor, schema=''):
+ facts = {}
+ cursor.execute("""
+ select schema_name, schema_owner, create_time
+ from schemata
+ where not is_system_schema and schema_name not in ('public')
+ and (? = '' or schema_name ilike ?)
+ """, schema, schema)
+ while True:
+ rows = cursor.fetchmany(100)
+ if not rows:
+ break
+ for row in rows:
+ facts[row.schema_name.lower()] = {
+ 'name': row.schema_name,
+ 'owner': row.schema_owner,
+ 'create_time': str(row.create_time),
+ 'usage_roles': [],
+ 'create_roles': []}
+ cursor.execute("""
+ select g.object_name as schema_name, r.name as role_name,
+ lower(g.privileges_description) privileges_description
+ from roles r join grants g
+ on g.grantee = r.name and g.object_type='SCHEMA'
+ and g.privileges_description like '%USAGE%'
+ and g.grantee not in ('public', 'dbadmin')
+ and (? = '' or g.object_name ilike ?)
+ """, schema, schema)
+ while True:
+ rows = cursor.fetchmany(100)
+ if not rows:
+ break
+ for row in rows:
+ schema_key = row.schema_name.lower()
+ if 'create' in row.privileges_description:
+ facts[schema_key]['create_roles'].append(row.role_name)
+ else:
+ facts[schema_key]['usage_roles'].append(row.role_name)
+ return facts
+
+def get_user_facts(cursor, user=''):
+ facts = {}
+ cursor.execute("""
+ select u.user_name, u.is_locked, u.lock_time,
+ p.password, p.acctexpired as is_expired,
+ u.profile_name, u.resource_pool,
+ u.all_roles, u.default_roles
+ from users u join password_auditor p on p.user_id = u.user_id
+ where not u.is_super_user
+ and (? = '' or u.user_name ilike ?)
+ """, user, user)
+ while True:
+ rows = cursor.fetchmany(100)
+ if not rows:
+ break
+ for row in rows:
+ user_key = row.user_name.lower()
+ facts[user_key] = {
+ 'name': row.user_name,
+ 'locked': str(row.is_locked),
+ 'password': row.password,
+ 'expired': str(row.is_expired),
+ 'profile': row.profile_name,
+ 'resource_pool': row.resource_pool,
+ 'roles': [],
+ 'default_roles': []}
+ if row.is_locked:
+ facts[user_key]['locked_time'] = str(row.lock_time)
+ if row.all_roles:
+ facts[user_key]['roles'] = row.all_roles.replace(' ', '').split(',')
+ if row.default_roles:
+ facts[user_key]['default_roles'] = row.default_roles.replace(' ', '').split(',')
+ return facts
+
+def get_role_facts(cursor, role=''):
+ facts = {}
+ cursor.execute("""
+ select r.name, r.assigned_roles
+ from roles r
+ where (? = '' or r.name ilike ?)
+ """, role, role)
+ while True:
+ rows = cursor.fetchmany(100)
+ if not rows:
+ break
+ for row in rows:
+ role_key = row.name.lower()
+ facts[role_key] = {
+ 'name': row.name,
+ 'assigned_roles': []}
+ if row.assigned_roles:
+ facts[role_key]['assigned_roles'] = row.assigned_roles.replace(' ', '').split(',')
+ return facts
+
+def get_configuration_facts(cursor, parameter=''):
+ facts = {}
+ cursor.execute("""
+ select c.parameter_name, c.current_value, c.default_value
+ from configuration_parameters c
+ where c.node_name = 'ALL'
+ and (? = '' or c.parameter_name ilike ?)
+ """, parameter, parameter)
+ while True:
+ rows = cursor.fetchmany(100)
+ if not rows:
+ break
+ for row in rows:
+ facts[row.parameter_name.lower()] = {
+ 'parameter_name': row.parameter_name,
+ 'current_value': row.current_value,
+ 'default_value': row.default_value}
+ return facts
+
+def get_node_facts(cursor):
+ facts = {}
+ cursor.execute("""
+ select node_name, node_address, export_address, node_state, node_type,
+ catalog_path
+ from nodes
+ """)
+ while True:
+ rows = cursor.fetchmany(100)
+ if not rows:
+ break
+ for row in rows:
+ facts[row.node_address] = {
+ 'node_name': row.node_name,
+ 'export_address': row.export_address,
+ 'node_state': row.node_state,
+ 'node_type': row.node_type,
+ 'catalog_path': row.catalog_path}
+ return facts
+
+# module logic
+
+def main():
+
+ module = AnsibleModule(
+ argument_spec=dict(
+ cluster=dict(default='localhost'),
+ port=dict(default='5433'),
+ db=dict(default=None),
+ login_user=dict(default='dbadmin'),
+ login_password=dict(default=None),
+ ), supports_check_mode = True)
+
+ if not pyodbc_found:
+ module.fail_json(msg="The python pyodbc module is required.")
+
+ db = ''
+ if module.params['db']:
+ db = module.params['db']
+
+ changed = False
+
+ try:
+ dsn = (
+ "Driver=Vertica;"
+ "Server={0};"
+ "Port={1};"
+ "Database={2};"
+ "User={3};"
+ "Password={4};"
+ "ConnectionLoadBalance={5}"
+ ).format(module.params['cluster'], module.params['port'], db,
+ module.params['login_user'], module.params['login_password'], 'true')
+ db_conn = pyodbc.connect(dsn, autocommit=True)
+ cursor = db_conn.cursor()
+ except Exception, e:
+ module.fail_json(msg="Unable to connect to database: {0}.".format(e))
+
+ try:
+ schema_facts = get_schema_facts(cursor)
+ user_facts = get_user_facts(cursor)
+ role_facts = get_role_facts(cursor)
+ configuration_facts = get_configuration_facts(cursor)
+ node_facts = get_node_facts(cursor)
+ module.exit_json(changed=False,
+ ansible_facts={'vertica_schemas': schema_facts,
+ 'vertica_users': user_facts,
+ 'vertica_roles': role_facts,
+ 'vertica_configuration': configuration_facts,
+ 'vertica_nodes': node_facts})
+ except NotSupportedError, e:
+ module.fail_json(msg=str(e))
+ except SystemExit:
+ # avoid catching this on python 2.4
+ raise
+ except Exception, e:
+ module.fail_json(msg=str(e))
+
+# import ansible utilities
+from ansible.module_utils.basic import *
+if __name__ == '__main__':
+ main()
diff --git a/lib/ansible/modules/extras/database/vertica/vertica_role.py b/lib/ansible/modules/extras/database/vertica/vertica_role.py
new file mode 100644
index 0000000000..b7a0a5d66e
--- /dev/null
+++ b/lib/ansible/modules/extras/database/vertica/vertica_role.py
@@ -0,0 +1,243 @@
+#!/usr/bin/python
+# -*- coding: utf-8 -*-
+
+# This file is part of Ansible
+#
+# Ansible is free software: you can redistribute it and/or modify
+# it under the terms of the GNU General Public License as published by
+# the Free Software Foundation, either version 3 of the License, or
+# (at your option) any later version.
+#
+# Ansible is distributed in the hope that it will be useful,
+# but WITHOUT ANY WARRANTY; without even the implied warranty of
+# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+# GNU General Public License for more details.
+#
+# You should have received a copy of the GNU General Public License
+# along with Ansible. If not, see <http://www.gnu.org/licenses/>.
+
+DOCUMENTATION = """
+---
+module: vertica_role
+version_added: '2.0'
+short_description: Adds or removes Vertica database roles and assigns roles to them.
+description:
+ - Adds or removes a Vertica database role and, optionally, assigns other roles to it.
+options:
+ name:
+ description:
+ - Name of the role to add or remove.
+ required: true
+ assigned_roles:
+ description:
+ - Comma separated list of roles to assign to the role.
+ aliases: ['assigned_role']
+ required: false
+ default: null
+ state:
+ description:
+ - Whether to create C(present) or drop C(absent) a role.
+ required: false
+ choices: ['present', 'absent']
+ default: present
+ db:
+ description:
+ - Name of the Vertica database.
+ required: false
+ default: null
+ cluster:
+ description:
+ - Name of the Vertica cluster.
+ required: false
+ default: localhost
+ port:
+ description:
+ - Vertica cluster port to connect to.
+ required: false
+ default: 5433
+ login_user:
+ description:
+ - The username used to authenticate with.
+ required: false
+ default: dbadmin
+ login_password:
+ description:
+ - The password used to authenticate with.
+ required: false
+ default: null
+notes:
+ - The default authentication assumes that you are either logging in as or sudo'ing
+ to the C(dbadmin) account on the host.
+ - This module uses C(pyodbc), a Python ODBC database adapter. You must ensure
+ that C(unixODBC) and C(pyodbc) are installed on the host and properly configured.
+ - Configuring C(unixODBC) for Vertica requires C(Driver = /opt/vertica/lib64/libverticaodbc.so)
+ to be added to the C(Vertica) section of either C(/etc/odbcinst.ini) or C($HOME/.odbcinst.ini)
+ and both C(ErrorMessagesPath = /opt/vertica/lib64) and C(DriverManagerEncoding = UTF-16)
+ to be added to the C(Driver) section of either C(/etc/vertica.ini) or C($HOME/.vertica.ini).
+requirements: [ 'unixODBC', 'pyodbc' ]
+author: "Dariusz Owczarek (@dareko)"
+"""
+
+EXAMPLES = """
+- name: creating a new vertica role
+ vertica_role: name=role_name db=db_name state=present
+
+- name: creating a new vertica role with other role assigned
+ vertica_role: name=role_name assigned_role=other_role_name state=present
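+
+# Illustrative (not among the original examples): dropping a role with the
+# documented state=absent
+- name: dropping a vertica role
+ vertica_role: name=role_name db=db_name state=absent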
+"""
+
+try:
+ import pyodbc
+except ImportError:
+ pyodbc_found = False
+else:
+ pyodbc_found = True
+
+class NotSupportedError(Exception):
+ pass
+
+class CannotDropError(Exception):
+ pass
+
+# module specific functions
+
+def get_role_facts(cursor, role=''):
+ facts = {}
+ cursor.execute("""
+ select r.name, r.assigned_roles
+ from roles r
+ where (? = '' or r.name ilike ?)
+ """, role, role)
+ while True:
+ rows = cursor.fetchmany(100)
+ if not rows:
+ break
+ for row in rows:
+ role_key = row.name.lower()
+ facts[role_key] = {
+ 'name': row.name,
+ 'assigned_roles': []}
+ if row.assigned_roles:
+ facts[role_key]['assigned_roles'] = row.assigned_roles.replace(' ', '').split(',')
+ return facts
+
+def update_roles(role_facts, cursor, role,
+ existing, required):
+ for assigned_role in set(existing) - set(required):
+ cursor.execute("revoke {0} from {1}".format(assigned_role, role))
+ for assigned_role in set(required) - set(existing):
+ cursor.execute("grant {0} to {1}".format(assigned_role, role))
+
+def check(role_facts, role, assigned_roles):
+ role_key = role.lower()
+ if role_key not in role_facts:
+ return False
+ if assigned_roles and cmp(sorted(assigned_roles), sorted(role_facts[role_key]['assigned_roles'])) != 0:
+ return False
+ return True
+
+def present(role_facts, cursor, role, assigned_roles):
+ role_key = role.lower()
+ if role_key not in role_facts:
+ cursor.execute("create role {0}".format(role))
+ update_roles(role_facts, cursor, role, [], assigned_roles)
+ role_facts.update(get_role_facts(cursor, role))
+ return True
+ else:
+ changed = False
+ if assigned_roles and cmp(sorted(assigned_roles), sorted(role_facts[role_key]['assigned_roles'])) != 0:
+ update_roles(role_facts, cursor, role,
+ role_facts[role_key]['assigned_roles'], assigned_roles)
+ changed = True
+ if changed:
+ role_facts.update(get_role_facts(cursor, role))
+ return changed
+
+def absent(role_facts, cursor, role, assigned_roles):
+ role_key = role.lower()
+ if role_key in role_facts:
+ update_roles(role_facts, cursor, role,
+ role_facts[role_key]['assigned_roles'], [])
+ cursor.execute("drop role {0} cascade".format(role_facts[role_key]['name']))
+ del role_facts[role_key]
+ return True
+ else:
+ return False
+
+# module logic
+
+def main():
+
+ module = AnsibleModule(
+ argument_spec=dict(
+ role=dict(required=True, aliases=['name']),
+ assigned_roles=dict(default=None, aliases=['assigned_role']),
+ state=dict(default='present', choices=['absent', 'present']),
+ db=dict(default=None),
+ cluster=dict(default='localhost'),
+ port=dict(default='5433'),
+ login_user=dict(default='dbadmin'),
+ login_password=dict(default=None),
+ ), supports_check_mode = True)
+
+ if not pyodbc_found:
+ module.fail_json(msg="The python pyodbc module is required.")
+
+ role = module.params['role']
+ assigned_roles = []
+ if module.params['assigned_roles']:
+ assigned_roles = module.params['assigned_roles'].split(',')
+ assigned_roles = filter(None, assigned_roles)
+ state = module.params['state']
+ db = ''
+ if module.params['db']:
+ db = module.params['db']
+
+ changed = False
+
+ try:
+ dsn = (
+ "Driver=Vertica;"
+ "Server={0};"
+ "Port={1};"
+ "Database={2};"
+ "User={3};"
+ "Password={4};"
+ "ConnectionLoadBalance={5}"
+ ).format(module.params['cluster'], module.params['port'], db,
+ module.params['login_user'], module.params['login_password'], 'true')
+ db_conn = pyodbc.connect(dsn, autocommit=True)
+ cursor = db_conn.cursor()
+ except Exception, e:
+ module.fail_json(msg="Unable to connect to database: {0}.".format(e))
+
+ try:
+ role_facts = get_role_facts(cursor)
+ if module.check_mode:
+ changed = not check(role_facts, role, assigned_roles)
+ elif state == 'absent':
+ try:
+ changed = absent(role_facts, cursor, role, assigned_roles)
+ except pyodbc.Error, e:
+ module.fail_json(msg=str(e))
+ elif state == 'present':
+ try:
+ changed = present(role_facts, cursor, role, assigned_roles)
+ except pyodbc.Error, e:
+ module.fail_json(msg=str(e))
+ except NotSupportedError, e:
+ module.fail_json(msg=str(e), ansible_facts={'vertica_roles': role_facts})
+ except CannotDropError, e:
+ module.fail_json(msg=str(e), ansible_facts={'vertica_roles': role_facts})
+ except SystemExit:
+ # avoid catching this on python 2.4
+ raise
+ except Exception, e:
+ module.fail_json(msg=str(e))
+
+ module.exit_json(changed=changed, role=role, ansible_facts={'vertica_roles': role_facts})
+
+# import ansible utilities
+from ansible.module_utils.basic import *
+if __name__ == '__main__':
+ main()
diff --git a/lib/ansible/modules/extras/database/vertica/vertica_schema.py b/lib/ansible/modules/extras/database/vertica/vertica_schema.py
new file mode 100644
index 0000000000..39ccb0b60e
--- /dev/null
+++ b/lib/ansible/modules/extras/database/vertica/vertica_schema.py
@@ -0,0 +1,317 @@
+#!/usr/bin/python
+# -*- coding: utf-8 -*-
+
+# This file is part of Ansible
+#
+# Ansible is free software: you can redistribute it and/or modify
+# it under the terms of the GNU General Public License as published by
+# the Free Software Foundation, either version 3 of the License, or
+# (at your option) any later version.
+#
+# Ansible is distributed in the hope that it will be useful,
+# but WITHOUT ANY WARRANTY; without even the implied warranty of
+# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+# GNU General Public License for more details.
+#
+# You should have received a copy of the GNU General Public License
+# along with Ansible. If not, see <http://www.gnu.org/licenses/>.
+
+DOCUMENTATION = """
+---
+module: vertica_schema
+version_added: '2.0'
+short_description: Adds or removes a Vertica database schema and roles.
+description:
+ - Adds or removes a Vertica database schema and, optionally, roles
+ with schema access privileges.
+ - A schema will not be removed until all the objects have been dropped.
+ - In such a situation, if the module tries to remove the schema it
+ will fail and only remove roles created for the schema if they have
+ no dependencies.
+options:
+ name:
+ description:
+ - Name of the schema to add or remove.
+ required: true
+ usage_roles:
+ description:
+ - Comma separated list of roles to create and grant usage access to the schema.
+ aliases: ['usage_role']
+ required: false
+ default: null
+ create_roles:
+ description:
+ - Comma separated list of roles to create and grant usage and create access to the schema.
+ aliases: ['create_role']
+ required: false
+ default: null
+ owner:
+ description:
+ - Name of the user to set as owner of the schema.
+ required: false
+ default: null
+ state:
+ description:
+ - Whether to create C(present), or drop C(absent) a schema.
+ required: false
+ default: present
+ choices: ['present', 'absent']
+ db:
+ description:
+ - Name of the Vertica database.
+ required: false
+ default: null
+ cluster:
+ description:
+ - Name of the Vertica cluster.
+ required: false
+ default: localhost
+ port:
+ description:
+ - Vertica cluster port to connect to.
+ required: false
+ default: 5433
+ login_user:
+ description:
+ - The username used to authenticate with.
+ required: false
+ default: dbadmin
+ login_password:
+ description:
+ - The password used to authenticate with.
+ required: false
+ default: null
+notes:
+ - The default authentication assumes that you are either logging in as or sudo'ing
+ to the C(dbadmin) account on the host.
+ - This module uses C(pyodbc), a Python ODBC database adapter. You must ensure
+ that C(unixODBC) and C(pyodbc) are installed on the host and properly configured.
+ - Configuring C(unixODBC) for Vertica requires C(Driver = /opt/vertica/lib64/libverticaodbc.so)
+ to be added to the C(Vertica) section of either C(/etc/odbcinst.ini) or C($HOME/.odbcinst.ini)
+ and both C(ErrorMessagesPath = /opt/vertica/lib64) and C(DriverManagerEncoding = UTF-16)
+ to be added to the C(Driver) section of either C(/etc/vertica.ini) or C($HOME/.vertica.ini).
+requirements: [ 'unixODBC', 'pyodbc' ]
+author: "Dariusz Owczarek (@dareko)"
+"""
+
+EXAMPLES = """
+- name: creating a new vertica schema
+ vertica_schema: name=schema_name db=db_name state=present
+
+- name: creating a new schema with specific schema owner
+ vertica_schema: name=schema_name owner=dbowner db=db_name state=present
+
+- name: creating a new schema with roles
+ vertica_schema:
+ name=schema_name
+ create_roles=schema_name_all
+ usage_roles=schema_name_ro,schema_name_rw
+ db=db_name
+ state=present
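+
+# Illustrative (not among the original examples): dropping a schema; the drop
+# is restricted and fails while objects still depend on the schema.
+- name: dropping a vertica schema
+ vertica_schema: name=schema_name db=db_name state=absent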
+"""
+
+try:
+ import pyodbc
+except ImportError:
+ pyodbc_found = False
+else:
+ pyodbc_found = True
+
+class NotSupportedError(Exception):
+ pass
+
+class CannotDropError(Exception):
+ pass
+
+# module specific functions
+
+def get_schema_facts(cursor, schema=''):
+ facts = {}
+ cursor.execute("""
+ select schema_name, schema_owner, create_time
+ from schemata
+ where not is_system_schema and schema_name not in ('public', 'TxtIndex')
+ and (? = '' or schema_name ilike ?)
+ """, schema, schema)
+ while True:
+ rows = cursor.fetchmany(100)
+ if not rows:
+ break
+ for row in rows:
+ facts[row.schema_name.lower()] = {
+ 'name': row.schema_name,
+ 'owner': row.schema_owner,
+ 'create_time': str(row.create_time),
+ 'usage_roles': [],
+ 'create_roles': []}
+ cursor.execute("""
+ select g.object_name as schema_name, r.name as role_name,
+ lower(g.privileges_description) privileges_description
+ from roles r join grants g
+ on g.grantee_id = r.role_id and g.object_type='SCHEMA'
+ and g.privileges_description like '%USAGE%'
+ and g.grantee not in ('public', 'dbadmin')
+ and (? = '' or g.object_name ilike ?)
+ """, schema, schema)
+ while True:
+ rows = cursor.fetchmany(100)
+ if not rows:
+ break
+ for row in rows:
+ schema_key = row.schema_name.lower()
+ if 'create' in row.privileges_description:
+ facts[schema_key]['create_roles'].append(row.role_name)
+ else:
+ facts[schema_key]['usage_roles'].append(row.role_name)
+ return facts
+
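+# Reconcile schema roles: drop roles that are no longer wanted, revoke create
+# where a role was downgraded to usage-only, create and grant usage to newly
+# required roles, then grant create where newly required.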
+def update_roles(schema_facts, cursor, schema,
+ existing, required,
+ create_existing, create_required):
+ for role in set(existing + create_existing) - set(required + create_required):
+ cursor.execute("drop role {0} cascade".format(role))
+ for role in set(create_existing) - set(create_required):
+ cursor.execute("revoke create on schema {0} from {1}".format(schema, role))
+ for role in set(required + create_required) - set(existing + create_existing):
+ cursor.execute("create role {0}".format(role))
+ cursor.execute("grant usage on schema {0} to {1}".format(schema, role))
+ for role in set(create_required) - set(create_existing):
+ cursor.execute("grant create on schema {0} to {1}".format(schema, role))
+
+def check(schema_facts, schema, usage_roles, create_roles, owner):
+ schema_key = schema.lower()
+ if schema_key not in schema_facts:
+ return False
+ if owner and owner.lower() != schema_facts[schema_key]['owner'].lower():
+ return False
+ if cmp(sorted(usage_roles), sorted(schema_facts[schema_key]['usage_roles'])) != 0:
+ return False
+ if cmp(sorted(create_roles), sorted(schema_facts[schema_key]['create_roles'])) != 0:
+ return False
+ return True
+
+def present(schema_facts, cursor, schema, usage_roles, create_roles, owner):
+ schema_key = schema.lower()
+ if schema_key not in schema_facts:
+ query_fragments = ["create schema {0}".format(schema)]
+ if owner:
+ query_fragments.append("authorization {0}".format(owner))
+ cursor.execute(' '.join(query_fragments))
+ update_roles(schema_facts, cursor, schema, [], usage_roles, [], create_roles)
+ schema_facts.update(get_schema_facts(cursor, schema))
+ return True
+ else:
+ changed = False
+ if owner and owner.lower() != schema_facts[schema_key]['owner'].lower():
+ raise NotSupportedError((
+ "Changing schema owner is not supported. "
+ "Current owner: {0}."
+ ).format(schema_facts[schema_key]['owner']))
+ if cmp(sorted(usage_roles), sorted(schema_facts[schema_key]['usage_roles'])) != 0 or \
+ cmp(sorted(create_roles), sorted(schema_facts[schema_key]['create_roles'])) != 0:
+ update_roles(schema_facts, cursor, schema,
+ schema_facts[schema_key]['usage_roles'], usage_roles,
+ schema_facts[schema_key]['create_roles'], create_roles)
+ changed = True
+ if changed:
+ schema_facts.update(get_schema_facts(cursor, schema))
+ return changed
+
+def absent(schema_facts, cursor, schema, usage_roles, create_roles):
+ schema_key = schema.lower()
+ if schema_key in schema_facts:
+ update_roles(schema_facts, cursor, schema,
+ schema_facts[schema_key]['usage_roles'], [], schema_facts[schema_key]['create_roles'], [])
+ try:
+ cursor.execute("drop schema {0} restrict".format(schema_facts[schema_key]['name']))
+ except pyodbc.Error:
+ raise CannotDropError("Dropping schema failed due to dependencies.")
+ del schema_facts[schema_key]
+ return True
+ else:
+ return False
+
+# module logic
+
+def main():
+
+ module = AnsibleModule(
+ argument_spec=dict(
+ schema=dict(required=True, aliases=['name']),
+ usage_roles=dict(default=None, aliases=['usage_role']),
+ create_roles=dict(default=None, aliases=['create_role']),
+ owner=dict(default=None),
+ state=dict(default='present', choices=['absent', 'present']),
+ db=dict(default=None),
+ cluster=dict(default='localhost'),
+ port=dict(default='5433'),
+ login_user=dict(default='dbadmin'),
+ login_password=dict(default=None),
+ ), supports_check_mode = True)
+
+ if not pyodbc_found:
+ module.fail_json(msg="The python pyodbc module is required.")
+
+ schema = module.params['schema']
+ usage_roles = []
+ if module.params['usage_roles']:
+ usage_roles = module.params['usage_roles'].split(',')
+ usage_roles = filter(None, usage_roles)
+ create_roles = []
+ if module.params['create_roles']:
+ create_roles = module.params['create_roles'].split(',')
+ create_roles = filter(None, create_roles)
+ owner = module.params['owner']
+ state = module.params['state']
+ db = ''
+ if module.params['db']:
+ db = module.params['db']
+
+ changed = False
+
+ try:
+ dsn = (
+ "Driver=Vertica;"
+ "Server={0};"
+ "Port={1};"
+ "Database={2};"
+ "User={3};"
+ "Password={4};"
+ "ConnectionLoadBalance={5}"
+ ).format(module.params['cluster'], module.params['port'], db,
+ module.params['login_user'], module.params['login_password'], 'true')
+ db_conn = pyodbc.connect(dsn, autocommit=True)
+ cursor = db_conn.cursor()
+ except Exception, e:
+ module.fail_json(msg="Unable to connect to database: {0}.".format(e))
+
+ try:
+ schema_facts = get_schema_facts(cursor)
+ if module.check_mode:
+ changed = not check(schema_facts, schema, usage_roles, create_roles, owner)
+ elif state == 'absent':
+ try:
+ changed = absent(schema_facts, cursor, schema, usage_roles, create_roles)
+ except pyodbc.Error, e:
+ module.fail_json(msg=str(e))
+ elif state == 'present':
+ try:
+ changed = present(schema_facts, cursor, schema, usage_roles, create_roles, owner)
+ except pyodbc.Error, e:
+ module.fail_json(msg=str(e))
+ except NotSupportedError, e:
+ module.fail_json(msg=str(e), ansible_facts={'vertica_schemas': schema_facts})
+ except CannotDropError, e:
+ module.fail_json(msg=str(e), ansible_facts={'vertica_schemas': schema_facts})
+ except SystemExit:
+ # avoid catching this on python 2.4
+ raise
+ except Exception, e:
+ module.fail_json(msg=str(e))
+
+ module.exit_json(changed=changed, schema=schema, ansible_facts={'vertica_schemas': schema_facts})
+
+# import ansible utilities
+from ansible.module_utils.basic import *
+if __name__ == '__main__':
+ main()
diff --git a/lib/ansible/modules/extras/database/vertica/vertica_user.py b/lib/ansible/modules/extras/database/vertica/vertica_user.py
new file mode 100644
index 0000000000..7c52df3163
--- /dev/null
+++ b/lib/ansible/modules/extras/database/vertica/vertica_user.py
@@ -0,0 +1,388 @@
+#!/usr/bin/python
+# -*- coding: utf-8 -*-
+
+# This file is part of Ansible
+#
+# Ansible is free software: you can redistribute it and/or modify
+# it under the terms of the GNU General Public License as published by
+# the Free Software Foundation, either version 3 of the License, or
+# (at your option) any later version.
+#
+# Ansible is distributed in the hope that it will be useful,
+# but WITHOUT ANY WARRANTY; without even the implied warranty of
+# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+# GNU General Public License for more details.
+#
+# You should have received a copy of the GNU General Public License
+# along with Ansible. If not, see <http://www.gnu.org/licenses/>.
+
+DOCUMENTATION = """
+---
+module: vertica_user
+version_added: '2.0'
+short_description: Adds or removes Vertica database users and assigns roles.
+description:
+ - Adds or removes a Vertica database user and, optionally, assigns roles.
+ - A user will not be removed until all the dependencies have been dropped.
+ - In such a situation, if the module tries to remove the user it
+ will fail and only remove roles granted to the user.
+options:
+ name:
+ description:
+ - Name of the user to add or remove.
+ required: true
+ profile:
+ description:
+ - Sets the user's profile.
+ required: false
+ default: null
+ resource_pool:
+ description:
+ - Sets the user's resource pool.
+ required: false
+ default: null
+ password:
+ description:
+ - The user's password, hashed with the MD5 algorithm.
+ - The password must be generated with the format C("md5" + md5(password + username)),
+ resulting in a total of 35 characters. An easy way to do this is by querying
+ the Vertica database with C(select 'md5'||md5('<user_password><user_name>')).
+ required: false
+ default: null
+ expired:
+ description:
+ - Sets the user's password expiration.
+ required: false
+ default: null
+ ldap:
+ description:
+ - Set to true if users are authenticated via LDAP.
+ - The user will be created with password expired and set to I($ldap$).
+ required: false
+ default: null
+ roles:
+ description:
+ - Comma separated list of roles to assign to the user.
+ aliases: ['role']
+ required: false
+ default: null
+ state:
+ description:
+ - Whether to create C(present), drop C(absent) or lock C(locked) a user.
+ required: false
+ choices: ['present', 'absent', 'locked']
+ default: present
+ db:
+ description:
+ - Name of the Vertica database.
+ required: false
+ default: null
+ cluster:
+ description:
+ - Name of the Vertica cluster.
+ required: false
+ default: localhost
+ port:
+ description:
+ - Vertica cluster port to connect to.
+ required: false
+ default: 5433
+ login_user:
+ description:
+ - The username used to authenticate with.
+ required: false
+ default: dbadmin
+ login_password:
+ description:
+ - The password used to authenticate with.
+ required: false
+ default: null
+notes:
+ - The default authentication assumes that you are either logging in as or sudo'ing
+ to the C(dbadmin) account on the host.
+ - This module uses C(pyodbc), a Python ODBC database adapter. You must ensure
+ that C(unixODBC) and C(pyodbc) are installed on the host and properly configured.
+ - Configuring C(unixODBC) for Vertica requires C(Driver = /opt/vertica/lib64/libverticaodbc.so)
+ to be added to the C(Vertica) section of either C(/etc/odbcinst.ini) or C($HOME/.odbcinst.ini)
+ and both C(ErrorMessagesPath = /opt/vertica/lib64) and C(DriverManagerEncoding = UTF-16)
+ to be added to the C(Driver) section of either C(/etc/vertica.ini) or C($HOME/.vertica.ini).
+requirements: [ 'unixODBC', 'pyodbc' ]
+author: "Dariusz Owczarek (@dareko)"
+"""
+
+EXAMPLES = """
+- name: creating a new vertica user with password
+ vertica_user: name=user_name password=md5<encrypted_password> db=db_name state=present
+
+- name: creating a new vertica user authenticated via ldap with roles assigned
+ vertica_user:
+ name=user_name
+ ldap=true
+ db=db_name
+ roles=schema_name_ro
+ state=present
+"""
+
+try:
+ import pyodbc
+except ImportError:
+ pyodbc_found = False
+else:
+ pyodbc_found = True
+
+class NotSupportedError(Exception):
+ pass
+
+class CannotDropError(Exception):
+ pass
+
+# module specific functions
+
+def get_user_facts(cursor, user=''):
+ facts = {}
+ cursor.execute("""
+ select u.user_name, u.is_locked, u.lock_time,
+ p.password, p.acctexpired as is_expired,
+ u.profile_name, u.resource_pool,
+ u.all_roles, u.default_roles
+ from users u join password_auditor p on p.user_id = u.user_id
+ where not u.is_super_user
+ and (? = '' or u.user_name ilike ?)
+ """, user, user)
+ while True:
+ rows = cursor.fetchmany(100)
+ if not rows:
+ break
+ for row in rows:
+ user_key = row.user_name.lower()
+ facts[user_key] = {
+ 'name': row.user_name,
+ 'locked': str(row.is_locked),
+ 'password': row.password,
+ 'expired': str(row.is_expired),
+ 'profile': row.profile_name,
+ 'resource_pool': row.resource_pool,
+ 'roles': [],
+ 'default_roles': []}
+ if row.is_locked:
+ facts[user_key]['locked_time'] = str(row.lock_time)
+ if row.all_roles:
+ facts[user_key]['roles'] = row.all_roles.replace(' ', '').split(',')
+ if row.default_roles:
+ facts[user_key]['default_roles'] = row.default_roles.replace(' ', '').split(',')
+ return facts
+
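+# For illustration, get_user_facts() returns a dict keyed by the lowercased
+# user name; the values shown here are hypothetical:
+# {'alice': {'name': 'Alice', 'locked': 'False', 'password': 'md5...',
+#            'expired': 'False', 'profile': 'default', 'resource_pool': 'general',
+#            'roles': ['schema_ro'], 'default_roles': ['schema_ro']}}
+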
+def update_roles(user_facts, cursor, user,
+ existing_all, existing_default, required):
+ del_roles = list(set(existing_all) - set(required))
+ if del_roles:
+ cursor.execute("revoke {0} from {1}".format(','.join(del_roles), user))
+ new_roles = list(set(required) - set(existing_all))
+ if new_roles:
+ cursor.execute("grant {0} to {1}".format(','.join(new_roles), user))
+ if required:
+ cursor.execute("alter user {0} default role {1}".format(user, ','.join(required)))
+
+def check(user_facts, user, profile, resource_pool,
+ locked, password, expired, ldap, roles):
+ user_key = user.lower()
+ if user_key not in user_facts:
+ return False
+ if profile and profile != user_facts[user_key]['profile']:
+ return False
+ if resource_pool and resource_pool != user_facts[user_key]['resource_pool']:
+ return False
+ if locked != (user_facts[user_key]['locked'] == 'True'):
+ return False
+ if password and password != user_facts[user_key]['password']:
+ return False
+ if (expired is not None and expired != (user_facts[user_key]['expired'] == 'True')) or \
+ (ldap is not None and ldap != (user_facts[user_key]['expired'] == 'True')):
+ return False
+ if roles and (cmp(sorted(roles), sorted(user_facts[user_key]['roles'])) != 0 or \
+ cmp(sorted(roles), sorted(user_facts[user_key]['default_roles'])) != 0):
+ return False
+ return True
+
+def present(user_facts, cursor, user, profile, resource_pool,
+ locked, password, expired, ldap, roles):
+ user_key = user.lower()
+ if user_key not in user_facts:
+ query_fragments = ["create user {0}".format(user)]
+ if locked:
+ query_fragments.append("account lock")
+ if password or ldap:
+ if password:
+ query_fragments.append("identified by '{0}'".format(password))
+ else:
+ query_fragments.append("identified by '$ldap$'")
+ if expired or ldap:
+ query_fragments.append("password expire")
+ if profile:
+ query_fragments.append("profile {0}".format(profile))
+ if resource_pool:
+ query_fragments.append("resource pool {0}".format(resource_pool))
+ cursor.execute(' '.join(query_fragments))
+ if resource_pool and resource_pool != 'general':
+ cursor.execute("grant usage on resource pool {0} to {1}".format(
+ resource_pool, user))
+ update_roles(user_facts, cursor, user, [], [], roles)
+ user_facts.update(get_user_facts(cursor, user))
+ return True
+ else:
+ changed = False
+ query_fragments = ["alter user {0}".format(user)]
+ if locked is not None and locked != (user_facts[user_key]['locked'] == 'True'):
+ if locked:
+ state = 'lock'
+ else:
+ state = 'unlock'
+ query_fragments.append("account {0}".format(state))
+ changed = True
+ if password and password != user_facts[user_key]['password']:
+ query_fragments.append("identified by '{0}'".format(password))
+ changed = True
+ if ldap:
+ if ldap != (user_facts[user_key]['expired'] == 'True'):
+ query_fragments.append("password expire")
+ changed = True
+ elif expired is not None and expired != (user_facts[user_key]['expired'] == 'True'):
+ if expired:
+ query_fragments.append("password expire")
+ changed = True
+ else:
+ raise NotSupportedError("Unexpiring user password is not supported.")
+ if profile and profile != user_facts[user_key]['profile']:
+ query_fragments.append("profile {0}".format(profile))
+ changed = True
+ if resource_pool and resource_pool != user_facts[user_key]['resource_pool']:
+ query_fragments.append("resource pool {0}".format(resource_pool))
+ if user_facts[user_key]['resource_pool'] != 'general':
+ cursor.execute("revoke usage on resource pool {0} from {1}".format(
+ user_facts[user_key]['resource_pool'], user))
+ if resource_pool != 'general':
+ cursor.execute("grant usage on resource pool {0} to {1}".format(
+ resource_pool, user))
+ changed = True
+ if changed:
+ cursor.execute(' '.join(query_fragments))
+ if roles and (cmp(sorted(roles), sorted(user_facts[user_key]['roles'])) != 0 or \
+ cmp(sorted(roles), sorted(user_facts[user_key]['default_roles'])) != 0):
+ update_roles(user_facts, cursor, user,
+ user_facts[user_key]['roles'], user_facts[user_key]['default_roles'], roles)
+ changed = True
+ if changed:
+ user_facts.update(get_user_facts(cursor, user))
+ return changed
+
+def absent(user_facts, cursor, user, roles):
+ user_key = user.lower()
+ if user_key in user_facts:
+ update_roles(user_facts, cursor, user,
+ user_facts[user_key]['roles'], user_facts[user_key]['default_roles'], [])
+ try:
+ cursor.execute("drop user {0}".format(user_facts[user_key]['name']))
+ except pyodbc.Error:
+ raise CannotDropError("Dropping user failed due to dependencies.")
+ del user_facts[user_key]
+ return True
+ else:
+ return False
+
+# module logic
+
+def main():
+
+ module = AnsibleModule(
+ argument_spec=dict(
+ user=dict(required=True, aliases=['name']),
+ profile=dict(default=None),
+ resource_pool=dict(default=None),
+ password=dict(default=None),
+ expired=dict(type='bool', default=None),
+ ldap=dict(type='bool', default=None),
+ roles=dict(default=None, aliases=['role']),
+ state=dict(default='present', choices=['absent', 'present', 'locked']),
+ db=dict(default=None),
+ cluster=dict(default='localhost'),
+ port=dict(default='5433'),
+ login_user=dict(default='dbadmin'),
+ login_password=dict(default=None),
+ ), supports_check_mode = True)
+
+ if not pyodbc_found:
+ module.fail_json(msg="The python pyodbc module is required.")
+
+ user = module.params['user']
+ profile = module.params['profile']
+ if profile:
+ profile = profile.lower()
+ resource_pool = module.params['resource_pool']
+ if resource_pool:
+ resource_pool = resource_pool.lower()
+ password = module.params['password']
+ expired = module.params['expired']
+ ldap = module.params['ldap']
+ roles = []
+ if module.params['roles']:
+ roles = module.params['roles'].split(',')
+ roles = filter(None, roles)
+ state = module.params['state']
+ if state == 'locked':
+ locked = True
+ else:
+ locked = False
+ db = ''
+ if module.params['db']:
+ db = module.params['db']
+
+ changed = False
+
+ try:
+ dsn = (
+ "Driver=Vertica;"
+ "Server={0};"
+ "Port={1};"
+ "Database={2};"
+ "User={3};"
+ "Password={4};"
+ "ConnectionLoadBalance={5}"
+ ).format(module.params['cluster'], module.params['port'], db,
+ module.params['login_user'], module.params['login_password'], 'true')
+ db_conn = pyodbc.connect(dsn, autocommit=True)
+ cursor = db_conn.cursor()
+ except Exception, e:
+ module.fail_json(msg="Unable to connect to database: {0}.".format(e))
+
+ try:
+ user_facts = get_user_facts(cursor)
+ if module.check_mode:
+ changed = not check(user_facts, user, profile, resource_pool,
+ locked, password, expired, ldap, roles)
+ elif state == 'absent':
+ try:
+ changed = absent(user_facts, cursor, user, roles)
+ except pyodbc.Error, e:
+ module.fail_json(msg=str(e))
+ elif state in ['present', 'locked']:
+ try:
+ changed = present(user_facts, cursor, user, profile, resource_pool,
+ locked, password, expired, ldap, roles)
+ except pyodbc.Error, e:
+ module.fail_json(msg=str(e))
+ except NotSupportedError, e:
+ module.fail_json(msg=str(e), ansible_facts={'vertica_users': user_facts})
+ except CannotDropError, e:
+ module.fail_json(msg=str(e), ansible_facts={'vertica_users': user_facts})
+ except SystemExit:
+ # avoid catching this on python 2.4
+ raise
+ except Exception, e:
+ module.fail_json(msg=str(e))
+
+ module.exit_json(changed=changed, user=user, ansible_facts={'vertica_users': user_facts})
+
+# import ansible utilities
+from ansible.module_utils.basic import *
+if __name__ == '__main__':
+ main()
diff --git a/lib/ansible/modules/extras/files/__init__.py b/lib/ansible/modules/extras/files/__init__.py
new file mode 100644
index 0000000000..e69de29bb2
--- /dev/null
+++ b/lib/ansible/modules/extras/files/__init__.py
diff --git a/lib/ansible/modules/extras/files/archive.py b/lib/ansible/modules/extras/files/archive.py
new file mode 100644
index 0000000000..2b927e39c1
--- /dev/null
+++ b/lib/ansible/modules/extras/files/archive.py
@@ -0,0 +1,401 @@
+#!/usr/bin/python
+# -*- coding: utf-8 -*-
+
+# import module snippets
+from ansible.module_utils.basic import AnsibleModule
+from ansible.module_utils.pycompat24 import get_exception
+
+"""
+(c) 2016, Ben Doherty <bendohmv@gmail.com>
+Sponsored by Oomph, Inc. http://www.oomphinc.com
+
+This file is part of Ansible
+
+Ansible is free software: you can redistribute it and/or modify
+it under the terms of the GNU General Public License as published by
+the Free Software Foundation, either version 3 of the License, or
+(at your option) any later version.
+
+Ansible is distributed in the hope that it will be useful,
+but WITHOUT ANY WARRANTY; without even the implied warranty of
+MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+GNU General Public License for more details.
+
+You should have received a copy of the GNU General Public License
+along with Ansible. If not, see <http://www.gnu.org/licenses/>.
+"""
+
+DOCUMENTATION = '''
+---
+module: archive
+version_added: 2.2
+short_description: Creates a compressed archive of one or more files or trees.
+extends_documentation_fragment: files
+description:
+ - The M(archive) module packs an archive. It is the opposite of the unarchive module.
+ - By default, it assumes the compression source exists on the target. It will not copy the source file from the local system to the target before archiving.
+ - Source files can be deleted after archival by specifying C(remove)=I(True).
+options:
+ path:
+ description:
+ - Remote absolute path, glob, or list of paths or globs for the file or files to compress or archive.
+ required: true
+ format:
+ description:
+ - The type of compression to use. Can be C(gz), C(bz2), C(zip) or C(tar).
+ choices: [ 'gz', 'bz2', 'zip', 'tar' ]
+ default: 'gz'
+ dest:
+ description:
+ - The file name of the destination archive. This is required when C(path) refers to multiple files by either specifying a glob, a directory or multiple paths in a list.
+ required: false
+ default: null
+ remove:
+ description:
+ - Remove any added source files and trees after adding to archive.
+ type: bool
+ required: false
+ default: false
+
+author: "Ben Doherty (@bendoh)"
+notes:
+ - requires tarfile, zipfile, gzip, and bzip2 packages on target host
+ - can produce I(gzip), I(bzip2) and I(zip) compressed files or archives
+'''
+
+EXAMPLES = '''
+# Compress directory /path/to/foo/ into /path/to/foo.tgz
+- archive: path=/path/to/foo dest=/path/to/foo.tgz
+
+# Compress regular file /path/to/foo into /path/to/foo.gz and remove it
+- archive: path=/path/to/foo remove=True
+
+# Create a zip archive of /path/to/foo
+- archive: path=/path/to/foo format=zip
+
+# Create a bz2 archive of multiple files, rooted at /path
+- archive:
+ path:
+ - /path/to/foo
+ - /path/wong/foo
+ dest: /path/file.tar.bz2
+ format: bz2
+'''
+
+RETURN = '''
+state:
+ description:
+ The current state of the archived file.
+ If 'absent', then no source files were found and the archive does not exist.
+ If 'compress', then the source file is in the compressed state.
+ If 'archive', then the source file or paths are currently archived.
+ If 'incomplete', then an archive was created, but not all source paths were found.
+ type: string
+ returned: always
+missing:
+ description: Any files that were missing from the source.
+ type: list
+ returned: success
+archived:
+ description: Any files that were compressed or added to the archive.
+ type: list
+ returned: success
+arcroot:
+ description: The archive root.
+ type: string
+expanded_paths:
+ description: The list of matching paths from paths argument.
+ type: list
+'''
+
+import os
+import re
+import glob
+import shutil
+import gzip
+import bz2
+import filecmp
+import zipfile
+import tarfile
+
+def main():
+ module = AnsibleModule(
+ argument_spec = dict(
+ path = dict(type='list', required=True),
+ format = dict(choices=['gz', 'bz2', 'zip', 'tar'], default='gz', required=False),
+ dest = dict(required=False, type='path'),
+ remove = dict(required=False, default=False, type='bool'),
+ ),
+ add_file_common_args=True,
+ supports_check_mode=True,
+ )
+
+ params = module.params
+ check_mode = module.check_mode
+ paths = params['path']
+ dest = params['dest']
+ remove = params['remove']
+
+ expanded_paths = []
+ format = params['format']
+ globby = False
+ changed = False
+ state = 'absent'
+
+ # Simple or archive file compression (inapplicable with 'zip' since it's always an archive)
+ archive = False
+ successes = []
+
+ for i, path in enumerate(paths):
+ path = os.path.expanduser(os.path.expandvars(path))
+
+ # Expand any glob characters. If found, add the expanded glob to the
+ # list of expanded_paths, which might be empty.
+ if ('*' in path or '?' in path):
+ expanded_paths = expanded_paths + glob.glob(path)
+ globby = True
+
+ # If there are no glob characters the path is added to the expanded paths
+ # whether the path exists or not
+ else:
+ expanded_paths.append(path)
+
+ if len(expanded_paths) == 0:
+ return module.fail_json(path=', '.join(paths), expanded_paths=', '.join(expanded_paths), msg='Error, no source paths were found')
+
+ # If we actually matched multiple files or TRIED to, then
+ # treat this as a multi-file archive
+ archive = globby or os.path.isdir(expanded_paths[0]) or len(expanded_paths) > 1
+
+ # Default created file name (for single-file archives) to
+ # <file>.<format>
+ if not dest and not archive:
+ dest = '%s.%s' % (expanded_paths[0], format)
+
+ # Force archives to specify 'dest'
+ if archive and not dest:
+ module.fail_json(dest=dest, path=', '.join(paths), msg='Error, must specify "dest" when archiving multiple files or trees')
+
+ archive_paths = []
+ missing = []
+ arcroot = ''
+
+ for path in expanded_paths:
+ # Use the longest common directory name among all the files
+ # as the archive root path
+ if arcroot == '':
+ arcroot = os.path.dirname(path) + os.sep
+ else:
+ for i in range(len(arcroot)):
+ if path[i] != arcroot[i]:
+ break
+
+ if i < len(arcroot):
+ arcroot = os.path.dirname(arcroot[0:i+1])
+
+ arcroot += os.sep
+
+ # Don't allow archives to be created anywhere within paths to be removed
+ if remove and os.path.isdir(path) and dest.startswith(path):
+ module.fail_json(path=', '.join(paths), msg='Error, created archive can not be contained in source paths when remove=True')
+
+ if os.path.lexists(path):
+ archive_paths.append(path)
+ else:
+ missing.append(path)
+
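+    # For intuition: arcroot above ends up as the longest common directory
+    # prefix of all expanded paths (with a trailing separator), e.g.
+    # (hypothetical) '/data/a/x' and '/data/a/y/z' yield arcroot '/data/a/';
+    # member names inside the archive are the paths with that prefix stripped.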
+ # No source files were found but the named archive exists: are we 'compress' or 'archive' now?
+ if len(missing) == len(expanded_paths) and dest and os.path.exists(dest):
+ # Just check the filename to know if it's an archive or simple compressed file
+ if re.search(r'(\.tar|\.tar\.gz|\.tgz|\.tbz2|\.tar\.bz2|\.zip)$', os.path.basename(dest), re.IGNORECASE):
+ state = 'archive'
+ else:
+ state = 'compress'
+
+ # Multiple files, or globbiness
+ elif archive:
+ if len(archive_paths) == 0:
+ # No source files were found, but the archive is there.
+ if os.path.lexists(dest):
+ state = 'archive'
+ elif len(missing) > 0:
+ # SOME source files were found, but not all of them
+ state = 'incomplete'
+
+ archive = None
+ size = 0
+ errors = []
+
+ if os.path.lexists(dest):
+ size = os.path.getsize(dest)
+
+ if state != 'archive':
+ if check_mode:
+ changed = True
+
+ else:
+ try:
+ # Slightly more difficult (and less efficient!) compression using zipfile module
+ if format == 'zip':
+ arcfile = zipfile.ZipFile(dest, 'w', zipfile.ZIP_DEFLATED)
+
+ # Easier compression using tarfile module
+ elif format == 'gz' or format == 'bz2':
+ arcfile = tarfile.open(dest, 'w|' + format)
+
+ # Or plain tar archiving
+ elif format == 'tar':
+ arcfile = tarfile.open(dest, 'w')
+
+ for path in archive_paths:
+ if os.path.isdir(path):
+ # Recurse into directories
+ for dirpath, dirnames, filenames in os.walk(path, topdown=True):
+ if not dirpath.endswith(os.sep):
+ dirpath += os.sep
+
+ for dirname in dirnames:
+ fullpath = dirpath + dirname
+ arcname = fullpath[len(arcroot):]
+
+ try:
+ if format == 'zip':
+ arcfile.write(fullpath, arcname)
+ else:
+ arcfile.add(fullpath, arcname, recursive=False)
+
+ except Exception:
+ e = get_exception()
+ errors.append('%s: %s' % (fullpath, str(e)))
+
+ for filename in filenames:
+ fullpath = dirpath + filename
+ arcname = fullpath[len(arcroot):]
+
+ if not filecmp.cmp(fullpath, dest):
+ try:
+ if format == 'zip':
+ arcfile.write(fullpath, arcname)
+ else:
+ arcfile.add(fullpath, arcname, recursive=False)
+
+ successes.append(fullpath)
+ except Exception:
+ e = get_exception()
+ errors.append('Adding %s: %s' % (fullpath, str(e)))
+ else:
+ if format == 'zip':
+ arcfile.write(path, path[len(arcroot):])
+ else:
+ arcfile.add(path, path[len(arcroot):], recursive=False)
+
+ successes.append(path)
+
+ except Exception:
+ e = get_exception()
+ return module.fail_json(msg='Error when writing %s archive at %s: %s' % (format == 'zip' and 'zip' or ('tar.' + format), dest, str(e)))
+
+ if arcfile:
+ arcfile.close()
+ state = 'archive'
+
+ if len(errors) > 0:
+ module.fail_json(msg='Errors when writing archive at %s: %s' % (dest, '; '.join(errors)))
+
+ if state in ['archive', 'incomplete'] and remove:
+ for path in successes:
+ try:
+ if os.path.isdir(path):
+ shutil.rmtree(path)
+ elif not check_mode:
+ os.remove(path)
+ except OSError:
+ e = get_exception()
+ errors.append(path)
+
+ if len(errors) > 0:
+ module.fail_json(dest=dest, msg='Error deleting some source files: ' + str(e), files=errors)
+
+ # Rudimentary check: If size changed then file changed. Not perfect, but easy.
+ if os.path.getsize(dest) != size:
+ changed = True
+
+ if len(successes) and state != 'incomplete':
+ state = 'archive'
+
+ # Simple, single-file compression
+ else:
+ path = expanded_paths[0]
+
+ # No source or compressed file
+ if not (os.path.exists(path) or os.path.lexists(dest)):
+ state = 'absent'
+
+ # if it already exists and the source file isn't there, consider this done
+ elif not os.path.lexists(path) and os.path.lexists(dest):
+ state = 'compress'
+
+ else:
+ if module.check_mode:
+ if not os.path.exists(dest):
+ changed = True
+ else:
+ size = 0
+ f_in = f_out = arcfile = None
+
+ if os.path.lexists(dest):
+ size = os.path.getsize(dest)
+
+ try:
+ if format == 'zip':
+ arcfile = zipfile.ZipFile(dest, 'w', zipfile.ZIP_DEFLATED)
+ arcfile.write(path, path[len(arcroot):])
+ arcfile.close()
+ state = 'archive' # because all zip files are archives
+
+ else:
+ f_in = open(path, 'rb')
+
+ if format == 'gz':
+ f_out = gzip.open(dest, 'wb')
+ elif format == 'bz2':
+ f_out = bz2.BZ2File(dest, 'wb')
+ else:
+ raise OSError("Invalid format")
+
+ shutil.copyfileobj(f_in, f_out)
+
+ successes.append(path)
+
+ except OSError:
+ e = get_exception()
+ module.fail_json(path=path, dest=dest, msg='Unable to write to compressed file: %s' % str(e))
+
+ if arcfile:
+ arcfile.close()
+ if f_in:
+ f_in.close()
+ if f_out:
+ f_out.close()
+
+ # Rudimentary check: If size changed then file changed. Not perfect, but easy.
+ if os.path.getsize(dest) != size:
+ changed = True
+
+ state = 'compress'
+
+ if remove and not check_mode:
+ try:
+ os.remove(path)
+
+ except OSError:
+ e = get_exception()
+ module.fail_json(path=path, msg='Unable to remove source file: %s' % str(e))
+
+ params['path'] = dest
+ file_args = module.load_file_common_arguments(params)
+
+ changed = module.set_fs_attributes_if_different(file_args, changed)
+
+ module.exit_json(archived=successes, dest=dest, changed=changed, state=state, arcroot=arcroot, missing=missing, expanded_paths=expanded_paths)
+
+if __name__ == '__main__':
+ main()
diff --git a/lib/ansible/modules/extras/files/blockinfile.py b/lib/ansible/modules/extras/files/blockinfile.py
new file mode 100755
index 0000000000..7b25101242
--- /dev/null
+++ b/lib/ansible/modules/extras/files/blockinfile.py
@@ -0,0 +1,320 @@
+#!/usr/bin/python
+# -*- coding: utf-8 -*-
+
+# (c) 2014, 2015 YAEGASHI Takeshi <yaegashi@debian.org>
+#
+# This file is part of Ansible
+#
+# Ansible is free software: you can redistribute it and/or modify
+# it under the terms of the GNU General Public License as published by
+# the Free Software Foundation, either version 3 of the License, or
+# (at your option) any later version.
+#
+# Ansible is distributed in the hope that it will be useful,
+# but WITHOUT ANY WARRANTY; without even the implied warranty of
+# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+# GNU General Public License for more details.
+#
+# You should have received a copy of the GNU General Public License
+# along with Ansible. If not, see <http://www.gnu.org/licenses/>.
+
+DOCUMENTATION = """
+---
+module: blockinfile
+author:
+ - 'YAEGASHI Takeshi (@yaegashi)'
+extends_documentation_fragment:
+ - files
+ - validate
+short_description: Insert/update/remove a text block surrounded by marker lines.
+version_added: '2.0'
+description:
+ - This module will insert/update/remove a block of multi-line text
+ surrounded by customizable marker lines.
+notes:
+ - This module supports check mode.
+ - When using 'with_*' loops be aware that if you do not set a unique C(marker) the block will be overwritten on each iteration.
+options:
+ dest:
+ aliases: [ name, destfile ]
+ required: true
+ description:
+ - The file to modify.
+ state:
+ required: false
+ choices: [ present, absent ]
+ default: present
+ description:
+ - Whether the block should be there or not.
+ marker:
+ required: false
+ default: '# {mark} ANSIBLE MANAGED BLOCK'
+ description:
+ - The marker line template.
+ "{mark}" will be replaced with "BEGIN" or "END".
+ block:
+ aliases: [ content ]
+ required: false
+ default: ''
+ description:
+ - The text to insert inside the marker lines.
+ If it's missing or an empty string,
+ the block will be removed as if C(state) were set to C(absent).
+ insertafter:
+ required: false
+ default: EOF
+ description:
+ - If specified, the block will be inserted after the last match of the
+ specified regular expression. A special value is available: C(EOF) for
+ inserting the block at the end of the file. If the specified regular
+ expression has no matches, C(EOF) will be used instead.
+ choices: [ 'EOF', '*regex*' ]
+ insertbefore:
+ required: false
+ default: None
+ description:
+ - If specified, the block will be inserted before the last match of the
+ specified regular expression. A special value is available: C(BOF) for
+ inserting the block at the beginning of the file. If the specified regular
+ expression has no matches, the block will be inserted at the end of the
+ file.
+ choices: [ 'BOF', '*regex*' ]
+ create:
+ required: false
+ default: 'no'
+ choices: [ 'yes', 'no' ]
+ description:
+ - Create a new file if it doesn't exist.
+ backup:
+ required: false
+ default: 'no'
+ choices: [ 'yes', 'no' ]
+ description:
+ - Create a backup file including the timestamp information so you can
+ get the original file back if you somehow clobbered it incorrectly.
+ follow:
+ required: false
+ default: "no"
+ choices: [ "yes", "no" ]
+ description:
+ - 'This flag indicates that filesystem links, if they exist, should be followed.'
+ version_added: "2.1"
+"""
+
+EXAMPLES = r"""
+- name: insert/update "Match User" configuation block in /etc/ssh/sshd_config
+ blockinfile:
+ dest: /etc/ssh/sshd_config
+ block: |
+ Match User ansible-agent
+ PasswordAuthentication no
+
+- name: insert/update eth0 configuration stanza in /etc/network/interfaces
+ (it might be better to copy files into /etc/network/interfaces.d/)
+ blockinfile:
+ dest: /etc/network/interfaces
+ block: |
+ iface eth0 inet static
+ address 192.0.2.23
+ netmask 255.255.255.0
+
+- name: insert/update HTML surrounded by custom markers after <body> line
+ blockinfile:
+ dest: /var/www/html/index.html
+ marker: "<!-- {mark} ANSIBLE MANAGED BLOCK -->"
+ insertafter: "<body>"
+ content: |
+ <h1>Welcome to {{ansible_hostname}}</h1>
+ <p>Last updated on {{ansible_date_time.iso8601}}</p>
+
+- name: remove HTML as well as surrounding markers
+ blockinfile:
+ dest: /var/www/html/index.html
+ marker: "<!-- {mark} ANSIBLE MANAGED BLOCK -->"
+ content: ""
+
+- name: Add mappings to /etc/hosts
+ blockinfile:
+ dest: /etc/hosts
+ block: |
+ {{item.ip}} {{item.name}}
+ marker: "# {mark} ANSIBLE MANAGED BLOCK {{item.name}}"
+ with_items:
+ - { name: host1, ip: 10.10.1.10 }
+ - { name: host2, ip: 10.10.1.11 }
+ - { name: host3, ip: 10.10.1.12 }
+"""
+
+import re
+import os
+import tempfile
+
+
+def write_changes(module, contents, dest):
+
+ tmpfd, tmpfile = tempfile.mkstemp()
+ f = os.fdopen(tmpfd, 'wb')
+ f.write(contents)
+ f.close()
+
+ validate = module.params.get('validate', None)
+ valid = not validate
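+    # 'validate' is a printf-style command template containing %s; the new file
+    # is only moved into place if the command run against the temporary file
+    # exits 0, e.g. (typical usage): validate='visudo -cf %s'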
+ if validate:
+ if "%s" not in validate:
+ module.fail_json(msg="validate must contain %%s: %s" % (validate))
+ (rc, out, err) = module.run_command(validate % tmpfile)
+ valid = rc == 0
+ if rc != 0:
+ module.fail_json(msg='failed to validate: '
+ 'rc:%s error:%s' % (rc, err))
+ if valid:
+ module.atomic_move(tmpfile, dest, unsafe_writes=module.params['unsafe_writes'])
+
+
+def check_file_attrs(module, changed, message):
+
+ file_args = module.load_file_common_arguments(module.params)
+ if module.set_file_attributes_if_different(file_args, False):
+
+ if changed:
+ message += " and "
+ changed = True
+ message += "ownership, perms or SE linux context changed"
+
+ return message, changed
+
+
+def main():
+ module = AnsibleModule(
+ argument_spec=dict(
+ dest=dict(required=True, aliases=['name', 'destfile'], type='path'),
+ state=dict(default='present', choices=['absent', 'present']),
+ marker=dict(default='# {mark} ANSIBLE MANAGED BLOCK', type='str'),
+ block=dict(default='', type='str', aliases=['content']),
+ insertafter=dict(default=None),
+ insertbefore=dict(default=None),
+ create=dict(default=False, type='bool'),
+ backup=dict(default=False, type='bool'),
+ validate=dict(default=None, type='str'),
+ ),
+ mutually_exclusive=[['insertbefore', 'insertafter']],
+ add_file_common_args=True,
+ supports_check_mode=True
+ )
+
+ params = module.params
+ dest = params['dest']
+ if module.boolean(params.get('follow', None)):
+ dest = os.path.realpath(dest)
+
+ if os.path.isdir(dest):
+ module.fail_json(rc=256,
+ msg='Destination %s is a directory !' % dest)
+
+ path_exists = os.path.exists(dest)
+ if not path_exists:
+ if not module.boolean(params['create']):
+ module.fail_json(rc=257,
+ msg='Destination %s does not exist !' % dest)
+ original = None
+ lines = []
+ else:
+ f = open(dest, 'rb')
+ original = f.read()
+ f.close()
+ lines = original.splitlines()
+
+ insertbefore = params['insertbefore']
+ insertafter = params['insertafter']
+ block = params['block']
+ marker = params['marker']
+ present = params['state'] == 'present'
+
+ if not present and not path_exists:
+ module.exit_json(changed=False, msg="File not present")
+
+ if insertbefore is None and insertafter is None:
+ insertafter = 'EOF'
+
+ if insertafter not in (None, 'EOF'):
+ insertre = re.compile(insertafter)
+ elif insertbefore not in (None, 'BOF'):
+ insertre = re.compile(insertbefore)
+ else:
+ insertre = None
+
+ marker0 = re.sub(r'{mark}', 'BEGIN', marker)
+ marker1 = re.sub(r'{mark}', 'END', marker)
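+    # With the default marker template this yields, for example:
+    #   marker0 == '# BEGIN ANSIBLE MANAGED BLOCK'
+    #   marker1 == '# END ANSIBLE MANAGED BLOCK'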
+ if present and block:
+ # Escape sequences like '\n' need to be handled in Ansible 1.x
+ if module.ansible_version.startswith('1.'):
+ block = re.sub('', block, '')
+ blocklines = [marker0] + block.splitlines() + [marker1]
+ else:
+ blocklines = []
+
+ n0 = n1 = None
+ for i, line in enumerate(lines):
+ if line.startswith(marker0):
+ n0 = i
+ if line.startswith(marker1):
+ n1 = i
+
+ if None in (n0, n1):
+ n0 = None
+ if insertre is not None:
+ for i, line in enumerate(lines):
+ if insertre.search(line):
+ n0 = i
+ if n0 is None:
+ n0 = len(lines)
+ elif insertafter is not None:
+ n0 += 1
+ elif insertbefore is not None:
+ n0 = 0 # insertbefore=BOF
+ else:
+ n0 = len(lines) # insertafter=EOF
+ elif n0 < n1:
+ lines[n0:n1+1] = []
+ else:
+ lines[n1:n0+1] = []
+ n0 = n1
+
+ lines[n0:n0] = blocklines
+
+ if lines:
+ result = '\n'.join(lines)
+ if original and original.endswith('\n'):
+ result += '\n'
+ else:
+ result = ''
+ if original == result:
+ msg = ''
+ changed = False
+ elif original is None:
+ msg = 'File created'
+ changed = True
+ elif not blocklines:
+ msg = 'Block removed'
+ changed = True
+ else:
+ msg = 'Block inserted'
+ changed = True
+
+ if changed and not module.check_mode:
+ if module.boolean(params['backup']) and path_exists:
+ module.backup_local(dest)
+ write_changes(module, result, dest)
+
+ if module.check_mode and not path_exists:
+ module.exit_json(changed=changed, msg=msg)
+
+ msg, changed = check_file_attrs(module, changed, msg)
+ module.exit_json(changed=changed, msg=msg)
+
+# import module snippets
+from ansible.module_utils.basic import *
+from ansible.module_utils.splitter import *
+if __name__ == '__main__':
+ main()
diff --git a/lib/ansible/modules/extras/files/patch.py b/lib/ansible/modules/extras/files/patch.py
new file mode 100644
index 0000000000..123d667fdb
--- /dev/null
+++ b/lib/ansible/modules/extras/files/patch.py
@@ -0,0 +1,196 @@
+#!/usr/bin/python
+# -*- coding: utf-8 -*-
+
+# (c) 2012, Luis Alberto Perez Lazaro <luisperlazaro@gmail.com>
+# (c) 2015, Jakub Jirutka <jakub@jirutka.cz>
+#
+# This file is part of Ansible
+#
+# Ansible is free software: you can redistribute it and/or modify
+# it under the terms of the GNU General Public License as published by
+# the Free Software Foundation, either version 3 of the License, or
+# (at your option) any later version.
+#
+# Ansible is distributed in the hope that it will be useful,
+# but WITHOUT ANY WARRANTY; without even the implied warranty of
+# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+# GNU General Public License for more details.
+#
+# You should have received a copy of the GNU General Public License
+# along with Ansible. If not, see <http://www.gnu.org/licenses/>.
+
+DOCUMENTATION = '''
+---
+module: patch
+author:
+ - "Jakub Jirutka (@jirutka)"
+ - "Luis Alberto Perez Lazaro (@luisperlaz)"
+version_added: 1.9
+description:
+ - Apply patch files using the GNU patch tool.
+short_description: Apply patch files using the GNU patch tool.
+options:
+ basedir:
+ description:
+ - Path of a base directory in which the patch file will be applied.
+ May be omitted when the C(dest) option is specified, otherwise required.
+ required: false
+ dest:
+ description:
+ - Path of the file on the remote machine to be patched.
+ - The names of the files to be patched are usually taken from the patch
+ file, but if there's just one file to be patched it can be specified with
+ this option.
+ required: false
+ aliases: [ "originalfile" ]
+ src:
+ description:
+ - Path of the patch file as accepted by the GNU patch tool. If
+ C(remote_src) is 'no', the patch source file is looked up from the
+ module's "files" directory.
+ required: true
+ aliases: [ "patchfile" ]
+ remote_src:
+ description:
+ - If C(no), it will search for src on the originating/master machine; if C(yes), it will
+ go to the remote/target machine for the src. Default is C(no).
+ choices: [ "yes", "no" ]
+ required: false
+ default: "no"
+ strip:
+ description:
+ - Number that indicates the smallest prefix containing leading slashes
+ that will be stripped from each file name found in the patch file.
+ For more information see the strip parameter of the GNU patch tool.
+ required: false
+ type: "int"
+ default: "0"
+ backup:
+ version_added: "2.0"
+ description:
+ - passes --backup --version-control=numbered to patch,
+ producing numbered backup copies
+ choices: [ 'yes', 'no' ]
+ default: 'no'
+ binary:
+ version_added: "2.0"
+ description:
+ - Setting to C(yes) will disable patch's heuristic for transforming CRLF
+ line endings into LF. Line endings of src and dest must match. If set to
+ C(no), patch will replace CRLF in src files on POSIX.
+ required: false
+ type: "bool"
+ default: "no"
+notes:
+ - This module requires GNU I(patch) utility to be installed on the remote host.
+'''
+
+EXAMPLES = '''
+- name: apply patch to one file
+ patch: >
+ src=/tmp/index.html.patch
+ dest=/var/www/index.html
+
+- name: apply patch to multiple files under basedir
+ patch: >
+ src=/tmp/customize.patch
+ basedir=/var/www
+ strip=1
+'''
+
+import os
+from os import path, R_OK, W_OK
+
+
+class PatchError(Exception):
+ pass
+
+
+def is_already_applied(patch_func, patch_file, basedir, dest_file=None, binary=False, strip=0):
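+    # A patch that has already been applied can be cleanly applied in reverse,
+    # so `patch --reverse --dry-run` exiting 0 is used here as the
+    # "already applied" test.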
+ opts = ['--quiet', '--reverse', '--forward', '--dry-run',
+ "--strip=%s" % strip, "--directory='%s'" % basedir,
+ "--input='%s'" % patch_file]
+ if binary:
+ opts.append('--binary')
+ if dest_file:
+ opts.append("'%s'" % dest_file)
+
+ (rc, _, _) = patch_func(opts)
+ return rc == 0
+
+
+def apply_patch(patch_func, patch_file, basedir, dest_file=None, binary=False, strip=0, dry_run=False, backup=False):
+ opts = ['--quiet', '--forward', '--batch', '--reject-file=-',
+ "--strip=%s" % strip, "--directory='%s'" % basedir,
+ "--input='%s'" % patch_file]
+ if dry_run:
+ opts.append('--dry-run')
+ if binary:
+ opts.append('--binary')
+ if dest_file:
+ opts.append("'%s'" % dest_file)
+ if backup:
+ opts.append('--backup --version-control=numbered')
+
+ (rc, out, err) = patch_func(opts)
+ if rc != 0:
+ msg = err or out
+ raise PatchError(msg)
+
+
+def main():
+ module = AnsibleModule(
+ argument_spec={
+ 'src': {'required': True, 'aliases': ['patchfile']},
+ 'dest': {'aliases': ['originalfile']},
+ 'basedir': {},
+ 'strip': {'default': 0, 'type': 'int'},
+ 'remote_src': {'default': False, 'type': 'bool'},
+ # NB: for 'backup' parameter, semantics is slightly different from standard
+ # since patch will create numbered copies, not strftime("%Y-%m-%d@%H:%M:%S~")
+ 'backup': {'default': False, 'type': 'bool'},
+ 'binary': {'default': False, 'type': 'bool'},
+ },
+ required_one_of=[['dest', 'basedir']],
+ supports_check_mode=True
+ )
+
+ # Create type object as namespace for module params
+ p = type('Params', (), module.params)
+
+ p.src = os.path.expanduser(p.src)
+ if not os.access(p.src, R_OK):
+ module.fail_json(msg="src %s doesn't exist or not readable" % (p.src))
+
+ if p.dest and not os.access(p.dest, W_OK):
+ module.fail_json(msg="dest %s doesn't exist or not writable" % (p.dest))
+
+ if p.basedir and not path.exists(p.basedir):
+ module.fail_json(msg="basedir %s doesn't exist" % (p.basedir))
+
+ if not p.basedir:
+ p.basedir = path.dirname(p.dest)
+
+ patch_bin = module.get_bin_path('patch')
+ if patch_bin is None:
+ module.fail_json(msg="patch command not found")
+ patch_func = lambda opts: module.run_command("%s %s" % (patch_bin, ' '.join(opts)))
+
+ # patch needs an absolute file name
+ p.src = os.path.abspath(p.src)
+
+ changed = False
+ if not is_already_applied(patch_func, p.src, p.basedir, dest_file=p.dest, binary=p.binary, strip=p.strip):
+ try:
+ apply_patch(patch_func, p.src, p.basedir, dest_file=p.dest, binary=p.binary, strip=p.strip,
+ dry_run=module.check_mode, backup=p.backup)
+ changed = True
+ except PatchError:
+ e = get_exception()
+ module.fail_json(msg=str(e))
+
+ module.exit_json(changed=changed)
+
+# import module snippets
+from ansible.module_utils.basic import *
+if __name__ == '__main__':
+ main()
diff --git a/lib/ansible/modules/extras/identity/__init__.py b/lib/ansible/modules/extras/identity/__init__.py
new file mode 100644
index 0000000000..e69de29bb2
--- /dev/null
+++ b/lib/ansible/modules/extras/identity/__init__.py
diff --git a/lib/ansible/modules/extras/identity/opendj/__init__.py b/lib/ansible/modules/extras/identity/opendj/__init__.py
new file mode 100644
index 0000000000..e69de29bb2
--- /dev/null
+++ b/lib/ansible/modules/extras/identity/opendj/__init__.py
diff --git a/lib/ansible/modules/extras/identity/opendj/opendj_backendprop.py b/lib/ansible/modules/extras/identity/opendj/opendj_backendprop.py
new file mode 100644
index 0000000000..64571c0ed3
--- /dev/null
+++ b/lib/ansible/modules/extras/identity/opendj/opendj_backendprop.py
@@ -0,0 +1,217 @@
+#!/usr/bin/python
+# -*- coding: utf-8 -*-
+
+# (c) 2016, Werner Dijkerman (ikben@werner-dijkerman.nl)
+#
+# This file is part of Ansible
+#
+# Ansible is free software: you can redistribute it and/or modify
+# it under the terms of the GNU General Public License as published by
+# the Free Software Foundation, either version 3 of the License, or
+# (at your option) any later version.
+#
+# Ansible is distributed in the hope that it will be useful,
+# but WITHOUT ANY WARRANTY; without even the implied warranty of
+# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+# GNU General Public License for more details.
+#
+# You should have received a copy of the GNU General Public License
+# along with Ansible. If not, see <http://www.gnu.org/licenses/>.
+#
+
+DOCUMENTATION = '''
+---
+module: opendj_backendprop
+short_description: Will update the backend configuration of OpenDJ via the dsconfig set-backend-prop command.
+description:
+ - This module will update settings for OpenDJ with the command set-backend-prop.
+ - It will first check via the get-backend-prop command whether the configuration needs to be applied.
+version_added: "2.2"
+author:
+ - Werner Dijkerman
+options:
+ opendj_bindir:
+ description:
+ - The path to the bin directory of OpenDJ.
+ required: false
+ default: /opt/opendj/bin
+ hostname:
+ description:
+ - The hostname of the OpenDJ server.
+ required: true
+ port:
+ description:
+ - The Admin port on which the OpenDJ instance is available.
+ required: true
+ username:
+ description:
+ - The username to connect with.
+ required: false
+ default: cn=Directory Manager
+ password:
+ description:
+ - The password for the cn=Directory Manager user.
+ - Either password or passwordfile is needed.
+ required: false
+ passwordfile:
+ description:
+ - Location to the password file which holds the password for the cn=Directory Manager user.
+ - Either password or passwordfile is needed.
+ required: false
+ backend:
+ description:
+ - The name of the backend on which the property needs to be updated.
+ required: true
+ name:
+ description:
+ - The configuration setting to update.
+ required: true
+ value:
+ description:
+ - The value for the configuration item.
+ required: true
+ state:
+ description:
+ - Whether the configuration needs to be added/updated.
+ required: false
+ default: "present"
+'''
+
+EXAMPLES = '''
+ - name: "Add or update OpenDJ backend properties"
+ action: opendj_backendprop
+ hostname=localhost
+ port=4444
+ username="cn=Directory Manager"
+ password=password
+ backend=userRoot
+ name=index-entry-limit
+ value=5000
+'''
+
+RETURN = '''
+'''
+
+import subprocess
+
+
+class BackendProp(object):
+ def __init__(self, module):
+ self._module = module
+
+ def get_property(self, opendj_bindir, hostname, port, username, password_method, backend_name):
+ my_command = [
+ opendj_bindir + '/dsconfig',
+ 'get-backend-prop',
+ '-h', hostname,
+ '--port', str(port),
+ '--bindDN', username,
+ '--backend-name', backend_name,
+ '-n', '-X', '-s'
+ ] + password_method
+ process = subprocess.Popen(my_command, stdout=subprocess.PIPE, stderr=subprocess.PIPE)
+ stdout, stderr = process.communicate()
+ if process.returncode == 0:
+ return stdout
+ else:
+ self._module.fail_json(msg="Error message: " + str(stderr))
+
+ def set_property(self, opendj_bindir, hostname, port, username, password_method, backend_name, name, value):
+ my_command = [
+ opendj_bindir + '/dsconfig',
+ 'set-backend-prop',
+ '-h', hostname,
+ '--port', str(port),
+ '--bindDN', username,
+ '--backend-name', backend_name,
+ '--set', name + ":" + value,
+ '-n', '-X'
+ ] + password_method
+ process = subprocess.Popen(my_command, stdout=subprocess.PIPE, stderr=subprocess.PIPE)
+ stdout, stderr = process.communicate()
+ if process.returncode == 0:
+ return True
+ else:
+ self._module.fail_json(msg="Error message: " + stderr)
+
+ def validate_data(self, data=None, name=None, value=None):
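+        # The script-friendly (-s) dsconfig output requested in get_property()
+        # is one whitespace-separated "name value" pair per line, e.g.
+        # (hypothetical): index-entry-limit 4000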
+ for config_line in data.split('\n'):
+ if config_line:
+ split_line = config_line.split()
+ if split_line[0] == name:
+ if split_line[1] == value:
+ return True
+ return False
+
+
+def main():
+ module = AnsibleModule(
+ argument_spec=dict(
+ opendj_bindir=dict(default="/opt/opendj/bin"),
+ hostname=dict(required=True),
+ port=dict(required=True),
+ username=dict(default="cn=Directory Manager", required=False),
+ password=dict(required=False, no_log=True),
+ passwordfile=dict(required=False),
+ backend=dict(required=True),
+ name=dict(required=True),
+ value=dict(required=True),
+ state=dict(default="present"),
+ ),
+ supports_check_mode=True
+ )
+
+ opendj_bindir = module.params['opendj_bindir']
+ hostname = module.params['hostname']
+ port = module.params['port']
+ username = module.params['username']
+ password = module.params['password']
+ passwordfile = module.params['passwordfile']
+ backend_name = module.params['backend']
+ name = module.params['name']
+ value = module.params['value']
+ state = module.params['state']
+
+ if module.params["password"] is not None:
+ password_method = ['-w', password]
+ elif module.params["passwordfile"] is not None:
+ password_method = ['-j', passwordfile]
+ else:
+ module.fail_json(msg="No credentials are given. Use either 'password' or 'passwordfile'")
+
+ if module.params["passwordfile"] and module.params["password"]:
+ module.fail_json(msg="only one of 'password' or 'passwordfile' can be set")
+
+ opendj = BackendProp(module)
+ validate = opendj.get_property(opendj_bindir=opendj_bindir,
+ hostname=hostname,
+ port=port,
+ username=username,
+ password_method=password_method,
+ backend_name=backend_name)
+
+ if validate:
+ if not opendj.validate_data(data=validate, name=name, value=value):
+ if module.check_mode:
+ module.exit_json(changed=True)
+ if opendj.set_property(opendj_bindir=opendj_bindir,
+ hostname=hostname,
+ port=port,
+ username=username,
+ password_method=password_method,
+ backend_name=backend_name,
+ name=name,
+ value=value):
+ module.exit_json(changed=True)
+ else:
+ module.exit_json(changed=False)
+ else:
+ module.exit_json(changed=False)
+ else:
+ module.exit_json(changed=False)
+
+
+from ansible.module_utils.basic import *
+
+if __name__ == '__main__':
+ main()
diff --git a/lib/ansible/modules/extras/messaging/__init__.py b/lib/ansible/modules/extras/messaging/__init__.py
new file mode 100644
index 0000000000..e69de29bb2
--- /dev/null
+++ b/lib/ansible/modules/extras/messaging/__init__.py
diff --git a/lib/ansible/modules/extras/messaging/rabbitmq_binding.py b/lib/ansible/modules/extras/messaging/rabbitmq_binding.py
new file mode 100644
index 0000000000..c1ca32a1ce
--- /dev/null
+++ b/lib/ansible/modules/extras/messaging/rabbitmq_binding.py
@@ -0,0 +1,219 @@
+#!/usr/bin/python
+# -*- coding: utf-8 -*-
+
+# (c) 2015, Manuel Sousa <manuel.sousa@gmail.com>
+#
+# This file is part of Ansible
+#
+# Ansible is free software: you can redistribute it and/or modify
+# it under the terms of the GNU General Public License as published by
+# the Free Software Foundation, either version 3 of the License, or
+# (at your option) any later version.
+#
+# Ansible is distributed in the hope that it will be useful,
+# but WITHOUT ANY WARRANTY; without even the implied warranty of
+# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+# GNU General Public License for more details.
+#
+# You should have received a copy of the GNU General Public License
+# along with Ansible. If not, see <http://www.gnu.org/licenses/>.
+#
+
+DOCUMENTATION = '''
+---
+module: rabbitmq_binding
+author: "Manuel Sousa (@manuel-sousa)"
+version_added: "2.0"
+
+short_description: This module manages RabbitMQ bindings
+description:
+ - This module uses the RabbitMQ REST API to create/delete bindings.
+requirements: [ "requests >= 1.0.0" ]
+options:
+ state:
+ description:
+ - Whether the binding should be present or absent
+ - Only C(present) is implemented at the moment
+ choices: [ "present", "absent" ]
+ required: false
+ default: present
+ name:
+ description:
+ - source exchange to create binding on
+ required: true
+ aliases: [ "src", "source" ]
+ login_user:
+ description:
+ - rabbitMQ user for connection
+ required: false
+ default: guest
+ login_password:
+ description:
+ - rabbitMQ password for connection
+ required: false
+ default: guest
+ login_host:
+ description:
+ - rabbitMQ host for connection
+ required: false
+ default: localhost
+ login_port:
+ description:
+ - rabbitMQ management api port
+ required: false
+ default: 15672
+ vhost:
+ description:
+ - rabbitMQ virtual host
+ - default vhost is /
+ required: false
+ default: "/"
+ destination:
+ description:
+ - destination exchange or queue for the binding
+ required: true
+ aliases: [ "dst", "dest" ]
+ destination_type:
+ description:
+ - Either queue or exchange
+ required: true
+ choices: [ "queue", "exchange" ]
+ aliases: [ "type", "dest_type" ]
+ routing_key:
+ description:
+ - routing key for the binding
+ - default is #
+ required: false
+ default: "#"
+ arguments:
+ description:
+ - extra arguments for exchange. If defined this argument is a key/value dictionary
+ required: false
+ default: {}
+'''
+
+EXAMPLES = '''
+# Bind myQueue to directExchange with routing key info
+- rabbitmq_binding: name=directExchange destination=myQueue type=queue routing_key=info
+
+# Bind directExchange to topicExchange with routing key *.info
+- rabbitmq_binding: name=topicExchange destination=topicExchange type=exchange routing_key="*.info"
+'''
+
+import requests
+import urllib
+import json
+
+def main():
+ module = AnsibleModule(
+ argument_spec = dict(
+ state = dict(default='present', choices=['present', 'absent'], type='str'),
+ name = dict(required=True, aliases=[ "src", "source" ], type='str'),
+ login_user = dict(default='guest', type='str'),
+ login_password = dict(default='guest', type='str', no_log=True),
+ login_host = dict(default='localhost', type='str'),
+ login_port = dict(default='15672', type='str'),
+ vhost = dict(default='/', type='str'),
+ destination = dict(required=True, aliases=[ "dst", "dest"], type='str'),
+ destination_type = dict(required=True, aliases=[ "type", "dest_type"], choices=[ "queue", "exchange" ],type='str'),
+ routing_key = dict(default='#', type='str'),
+ arguments = dict(default=dict(), type='dict')
+ ),
+ supports_check_mode = True
+ )
+
+ if module.params['destination_type'] == "queue":
+ dest_type="q"
+ else:
+ dest_type="e"
+
+ if module.params['routing_key'] == "":
+ props = "~"
+ else:
+ props = urllib.quote(module.params['routing_key'],'')
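+    # The management API addresses a binding by its "properties key": an empty
+    # routing key is represented as '~' in the URL, otherwise the URL-quoted
+    # routing key itself is used.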
+
+ url = "http://%s:%s/api/bindings/%s/e/%s/%s/%s/%s" % (
+ module.params['login_host'],
+ module.params['login_port'],
+ urllib.quote(module.params['vhost'],''),
+ urllib.quote(module.params['name'],''),
+ dest_type,
+ urllib.quote(module.params['destination'],''),
+ props
+ )
+
+ # Check if binding already exists
+ r = requests.get( url, auth=(module.params['login_user'],module.params['login_password']))
+
+ if r.status_code==200:
+ binding_exists = True
+ response = r.json()
+ elif r.status_code==404:
+ binding_exists = False
+ response = r.text
+ else:
+ module.fail_json(
+ msg = "Invalid response from RESTAPI when trying to check if exchange exists",
+ details = r.text
+ )
+
+ if module.params['state']=='present':
+ change_required = not binding_exists
+ else:
+ change_required = binding_exists
+
+ # Exit if check_mode
+ if module.check_mode:
+ module.exit_json(
+ changed= change_required,
+ name = module.params['name'],
+ details = response,
+ arguments = module.params['arguments']
+ )
+
+ # Do changes
+ if change_required:
+ if module.params['state'] == 'present':
+ url = "http://%s:%s/api/bindings/%s/e/%s/%s/%s" % (
+ module.params['login_host'],
+ module.params['login_port'],
+ urllib.quote(module.params['vhost'],''),
+ urllib.quote(module.params['name'],''),
+ dest_type,
+ urllib.quote(module.params['destination'],'')
+ )
+
+ r = requests.post(
+ url,
+ auth = (module.params['login_user'],module.params['login_password']),
+ headers = { "content-type": "application/json"},
+ data = json.dumps({
+ "routing_key": module.params['routing_key'],
+ "arguments": module.params['arguments']
+ })
+ )
+ elif module.params['state'] == 'absent':
+ r = requests.delete( url, auth = (module.params['login_user'],module.params['login_password']))
+
+ if r.status_code == 204 or r.status_code == 201:
+ module.exit_json(
+ changed = True,
+ name = module.params['name'],
+ destination = module.params['destination']
+ )
+ else:
+ module.fail_json(
+ msg = "Error creating exchange",
+ status = r.status_code,
+ details = r.text
+ )
+
+ else:
+ module.exit_json(
+ changed = False,
+ name = module.params['name']
+ )
+
+# import module snippets
+from ansible.module_utils.basic import *
+if __name__ == '__main__':
+ main()
diff --git a/lib/ansible/modules/extras/messaging/rabbitmq_exchange.py b/lib/ansible/modules/extras/messaging/rabbitmq_exchange.py
new file mode 100644
index 0000000000..7b55b5c683
--- /dev/null
+++ b/lib/ansible/modules/extras/messaging/rabbitmq_exchange.py
@@ -0,0 +1,218 @@
+#!/usr/bin/python
+# -*- coding: utf-8 -*-
+
+# (c) 2015, Manuel Sousa <manuel.sousa@gmail.com>
+#
+# This file is part of Ansible
+#
+# Ansible is free software: you can redistribute it and/or modify
+# it under the terms of the GNU General Public License as published by
+# the Free Software Foundation, either version 3 of the License, or
+# (at your option) any later version.
+#
+# Ansible is distributed in the hope that it will be useful,
+# but WITHOUT ANY WARRANTY; without even the implied warranty of
+# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+# GNU General Public License for more details.
+#
+# You should have received a copy of the GNU General Public License
+# along with Ansible. If not, see <http://www.gnu.org/licenses/>.
+#
+
+DOCUMENTATION = '''
+---
+module: rabbitmq_exchange
+author: "Manuel Sousa (@manuel-sousa)"
+version_added: "2.0"
+
+short_description: This module manages RabbitMQ exchanges
+description:
+ - This module uses the RabbitMQ REST API to create/delete exchanges.
+requirements: [ "requests >= 1.0.0" ]
+options:
+ name:
+ description:
+ - Name of the exchange to create
+ required: true
+ state:
+ description:
+ - Whether the exchange should be present or absent
+ - Only C(present) is implemented at the moment
+ choices: [ "present", "absent" ]
+ required: false
+ default: present
+ login_user:
+ description:
+ - rabbitMQ user for connection
+ required: false
+ default: guest
+ login_password:
+ description:
+ - rabbitMQ password for connection
+ required: false
+ default: guest
+ login_host:
+ description:
+ - rabbitMQ host for connection
+ required: false
+ default: localhost
+ login_port:
+ description:
+ - rabbitMQ management api port
+ required: false
+ default: 15672
+ vhost:
+ description:
+ - rabbitMQ virtual host
+ required: false
+ default: "/"
+ durable:
+ description:
+ - whether exchange is durable or not
+ required: false
+ choices: [ "yes", "no" ]
+ default: yes
+ exchange_type:
+ description:
+ - type for the exchange
+ required: false
+ choices: [ "fanout", "direct", "headers", "topic" ]
+ aliases: [ "type" ]
+ default: direct
+ auto_delete:
+ description:
+ - If the exchange should delete itself after all queues/exchanges have unbound from it
+ required: false
+ choices: [ "yes", "no" ]
+ default: no
+ internal:
+ description:
+ - exchange is available only for other exchanges
+ required: false
+ choices: [ "yes", "no" ]
+ default: no
+ arguments:
+ description:
+ - extra arguments for exchange. If defined this argument is a key/value dictionary
+ required: false
+ default: {}
+'''
+
+EXAMPLES = '''
+# Create direct exchange
+- rabbitmq_exchange: name=directExchange
+
+# Create topic exchange on vhost
+- rabbitmq_exchange: name=topicExchange type=topic vhost=myVhost
+'''
+
+import requests
+import urllib
+import json
+
+def main():
+ module = AnsibleModule(
+ argument_spec = dict(
+ state = dict(default='present', choices=['present', 'absent'], type='str'),
+ name = dict(required=True, type='str'),
+ login_user = dict(default='guest', type='str'),
+ login_password = dict(default='guest', type='str', no_log=True),
+ login_host = dict(default='localhost', type='str'),
+ login_port = dict(default='15672', type='str'),
+ vhost = dict(default='/', type='str'),
+ durable = dict(default=True, type='bool'),
+ auto_delete = dict(default=False, type='bool'),
+ internal = dict(default=False, type='bool'),
+ exchange_type = dict(default='direct', aliases=['type'], type='str'),
+ arguments = dict(default=dict(), type='dict')
+ ),
+ supports_check_mode = True
+ )
+
+ url = "http://%s:%s/api/exchanges/%s/%s" % (
+ module.params['login_host'],
+ module.params['login_port'],
+ urllib.quote(module.params['vhost'],''),
+ urllib.quote(module.params['name'],'')
+ )
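+    # Idempotency flow: GET the exchange URL to test existence (200 vs 404),
+    # then PUT to create or DELETE to remove; the code below treats 204 as a
+    # successful change.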
+
+ # Check if exchange already exists
+ r = requests.get( url, auth=(module.params['login_user'],module.params['login_password']))
+
+ if r.status_code==200:
+ exchange_exists = True
+ response = r.json()
+ elif r.status_code==404:
+ exchange_exists = False
+ response = r.text
+ else:
+ module.fail_json(
+ msg = "Invalid response from RESTAPI when trying to check if exchange exists",
+ details = r.text
+ )
+
+ if module.params['state']=='present':
+ change_required = not exchange_exists
+ else:
+ change_required = exchange_exists
+
+ # Check if attributes change on existing exchange
+ if not change_required and r.status_code==200 and module.params['state'] == 'present':
+ if not (
+ response['durable'] == module.params['durable'] and
+ response['auto_delete'] == module.params['auto_delete'] and
+ response['internal'] == module.params['internal'] and
+ response['type'] == module.params['exchange_type']
+ ):
+ module.fail_json(
+ msg = "RabbitMQ RESTAPI doesn't support attribute changes for existing exchanges"
+ )
+
+ # Exit if check_mode
+ if module.check_mode:
+ module.exit_json(
+ changed= change_required,
+ name = module.params['name'],
+ details = response,
+ arguments = module.params['arguments']
+ )
+
+ # Do changes
+ if change_required:
+ if module.params['state'] == 'present':
+ r = requests.put(
+ url,
+ auth = (module.params['login_user'],module.params['login_password']),
+ headers = { "content-type": "application/json"},
+ data = json.dumps({
+ "durable": module.params['durable'],
+ "auto_delete": module.params['auto_delete'],
+ "internal": module.params['internal'],
+ "type": module.params['exchange_type'],
+ "arguments": module.params['arguments']
+ })
+ )
+ elif module.params['state'] == 'absent':
+ r = requests.delete( url, auth = (module.params['login_user'],module.params['login_password']))
+
+ if r.status_code == 204:
+ module.exit_json(
+ changed = True,
+ name = module.params['name']
+ )
+ else:
+ module.fail_json(
+ msg = "Error creating exchange",
+ status = r.status_code,
+ details = r.text
+ )
+
+ else:
+ module.exit_json(
+ changed = False,
+ name = module.params['name']
+ )
+
+# import module snippets
+from ansible.module_utils.basic import *
+if __name__ == '__main__':
+ main()
diff --git a/lib/ansible/modules/extras/messaging/rabbitmq_parameter.py b/lib/ansible/modules/extras/messaging/rabbitmq_parameter.py
new file mode 100644
index 0000000000..9022910928
--- /dev/null
+++ b/lib/ansible/modules/extras/messaging/rabbitmq_parameter.py
@@ -0,0 +1,159 @@
+#!/usr/bin/python
+# -*- coding: utf-8 -*-
+
+# (c) 2013, Chatham Financial <oss@chathamfinancial.com>
+#
+# This file is part of Ansible
+#
+# Ansible is free software: you can redistribute it and/or modify
+# it under the terms of the GNU General Public License as published by
+# the Free Software Foundation, either version 3 of the License, or
+# (at your option) any later version.
+#
+# Ansible is distributed in the hope that it will be useful,
+# but WITHOUT ANY WARRANTY; without even the implied warranty of
+# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+# GNU General Public License for more details.
+#
+# You should have received a copy of the GNU General Public License
+# along with Ansible. If not, see <http://www.gnu.org/licenses/>.
+
+DOCUMENTATION = '''
+---
+module: rabbitmq_parameter
+short_description: Adds or removes parameters to RabbitMQ
+description:
+ - Manage dynamic, cluster-wide parameters for RabbitMQ
+version_added: "1.1"
+author: '"Chris Hoffman (@chrishoffman)"'
+options:
+ component:
+ description:
+ - Name of the component of which the parameter is being set
+ required: true
+ default: null
+ name:
+ description:
+ - Name of the parameter being set
+ required: true
+ default: null
+ value:
+ description:
+ - Value of the parameter, as a JSON term
+ required: false
+ default: null
+ vhost:
+ description:
+      - vhost to apply the parameter to.
+ required: false
+ default: /
+ node:
+ description:
+ - erlang node name of the rabbit we wish to configure
+ required: false
+ default: rabbit
+ version_added: "1.2"
+ state:
+ description:
+      - Specify if parameter is to be added or removed
+ required: false
+ default: present
+ choices: [ 'present', 'absent']
+'''
+
+EXAMPLES = """
+# Set the federation parameter 'local-username' to a value of 'guest' (in quotes)
+- rabbitmq_parameter: component=federation
+ name=local-username
+ value='"guest"'
+ state=present
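+
+# Clear the parameter again (illustrative; component and name must match the
+# parameter that was set)
+- rabbitmq_parameter: component=federation
+                      name=local-username
+                      state=absent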
+"""
+
+class RabbitMqParameter(object):
+ def __init__(self, module, component, name, value, vhost, node):
+ self.module = module
+ self.component = component
+ self.name = name
+ self.value = value
+ self.vhost = vhost
+ self.node = node
+
+ self._value = None
+
+ self._rabbitmqctl = module.get_bin_path('rabbitmqctl', True)
+
+ def _exec(self, args, run_in_check_mode=False):
+ if not self.module.check_mode or (self.module.check_mode and run_in_check_mode):
+ cmd = [self._rabbitmqctl, '-q', '-n', self.node]
+ rc, out, err = self.module.run_command(cmd + args, check_rc=True)
+ return out.splitlines()
+ return list()
+
+ def get(self):
+ parameters = self._exec(['list_parameters', '-p', self.vhost], True)
+
+ for param_item in parameters:
+ component, name, value = param_item.split('\t')
+
+ if component == self.component and name == self.name:
+ self._value = json.loads(value)
+ return True
+ return False
+
+ def set(self):
+ self._exec(['set_parameter',
+ '-p',
+ self.vhost,
+ self.component,
+ self.name,
+ json.dumps(self.value)])
+
+ def delete(self):
+ self._exec(['clear_parameter', '-p', self.vhost, self.component, self.name])
+
+ def has_modifications(self):
+ return self.value != self._value
+
+def main():
+ arg_spec = dict(
+ component=dict(required=True),
+ name=dict(required=True),
+ value=dict(default=None),
+ vhost=dict(default='/'),
+ state=dict(default='present', choices=['present', 'absent']),
+ node=dict(default='rabbit')
+ )
+ module = AnsibleModule(
+ argument_spec=arg_spec,
+ supports_check_mode=True
+ )
+
+ component = module.params['component']
+ name = module.params['name']
+ value = module.params['value']
+ if isinstance(value, str):
+ value = json.loads(value)
+ vhost = module.params['vhost']
+ state = module.params['state']
+ node = module.params['node']
+
+ rabbitmq_parameter = RabbitMqParameter(module, component, name, value, vhost, node)
+
+ changed = False
+ if rabbitmq_parameter.get():
+ if state == 'absent':
+ rabbitmq_parameter.delete()
+ changed = True
+ else:
+ if rabbitmq_parameter.has_modifications():
+ rabbitmq_parameter.set()
+ changed = True
+ elif state == 'present':
+ rabbitmq_parameter.set()
+ changed = True
+
+ module.exit_json(changed=changed, component=component, name=name, vhost=vhost, state=state)
+
+# import module snippets
+from ansible.module_utils.basic import *
+main()
diff --git a/lib/ansible/modules/extras/messaging/rabbitmq_plugin.py b/lib/ansible/modules/extras/messaging/rabbitmq_plugin.py
new file mode 100644
index 0000000000..b52de337e2
--- /dev/null
+++ b/lib/ansible/modules/extras/messaging/rabbitmq_plugin.py
@@ -0,0 +1,150 @@
+#!/usr/bin/python
+# -*- coding: utf-8 -*-
+
+# (c) 2013, Chatham Financial <oss@chathamfinancial.com>
+#
+# This file is part of Ansible
+#
+# Ansible is free software: you can redistribute it and/or modify
+# it under the terms of the GNU General Public License as published by
+# the Free Software Foundation, either version 3 of the License, or
+# (at your option) any later version.
+#
+# Ansible is distributed in the hope that it will be useful,
+# but WITHOUT ANY WARRANTY; without even the implied warranty of
+# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+# GNU General Public License for more details.
+#
+# You should have received a copy of the GNU General Public License
+# along with Ansible. If not, see <http://www.gnu.org/licenses/>.
+
+DOCUMENTATION = '''
+---
+module: rabbitmq_plugin
+short_description: Adds or removes plugins to RabbitMQ
+description:
+ - Enables or disables RabbitMQ plugins
+version_added: "1.1"
+author: '"Chris Hoffman (@chrishoffman)"'
+options:
+ names:
+ description:
+ - Comma-separated list of plugin names
+ required: true
+ default: null
+ aliases: [name]
+ new_only:
+ description:
+ - Only enable missing plugins
+ - Does not disable plugins that are not in the names list
+ required: false
+ default: "no"
+ choices: [ "yes", "no" ]
+ state:
+ description:
+ - Specify if plugins are to be enabled or disabled
+ required: false
+ default: enabled
+ choices: [enabled, disabled]
+ prefix:
+ description:
+      - Specify a custom install prefix for RabbitMQ
+ required: false
+ version_added: "1.3"
+ default: null
+'''
+
+EXAMPLES = '''
+# Enables the rabbitmq_management plugin
+- rabbitmq_plugin: names=rabbitmq_management state=enabled
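+
+# A sketch with illustrative plugin names: enable several plugins in one task,
+# leaving any other already-enabled plugins untouched
+- rabbitmq_plugin: names=rabbitmq_management,rabbitmq_federation new_only=yes state=enabled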
+'''
+
+import os
+
+class RabbitMqPlugins(object):
+ def __init__(self, module):
+ self.module = module
+
+ if module.params['prefix']:
+ if os.path.isdir(os.path.join(module.params['prefix'], 'bin')):
+ bin_path = os.path.join(module.params['prefix'], 'bin')
+ elif os.path.isdir(os.path.join(module.params['prefix'], 'sbin')):
+ bin_path = os.path.join(module.params['prefix'], 'sbin')
+ else:
+ # No such path exists.
+ raise Exception("No binary folder in prefix %s" %
+ module.params['prefix'])
+
+ self._rabbitmq_plugins = bin_path + "/rabbitmq-plugins"
+
+ else:
+ self._rabbitmq_plugins = module.get_bin_path('rabbitmq-plugins', True)
+
+ def _exec(self, args, run_in_check_mode=False):
+ if not self.module.check_mode or (self.module.check_mode and run_in_check_mode):
+ cmd = [self._rabbitmq_plugins]
+ rc, out, err = self.module.run_command(cmd + args, check_rc=True)
+ return out.splitlines()
+ return list()
+
+ def get_all(self):
+ list_output = self._exec(['list', '-E', '-m'], True)
+ plugins = []
+ for plugin in list_output:
+ if not plugin:
+ break
+ plugins.append(plugin)
+
+ return plugins
+
+ def enable(self, name):
+ self._exec(['enable', name])
+
+ def disable(self, name):
+ self._exec(['disable', name])
+
+
+def main():
+ arg_spec = dict(
+ names=dict(required=True, aliases=['name']),
+ new_only=dict(default='no', type='bool'),
+ state=dict(default='enabled', choices=['enabled', 'disabled']),
+ prefix=dict(required=False, default=None)
+ )
+ module = AnsibleModule(
+ argument_spec=arg_spec,
+ supports_check_mode=True
+ )
+
+ names = module.params['names'].split(',')
+ new_only = module.params['new_only']
+ state = module.params['state']
+
+ rabbitmq_plugins = RabbitMqPlugins(module)
+ enabled_plugins = rabbitmq_plugins.get_all()
+
+ enabled = []
+ disabled = []
+ if state == 'enabled':
+ if not new_only:
+ for plugin in enabled_plugins:
+ if plugin not in names:
+ rabbitmq_plugins.disable(plugin)
+ disabled.append(plugin)
+
+ for name in names:
+ if name not in enabled_plugins:
+ rabbitmq_plugins.enable(name)
+ enabled.append(name)
+ else:
+ for plugin in enabled_plugins:
+ if plugin in names:
+ rabbitmq_plugins.disable(plugin)
+ disabled.append(plugin)
+
+ changed = len(enabled) > 0 or len(disabled) > 0
+ module.exit_json(changed=changed, enabled=enabled, disabled=disabled)
+
+# import module snippets
+from ansible.module_utils.basic import *
+main()
diff --git a/lib/ansible/modules/extras/messaging/rabbitmq_policy.py b/lib/ansible/modules/extras/messaging/rabbitmq_policy.py
new file mode 100644
index 0000000000..a9207b3cbc
--- /dev/null
+++ b/lib/ansible/modules/extras/messaging/rabbitmq_policy.py
@@ -0,0 +1,168 @@
+#!/usr/bin/python
+# -*- coding: utf-8 -*-
+
+# (c) 2013, John Dewey <john@dewey.ws>
+#
+# This file is part of Ansible
+#
+# Ansible is free software: you can redistribute it and/or modify
+# it under the terms of the GNU General Public License as published by
+# the Free Software Foundation, either version 3 of the License, or
+# (at your option) any later version.
+#
+# Ansible is distributed in the hope that it will be useful,
+# but WITHOUT ANY WARRANTY; without even the implied warranty of
+# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+# GNU General Public License for more details.
+#
+# You should have received a copy of the GNU General Public License
+# along with Ansible. If not, see <http://www.gnu.org/licenses/>.
+#
+
+DOCUMENTATION = '''
+---
+module: rabbitmq_policy
+short_description: Manage the state of policies in RabbitMQ.
+description:
+    - Manage the state of a policy in RabbitMQ.
+version_added: "1.5"
+author: "John Dewey (@retr0h)"
+options:
+ name:
+ description:
+ - The name of the policy to manage.
+ required: true
+ default: null
+ vhost:
+ description:
+ - The name of the vhost to apply to.
+ required: false
+ default: /
+ apply_to:
+ description:
+ - What the policy applies to. Requires RabbitMQ 3.2.0 or later.
+ required: false
+ default: all
+ choices: [all, exchanges, queues]
+ version_added: "2.1"
+ pattern:
+ description:
+ - A regex of queues to apply the policy to.
+ required: true
+ default: null
+ tags:
+ description:
+ - A dict or string describing the policy.
+ required: true
+ default: null
+ priority:
+ description:
+ - The priority of the policy.
+ required: false
+ default: 0
+ node:
+ description:
+ - Erlang node name of the rabbit we wish to configure.
+ required: false
+ default: rabbit
+ state:
+ description:
+ - The state of the policy.
+ default: present
+ choices: [present, absent]
+'''
+
+EXAMPLES = '''
+- name: ensure the default vhost contains the HA policy via a dict
+ rabbitmq_policy: name=HA pattern='.*'
+ args:
+ tags:
+ "ha-mode": all
+
+- name: ensure the default vhost contains the HA policy
+ rabbitmq_policy: name=HA pattern='.*' tags="ha-mode=all"
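+
+# An illustrative sketch: apply a policy only to queues, with an explicit priority
+- rabbitmq_policy: name=HA-queues pattern='.*' apply_to=queues priority=1
+  args:
+    tags:
+      "ha-mode": all
+
+# Remove a policy (pattern and tags are still required by the argument spec)
+- rabbitmq_policy: name=HA pattern='.*' tags="ha-mode=all" state=absent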
+'''
+
+import json
+
+class RabbitMqPolicy(object):
+ def __init__(self, module, name):
+ self._module = module
+ self._name = name
+ self._vhost = module.params['vhost']
+ self._pattern = module.params['pattern']
+ self._apply_to = module.params['apply_to']
+ self._tags = module.params['tags']
+ self._priority = module.params['priority']
+ self._node = module.params['node']
+ self._rabbitmqctl = module.get_bin_path('rabbitmqctl', True)
+
+ def _exec(self, args, run_in_check_mode=False):
+ if not self._module.check_mode or (self._module.check_mode and run_in_check_mode):
+ cmd = [self._rabbitmqctl, '-q', '-n', self._node]
+ args.insert(1, '-p')
+ args.insert(2, self._vhost)
+ rc, out, err = self._module.run_command(cmd + args, check_rc=True)
+ return out.splitlines()
+ return list()
+
+ def list(self):
+ policies = self._exec(['list_policies'], True)
+
+ for policy in policies:
+ policy_name = policy.split('\t')[1]
+ if policy_name == self._name:
+ return True
+ return False
+
+    def set(self):
+        args = ['set_policy']
+ args.append(self._name)
+ args.append(self._pattern)
+ args.append(json.dumps(self._tags))
+ args.append('--priority')
+ args.append(self._priority)
+ if (self._apply_to != 'all'):
+ args.append('--apply-to')
+ args.append(self._apply_to)
+ return self._exec(args)
+
+ def clear(self):
+ return self._exec(['clear_policy', self._name])
+
+
+def main():
+ arg_spec = dict(
+ name=dict(required=True),
+ vhost=dict(default='/'),
+ pattern=dict(required=True),
+ apply_to=dict(default='all', choices=['all', 'exchanges', 'queues']),
+ tags=dict(type='dict', required=True),
+ priority=dict(default='0'),
+ node=dict(default='rabbit'),
+ state=dict(default='present', choices=['present', 'absent']),
+ )
+
+ module = AnsibleModule(
+ argument_spec=arg_spec,
+ supports_check_mode=True
+ )
+
+ name = module.params['name']
+ state = module.params['state']
+ rabbitmq_policy = RabbitMqPolicy(module, name)
+
+ changed = False
+ if rabbitmq_policy.list():
+ if state == 'absent':
+ rabbitmq_policy.clear()
+ changed = True
+ else:
+ changed = False
+ elif state == 'present':
+ rabbitmq_policy.set()
+ changed = True
+
+ module.exit_json(changed=changed, name=name, state=state)
+
+# import module snippets
+from ansible.module_utils.basic import *
+main()
diff --git a/lib/ansible/modules/extras/messaging/rabbitmq_queue.py b/lib/ansible/modules/extras/messaging/rabbitmq_queue.py
new file mode 100644
index 0000000000..afdd410349
--- /dev/null
+++ b/lib/ansible/modules/extras/messaging/rabbitmq_queue.py
@@ -0,0 +1,263 @@
+#!/usr/bin/python
+# -*- coding: utf-8 -*-
+
+# (c) 2015, Manuel Sousa <manuel.sousa@gmail.com>
+#
+# This file is part of Ansible
+#
+# Ansible is free software: you can redistribute it and/or modify
+# it under the terms of the GNU General Public License as published by
+# the Free Software Foundation, either version 3 of the License, or
+# (at your option) any later version.
+#
+# Ansible is distributed in the hope that it will be useful,
+# but WITHOUT ANY WARRANTY; without even the implied warranty of
+# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+# GNU General Public License for more details.
+#
+# You should have received a copy of the GNU General Public License
+# along with Ansible. If not, see <http://www.gnu.org/licenses/>.
+#
+
+DOCUMENTATION = '''
+---
+module: rabbitmq_queue
+author: "Manuel Sousa (@manuel-sousa)"
+version_added: "2.0"
+
+short_description: This module manages RabbitMQ queues
+description:
+  - This module uses the RabbitMQ REST API to create/delete queues
+requirements: [ "requests >= 1.0.0" ]
+options:
+ name:
+ description:
+ - Name of the queue to create
+ required: true
+ state:
+ description:
+      - Whether the queue should be present or absent
+ choices: [ "present", "absent" ]
+ required: false
+ default: present
+ login_user:
+ description:
+ - rabbitMQ user for connection
+ required: false
+ default: guest
+ login_password:
+ description:
+ - rabbitMQ password for connection
+ required: false
+    default: guest
+ login_host:
+ description:
+ - rabbitMQ host for connection
+ required: false
+ default: localhost
+ login_port:
+ description:
+ - rabbitMQ management api port
+ required: false
+ default: 15672
+ vhost:
+ description:
+ - rabbitMQ virtual host
+ required: false
+ default: "/"
+ durable:
+ description:
+ - whether queue is durable or not
+ required: false
+ choices: [ "yes", "no" ]
+ default: yes
+ auto_delete:
+ description:
+      - if the queue should delete itself after all consumers have unsubscribed from it
+ required: false
+ choices: [ "yes", "no" ]
+ default: no
+ message_ttl:
+ description:
+ - How long a message can live in queue before it is discarded (milliseconds)
+ required: False
+ default: forever
+ auto_expires:
+ description:
+ - How long a queue can be unused before it is automatically deleted (milliseconds)
+ required: false
+ default: forever
+ max_length:
+ description:
+ - How many messages can the queue contain before it starts rejecting
+ required: false
+ default: no limit
+ dead_letter_exchange:
+ description:
+ - Optional name of an exchange to which messages will be republished if they
+ - are rejected or expire
+ required: false
+ default: None
+ dead_letter_routing_key:
+ description:
+ - Optional replacement routing key to use when a message is dead-lettered.
+ - Original routing key will be used if unset
+ required: false
+ default: None
+ arguments:
+ description:
+ - extra arguments for queue. If defined this argument is a key/value dictionary
+ required: false
+ default: {}
+'''
+
+EXAMPLES = '''
+# Create a queue
+- rabbitmq_queue: name=myQueue
+
+# Create a queue on remote host
+- rabbitmq_queue: name=myRemoteQueue login_user=user login_password=secret login_host=remote.example.org
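+
+# A sketch with illustrative values: a durable queue whose messages expire
+# after 60 seconds and are dead-lettered to another exchange
+- rabbitmq_queue: name=myTTLQueue durable=yes message_ttl=60000 dead_letter_exchange=deadExchange
+
+# Remove a queue
+- rabbitmq_queue: name=myQueue state=absent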
+'''
+
+import requests
+import urllib
+import json
+
+def main():
+ module = AnsibleModule(
+ argument_spec = dict(
+ state = dict(default='present', choices=['present', 'absent'], type='str'),
+ name = dict(required=True, type='str'),
+ login_user = dict(default='guest', type='str'),
+ login_password = dict(default='guest', type='str', no_log=True),
+ login_host = dict(default='localhost', type='str'),
+ login_port = dict(default='15672', type='str'),
+ vhost = dict(default='/', type='str'),
+ durable = dict(default=True, type='bool'),
+ auto_delete = dict(default=False, type='bool'),
+ message_ttl = dict(default=None, type='int'),
+ auto_expires = dict(default=None, type='int'),
+ max_length = dict(default=None, type='int'),
+ dead_letter_exchange = dict(default=None, type='str'),
+ dead_letter_routing_key = dict(default=None, type='str'),
+ arguments = dict(default=dict(), type='dict')
+ ),
+ supports_check_mode = True
+ )
+
+ url = "http://%s:%s/api/queues/%s/%s" % (
+ module.params['login_host'],
+ module.params['login_port'],
+ urllib.quote(module.params['vhost'],''),
+        urllib.quote(module.params['name'],'')
+ )
+
+ # Check if queue already exists
+ r = requests.get( url, auth=(module.params['login_user'],module.params['login_password']))
+
+ if r.status_code==200:
+ queue_exists = True
+ response = r.json()
+ elif r.status_code==404:
+ queue_exists = False
+ response = r.text
+ else:
+ module.fail_json(
+ msg = "Invalid response from RESTAPI when trying to check if queue exists",
+ details = r.text
+ )
+
+ if module.params['state']=='present':
+ change_required = not queue_exists
+ else:
+ change_required = queue_exists
+
+ # Check if attributes change on existing queue
+ if not change_required and r.status_code==200 and module.params['state'] == 'present':
+ if not (
+ response['durable'] == module.params['durable'] and
+ response['auto_delete'] == module.params['auto_delete'] and
+ (
+ ( 'x-message-ttl' in response['arguments'] and response['arguments']['x-message-ttl'] == module.params['message_ttl'] ) or
+ ( 'x-message-ttl' not in response['arguments'] and module.params['message_ttl'] is None )
+ ) and
+ (
+ ( 'x-expires' in response['arguments'] and response['arguments']['x-expires'] == module.params['auto_expires'] ) or
+ ( 'x-expires' not in response['arguments'] and module.params['auto_expires'] is None )
+ ) and
+ (
+ ( 'x-max-length' in response['arguments'] and response['arguments']['x-max-length'] == module.params['max_length'] ) or
+ ( 'x-max-length' not in response['arguments'] and module.params['max_length'] is None )
+ ) and
+ (
+ ( 'x-dead-letter-exchange' in response['arguments'] and response['arguments']['x-dead-letter-exchange'] == module.params['dead_letter_exchange'] ) or
+ ( 'x-dead-letter-exchange' not in response['arguments'] and module.params['dead_letter_exchange'] is None )
+ ) and
+ (
+ ( 'x-dead-letter-routing-key' in response['arguments'] and response['arguments']['x-dead-letter-routing-key'] == module.params['dead_letter_routing_key'] ) or
+ ( 'x-dead-letter-routing-key' not in response['arguments'] and module.params['dead_letter_routing_key'] is None )
+ )
+ ):
+ module.fail_json(
+ msg = "RabbitMQ RESTAPI doesn't support attribute changes for existing queues",
+ )
+
+
+ # Copy parameters to arguments as used by RabbitMQ
+ for k,v in {
+ 'message_ttl': 'x-message-ttl',
+ 'auto_expires': 'x-expires',
+ 'max_length': 'x-max-length',
+ 'dead_letter_exchange': 'x-dead-letter-exchange',
+ 'dead_letter_routing_key': 'x-dead-letter-routing-key'
+ }.items():
+ if module.params[k]:
+ module.params['arguments'][v] = module.params[k]
+
+ # Exit if check_mode
+ if module.check_mode:
+ module.exit_json(
+ changed= change_required,
+ name = module.params['name'],
+ details = response,
+ arguments = module.params['arguments']
+ )
+
+ # Do changes
+ if change_required:
+ if module.params['state'] == 'present':
+ r = requests.put(
+ url,
+ auth = (module.params['login_user'],module.params['login_password']),
+ headers = { "content-type": "application/json"},
+ data = json.dumps({
+ "durable": module.params['durable'],
+ "auto_delete": module.params['auto_delete'],
+ "arguments": module.params['arguments']
+ })
+ )
+ elif module.params['state'] == 'absent':
+ r = requests.delete( url, auth = (module.params['login_user'],module.params['login_password']))
+
+ if r.status_code == 204:
+ module.exit_json(
+ changed = True,
+ name = module.params['name']
+ )
+ else:
+ module.fail_json(
+ msg = "Error creating queue",
+ status = r.status_code,
+ details = r.text
+ )
+
+ else:
+ module.exit_json(
+ changed = False,
+ name = module.params['name']
+ )
+
+# import module snippets
+from ansible.module_utils.basic import *
+main()
diff --git a/lib/ansible/modules/extras/messaging/rabbitmq_user.py b/lib/ansible/modules/extras/messaging/rabbitmq_user.py
new file mode 100644
index 0000000000..103650e2c9
--- /dev/null
+++ b/lib/ansible/modules/extras/messaging/rabbitmq_user.py
@@ -0,0 +1,302 @@
+#!/usr/bin/python
+# -*- coding: utf-8 -*-
+
+# (c) 2013, Chatham Financial <oss@chathamfinancial.com>
+#
+# This file is part of Ansible
+#
+# Ansible is free software: you can redistribute it and/or modify
+# it under the terms of the GNU General Public License as published by
+# the Free Software Foundation, either version 3 of the License, or
+# (at your option) any later version.
+#
+# Ansible is distributed in the hope that it will be useful,
+# but WITHOUT ANY WARRANTY; without even the implied warranty of
+# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+# GNU General Public License for more details.
+#
+# You should have received a copy of the GNU General Public License
+# along with Ansible. If not, see <http://www.gnu.org/licenses/>.
+
+DOCUMENTATION = '''
+---
+module: rabbitmq_user
+short_description: Adds or removes users to RabbitMQ
+description:
+ - Add or remove users to RabbitMQ and assign permissions
+version_added: "1.1"
+author: '"Chris Hoffman (@chrishoffman)"'
+options:
+ user:
+ description:
+ - Name of user to add
+ required: true
+ default: null
+ aliases: [username, name]
+ password:
+ description:
+ - Password of user to add.
+ - To change the password of an existing user, you must also specify
+ C(force=yes).
+ required: false
+ default: null
+ tags:
+ description:
+ - User tags specified as comma delimited
+ required: false
+ default: null
+ permissions:
+ description:
+ - a list of dicts, each dict contains vhost, configure_priv, write_priv, and read_priv,
+ and represents a permission rule for that vhost.
+ - This option should be preferable when you care about all permissions of the user.
+ - You should use vhost, configure_priv, write_priv, and read_priv options instead
+ if you care about permissions for just some vhosts.
+ required: false
+ default: []
+ vhost:
+ description:
+ - vhost to apply access privileges.
+ - This option will be ignored when permissions option is used.
+ required: false
+ default: /
+ node:
+ description:
+ - erlang node name of the rabbit we wish to configure
+ required: false
+ default: rabbit
+ version_added: "1.2"
+ configure_priv:
+ description:
+ - Regular expression to restrict configure actions on a resource
+ for the specified vhost.
+ - By default all actions are restricted.
+ - This option will be ignored when permissions option is used.
+ required: false
+ default: ^$
+ write_priv:
+ description:
+      - Regular expression to restrict write actions on a resource
+ for the specified vhost.
+ - By default all actions are restricted.
+ - This option will be ignored when permissions option is used.
+ required: false
+ default: ^$
+ read_priv:
+ description:
+      - Regular expression to restrict read actions on a resource
+ for the specified vhost.
+ - By default all actions are restricted.
+ - This option will be ignored when permissions option is used.
+ required: false
+ default: ^$
+ force:
+ description:
+ - Deletes and recreates the user.
+ required: false
+ default: "no"
+ choices: [ "yes", "no" ]
+ state:
+ description:
+ - Specify if user is to be added or removed
+ required: false
+ default: present
+ choices: [present, absent]
+'''
+
+EXAMPLES = '''
+# Add user to server and assign full access control on / vhost.
+# The user might have permission rules for other vhost but you don't care.
+- rabbitmq_user: user=joe
+ password=changeme
+ vhost=/
+ configure_priv=.*
+ read_priv=.*
+ write_priv=.*
+ state=present
+
+# Add user to server and assign full access control on / vhost.
+# The user doesn't have permission rules for other vhosts
+- rabbitmq_user:
+    user: joe
+    password: changeme
+    state: present
+    permissions:
+      - vhost: /
+        configure_priv: .*
+        read_priv: .*
+        write_priv: .*
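+
+# Grant tags to a user (comma-delimited, values illustrative)
+- rabbitmq_user: user=joe password=changeme tags=administrator,monitoring state=present
+
+# Remove the user again
+- rabbitmq_user: user=joe state=absent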
+'''
+
+class RabbitMqUser(object):
+ def __init__(self, module, username, password, tags, permissions,
+ node, bulk_permissions=False):
+ self.module = module
+ self.username = username
+ self.password = password
+ self.node = node
+ if not tags:
+ self.tags = list()
+ else:
+ self.tags = tags.split(',')
+
+ self.permissions = permissions
+ self.bulk_permissions = bulk_permissions
+
+ self._tags = None
+ self._permissions = []
+ self._rabbitmqctl = module.get_bin_path('rabbitmqctl', True)
+
+ def _exec(self, args, run_in_check_mode=False):
+ if not self.module.check_mode or (self.module.check_mode and run_in_check_mode):
+ cmd = [self._rabbitmqctl, '-q']
+ if self.node is not None:
+ cmd.extend(['-n', self.node])
+ rc, out, err = self.module.run_command(cmd + args, check_rc=True)
+ return out.splitlines()
+ return list()
+
+ def get(self):
+ users = self._exec(['list_users'], True)
+
+ for user_tag in users:
+ if '\t' not in user_tag:
+ continue
+
+ user, tags = user_tag.split('\t')
+
+ if user == self.username:
+ for c in ['[',']',' ']:
+ tags = tags.replace(c, '')
+
+ if tags != '':
+ self._tags = tags.split(',')
+ else:
+ self._tags = list()
+
+ self._permissions = self._get_permissions()
+ return True
+ return False
+
+ def _get_permissions(self):
+ perms_out = self._exec(['list_user_permissions', self.username], True)
+
+ perms_list = list()
+ for perm in perms_out:
+ vhost, configure_priv, write_priv, read_priv = perm.split('\t')
+ if not self.bulk_permissions:
+ if vhost == self.permissions[0]['vhost']:
+ perms_list.append(dict(vhost=vhost, configure_priv=configure_priv,
+ write_priv=write_priv, read_priv=read_priv))
+ break
+ else:
+ perms_list.append(dict(vhost=vhost, configure_priv=configure_priv,
+ write_priv=write_priv, read_priv=read_priv))
+ return perms_list
+
+ def add(self):
+ if self.password is not None:
+ self._exec(['add_user', self.username, self.password])
+ else:
+ self._exec(['add_user', self.username, ''])
+ self._exec(['clear_password', self.username])
+
+ def delete(self):
+ self._exec(['delete_user', self.username])
+
+ def set_tags(self):
+ self._exec(['set_user_tags', self.username] + self.tags)
+
+ def set_permissions(self):
+ for permission in self._permissions:
+ if permission not in self.permissions:
+ cmd = ['clear_permissions', '-p']
+ cmd.append(permission['vhost'])
+ cmd.append(self.username)
+ self._exec(cmd)
+ for permission in self.permissions:
+ if permission not in self._permissions:
+ cmd = ['set_permissions', '-p']
+ cmd.append(permission['vhost'])
+ cmd.append(self.username)
+ cmd.append(permission['configure_priv'])
+ cmd.append(permission['write_priv'])
+ cmd.append(permission['read_priv'])
+ self._exec(cmd)
+
+ def has_tags_modifications(self):
+ return set(self.tags) != set(self._tags)
+
+ def has_permissions_modifications(self):
+ return self._permissions != self.permissions
+
+def main():
+ arg_spec = dict(
+ user=dict(required=True, aliases=['username', 'name']),
+ password=dict(default=None),
+ tags=dict(default=None),
+ permissions=dict(default=list(), type='list'),
+ vhost=dict(default='/'),
+ configure_priv=dict(default='^$'),
+ write_priv=dict(default='^$'),
+ read_priv=dict(default='^$'),
+ force=dict(default='no', type='bool'),
+ state=dict(default='present', choices=['present', 'absent']),
+ node=dict(default=None)
+ )
+ module = AnsibleModule(
+ argument_spec=arg_spec,
+ supports_check_mode=True
+ )
+
+ username = module.params['user']
+ password = module.params['password']
+ tags = module.params['tags']
+ permissions = module.params['permissions']
+ vhost = module.params['vhost']
+ configure_priv = module.params['configure_priv']
+ write_priv = module.params['write_priv']
+ read_priv = module.params['read_priv']
+ force = module.params['force']
+ state = module.params['state']
+ node = module.params['node']
+
+ bulk_permissions = True
+ if permissions == []:
+ perm = {
+ 'vhost': vhost,
+ 'configure_priv': configure_priv,
+ 'write_priv': write_priv,
+ 'read_priv': read_priv
+ }
+ permissions.append(perm)
+ bulk_permissions = False
+
+ rabbitmq_user = RabbitMqUser(module, username, password, tags, permissions,
+ node, bulk_permissions=bulk_permissions)
+
+ changed = False
+ if rabbitmq_user.get():
+ if state == 'absent':
+ rabbitmq_user.delete()
+ changed = True
+ else:
+ if force:
+ rabbitmq_user.delete()
+ rabbitmq_user.add()
+ rabbitmq_user.get()
+ changed = True
+
+ if rabbitmq_user.has_tags_modifications():
+ rabbitmq_user.set_tags()
+ changed = True
+
+ if rabbitmq_user.has_permissions_modifications():
+ rabbitmq_user.set_permissions()
+ changed = True
+ elif state == 'present':
+ rabbitmq_user.add()
+ rabbitmq_user.set_tags()
+ rabbitmq_user.set_permissions()
+ changed = True
+
+ module.exit_json(changed=changed, user=username, state=state)
+
+# import module snippets
+from ansible.module_utils.basic import *
+main()
diff --git a/lib/ansible/modules/extras/messaging/rabbitmq_vhost.py b/lib/ansible/modules/extras/messaging/rabbitmq_vhost.py
new file mode 100644
index 0000000000..dbde32393c
--- /dev/null
+++ b/lib/ansible/modules/extras/messaging/rabbitmq_vhost.py
@@ -0,0 +1,147 @@
+#!/usr/bin/python
+# -*- coding: utf-8 -*-
+
+# (c) 2013, Chatham Financial <oss@chathamfinancial.com>
+#
+# This file is part of Ansible
+#
+# Ansible is free software: you can redistribute it and/or modify
+# it under the terms of the GNU General Public License as published by
+# the Free Software Foundation, either version 3 of the License, or
+# (at your option) any later version.
+#
+# Ansible is distributed in the hope that it will be useful,
+# but WITHOUT ANY WARRANTY; without even the implied warranty of
+# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+# GNU General Public License for more details.
+#
+# You should have received a copy of the GNU General Public License
+# along with Ansible. If not, see <http://www.gnu.org/licenses/>.
+#
+
+DOCUMENTATION = '''
+---
+module: rabbitmq_vhost
+short_description: Manage the state of a virtual host in RabbitMQ
+description:
+ - Manage the state of a virtual host in RabbitMQ
+version_added: "1.1"
+author: '"Chris Hoffman (@choffman)"'
+options:
+ name:
+ description:
+ - The name of the vhost to manage
+ required: true
+ default: null
+ aliases: [vhost]
+ node:
+ description:
+ - erlang node name of the rabbit we wish to configure
+ required: false
+ default: rabbit
+ version_added: "1.2"
+ tracing:
+ description:
+ - Enable/disable tracing for a vhost
+ default: "no"
+ choices: [ "yes", "no" ]
+ aliases: [trace]
+ state:
+ description:
+ - The state of vhost
+ default: present
+ choices: [present, absent]
+'''
+
+EXAMPLES = '''
+# Ensure that the vhost /test exists.
+- rabbitmq_vhost: name=/test state=present
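+
+# An illustrative sketch: create a vhost on a specific node with tracing enabled
+- rabbitmq_vhost: name=/test tracing=yes node=rabbit@node-1 state=present
+
+# Remove the vhost
+- rabbitmq_vhost: name=/test state=absent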
+'''
+
+class RabbitMqVhost(object):
+ def __init__(self, module, name, tracing, node):
+ self.module = module
+ self.name = name
+ self.tracing = tracing
+ self.node = node
+
+ self._tracing = False
+ self._rabbitmqctl = module.get_bin_path('rabbitmqctl', True)
+
+ def _exec(self, args, run_in_check_mode=False):
+ if not self.module.check_mode or (self.module.check_mode and run_in_check_mode):
+ cmd = [self._rabbitmqctl, '-q', '-n', self.node]
+ rc, out, err = self.module.run_command(cmd + args, check_rc=True)
+ return out.splitlines()
+ return list()
+
+ def get(self):
+ vhosts = self._exec(['list_vhosts', 'name', 'tracing'], True)
+
+ for vhost in vhosts:
+ name, tracing = vhost.split('\t')
+ if name == self.name:
+ self._tracing = self.module.boolean(tracing)
+ return True
+ return False
+
+ def add(self):
+ return self._exec(['add_vhost', self.name])
+
+ def delete(self):
+ return self._exec(['delete_vhost', self.name])
+
+ def set_tracing(self):
+ if self.tracing != self._tracing:
+ if self.tracing:
+ self._enable_tracing()
+ else:
+ self._disable_tracing()
+ return True
+ return False
+
+ def _enable_tracing(self):
+ return self._exec(['trace_on', '-p', self.name])
+
+ def _disable_tracing(self):
+ return self._exec(['trace_off', '-p', self.name])
+
+
+def main():
+ arg_spec = dict(
+ name=dict(required=True, aliases=['vhost']),
+ tracing=dict(default='off', aliases=['trace'], type='bool'),
+ state=dict(default='present', choices=['present', 'absent']),
+ node=dict(default='rabbit'),
+ )
+
+ module = AnsibleModule(
+ argument_spec=arg_spec,
+ supports_check_mode=True
+ )
+
+ name = module.params['name']
+ tracing = module.params['tracing']
+ state = module.params['state']
+ node = module.params['node']
+
+ rabbitmq_vhost = RabbitMqVhost(module, name, tracing, node)
+
+ changed = False
+ if rabbitmq_vhost.get():
+ if state == 'absent':
+ rabbitmq_vhost.delete()
+ changed = True
+ else:
+ if rabbitmq_vhost.set_tracing():
+ changed = True
+ elif state == 'present':
+ rabbitmq_vhost.add()
+ rabbitmq_vhost.set_tracing()
+ changed = True
+
+ module.exit_json(changed=changed, name=name, state=state)
+
+# import module snippets
+from ansible.module_utils.basic import *
+main()
diff --git a/lib/ansible/modules/extras/monitoring/__init__.py b/lib/ansible/modules/extras/monitoring/__init__.py
new file mode 100644
index 0000000000..e69de29bb2
--- /dev/null
+++ b/lib/ansible/modules/extras/monitoring/__init__.py
diff --git a/lib/ansible/modules/extras/monitoring/airbrake_deployment.py b/lib/ansible/modules/extras/monitoring/airbrake_deployment.py
new file mode 100644
index 0000000000..262c3d2b44
--- /dev/null
+++ b/lib/ansible/modules/extras/monitoring/airbrake_deployment.py
@@ -0,0 +1,131 @@
+#!/usr/bin/python
+# -*- coding: utf-8 -*-
+
+# Copyright 2013 Bruce Pennypacker <bruce@pennypacker.org>
+#
+# This file is part of Ansible
+#
+# Ansible is free software: you can redistribute it and/or modify
+# it under the terms of the GNU General Public License as published by
+# the Free Software Foundation, either version 3 of the License, or
+# (at your option) any later version.
+#
+# Ansible is distributed in the hope that it will be useful,
+# but WITHOUT ANY WARRANTY; without even the implied warranty of
+# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+# GNU General Public License for more details.
+#
+# You should have received a copy of the GNU General Public License
+# along with Ansible. If not, see <http://www.gnu.org/licenses/>.
+
+DOCUMENTATION = '''
+---
+module: airbrake_deployment
+version_added: "1.2"
+author: "Bruce Pennypacker (@bpennypacker)"
+short_description: Notify airbrake about app deployments
+description:
+ - Notify airbrake about app deployments (see http://help.airbrake.io/kb/api-2/deploy-tracking)
+options:
+ token:
+ description:
+ - API token.
+ required: true
+ environment:
+ description:
+ - The airbrake environment name, typically 'production', 'staging', etc.
+ required: true
+ user:
+ description:
+ - The username of the person doing the deployment
+ required: false
+ repo:
+ description:
+ - URL of the project repository
+ required: false
+ revision:
+ description:
+ - A hash, number, tag, or other identifier showing what revision was deployed
+ required: false
+ url:
+ description:
+ - Optional URL to submit the notification to. Use to send notifications to Airbrake-compliant tools like Errbit.
+ required: false
+ default: "https://airbrake.io/deploys.txt"
+ version_added: "1.5"
+ validate_certs:
+ description:
+ - If C(no), SSL certificates for the target url will not be validated. This should only be used
+ on personally controlled sites using self-signed certificates.
+ required: false
+ default: 'yes'
+ choices: ['yes', 'no']
+
+requirements: []
+'''
+
+EXAMPLES = '''
+- airbrake_deployment: token=AAAAAA
+ environment='staging'
+ user='ansible'
+ revision=4.2
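+
+# A sketch notifying an Errbit-compatible endpoint (URL is illustrative)
+- airbrake_deployment: token=AAAAAA
+                       environment='production'
+                       revision=4.3
+                       url='https://errbit.example.com/deploys.txt'
+                       validate_certs=no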
+'''
+
+import urllib
+
+# ===========================================
+# Module execution.
+#
+
+def main():
+
+ module = AnsibleModule(
+ argument_spec=dict(
+ token=dict(required=True, no_log=True),
+ environment=dict(required=True),
+ user=dict(required=False),
+ repo=dict(required=False),
+ revision=dict(required=False),
+ url=dict(required=False, default='https://api.airbrake.io/deploys.txt'),
+ validate_certs=dict(default='yes', type='bool'),
+ ),
+ supports_check_mode=True
+ )
+
+ # build list of params
+ params = {}
+
+ if module.params["environment"]:
+ params["deploy[rails_env]"] = module.params["environment"]
+
+ if module.params["user"]:
+ params["deploy[local_username]"] = module.params["user"]
+
+ if module.params["repo"]:
+ params["deploy[scm_repository]"] = module.params["repo"]
+
+ if module.params["revision"]:
+ params["deploy[scm_revision]"] = module.params["revision"]
+
+ params["api_key"] = module.params["token"]
+
+ url = module.params.get('url')
+
+ # If we're in check mode, just exit pretending like we succeeded
+ if module.check_mode:
+ module.exit_json(changed=True)
+
+ # Send the data to airbrake
+ data = urllib.urlencode(params)
+ response, info = fetch_url(module, url, data=data)
+ if info['status'] == 200:
+ module.exit_json(changed=True)
+ else:
+ module.fail_json(msg="HTTP result code: %d connecting to %s" % (info['status'], url))
+
+# import module snippets
+from ansible.module_utils.basic import *
+from ansible.module_utils.urls import *
+
+main()
+
diff --git a/lib/ansible/modules/extras/monitoring/bigpanda.py b/lib/ansible/modules/extras/monitoring/bigpanda.py
new file mode 100644
index 0000000000..df8e55fd74
--- /dev/null
+++ b/lib/ansible/modules/extras/monitoring/bigpanda.py
@@ -0,0 +1,180 @@
+#!/usr/bin/python
+# -*- coding: utf-8 -*-
+
+# This file is part of Ansible
+#
+# Ansible is free software: you can redistribute it and/or modify
+# it under the terms of the GNU General Public License as published by
+# the Free Software Foundation, either version 3 of the License, or
+# (at your option) any later version.
+#
+# Ansible is distributed in the hope that it will be useful,
+# but WITHOUT ANY WARRANTY; without even the implied warranty of
+# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+# GNU General Public License for more details.
+#
+# You should have received a copy of the GNU General Public License
+# along with Ansible. If not, see <http://www.gnu.org/licenses/>.
+
+DOCUMENTATION = '''
+---
+module: bigpanda
+author: "Hagai Kariti (@hkariti)"
+short_description: Notify BigPanda about deployments
+version_added: "1.8"
+description:
+ - Notify BigPanda when deployments start and end (successfully or not). Returns a deployment object containing all the parameters for future module calls.
+options:
+ component:
+ description:
+ - "The name of the component being deployed. Ex: billing"
+ required: true
+    aliases: ['name']
+ version:
+ description:
+ - The deployment version.
+ required: true
+ token:
+ description:
+ - API token.
+ required: true
+ state:
+ description:
+ - State of the deployment.
+ required: true
+ choices: ['started', 'finished', 'failed']
+ hosts:
+ description:
+      - Name of the affected host. Can be a list.
+ required: false
+ default: machine's hostname
+    aliases: ['host']
+ env:
+ description:
+ - The environment name, typically 'production', 'staging', etc.
+ required: false
+ owner:
+ description:
+ - The person responsible for the deployment.
+ required: false
+ description:
+ description:
+ - Free text description of the deployment.
+ required: false
+ url:
+ description:
+ - Base URL of the API server.
+ required: False
+ default: https://api.bigpanda.io
+ validate_certs:
+ description:
+ - If C(no), SSL certificates for the target url will not be validated. This should only be used
+ on personally controlled sites using self-signed certificates.
+ required: false
+ default: 'yes'
+ choices: ['yes', 'no']
+
+# informational: requirements for nodes
+requirements: [ ]
+'''
+
+EXAMPLES = '''
+- bigpanda: component=myapp version=1.3 token={{ bigpanda_token }} state=started
+...
+- bigpanda: component=myapp version=1.3 token={{ bigpanda_token }} state=finished
+
+If outside servers aren't reachable from your machine, use local_action and override hosts:
+- local_action: bigpanda component=myapp version=1.3 token={{ bigpanda_token }} hosts={{ ansible_hostname }} state=started
+  register: deployment
+...
+- local_action: bigpanda component={{ deployment.component }} version={{ deployment.version }} token={{ deployment.token }} state=finished
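+
+# Report a failed deployment with an error message (sketch; the module's
+# argument spec also accepts a `message` option)
+- bigpanda: component=myapp version=1.3 token={{ bigpanda_token }} state=failed message='deploy aborted'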
+'''
+
+# ===========================================
+# Module execution.
+#
+import socket
+
+def main():
+
+ module = AnsibleModule(
+ argument_spec=dict(
+ component=dict(required=True, aliases=['name']),
+ version=dict(required=True),
+ token=dict(required=True, no_log=True),
+ state=dict(required=True, choices=['started', 'finished', 'failed']),
+ hosts=dict(required=False, default=[socket.gethostname()], aliases=['host']),
+ env=dict(required=False),
+ owner=dict(required=False),
+ description=dict(required=False),
+ message=dict(required=False),
+ source_system=dict(required=False, default='ansible'),
+ validate_certs=dict(default='yes', type='bool'),
+ url=dict(required=False, default='https://api.bigpanda.io'),
+ ),
+ supports_check_mode=True,
+ check_invalid_arguments=False,
+ )
+
+ token = module.params['token']
+ state = module.params['state']
+ url = module.params['url']
+
+ # Build the common request body
+ body = dict()
+ for k in ('component', 'version', 'hosts'):
+ v = module.params[k]
+ if v is not None:
+ body[k] = v
+
+ if not isinstance(body['hosts'], list):
+ body['hosts'] = [body['hosts']]
+
+ # Insert state-specific attributes to body
+ if state == 'started':
+ for k in ('source_system', 'env', 'owner', 'description'):
+ v = module.params[k]
+ if v is not None:
+ body[k] = v
+
+ request_url = url + '/data/events/deployments/start'
+ else:
+ message = module.params['message']
+ if message is not None:
+ body['errorMessage'] = message
+
+ if state == 'finished':
+ body['status'] = 'success'
+ else:
+ body['status'] = 'failure'
+
+ request_url = url + '/data/events/deployments/end'
+
+ # Build the deployment object we return
+ deployment = dict(token=token, url=url)
+ deployment.update(body)
+ if 'errorMessage' in deployment:
+ message = deployment.pop('errorMessage')
+ deployment['message'] = message
+
+ # If we're in check mode, just exit pretending like we succeeded
+ if module.check_mode:
+ module.exit_json(changed=True, **deployment)
+
+ # Send the data to bigpanda
+ data = json.dumps(body)
+ headers = {'Authorization':'Bearer %s' % token, 'Content-Type':'application/json'}
+ try:
+ response, info = fetch_url(module, request_url, data=data, headers=headers)
+ if info['status'] == 200:
+ module.exit_json(changed=True, **deployment)
+ else:
+ module.fail_json(msg=json.dumps(info))
+ except Exception, e:
+ module.fail_json(msg=str(e))
+
+# import module snippets
+from ansible.module_utils.basic import *
+from ansible.module_utils.urls import *
+if __name__ == '__main__':
+ main()
diff --git a/lib/ansible/modules/extras/monitoring/boundary_meter.py b/lib/ansible/modules/extras/monitoring/boundary_meter.py
new file mode 100644
index 0000000000..3729b606a1
--- /dev/null
+++ b/lib/ansible/modules/extras/monitoring/boundary_meter.py
@@ -0,0 +1,264 @@
+#!/usr/bin/python
+# -*- coding: utf-8 -*-
+
+"""
+Ansible module to add boundary meters.
+
+(c) 2013, curtis <curtis@serverascode.com>
+
+This file is part of Ansible
+
+Ansible is free software: you can redistribute it and/or modify
+it under the terms of the GNU General Public License as published by
+the Free Software Foundation, either version 3 of the License, or
+(at your option) any later version.
+
+Ansible is distributed in the hope that it will be useful,
+but WITHOUT ANY WARRANTY; without even the implied warranty of
+MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+GNU General Public License for more details.
+
+You should have received a copy of the GNU General Public License
+along with Ansible. If not, see <http://www.gnu.org/licenses/>.
+"""
+
+try:
+ import json
+except ImportError:
+ try:
+ import simplejson as json
+ except ImportError:
+ # Let snippet from module_utils/basic.py return a proper error in this case
+ pass
+
+import datetime
+import base64
+import os
+
+DOCUMENTATION = '''
+
+module: boundary_meter
+short_description: Manage boundary meters
+description:
+ - This module manages boundary meters
+version_added: "1.3"
+author: "curtis (@ccollicutt)"
+requirements:
+ - Boundary API access
+ - bprobe is required to send data, but not to register a meter
+options:
+ name:
+ description:
+ - meter name
+ required: true
+ state:
+ description:
+ - Whether to create or remove the client from boundary
+ required: false
+ default: true
+ choices: ["present", "absent"]
+ apiid:
+ description:
+      - Organization's Boundary API ID
+ required: true
+ apikey:
+ description:
+      - Organization's Boundary API key
+ required: true
+ validate_certs:
+ description:
+ - If C(no), SSL certificates will not be validated. This should only be used
+ on personally controlled sites using self-signed certificates.
+ required: false
+ default: 'yes'
+ choices: ['yes', 'no']
+ version_added: 1.5.1
+
+notes:
+ - This module does not yet support boundary tags.
+
+'''
+
+EXAMPLES='''
+- name: Create meter
+  boundary_meter: apiid=AAAAAA apikey=BBBBBB state=present name={{ inventory_hostname }}
+
+- name: Delete meter
+  boundary_meter: apiid=AAAAAA apikey=BBBBBB state=absent name={{ inventory_hostname }}
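+
+- name: Create meter without validating self-signed certificates (illustrative)
+  boundary_meter: apiid=AAAAAA apikey=BBBBBB state=present name={{ inventory_hostname }} validate_certs=no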
+
+'''
+
+api_host = "api.boundary.com"
+config_directory = "/etc/bprobe"
+
+# "resource" like thing or apikey?
+def auth_encode(apikey):
+ auth = base64.standard_b64encode(apikey)
+ auth.replace("\n", "")
+ return auth
+
+def build_url(name, apiid, action, meter_id=None, cert_type=None):
+ if action == "create":
+ return 'https://%s/%s/meters' % (api_host, apiid)
+ elif action == "search":
+ return "https://%s/%s/meters?name=%s" % (api_host, apiid, name)
+ elif action == "certificates":
+ return "https://%s/%s/meters/%s/%s.pem" % (api_host, apiid, meter_id, cert_type)
+ elif action == "tags":
+ return "https://%s/%s/meters/%s/tags" % (api_host, apiid, meter_id)
+ elif action == "delete":
+ return "https://%s/%s/meters/%s" % (api_host, apiid, meter_id)
+
+def http_request(module, name, apiid, apikey, action, data=None, meter_id=None, cert_type=None):
+
+ if meter_id is None:
+ url = build_url(name, apiid, action)
+ else:
+ if cert_type is None:
+ url = build_url(name, apiid, action, meter_id)
+ else:
+ url = build_url(name, apiid, action, meter_id, cert_type)
+
+ headers = dict()
+ headers["Authorization"] = "Basic %s" % auth_encode(apikey)
+ headers["Content-Type"] = "application/json"
+
+ return fetch_url(module, url, data=data, headers=headers)
+
+def create_meter(module, name, apiid, apikey):
+
+ meters = search_meter(module, name, apiid, apikey)
+
+ if len(meters) > 0:
+ # If the meter already exists, do nothing
+ module.exit_json(status="Meter " + name + " already exists",changed=False)
+ else:
+ # If it doesn't exist, create it
+ body = '{"name":"' + name + '"}'
+ response, info = http_request(module, name, apiid, apikey, data=body, action="create")
+
+ if info['status'] != 200:
+ module.fail_json(msg="Failed to connect to api host to create meter")
+
+ # If the config directory doesn't exist, create it
+ if not os.path.exists(config_directory):
+ try:
+ os.makedirs(config_directory)
+            except OSError:
+                module.fail_json(msg="Could not create " + config_directory)
+
+
+ # Download both cert files from the api host
+ types = ['key', 'cert']
+ for cert_type in types:
+ try:
+ # If we can't open the file it's not there, so we should download it
+ cert_file = open('%s/%s.pem' % (config_directory,cert_type))
+ except IOError:
+ # Now download the file...
+ rc = download_request(module, name, apiid, apikey, cert_type)
+                if rc is False:
+                    module.fail_json(msg="Download request for " + cert_type + ".pem failed")
+
+ return 0, "Meter " + name + " created"
+
+def search_meter(module, name, apiid, apikey):
+
+ response, info = http_request(module, name, apiid, apikey, action="search")
+
+ if info['status'] != 200:
+ module.fail_json("Failed to connect to api host to search for meter")
+
+ # Return meters
+ return json.loads(response.read())
+
+def get_meter_id(module, name, apiid, apikey):
+ # In order to delete the meter we need its id
+ meters = search_meter(module, name, apiid, apikey)
+
+ if len(meters) > 0:
+ return meters[0]['id']
+ else:
+ return None
+
+def delete_meter(module, name, apiid, apikey):
+
+ meter_id = get_meter_id(module, name, apiid, apikey)
+
+ if meter_id is None:
+ return 1, "Meter does not exist, so can't delete it"
+ else:
+        response, info = http_request(module, name, apiid, apikey, action="delete", meter_id=meter_id)
+        if info['status'] != 200:
+            module.fail_json(msg="Failed to delete meter")
+
+ # Each new meter gets a new key.pem and ca.pem file, so they should be deleted
+ types = ['cert', 'key']
+ for cert_type in types:
+ try:
+ cert_file = '%s/%s.pem' % (config_directory,cert_type)
+ os.remove(cert_file)
+ except OSError, e:
+ module.fail_json("Failed to remove " + cert_type + ".pem file")
+
+ return 0, "Meter " + name + " deleted"
+
+def download_request(module, name, apiid, apikey, cert_type):
+
+ meter_id = get_meter_id(module, name, apiid, apikey)
+
+ if meter_id is not None:
+ action = "certificates"
+        response, info = http_request(module, name, apiid, apikey, action, meter_id=meter_id, cert_type=cert_type)
+        if info['status'] != 200:
+            module.fail_json(msg="Failed to connect to api host to download certificate")
+
+        try:
+            cert_file_path = '%s/%s.pem' % (config_directory, cert_type)
+            body = response.read()
+            cert_file = open(cert_file_path, 'w')
+            cert_file.write(body)
+            cert_file.close()
+            os.chmod(cert_file_path, int('0600', 8))
+        except IOError:
+            module.fail_json(msg="Could not write to certificate file")
+
+        return True
+    else:
+        module.fail_json(msg="Could not get meter id")
+
+def main():
+
+ module = AnsibleModule(
+ argument_spec=dict(
+ state=dict(required=True, choices=['present', 'absent']),
+ name=dict(required=False),
+ apikey=dict(required=True),
+ apiid=dict(required=True),
+ validate_certs = dict(default='yes', type='bool'),
+ )
+ )
+
+ state = module.params['state']
+    name = module.params['name']
+    apikey = module.params['apikey']
+    apiid = module.params['apiid']
+
+ if state == "present":
+ (rc, result) = create_meter(module, name, apiid, apikey)
+
+ if state == "absent":
+ (rc, result) = delete_meter(module, name, apiid, apikey)
+
+ if rc != 0:
+ module.fail_json(msg=result)
+
+ module.exit_json(status=result,changed=True)
+
+# import module snippets
+from ansible.module_utils.basic import *
+from ansible.module_utils.urls import *
+if __name__ == '__main__':
+ main()
+
diff --git a/lib/ansible/modules/extras/monitoring/circonus_annotation.py b/lib/ansible/modules/extras/monitoring/circonus_annotation.py
new file mode 100644
index 0000000000..9c5fbbb0fd
--- /dev/null
+++ b/lib/ansible/modules/extras/monitoring/circonus_annotation.py
@@ -0,0 +1,147 @@
+#!/usr/bin/python
+# -*- coding: utf-8 -*-
+#
+# (c) 2014-2015, Epic Games, Inc.
+#
+# This file is part of Ansible
+#
+# Ansible is free software: you can redistribute it and/or modify
+# it under the terms of the GNU General Public License as published by
+# the Free Software Foundation, either version 3 of the License, or
+# (at your option) any later version.
+#
+# Ansible is distributed in the hope that it will be useful,
+# but WITHOUT ANY WARRANTY; without even the implied warranty of
+# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+# GNU General Public License for more details.
+#
+# You should have received a copy of the GNU General Public License
+# along with Ansible. If not, see <http://www.gnu.org/licenses/>.
+import requests
+import time
+import json
+
+DOCUMENTATION = '''
+---
+module: circonus_annotation
+short_description: create an annotation in circonus
+description:
+ - Create an annotation event with a given category, title and description. Optionally start, end or durations can be provided
+author: "Nick Harring (@NickatEpic)"
+version_added: 2.0
+requirements:
+ - urllib3
+ - requests
+ - time
+options:
+ api_key:
+ description:
+ - Circonus API key
+ required: true
+ category:
+ description:
+ - Annotation Category
+ required: true
+ description:
+ description:
+ - Description of annotation
+ required: true
+ title:
+ description:
+ - Title of annotation
+ required: true
+ start:
+ description:
+ - Unix timestamp of event start, defaults to now
+ required: false
+ stop:
+ description:
+ - Unix timestamp of event end, defaults to now + duration
+ required: false
+ duration:
+ description:
+ - Duration in seconds of annotation, defaults to 0
+ required: false
+'''
+EXAMPLES = '''
+# Create a simple annotation event; start and stop default to now
+- circonus_annotation:
+ api_key: XXXXXXXXXXXXXXXXX
+ title: 'App Config Change'
+ description: 'This is a detailed description of the config change'
+ category: 'This category groups like annotations'
+# Create an annotation with a duration of 5 minutes and a default start time of now
+- circonus_annotation:
+ api_key: XXXXXXXXXXXXXXXXX
+ title: 'App Config Change'
+ description: 'This is a detailed description of the config change'
+ category: 'This category groups like annotations'
+ duration: 300
+# Create an annotation with explicit start and stop times
+- circonus_annotation:
+    api_key: XXXXXXXXXXXXXXXXX
+    title: 'App Config Change'
+    description: 'This is a detailed description of the config change'
+    category: 'This category groups like annotations'
+    start: 1395940006
+    stop: 1395954407
+'''
+def post_annotation(annotation, api_key):
+ ''' Takes annotation dict and api_key string'''
+ base_url = 'https://api.circonus.com/v2'
+    annotate_post_endpoint = '/annotation'
+    resp = requests.post(base_url + annotate_post_endpoint,
+                         headers=build_headers(api_key), data=json.dumps(annotation))
+ resp.raise_for_status()
+ return resp
+
+def create_annotation(module):
+ ''' Takes ansible module object '''
+ annotation = {}
+    if module.params['duration'] is not None:
+        duration = module.params['duration']
+    else:
+        duration = 0
+    if module.params['start'] is not None:
+        start = module.params['start']
+    else:
+        start = int(time.time())
+    if module.params['stop'] is not None:
+        stop = module.params['stop']
+    else:
+        stop = int(time.time()) + duration
+ annotation['start'] = int(start)
+ annotation['stop'] = int(stop)
+ annotation['category'] = module.params['category']
+ annotation['description'] = module.params['description']
+ annotation['title'] = module.params['title']
+ return annotation
+
+def build_headers(api_token):
+ '''Takes api token, returns headers with it included.'''
+ headers = {'X-Circonus-App-Name': 'ansible',
+ 'Host': 'api.circonus.com', 'X-Circonus-Auth-Token': api_token,
+ 'Accept': 'application/json'}
+ return headers
+
+def main():
+ '''Main function, dispatches logic'''
+ module = AnsibleModule(
+ argument_spec=dict(
+ start=dict(required=False, type='int'),
+ stop=dict(required=False, type='int'),
+ category=dict(required=True),
+ title=dict(required=True),
+ description=dict(required=True),
+ duration=dict(required=False, type='int'),
+ api_key=dict(required=True, no_log=True)
+ )
+ )
+ annotation = create_annotation(module)
+ try:
+ resp = post_annotation(annotation, module.params['api_key'])
+ except requests.exceptions.RequestException as err_str:
+ module.fail_json(msg='Request Failed', reason=err_str)
+ module.exit_json(changed=True, annotation=resp.json())
+
+from ansible.module_utils.basic import *
+
+if __name__ == '__main__':
+ main()
diff --git a/lib/ansible/modules/extras/monitoring/datadog_event.py b/lib/ansible/modules/extras/monitoring/datadog_event.py
new file mode 100644
index 0000000000..88d921bf91
--- /dev/null
+++ b/lib/ansible/modules/extras/monitoring/datadog_event.py
@@ -0,0 +1,165 @@
+#!/usr/bin/python
+# -*- coding: utf-8 -*-
+#
+# Author: Artūras 'arturaz' Šlajus <x11@arturaz.net>
+# Author: Naoya Nakazawa <naoya.n@gmail.com>
+#
+# This module is proudly sponsored by iGeolise (www.igeolise.com) and
+# Tiny Lab Productions (www.tinylabproductions.com).
+#
+# This file is part of Ansible
+#
+# Ansible is free software: you can redistribute it and/or modify
+# it under the terms of the GNU General Public License as published by
+# the Free Software Foundation, either version 3 of the License, or
+# (at your option) any later version.
+#
+# Ansible is distributed in the hope that it will be useful,
+# but WITHOUT ANY WARRANTY; without even the implied warranty of
+# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+# GNU General Public License for more details.
+#
+# You should have received a copy of the GNU General Public License
+# along with Ansible. If not, see <http://www.gnu.org/licenses/>.
+
+# Import Datadog
+try:
+ from datadog import initialize, api
+ HAS_DATADOG = True
+except ImportError:
+ HAS_DATADOG = False
+
+DOCUMENTATION = '''
+---
+module: datadog_event
+short_description: Posts events to DataDog service
+description:
+- "Allows to post events to DataDog (www.datadoghq.com) service."
+- "Uses http://docs.datadoghq.com/api/#events API."
+version_added: "1.3"
+author:
+- "Artūras `arturaz` Šlajus (@arturaz)"
+- "Naoya Nakazawa (@n0ts)"
+notes: []
+requirements: [datadogpy]
+options:
+ api_key:
+ description: ["Your DataDog API key."]
+ required: true
+ default: null
+ app_key:
+ description: ["Your DataDog app key."]
+ required: true
+ version_added: "2.2"
+ title:
+ description: ["The event title."]
+ required: true
+ default: null
+ text:
+ description: ["The body of the event."]
+ required: true
+ default: null
+ date_happened:
+ description:
+ - POSIX timestamp of the event.
+ - Default value is now.
+ required: false
+ default: now
+ priority:
+ description: ["The priority of the event."]
+ required: false
+ default: normal
+ choices: [normal, low]
+ tags:
+ description: ["Comma separated list of tags to apply to the event."]
+ required: false
+ default: null
+ alert_type:
+ description: ["Type of alert."]
+ required: false
+ default: info
+ choices: ['error', 'warning', 'info', 'success']
+ aggregation_key:
+ description: ["An arbitrary string to use for aggregation."]
+ required: false
+ default: null
+ validate_certs:
+ description:
+ - If C(no), SSL certificates will not be validated. This should only be used
+ on personally controlled sites using self-signed certificates.
+ required: false
+ default: 'yes'
+ choices: ['yes', 'no']
+ version_added: 1.5.1
+'''
+
+EXAMPLES = '''
+# Post an event with low priority
+- datadog_event:
+ title: "Testing from ansible"
+ text: "Test!"
+ priority: "low"
+ api_key: "9775a026f1ca7d1c6c5af9d94d9595a4"
+ app_key: "j4JyCYfefWHhgFgiZUqRm63AXHNZQyPGBfJtAzmN"
+
+# Post an event with several tags
+- datadog_event:
+ title: "Testing from ansible"
+ text: "Test!"
+ api_key: "9775a026f1ca7d1c6c5af9d94d9595a4"
+ app_key: "j4JyCYfefWHhgFgiZUqRm63AXHNZQyPGBfJtAzmN"
+ tags: "aa,bb,#host:{{ inventory_hostname }}"
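+
+# Post an error event and aggregate it with related events
+# (illustrative values; aggregation_key is any string shared by related events)
+- datadog_event:
+ title: "Deploy failed"
+ text: "The production deploy failed"
+ alert_type: "error"
+ aggregation_key: "deploy-prod"
+ api_key: "9775a026f1ca7d1c6c5af9d94d9595a4"
+ app_key: "j4JyCYfefWHhgFgiZUqRm63AXHNZQyPGBfJtAzmN"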
+'''
+
+def main():
+ module = AnsibleModule(
+ argument_spec=dict(
+ api_key=dict(required=True, no_log=True),
+ app_key=dict(required=True, no_log=True),
+ title=dict(required=True),
+ text=dict(required=True),
+ date_happened=dict(required=False, default=None, type='int'),
+ priority=dict(
+ required=False, default='normal', choices=['normal', 'low']
+ ),
+ tags=dict(required=False, default=None, type='list'),
+ alert_type=dict(
+ required=False, default='info',
+ choices=['error', 'warning', 'info', 'success']
+ ),
+ aggregation_key=dict(required=False, default=None),
+ validate_certs=dict(default='yes', type='bool'),
+ )
+ )
+
+ # Prepare Datadog
+ if not HAS_DATADOG:
+ module.fail_json(msg='datadogpy required for this module')
+
+ options = {
+ 'api_key': module.params['api_key'],
+ 'app_key': module.params['app_key']
+ }
+
+ initialize(**options)
+
+ _post_event(module)
+
+
+def _post_event(module):
+ try:
+ msg = api.Event.create(title=module.params['title'],
+ text=module.params['text'],
+ tags=module.params['tags'],
+ priority=module.params['priority'],
+ alert_type=module.params['alert_type'],
+ aggregation_key=module.params['aggregation_key'],
+ date_happened=module.params['date_happened'],
+ source_type_name='ansible')
+ if msg['status'] != 'ok':
+ module.fail_json(msg=msg)
+
+ module.exit_json(changed=True, msg=msg)
+ except Exception:
+ e = get_exception()
+ module.fail_json(msg=str(e))
+
+
+from ansible.module_utils.basic import *
+from ansible.module_utils.urls import *
+if __name__ == '__main__':
+ main()
diff --git a/lib/ansible/modules/extras/monitoring/datadog_monitor.py b/lib/ansible/modules/extras/monitoring/datadog_monitor.py
new file mode 100644
index 0000000000..208dc73305
--- /dev/null
+++ b/lib/ansible/modules/extras/monitoring/datadog_monitor.py
@@ -0,0 +1,310 @@
+#!/usr/bin/python
+# -*- coding: utf-8 -*-
+
+# (c) 2015, Sebastian Kornehl <sebastian.kornehl@asideas.de>
+#
+# This file is part of Ansible
+#
+# Ansible is free software: you can redistribute it and/or modify
+# it under the terms of the GNU General Public License as published by
+# the Free Software Foundation, either version 3 of the License, or
+# (at your option) any later version.
+#
+# Ansible is distributed in the hope that it will be useful,
+# but WITHOUT ANY WARRANTY; without even the implied warranty of
+# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+# GNU General Public License for more details.
+#
+# You should have received a copy of the GNU General Public License
+# along with Ansible. If not, see <http://www.gnu.org/licenses/>.
+# import module snippets
+
+# Import Datadog
+try:
+ from datadog import initialize, api
+ HAS_DATADOG = True
+except ImportError:
+ HAS_DATADOG = False
+
+DOCUMENTATION = '''
+---
+module: datadog_monitor
+short_description: Manages Datadog monitors
+description:
+- "Manages monitors within Datadog"
+- "Options like described on http://docs.datadoghq.com/api/"
+version_added: "2.0"
+author: "Sebastian Kornehl (@skornehl)"
+notes: []
+requirements: [datadog]
+options:
+ api_key:
+ description: ["Your DataDog API key."]
+ required: true
+ app_key:
+ description: ["Your DataDog app key."]
+ required: true
+ state:
+ description: ["The designated state of the monitor."]
+ required: true
+ choices: ['present', 'absent', 'mute', 'unmute']
+ tags:
+ description: ["A list of tags to associate with your monitor when creating or updating. This can help you categorize and filter monitors."]
+ required: false
+ default: null
+ version_added: 2.2
+ type:
+ description:
+ - "The type of the monitor."
+ - The 'event alert' type is available starting with Ansible 2.1.
+ required: false
+ default: null
+ choices: ['metric alert', 'service check', 'event alert']
+ query:
+ description: ["The monitor query to notify on with syntax varying depending on what type of monitor you are creating."]
+ required: false
+ default: null
+ name:
+ description: ["The name of the alert."]
+ required: true
+ message:
+ description: ["A message to include with notifications for this monitor. Email notifications can be sent to specific users by using the same '@username' notation as events. Monitor message template variables can be accessed by using double square brackets, i.e '[[' and ']]'."]
+ required: false
+ default: null
+ silenced:
+ description: ["Dictionary of scopes to timestamps or None. Each scope will be muted until the given POSIX timestamp or forever if the value is None. "]
+ required: false
+ default: ""
+ notify_no_data:
+ description: ["A boolean indicating whether this monitor will notify when data stops reporting.."]
+ required: false
+ default: False
+ no_data_timeframe:
+ description: ["The number of minutes before a monitor will notify when data stops reporting. Must be at least 2x the monitor timeframe for metric alerts or 2 minutes for service checks."]
+ required: false
+ default: 2x timeframe for metric, 2 minutes for service
+ timeout_h:
+ description: ["The number of hours of the monitor not reporting data before it will automatically resolve from a triggered state."]
+ required: false
+ default: null
+ renotify_interval:
+ description: ["The number of minutes after the last notification before a monitor will re-notify on the current status. It will only re-notify if it's not resolved."]
+ required: false
+ default: null
+ escalation_message:
+ description: ["A message to include with a re-notification. Supports the '@username' notification we allow elsewhere. Not applicable if renotify_interval is None"]
+ required: false
+ default: null
+ notify_audit:
+ description: ["A boolean indicating whether tagged users will be notified on changes to this monitor."]
+ required: false
+ default: False
+ thresholds:
+ description: ["A dictionary of thresholds by status. This option is only available for service checks and metric alerts. Because each of them can have multiple thresholds, we don't define them directly in the query."]
+ required: false
+ default: {'ok': 1, 'critical': 1, 'warning': 1}
+ locked:
+ description: ["A boolean indicating whether changes to this monitor should be restricted to the creator or admins."]
+ required: false
+ default: False
+ version_added: 2.2
+'''
+
+EXAMPLES = '''
+# Create a metric monitor
+datadog_monitor:
+ type: "metric alert"
+ name: "Test monitor"
+ state: "present"
+ query: "datadog.agent.up".over("host:host1").last(2).count_by_status()"
+ message: "Host [[host.name]] with IP [[host.ip]] is failing to report to datadog."
+ api_key: "9775a026f1ca7d1c6c5af9d94d9595a4"
+ app_key: "87ce4a24b5553d2e482ea8a8500e71b8ad4554ff"
+
+# Deletes a monitor
+datadog_monitor:
+ name: "Test monitor"
+ state: "absent"
+ api_key: "9775a026f1ca7d1c6c5af9d94d9595a4"
+ app_key: "87ce4a24b5553d2e482ea8a8500e71b8ad4554ff"
+
+# Mutes a monitor
+datadog_monitor:
+ name: "Test monitor"
+ state: "mute"
+ silenced: '{"*":None}'
+ api_key: "9775a026f1ca7d1c6c5af9d94d9595a4"
+ app_key: "87ce4a24b5553d2e482ea8a8500e71b8ad4554ff"
+
+# Unmutes a monitor
+datadog_monitor:
+ name: "Test monitor"
+ state: "unmute"
+ api_key: "9775a026f1ca7d1c6c5af9d94d9595a4"
+ app_key: "87ce4a24b5553d2e482ea8a8500e71b8ad4554ff"
+'''
+
+
+def main():
+ module = AnsibleModule(
+ argument_spec=dict(
+ api_key=dict(required=True, no_log=True),
+ app_key=dict(required=True, no_log=True),
+ state=dict(required=True, choices=['present', 'absent', 'mute', 'unmute']),
+ type=dict(required=False, choices=['metric alert', 'service check', 'event alert']),
+ name=dict(required=True),
+ query=dict(required=False),
+ message=dict(required=False, default=None),
+ silenced=dict(required=False, default=None, type='dict'),
+ notify_no_data=dict(required=False, default=False, type='bool'),
+ no_data_timeframe=dict(required=False, default=None),
+ timeout_h=dict(required=False, default=None),
+ renotify_interval=dict(required=False, default=None),
+ escalation_message=dict(required=False, default=None),
+ notify_audit=dict(required=False, default=False, type='bool'),
+ thresholds=dict(required=False, type='dict', default=None),
+ tags=dict(required=False, type='list', default=None),
+ locked=dict(required=False, default=False, type='bool')
+ )
+ )
+
+ # Prepare Datadog
+ if not HAS_DATADOG:
+ module.fail_json(msg='datadogpy required for this module')
+
+ options = {
+ 'api_key': module.params['api_key'],
+ 'app_key': module.params['app_key']
+ }
+
+ initialize(**options)
+
+ if module.params['state'] == 'present':
+ install_monitor(module)
+ elif module.params['state'] == 'absent':
+ delete_monitor(module)
+ elif module.params['state'] == 'mute':
+ mute_monitor(module)
+ elif module.params['state'] == 'unmute':
+ unmute_monitor(module)
+
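+# Monitor messages use '[[' and ']]' in playbooks so that Jinja2 leaves the
+# template variables alone; convert them to the '{{ }}' syntax Datadog expects.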
+def _fix_template_vars(message):
+ return message.replace('[[', '{{').replace(']]', '}}')
+
+
+def _get_monitor(module):
+ for monitor in api.Monitor.get_all():
+ if monitor['name'] == module.params['name']:
+ return monitor
+ return {}
+
+
+def _post_monitor(module, options):
+ try:
+ kwargs = dict(type=module.params['type'], query=module.params['query'],
+ name=module.params['name'], message=_fix_template_vars(module.params['message']),
+ options=options)
+ if module.params['tags'] is not None:
+ kwargs['tags'] = module.params['tags']
+ msg = api.Monitor.create(**kwargs)
+ if 'errors' in msg:
+ module.fail_json(msg=str(msg['errors']))
+ else:
+ module.exit_json(changed=True, msg=msg)
+ except Exception as e:
+ module.fail_json(msg=str(e))
+
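+# Compare the monitor returned by the update call against the existing monitor
+# while ignoring volatile keys (e.g. 'creator', 'overall_state', 'modified'),
+# so an effectively unchanged monitor reports changed=False.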
+def _equal_dicts(a, b, ignore_keys):
+ ka = set(a).difference(ignore_keys)
+ kb = set(b).difference(ignore_keys)
+ return ka == kb and all(a[k] == b[k] for k in ka)
+
+def _update_monitor(module, monitor, options):
+ try:
+ kwargs = dict(id=monitor['id'], query=module.params['query'],
+ name=module.params['name'], message=_fix_template_vars(module.params['message']),
+ options=options)
+ if module.params['tags'] is not None:
+ kwargs['tags'] = module.params['tags']
+ msg = api.Monitor.update(**kwargs)
+
+ if 'errors' in msg:
+ module.fail_json(msg=str(msg['errors']))
+ elif _equal_dicts(msg, monitor, ['creator', 'overall_state', 'modified']):
+ module.exit_json(changed=False, msg=msg)
+ else:
+ module.exit_json(changed=True, msg=msg)
+ except Exception as e:
+ module.fail_json(msg=str(e))
+
+
+def install_monitor(module):
+ options = {
+ "silenced": module.params['silenced'],
+ "notify_no_data": module.boolean(module.params['notify_no_data']),
+ "no_data_timeframe": module.params['no_data_timeframe'],
+ "timeout_h": module.params['timeout_h'],
+ "renotify_interval": module.params['renotify_interval'],
+ "escalation_message": module.params['escalation_message'],
+ "notify_audit": module.boolean(module.params['notify_audit']),
+ "locked": module.boolean(module.params['locked']),
+ }
+
+ if module.params['type'] == "service check":
+ options["thresholds"] = module.params['thresholds'] or {'ok': 1, 'critical': 1, 'warning': 1}
+ if module.params['type'] == "metric alert" and module.params['thresholds'] is not None:
+ options["thresholds"] = module.params['thresholds']
+
+ monitor = _get_monitor(module)
+ if not monitor:
+ _post_monitor(module, options)
+ else:
+ _update_monitor(module, monitor, options)
+
+
+def delete_monitor(module):
+ monitor = _get_monitor(module)
+ if not monitor:
+ module.exit_json(changed=False)
+ try:
+ msg = api.Monitor.delete(monitor['id'])
+ module.exit_json(changed=True, msg=msg)
+ except Exception as e:
+ module.fail_json(msg=str(e))
+
+
+def mute_monitor(module):
+ monitor = _get_monitor(module)
+ if not monitor:
+ module.fail_json(msg="Monitor %s not found!" % module.params['name'])
+ elif monitor['options']['silenced']:
+ module.fail_json(msg="Monitor is already muted. Datadog does not allow to modify muted alerts, consider unmuting it first.")
+ elif (module.params['silenced'] is not None
+ and len(set(monitor['options']['silenced']) - set(module.params['silenced'])) == 0):
+ module.exit_json(changed=False)
+ try:
+ if module.params['silenced'] is None or module.params['silenced'] == "":
+ msg = api.Monitor.mute(id=monitor['id'])
+ else:
+ msg = api.Monitor.mute(id=monitor['id'], silenced=module.params['silenced'])
+ module.exit_json(changed=True, msg=msg)
+ except Exception as e:
+ module.fail_json(msg=str(e))
+
+
+def unmute_monitor(module):
+ monitor = _get_monitor(module)
+ if not monitor:
+ module.fail_json(msg="Monitor %s not found!" % module.params['name'])
+ elif not monitor['options']['silenced']:
+ module.exit_json(changed=False)
+ try:
+ msg = api.Monitor.unmute(monitor['id'])
+ module.exit_json(changed=True, msg=msg)
+ except Exception as e:
+ module.fail_json(msg=str(e))
+
+
+from ansible.module_utils.basic import *
+from ansible.module_utils.urls import *
+
+if __name__ == '__main__':
+ main()
diff --git a/lib/ansible/modules/extras/monitoring/honeybadger_deployment.py b/lib/ansible/modules/extras/monitoring/honeybadger_deployment.py
new file mode 100644
index 0000000000..3a6d2df7c8
--- /dev/null
+++ b/lib/ansible/modules/extras/monitoring/honeybadger_deployment.py
@@ -0,0 +1,141 @@
+#!/usr/bin/python
+# -*- coding: utf-8 -*-
+
+# Copyright 2014 Benjamin Curtis <benjamin.curtis@gmail.com>
+#
+# This file is part of Ansible
+#
+# Ansible is free software: you can redistribute it and/or modify
+# it under the terms of the GNU General Public License as published by
+# the Free Software Foundation, either version 3 of the License, or
+# (at your option) any later version.
+#
+# Ansible is distributed in the hope that it will be useful,
+# but WITHOUT ANY WARRANTY; without even the implied warranty of
+# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+# GNU General Public License for more details.
+#
+# You should have received a copy of the GNU General Public License
+# along with Ansible. If not, see <http://www.gnu.org/licenses/>.
+
+DOCUMENTATION = '''
+---
+module: honeybadger_deployment
+author: "Benjamin Curtis (@stympy)"
+version_added: "2.2"
+short_description: Notify Honeybadger.io about app deployments
+description:
+ - Notify Honeybadger.io about app deployments (see http://docs.honeybadger.io/article/188-deployment-tracking)
+options:
+ token:
+ description:
+ - API token.
+ required: true
+ environment:
+ description:
+ - The environment name, typically 'production', 'staging', etc.
+ required: true
+ user:
+ description:
+ - The username of the person doing the deployment
+ required: false
+ default: null
+ repo:
+ description:
+ - URL of the project repository
+ required: false
+ default: null
+ revision:
+ description:
+ - A hash, number, tag, or other identifier showing what revision was deployed
+ required: false
+ default: null
+ url:
+ description:
+ - Optional URL to submit the notification to.
+ required: false
+ default: "https://api.honeybadger.io/v1/deploys"
+ validate_certs:
+ description:
+ - If C(no), SSL certificates for the target url will not be validated. This should only be used
+ on personally controlled sites using self-signed certificates.
+ required: false
+ default: 'yes'
+ choices: ['yes', 'no']
+
+requirements: []
+'''
+
+EXAMPLES = '''
+- honeybadger_deployment: token=AAAAAA
+ environment='staging'
+ user='ansible'
+ revision=b6826b8
+ repo=git@github.com:user/repo.git
+'''
+
+RETURN = '''# '''
+
+import urllib
+
+# import module snippets
+from ansible.module_utils.basic import *
+from ansible.module_utils.pycompat24 import get_exception
+from ansible.module_utils.urls import *
+
+# ===========================================
+# Module execution.
+#
+
+def main():
+
+ module = AnsibleModule(
+ argument_spec=dict(
+ token=dict(required=True, no_log=True),
+ environment=dict(required=True),
+ user=dict(required=False),
+ repo=dict(required=False),
+ revision=dict(required=False),
+ url=dict(required=False, default='https://api.honeybadger.io/v1/deploys'),
+ validate_certs=dict(default='yes', type='bool'),
+ ),
+ supports_check_mode=True
+ )
+
+ params = {}
+
+ if module.params["environment"]:
+ params["deploy[environment]"] = module.params["environment"]
+
+ if module.params["user"]:
+ params["deploy[local_username]"] = module.params["user"]
+
+ if module.params["repo"]:
+ params["deploy[repository]"] = module.params["repo"]
+
+ if module.params["revision"]:
+ params["deploy[revision]"] = module.params["revision"]
+
+ params["api_key"] = module.params["token"]
+
+ url = module.params.get('url')
+
+ # If we're in check mode, just exit pretending like we succeeded
+ if module.check_mode:
+ module.exit_json(changed=True)
+
+ try:
+ data = urllib.urlencode(params)
+ response, info = fetch_url(module, url, data=data)
+ except Exception:
+ e = get_exception()
+ module.fail_json(msg='Unable to notify Honeybadger: %s' % e)
+ else:
+ if info['status'] == 200:
+ module.exit_json(changed=True)
+ else:
+ module.fail_json(msg="HTTP result code: %d connecting to %s" % (info['status'], url))
+
+if __name__ == '__main__':
+ main()
+
diff --git a/lib/ansible/modules/extras/monitoring/librato_annotation.py b/lib/ansible/modules/extras/monitoring/librato_annotation.py
new file mode 100644
index 0000000000..f174bda0ea
--- /dev/null
+++ b/lib/ansible/modules/extras/monitoring/librato_annotation.py
@@ -0,0 +1,161 @@
+#!/usr/bin/python
+# -*- coding: utf-8 -*-
+#
+# (C) Seth Edwards, 2014
+#
+# This file is part of Ansible
+#
+# Ansible is free software: you can redistribute it and/or modify
+# it under the terms of the GNU General Public License as published by
+# the Free Software Foundation, either version 3 of the License, or
+# (at your option) any later version.
+#
+# Ansible is distributed in the hope that it will be useful,
+# but WITHOUT ANY WARRANTY; without even the implied warranty of
+# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+# GNU General Public License for more details.
+#
+# You should have received a copy of the GNU General Public License
+# along with Ansible. If not, see <http://www.gnu.org/licenses/>.
+#
+
+
+DOCUMENTATION = '''
+---
+module: librato_annotation
+short_description: create an annotation in librato
+description:
+ - Create an annotation event on the given annotation stream (name). If the annotation stream does not exist, it will be created automatically.
+version_added: "1.6"
+author: "Seth Edwards (@sedward)"
+requirements: []
+options:
+ user:
+ description:
+ - Librato account username
+ required: true
+ api_key:
+ description:
+ - Librato account api key
+ required: true
+ name:
+ description:
+ - The annotation stream name
+ - If the annotation stream does not exist, it will be created automatically
+ required: false
+ title:
+ description:
+ - The title of an annotation is a string and may contain spaces
+ - The title should be a short, high-level summary of the annotation e.g. v45 Deployment
+ required: true
+ source:
+ description:
+ - A string which describes the originating source of an annotation when that annotation is tracked across multiple members of a population
+ required: false
+ description:
+ description:
+ - The description contains extra meta-data about a particular annotation
+ - The description should contain specifics on the individual annotation e.g. Deployed 9b562b2 shipped new feature foo!
+ required: false
+ start_time:
+ description:
+ - The unix timestamp indicating the time at which the event referenced by this annotation started
+ required: false
+ end_time:
+ description:
+ - The unix timestamp indicating the time at which the event referenced by this annotation ended
+ - For events that have a duration, this is a useful way to annotate the duration of the event
+ required: false
+ links:
+ description:
+ - See examples
+ required: false
+'''
+
+EXAMPLES = '''
+# Create a simple annotation event with a source
+- librato_annotation:
+ user: user@example.com
+ api_key: XXXXXXXXXXXXXXXXX
+ title: 'App Config Change'
+ source: 'foo.bar'
+ description: 'This is a detailed description of the config change'
+
+# Create an annotation that includes a link
+- librato_annotation:
+ user: user@example.com
+ api_key: XXXXXXXXXXXXXXXXXX
+ name: 'code.deploy'
+ title: 'app code deploy'
+ description: 'this is a detailed description of a deployment'
+ links:
+ - { rel: 'example', href: 'http://www.example.com/deploy' }
+
+# Create an annotation with a start_time and end_time
+- librato_annotation:
+ user: user@example.com
+ api_key: XXXXXXXXXXXXXXXXXX
+ name: 'maintenance'
+ title: 'Maintenance window'
+ description: 'This is a detailed description of maintenance'
+ start_time: 1395940006
+ end_time: 1395954406
+'''
+
+def post_annotation(module):
+ user = module.params['user']
+ api_key = module.params['api_key']
+ name = module.params['name']
+ title = module.params['title']
+
+ url = 'https://metrics-api.librato.com/v1/annotations/%s' % name
+ params = {}
+ params['title'] = title
+
+ if module.params['source'] is not None:
+ params['source'] = module.params['source']
+ if module.params['description'] is not None:
+ params['description'] = module.params['description']
+ if module.params['start_time'] is not None:
+ params['start_time'] = module.params['start_time']
+ if module.params['end_time'] is not None:
+ params['end_time'] = module.params['end_time']
+ if module.params['links'] is not None:
+ params['links'] = module.params['links']
+
+ json_body = module.jsonify(params)
+
+ headers = {}
+ headers['Content-Type'] = 'application/json'
+
+ # Hack send parameters the way fetch_url wants them
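+ # fetch_url() reads url_username/url_password from module.params and uses
+ # them for HTTP basic auth against the Librato API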
+ module.params['url_username'] = user
+ module.params['url_password'] = api_key
+ response, info = fetch_url(module, url, data=json_body, headers=headers)
+ if info['status'] != 200:
+ module.fail_json(msg="Request Failed", reason=info.get('msg', 'HTTP status %s' % info['status']))
+ response = response.read()
+ module.exit_json(changed=True, annotation=response)
+
+def main():
+
+ module = AnsibleModule(
+ argument_spec = dict(
+ user = dict(required=True),
+ api_key = dict(required=True, no_log=True),
+ name = dict(required=False),
+ title = dict(required=True),
+ source = dict(required=False),
+ description = dict(required=False),
+ start_time = dict(required=False, default=None, type='int'),
+ end_time = dict(required=False, default=None, type='int'),
+ links = dict(type='list')
+ )
+ )
+
+ post_annotation(module)
+
+from ansible.module_utils.basic import *
+from ansible.module_utils.urls import *
+if __name__ == '__main__':
+ main()
diff --git a/lib/ansible/modules/extras/monitoring/logentries.py b/lib/ansible/modules/extras/monitoring/logentries.py
new file mode 100644
index 0000000000..a347afd84c
--- /dev/null
+++ b/lib/ansible/modules/extras/monitoring/logentries.py
@@ -0,0 +1,147 @@
+#!/usr/bin/python
+# -*- coding: utf-8 -*-
+
+# (c) 2013, Ivan Vanderbyl <ivan@app.io>
+#
+# This module is free software: you can redistribute it and/or modify
+# it under the terms of the GNU General Public License as published by
+# the Free Software Foundation, either version 3 of the License, or
+# (at your option) any later version.
+#
+# This software is distributed in the hope that it will be useful,
+# but WITHOUT ANY WARRANTY; without even the implied warranty of
+# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+# GNU General Public License for more details.
+#
+# You should have received a copy of the GNU General Public License
+# along with this software. If not, see <http://www.gnu.org/licenses/>.
+
+DOCUMENTATION = '''
+---
+module: logentries
+author: "Ivan Vanderbyl (@ivanvanderbyl)"
+short_description: Module for tracking logs via logentries.com
+description:
+ - Sends logs to LogEntries in realtime
+version_added: "1.6"
+options:
+ path:
+ description:
+ - path to a log file
+ required: true
+ state:
+ description:
+ - following state of the log
+ choices: [ 'present', 'followed', 'absent', 'unfollowed' ]
+ required: false
+ default: present
+ name:
+ description:
+ - name of the log
+ required: false
+ logtype:
+ description:
+ - type of the log
+ required: false
+
+notes:
+ - Requires the LogEntries agent which can be installed following the instructions at logentries.com
+'''
+EXAMPLES = '''
+- logentries: path=/var/log/nginx/access.log state=present name=nginx-access-log
+- logentries: path=/var/log/nginx/error.log state=absent
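+# Follow a log under an explicit name and type (illustrative values)
+- logentries: path=/var/log/app/app.log state=present name=app-log logtype=plain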
+'''
+
+def query_log_status(module, le_path, path, state="present"):
+ """ Returns whether a log is followed or not. """
+
+ if state == "present":
+ rc, out, err = module.run_command("%s followed %s" % (le_path, path))
+ if rc == 0:
+ return True
+
+ return False
+
+def follow_log(module, le_path, logs, name=None, logtype=None):
+ """ Follows one or more logs if not already followed. """
+
+ followed_count = 0
+
+ for log in logs:
+ if query_log_status(module, le_path, log):
+ continue
+
+ if module.check_mode:
+ module.exit_json(changed=True)
+
+ cmd = [le_path, 'follow', log]
+ if name:
+ cmd.extend(['--name', name])
+ if logtype:
+ cmd.extend(['--type', logtype])
+ rc, out, err = module.run_command(cmd)
+
+ if not query_log_status(module, le_path, log):
+ module.fail_json(msg="failed to follow '%s': %s" % (log, err.strip()))
+
+ followed_count += 1
+
+ if followed_count > 0:
+ module.exit_json(changed=True, msg="followed %d log(s)" % (followed_count,))
+
+ module.exit_json(changed=False, msg="logs(s) already followed")
+
+def unfollow_log(module, le_path, logs):
+ """ Unfollows one or more logs if followed. """
+
+ removed_count = 0
+
+ # Using a for loop so that on error we can report which log failed
+ for log in logs:
+ # Query the log first, to see if we even need to remove.
+ if not query_log_status(module, le_path, log):
+ continue
+
+ if module.check_mode:
+ module.exit_json(changed=True)
+ rc, out, err = module.run_command([le_path, 'rm', log])
+
+ if query_log_status(module, le_path, log):
+ module.fail_json(msg="failed to remove '%s': %s" % (log, err.strip()))
+
+ removed_count += 1
+
+ if removed_count > 0:
+ module.exit_json(changed=True, msg="removed %d package(s)" % removed_count)
+
+ module.exit_json(changed=False, msg="logs(s) already unfollowed")
+
+def main():
+ module = AnsibleModule(
+ argument_spec = dict(
+ path = dict(required=True),
+ state = dict(default="present", choices=["present", "followed", "absent", "unfollowed"]),
+ name = dict(required=False, default=None, type='str'),
+ logtype = dict(required=False, default=None, type='str', aliases=['type'])
+ ),
+ supports_check_mode=True
+ )
+
+ le_path = module.get_bin_path('le', True, ['/usr/local/bin'])
+
+ p = module.params
+
+ # Handle multiple log files
+ logs = p["path"].split(",")
+ logs = [log for log in logs if log]
+
+ if p["state"] in ["present", "followed"]:
+ follow_log(module, le_path, logs, name=p['name'], logtype=p['logtype'])
+
+ elif p["state"] in ["absent", "unfollowed"]:
+ unfollow_log(module, le_path, logs)
+
+# import module snippets
+from ansible.module_utils.basic import *
+
+if __name__ == '__main__':
+ main()
diff --git a/lib/ansible/modules/extras/monitoring/logicmonitor.py b/lib/ansible/modules/extras/monitoring/logicmonitor.py
new file mode 100644
index 0000000000..8d35f3bfbb
--- /dev/null
+++ b/lib/ansible/modules/extras/monitoring/logicmonitor.py
@@ -0,0 +1,2169 @@
+#!/usr/bin/python
+
+"""LogicMonitor Ansible module for managing Collectors, Hosts and Hostgroups
+ Copyright (C) 2015 LogicMonitor
+
+ This program is free software; you can redistribute it and/or modify
+ it under the terms of the GNU General Public License as published by
+ the Free Software Foundation; either version 3 of the License, or
+ (at your option) any later version.
+
+ This program is distributed in the hope that it will be useful,
+ but WITHOUT ANY WARRANTY; without even the implied warranty of
+ MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+ GNU General Public License for more details.
+
+ You should have received a copy of the GNU General Public License
+ along with this program; if not, write to the Free Software Foundation,
+ Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA"""
+
+import datetime
+import os
+import platform
+import socket
+import sys
+import types
+import urllib
+
+HAS_LIB_JSON = True
+try:
+ import json
+ # Detect the python-json library which is incompatible
+ # Look for simplejson if that's the case
+ try:
+ if (
+ not isinstance(json.loads, types.FunctionType) or
+ not isinstance(json.dumps, types.FunctionType)
+ ):
+ raise ImportError
+ except AttributeError:
+ raise ImportError
+except ImportError:
+ try:
+ import simplejson as json
+ except ImportError:
+ print(
+ '\n{"msg": "Error: ansible requires the stdlib json or ' +
+ 'simplejson module, neither was found!", "failed": true}'
+ )
+ HAS_LIB_JSON = False
+ except SyntaxError:
+ print(
+ '\n{"msg": "SyntaxError: probably due to installed simplejson ' +
+ 'being for a different python version", "failed": true}'
+ )
+ HAS_LIB_JSON = False
+
+RETURN = '''
+---
+success:
+ description: flag indicating that execution was successful
+ returned: success
+ type: boolean
+ sample: True
+...
+'''
+
+
+DOCUMENTATION = '''
+---
+module: logicmonitor
+short_description: Manage your LogicMonitor account through Ansible Playbooks
+description:
+ - LogicMonitor is a hosted, full-stack, infrastructure monitoring platform.
+ - This module manages hosts, host groups, and collectors within your LogicMonitor account.
+version_added: "2.2"
+author:
+ - "Ethan Culler-Mayeno"
+ - "Jeff Wozniak"
+notes:
+ - You must have an existing LogicMonitor account for this module to function.
+requirements: ["An existing LogicMonitor account", "Linux"]
+options:
+ target:
+ description:
+ - The type of LogicMonitor object you wish to manage.
+ - "Collector: Perform actions on a LogicMonitor collector"
+ - "NOTE: You should use Ansible service modules such as 'service' or 'supervisorctl' for managing the Collector 'logicmonitor-agent' and 'logicmonitor-watchdog' services. Specifically, you'll probably want to start these services after a Collector add and stop these services before a Collector remove."
+ - "Host: Perform actions on a host device"
+ - "Hostgroup: Perform actions on a LogicMonitor host group"
+ - "NOTE: Host and Hostgroup tasks should always be performed via local_action. There are no benefits to running these tasks on the remote host and doing so will typically cause problems."
+ required: true
+ default: null
+ choices: ['collector', 'host', 'datasource', 'hostgroup']
+ action:
+ description:
+ - The action you wish to perform on target
+ - "Add: Add an object to your LogicMonitor account"
+ - "Remove: Remove an object from your LogicMonitor account"
+ - "Update: Update properties, description, or groups (target=host) for an object in your LogicMonitor account"
+ - "SDT: Schedule downtime for an object in your LogicMonitor account"
+ required: true
+ default: null
+ choices: ['add', 'remove', 'update', 'sdt']
+ company:
+ description:
+ - The LogicMonitor account company name. If you would log in to your account at "superheroes.logicmonitor.com" you would use "superheroes"
+ required: true
+ default: null
+ user:
+ description:
+ - A LogicMonitor user name. The module will authenticate and perform actions on behalf of this user
+ required: true
+ default: null
+ password:
+ description:
+ - The password of the specified LogicMonitor user
+ required: true
+ default: null
+ collector:
+ description:
+ - The fully qualified domain name of a collector in your LogicMonitor account.
+ - This is required for the creation of a LogicMonitor host (target=host action=add)
+ - This is required for updating, removing or scheduling downtime for hosts if 'displayname' isn't specified (target=host action=update action=remove action=sdt)
+ required: false
+ default: null
+ hostname:
+ description:
+ - The hostname of a host in your LogicMonitor account, or the desired hostname of a device to manage.
+ - Optional for managing hosts (target=host)
+ required: false
+ default: 'hostname -f'
+ displayname:
+ description:
+ - The display name of a host in your LogicMonitor account or the desired display name of a device to manage.
+ - Optional for managing hosts (target=host)
+ required: false
+ default: 'hostname -f'
+ description:
+ description:
+ - The long text description of the object in your LogicMonitor account
+ - Optional for managing hosts and host groups (target=host or target=hostgroup; action=add or action=update)
+ required: false
+ default: ""
+ properties:
+ description:
+ - A dictionary of properties to set on the LogicMonitor host or host group.
+ - Optional for managing hosts and host groups (target=host or target=hostgroup; action=add or action=update)
+ - This parameter will add or update existing properties in your LogicMonitor account.
+ required: false
+ default: {}
+ groups:
+ description:
+ - A list of groups that the host should be a member of.
+ - Optional for managing hosts (target=host; action=add or action=update)
+ required: false
+ default: []
+ id:
+ description:
+ - ID of the datasource to target
+ - Required for management of LogicMonitor datasources (target=datasource)
+ required: false
+ default: null
+ fullpath:
+ description:
+ - The fullpath of the host group object you would like to manage
+ - Recommend running on a single Ansible host
+ - Required for management of LogicMonitor host groups (target=hostgroup)
+ required: false
+ default: null
+ alertenable:
+ description:
+ - A boolean flag to turn alerting on or off for an object
+ - Optional for managing all hosts (action=add or action=update)
+ required: false
+ default: true
+ choices: [true, false]
+ starttime:
+ description:
+ - The time that the Scheduled Down Time (SDT) should begin
+ - Optional for managing SDT (action=sdt)
+ - Y-m-d H:M
+ required: false
+ default: Now
+ duration:
+ description:
+ - The duration (minutes) of the Scheduled Down Time (SDT)
+ - Optional for putting an object into SDT (action=sdt)
+ required: false
+ default: 30
+...
+'''
+EXAMPLES = '''
+ # example of adding a new LogicMonitor collector to these devices
+ ---
+ - hosts: collectors
+ remote_user: '{{ username }}'
+ vars:
+ company: 'mycompany'
+ user: 'myusername'
+ password: 'mypassword'
+ tasks:
+ - name: Deploy/verify LogicMonitor collectors
+ become: yes
+ logicmonitor:
+ target=collector
+ action=add
+ company={{ company }}
+ user={{ user }}
+ password={{ password }}
+
+ #example of adding a list of hosts into monitoring
+ ---
+ - hosts: hosts
+ remote_user: '{{ username }}'
+ vars:
+ company: 'mycompany'
+ user: 'myusername'
+ password: 'mypassword'
+ tasks:
+ - name: Deploy LogicMonitor Host
+ # All tasks except for target=collector should use local_action
+ local_action: >
+ logicmonitor
+ target=host
+ action=add
+ collector='mycompany-Collector'
+ company='{{ company }}'
+ user='{{ user }}'
+ password='{{ password }}'
+ groups="/servers/production,/datacenter1"
+ properties="{'snmp.community':'secret','dc':'1', 'type':'prod'}"
+
+ #example of putting a datasource in SDT
+ ---
+ - hosts: localhost
+ remote_user: '{{ username }}'
+ vars:
+ company: 'mycompany'
+ user: 'myusername'
+ password: 'mypassword'
+ tasks:
+ - name: SDT a datasource
+ # All tasks except for target=collector should use local_action
+ local_action: >
+ logicmonitor
+ target=datasource
+ action=sdt
+ id='123'
+ duration=3000
+ starttime='2017-03-04 05:06'
+ company='{{ company }}'
+ user='{{ user }}'
+ password='{{ password }}'
+
+ #example of creating a hostgroup
+ ---
+ - hosts: localhost
+ remote_user: '{{ username }}'
+ vars:
+ company: 'mycompany'
+ user: 'myusername'
+ password: 'mypassword'
+ tasks:
+ - name: Create a host group
+ # All tasks except for target=collector should use local_action
+ local_action: >
+ logicmonitor
+ target=hostgroup
+ action=add
+ fullpath='/servers/development'
+ company='{{ company }}'
+ user='{{ user }}'
+ password='{{ password }}'
+ properties="{'snmp.community':'commstring', 'type':'dev'}"
+
+ #example of putting a list of hosts into SDT
+ ---
+ - hosts: hosts
+ remote_user: '{{ username }}'
+ vars:
+ company: 'mycompany'
+ user: 'myusername'
+ password: 'mypassword'
+ tasks:
+ - name: SDT hosts
+ # All tasks except for target=collector should use local_action
+ local_action: >
+ logicmonitor
+ target=host
+ action=sdt
+ duration=3000
+ starttime='2016-11-10 09:08'
+ company='{{ company }}'
+ user='{{ user }}'
+ password='{{ password }}'
+ collector='mycompany-Collector'
+
+ #example of putting a host group in SDT
+ ---
+ - hosts: localhost
+ remote_user: '{{ username }}'
+ vars:
+ company: 'mycompany'
+ user: 'myusername'
+ password: 'mypassword'
+ tasks:
+ - name: SDT a host group
+ # All tasks except for target=collector should use local_action
+ local_action: >
+ logicmonitor
+ target=hostgroup
+ action=sdt
+ fullpath='/servers/development'
+ duration=3000
+ starttime='2017-03-04 05:06'
+ company='{{ company }}'
+ user='{{ user }}'
+ password='{{ password }}'
+
+ #example of updating a list of hosts
+ ---
+ - hosts: hosts
+ remote_user: '{{ username }}'
+ vars:
+ company: 'mycompany'
+ user: 'myusername'
+ password: 'mypassword'
+ tasks:
+ - name: Update a list of hosts
+ # All tasks except for target=collector should use local_action
+ local_action: >
+ logicmonitor
+ target=host
+ action=update
+ company='{{ company }}'
+ user='{{ user }}'
+ password='{{ password }}'
+ collector='mycompany-Collector'
+ groups="/servers/production,/datacenter5"
+ properties="{'snmp.community':'commstring','dc':'5'}"
+
+ #example of updating a hostgroup
+ ---
+ - hosts: hosts
+ remote_user: '{{ username }}'
+ vars:
+ company: 'mycompany'
+ user: 'myusername'
+ password: 'mypassword'
+ tasks:
+ - name: Update a host group
+ # All tasks except for target=collector should use local_action
+ local_action: >
+ logicmonitor
+ target=hostgroup
+ action=update
+ fullpath='/servers/development'
+ company='{{ company }}'
+ user='{{ user }}'
+ password='{{ password }}'
+ properties="{'snmp.community':'hg', 'type':'dev', 'status':'test'}"
+
+ #example of removing a list of hosts from monitoring
+ ---
+ - hosts: hosts
+ remote_user: '{{ username }}'
+ vars:
+ company: 'mycompany'
+ user: 'myusername'
+ password: 'mypassword'
+ tasks:
+ - name: Remove LogicMonitor hosts
+ # All tasks except for target=collector should use local_action
+ local_action: >
+ logicmonitor
+ target=host
+ action=remove
+ company='{{ company }}'
+ user='{{ user }}'
+ password='{{ password }}'
+ collector='mycompany-Collector'
+
+ #example of removing a host group
+ ---
+ - hosts: hosts
+ remote_user: '{{ username }}'
+ vars:
+ company: 'mycompany'
+ user: 'myusername'
+ password: 'mypassword'
+ tasks:
+ - name: Remove LogicMonitor development servers hostgroup
+ # All tasks except for target=collector should use local_action
+ local_action: >
+ logicmonitor
+ target=hostgroup
+ action=remove
+ company='{{ company }}'
+ user='{{ user }}'
+ password='{{ password }}'
+ fullpath='/servers/development'
+ - name: Remove LogicMonitor servers hostgroup
+ # All tasks except for target=collector should use local_action
+ local_action: >
+ logicmonitor
+ target=hostgroup
+ action=remove
+ company='{{ company }}'
+ user='{{ user }}'
+ password='{{ password }}'
+ fullpath='/servers'
+ - name: Remove LogicMonitor datacenter1 hostgroup
+ # All tasks except for target=collector should use local_action
+ local_action: >
+ logicmonitor
+ target=hostgroup
+ action=remove
+ company='{{ company }}'
+ user='{{ user }}'
+ password='{{ password }}'
+ fullpath='/datacenter1'
+ - name: Remove LogicMonitor datacenter5 hostgroup
+ # All tasks except for target=collector should use local_action
+ local_action: >
+ logicmonitor
+ target=hostgroup
+ action=remove
+ company='{{ company }}'
+ user='{{ user }}'
+ password='{{ password }}'
+ fullpath='/datacenter5'
+
+ #example of removing LogicMonitor collectors from these devices
+ ---
+ - hosts: collectors
+ remote_user: '{{ username }}'
+ vars:
+ company: 'mycompany'
+ user: 'myusername'
+ password: 'mypassword'
+ tasks:
+ - name: Remove LogicMonitor collectors
+ become: yes
+ logicmonitor:
+ target=collector
+ action=remove
+ company={{ company }}
+ user={{ user }}
+ password={{ password }}
+
+ #complete example
+ ---
+ - hosts: localhost
+ remote_user: '{{ username }}'
+ vars:
+ company: 'mycompany'
+ user: 'myusername'
+ password: 'mypassword'
+ tasks:
+ - name: Create a host group
+ local_action: >
+ logicmonitor
+ target=hostgroup
+ action=add
+ fullpath='/servers/production/database'
+ company='{{ company }}'
+ user='{{ user }}'
+ password='{{ password }}'
+ properties="{'snmp.community':'commstring'}"
+ - name: SDT a host group
+ local_action: >
+ logicmonitor
+ target=hostgroup
+ action=sdt
+ fullpath='/servers/production/web'
+ duration=3000
+ starttime='2012-03-04 05:06'
+ company='{{ company }}'
+ user='{{ user }}'
+ password='{{ password }}'
+
+ - hosts: collectors
+ remote_user: '{{ username }}'
+ vars:
+ company: 'mycompany'
+ user: 'myusername'
+ password: 'mypassword'
+ tasks:
+ - name: Deploy/verify LogicMonitor collectors
+ logicmonitor:
+ target: collector
+ action: add
+ company: '{{ company }}'
+ user: '{{ user }}'
+ password: '{{ password }}'
+ - name: Place LogicMonitor collectors into 30 minute Scheduled downtime
+ logicmonitor: target=collector action=sdt company={{ company }}
+ user={{ user }} password={{ password }}
+ - name: Deploy LogicMonitor Host
+ local_action: >
+ logicmonitor
+ target=host
+ action=add
+ collector=agent1.ethandev.com
+ company='{{ company }}'
+ user='{{ user }}'
+ password='{{ password }}'
+ properties="{'snmp.community':'commstring', 'dc':'1'}"
+ groups="/servers/production/collectors, /datacenter1"
+
+ - hosts: database-servers
+ remote_user: '{{ username }}'
+ vars:
+ company: 'mycompany'
+ user: 'myusername'
+ password: 'mypassword'
+ tasks:
+ - name: deploy logicmonitor hosts
+ local_action: >
+ logicmonitor
+ target=host
+ action=add
+ collector=monitoring.dev.com
+ company='{{ company }}'
+ user='{{ user }}'
+ password='{{ password }}'
+ properties="{'snmp.community':'commstring', 'type':'db', 'dc':'1'}"
+ groups="/servers/production/database, /datacenter1"
+ - name: schedule 5 hour downtime for 2012-11-10 09:08
+ local_action: >
+ logicmonitor
+ target=host
+ action=sdt
+ duration=3000
+ starttime='2012-11-10 09:08'
+ company='{{ company }}'
+ user='{{ user }}'
+ password='{{ password }}'
+'''
+
+
+class LogicMonitor(object):
+
+ def __init__(self, module, **params):
+ self.__version__ = "1.0-python"
+ self.module = module
+ self.module.debug("Instantiating LogicMonitor object")
+
+ self.check_mode = False
+ self.company = params["company"]
+ self.user = params["user"]
+ self.password = params["password"]
+ self.fqdn = socket.getfqdn()
+ self.lm_url = "logicmonitor.com/santaba"
+ self.__version__ = self.__version__ + "-ansible-module"
+
+ def rpc(self, action, params):
+ """Make a call to the LogicMonitor RPC library
+ and return the response"""
+ self.module.debug("Running LogicMonitor.rpc")
+
+ param_str = urllib.urlencode(params)
+ creds = urllib.urlencode(
+ {"c": self.company,
+ "u": self.user,
+ "p": self.password})
+
+ if param_str:
+ param_str = param_str + "&"
+
+ param_str = param_str + creds
+
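+ # Resulting URL shape (credentials elided):
+ # https://<company>.logicmonitor.com/santaba/rpc/<action>?<params>&c=...&u=...&p=...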
+ try:
+ url = ("https://" + self.company + "." + self.lm_url +
+ "/rpc/" + action + "?" + param_str)
+
+ # Set custom LogicMonitor header with version
+ headers = {"X-LM-User-Agent": self.__version__}
+
+ # Set headers
+ f = open_url(url, headers=headers)
+
+ raw = f.read()
+ resp = json.loads(raw)
+ if resp["status"] == 403:
+ self.module.debug("Authentication failed.")
+ self.fail(msg="Error: " + resp["errmsg"])
+ else:
+ return raw
+ except IOError:
+ self.fail(msg="Error: Unknown exception making RPC call")
+
+ def do(self, action, params):
+ """Make a call to the LogicMonitor
+ server \"do\" function"""
+ self.module.debug("Running LogicMonitor.do...")
+
+ param_str = urllib.urlencode(params)
+ creds = (urllib.urlencode(
+ {"c": self.company,
+ "u": self.user,
+ "p": self.password}))
+
+ if param_str:
+ param_str = param_str + "&"
+ param_str = param_str + creds
+
+ try:
+ self.module.debug("Attempting to open URL: " +
+ "https://" + self.company + "." + self.lm_url +
+ "/do/" + action + "?" + param_str)
+ f = open_url(
+ "https://" + self.company + "." + self.lm_url +
+ "/do/" + action + "?" + param_str)
+ return f.read()
+ except IOError:
+ # self.module.debug("Error opening URL. " + ioe)
+ self.fail("Unknown exception opening URL")
+
+ def get_collectors(self):
+ """Returns a JSON object containing a list of
+ LogicMonitor collectors"""
+ self.module.debug("Running LogicMonitor.get_collectors...")
+
+ self.module.debug("Making RPC call to 'getAgents'")
+ resp = self.rpc("getAgents", {})
+ resp_json = json.loads(resp)
+
+ if resp_json["status"] is 200:
+ self.module.debug("RPC call succeeded")
+ return resp_json["data"]
+ else:
+ self.fail(msg=resp)
+
+ def get_host_by_hostname(self, hostname, collector):
+ """Returns a host object for the host matching the
+ specified hostname"""
+ self.module.debug("Running LogicMonitor.get_host_by_hostname...")
+
+ self.module.debug("Looking for hostname " + hostname)
+ self.module.debug("Making RPC call to 'getHosts'")
+ hostlist_json = json.loads(self.rpc("getHosts", {"hostGroupId": 1}))
+
+ if collector:
+ if hostlist_json["status"] == 200:
+ self.module.debug("RPC call succeeded")
+
+ hosts = hostlist_json["data"]["hosts"]
+
+ self.module.debug(
+ "Looking for host matching: hostname " + hostname +
+ " and collector " + str(collector["id"]))
+
+ for host in hosts:
+ if (host["hostName"] == hostname and
+ host["agentId"] == collector["id"]):
+
+ self.module.debug("Host match found")
+ return host
+ self.module.debug("No host match found")
+ return None
+ else:
+ self.module.debug("RPC call failed")
+ self.module.debug(hostlist_json)
+ else:
+ self.module.debug("No collector specified")
+ return None
+
+ def get_host_by_displayname(self, displayname):
+ """Returns a host object for the host matching the
+ specified display name"""
+ self.module.debug("Running LogicMonitor.get_host_by_displayname...")
+
+ self.module.debug("Looking for displayname " + displayname)
+ self.module.debug("Making RPC call to 'getHost'")
+ host_json = (json.loads(self.rpc("getHost",
+ {"displayName": displayname})))
+
+ if host_json["status"] == 200:
+ self.module.debug("RPC call succeeded")
+ return host_json["data"]
+ else:
+ self.module.debug("RPC call failed")
+ self.module.debug(host_json)
+ return None
+
+ def get_collector_by_description(self, description):
+ """Returns a JSON collector object for the collector
+ matching the specified FQDN (description)"""
+ self.module.debug(
+ "Running LogicMonitor.get_collector_by_description..."
+ )
+
+ collector_list = self.get_collectors()
+ if collector_list is not None:
+ self.module.debug("Looking for collector with description {0}" +
+ description)
+ for collector in collector_list:
+ if collector["description"] == description:
+ self.module.debug("Collector match found")
+ return collector
+ self.module.debug("No collector match found")
+ return None
+
+ def get_group(self, fullpath):
+ """Returns a JSON group object for the group matching the
+ specified path"""
+ self.module.debug("Running LogicMonitor.get_group...")
+
+ self.module.debug("Making RPC call to getHostGroups")
+ resp = json.loads(self.rpc("getHostGroups", {}))
+
+ if resp["status"] == 200:
+ self.module.debug("RPC called succeeded")
+ groups = resp["data"]
+
+ self.module.debug("Looking for group matching " + fullpath)
+ for group in groups:
+ if group["fullPath"] == fullpath.lstrip('/'):
+ self.module.debug("Group match found")
+ return group
+
+ self.module.debug("No group match found")
+ return None
+ else:
+ self.module.debug("RPC call failed")
+ self.module.debug(resp)
+
+ return None
+
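+ # For example, create_group("/a/b") first resolves or creates "/a" and then
+ # creates "b" beneath it; the recursion bottoms out at the root group (id 1).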
+ def create_group(self, fullpath):
+ """Recursively create a path of host groups.
+ Returns the id of the newly created hostgroup"""
+ self.module.debug("Running LogicMonitor.create_group...")
+
+ res = self.get_group(fullpath)
+ if res:
+ self.module.debug("Group {0} exists." + fullpath)
+ return res["id"]
+
+ if fullpath == "/":
+ self.module.debug("Specified group is root. Doing nothing.")
+ return 1
+ else:
+ self.module.debug("Creating group named " + fullpath)
+ self.module.debug("System changed")
+ self.change = True
+
+ if self.check_mode:
+ self.exit(changed=True)
+
+ parentpath, name = fullpath.rsplit('/', 1)
+ parentgroup = self.get_group(parentpath)
+
+ parentid = 1
+
+ if parentpath == "":
+ parentid = 1
+ elif parentgroup:
+ parentid = parentgroup["id"]
+ else:
+ parentid = self.create_group(parentpath)
+
+ h = None
+
+ # Determine if we're creating a group from host or hostgroup class
+ if hasattr(self, '_build_host_group_hash'):
+ h = self._build_host_group_hash(
+ fullpath,
+ self.description,
+ self.properties,
+ self.alertenable)
+ h["name"] = name
+ h["parentId"] = parentid
+ else:
+ h = {"name": name,
+ "parentId": parentid,
+ "alertEnable": True,
+ "description": ""}
+
+ self.module.debug("Making RPC call to 'addHostGroup'")
+ resp = json.loads(
+ self.rpc("addHostGroup", h))
+
+ if resp["status"] == 200:
+ self.module.debug("RPC call succeeded")
+ return resp["data"]["id"]
+ elif resp["errmsg"] == "The record already exists":
+ self.module.debug("The hostgroup already exists")
+ group = self.get_group(fullpath)
+ return group["id"]
+ else:
+ self.module.debug("RPC call failed")
+ self.fail(
+ msg="Error: unable to create new hostgroup \"" +
+ name + "\".\n" + resp["errmsg"])
+
+ def fail(self, msg):
+ self.module.fail_json(msg=msg, changed=self.change, failed=True)
+
+ def exit(self, changed):
+ self.module.debug("Changed: " + changed)
+ self.module.exit_json(changed=changed, success=True)
+
+ def output_info(self, info):
+ self.module.debug("Registering properties as Ansible facts")
+ self.module.exit_json(changed=False, ansible_facts=info)
+
+
+class Collector(LogicMonitor):
+
+ def __init__(self, params, module=None):
+ """Initializor for the LogicMonitor Collector object"""
+ self.change = False
+ self.params = params
+
+ LogicMonitor.__init__(self, module, **params)
+ self.module.debug("Instantiating Collector object")
+
+ if self.params['description']:
+ self.description = self.params['description']
+ else:
+ self.description = self.fqdn
+
+ self.info = self._get()
+ self.installdir = "/usr/local/logicmonitor"
+ self.platform = platform.system()
+ self.is_64bits = sys.maxsize > 2**32
+ self.duration = self.params['duration']
+ self.starttime = self.params['starttime']
+
+ if self.info is None:
+ self.id = None
+ else:
+ self.id = self.info["id"]
+
+ def create(self):
+ """Idempotent function to make sure that there is
+ a running collector installed and registered"""
+ self.module.debug("Running Collector.create...")
+
+ self._create()
+ self.get_installer_binary()
+ self.install()
+
+ def remove(self):
+ """Idempotent function to make sure that there is
+ not a running collector installed and registered"""
+ self.module.debug("Running Collector.destroy...")
+
+ self._unreigster()
+ self.uninstall()
+
+ def get_installer_binary(self):
+ """Download the LogicMonitor collector installer binary"""
+ self.module.debug("Running Collector.get_installer_binary...")
+
+ arch = 32
+
+ if self.is_64bits:
+ self.module.debug("64 bit system")
+ arch = 64
+ else:
+ self.module.debug("32 bit system")
+
+ if self.platform == "Linux" and self.id is not None:
+ self.module.debug("Platform is Linux")
+ self.module.debug("Agent ID is " + str(self.id))
+
+ installfilepath = (self.installdir +
+ "/logicmonitorsetup" +
+ str(self.id) + "_" + str(arch) +
+ ".bin")
+
+ self.module.debug("Looking for existing installer at " +
+ installfilepath)
+ if not os.path.isfile(installfilepath):
+ self.module.debug("No previous installer found")
+ self.module.debug("System changed")
+ self.change = True
+
+ if self.check_mode:
+ self.exit(changed=True)
+
+ self.module.debug("Downloading installer file")
+ # attempt to create the install dir before download
+ self.module.run_command("mkdir " + self.installdir)
+
+ try:
+ f = open(installfilepath, "w")
+ installer = (self.do("logicmonitorsetup",
+ {"id": self.id,
+ "arch": arch}))
+ f.write(installer)
+ f.close()
+ except IOError:
+ self.fail(msg="Unable to open installer file for writing")
+ else:
+ self.module.debug("Collector installer already exists")
+
+ # Return the path whether the installer was just downloaded or cached
+ return installfilepath
+
+ elif self.id is None:
+ self.fail(
+ msg="Error: There is currently no collector " +
+ "associated with this device. To download " +
+ " the installer, first create a collector " +
+ "for this device.")
+ elif self.platform != "Linux":
+ self.fail(
+ msg="Error: LogicMonitor Collector must be " +
+ "installed on a Linux device.")
+ else:
+ self.fail(
+ msg="Error: Unable to retrieve the installer from the server")
+
+ def install(self):
+ """Execute the LogicMonitor installer if not
+ already installed"""
+ self.module.debug("Running Collector.install...")
+
+ if self.platform == "Linux":
+ self.module.debug("Platform is Linux")
+
+ installer = self.get_installer_binary()
+
+ if self.info is None:
+ self.module.debug("Retriving collector information")
+ self.info = self._get()
+
+ if not os.path.exists(self.installdir + "/agent"):
+ self.module.debug("System changed")
+ self.change = True
+
+ if self.check_mode:
+ self.exit(changed=True)
+
+ self.module.debug("Setting installer file permissions")
+                os.chmod(installer, 484) # 484 == 0o744 (rwxr--r--)
+
+ self.module.debug("Executing installer")
+ ret_code, out, err = self.module.run_command(installer + " -y")
+
+ if ret_code != 0:
+ self.fail(msg="Error: Unable to install collector: " + err)
+ else:
+ self.module.debug("Collector installed successfully")
+ else:
+ self.module.debug("Collector already installed")
+ else:
+ self.fail(
+ msg="Error: LogicMonitor Collector must be " +
+ "installed on a Linux device")
+
+ def uninstall(self):
+ """Uninstall LogicMontitor collector from the system"""
+ self.module.debug("Running Collector.uninstall...")
+
+ uninstallfile = self.installdir + "/agent/bin/uninstall.pl"
+
+ if os.path.isfile(uninstallfile):
+ self.module.debug("Collector uninstall file exists")
+ self.module.debug("System changed")
+ self.change = True
+
+ if self.check_mode:
+ self.exit(changed=True)
+
+ self.module.debug("Running collector uninstaller")
+ ret_code, out, err = self.module.run_command(uninstallfile)
+
+ if ret_code != 0:
+ self.fail(
+ msg="Error: Unable to uninstall collector: " + err)
+ else:
+ self.module.debug("Collector successfully uninstalled")
+ else:
+ if os.path.exists(self.installdir + "/agent"):
+ (self.fail(
+ msg="Unable to uninstall LogicMonitor " +
+ "Collector. Can not find LogicMonitor " +
+ "uninstaller."))
+
+ def sdt(self):
+ """Create a scheduled down time
+ (maintenance window) for this host"""
+ self.module.debug("Running Collector.sdt...")
+
+ self.module.debug("System changed")
+ self.change = True
+
+ if self.check_mode:
+ self.exit(changed=True)
+
+ duration = self.duration
+ starttime = self.starttime
+ offsetstart = starttime
+
+ if starttime:
+ self.module.debug("Start time specified")
+ start = datetime.datetime.strptime(starttime, '%Y-%m-%d %H:%M')
+ offsetstart = start
+ else:
+ self.module.debug("No start time specified. Using default.")
+ start = datetime.datetime.utcnow()
+
+ # Use user UTC offset
+ self.module.debug("Making RPC call to 'getTimeZoneSetting'")
+ accountresp = json.loads(self.rpc("getTimeZoneSetting", {}))
+
+ if accountresp["status"] == 200:
+ self.module.debug("RPC call succeeded")
+
+ offset = accountresp["data"]["offset"]
+ offsetstart = start + datetime.timedelta(0, offset)
+ else:
+ self.fail(msg="Error: Unable to retrieve timezone offset")
+
+ offsetend = offsetstart + datetime.timedelta(0, int(duration)*60)
+
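+        # The SDT RPC appears to expect 0-based months (hence the month-1
+        # adjustments below); the offsets above shift UTC into account-local time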
+ h = {"agentId": self.id,
+ "type": 1,
+ "notifyCC": True,
+ "year": offsetstart.year,
+ "month": offsetstart.month-1,
+ "day": offsetstart.day,
+ "hour": offsetstart.hour,
+ "minute": offsetstart.minute,
+ "endYear": offsetend.year,
+ "endMonth": offsetend.month-1,
+ "endDay": offsetend.day,
+ "endHour": offsetend.hour,
+ "endMinute": offsetend.minute}
+
+ self.module.debug("Making RPC call to 'setAgentSDT'")
+ resp = json.loads(self.rpc("setAgentSDT", h))
+
+ if resp["status"] == 200:
+ self.module.debug("RPC call succeeded")
+ return resp["data"]
+ else:
+ self.module.debug("RPC call failed")
+ self.fail(msg=resp["errmsg"])
+
+ def site_facts(self):
+ """Output current properties information for the Collector"""
+ self.module.debug("Running Collector.site_facts...")
+
+ if self.info:
+ self.module.debug("Collector exists")
+ props = self.get_properties(True)
+
+ self.output_info(props)
+ else:
+ self.fail(msg="Error: Collector doesn't exit.")
+
+ def _get(self):
+ """Returns a JSON object representing this collector"""
+ self.module.debug("Running Collector._get...")
+ collector_list = self.get_collectors()
+
+ if collector_list is not None:
+ self.module.debug("Collectors returned")
+ for collector in collector_list:
+ if collector["description"] == self.description:
+ return collector
+ else:
+ self.module.debug("No collectors returned")
+ return None
+
+ def _create(self):
+ """Create a new collector in the associated
+ LogicMonitor account"""
+ self.module.debug("Running Collector._create...")
+
+ if self.platform == "Linux":
+ self.module.debug("Platform is Linux")
+ ret = self.info or self._get()
+
+ if ret is None:
+ self.change = True
+ self.module.debug("System changed")
+
+ if self.check_mode:
+ self.exit(changed=True)
+
+ h = {"autogen": True,
+ "description": self.description}
+
+ self.module.debug("Making RPC call to 'addAgent'")
+ create = (json.loads(self.rpc("addAgent", h)))
+
+ if create["status"] is 200:
+ self.module.debug("RPC call succeeded")
+ self.info = create["data"]
+ self.id = create["data"]["id"]
+ return create["data"]
+ else:
+ self.fail(msg=create["errmsg"])
+ else:
+ self.info = ret
+ self.id = ret["id"]
+ return ret
+ else:
+ self.fail(
+ msg="Error: LogicMonitor Collector must be " +
+ "installed on a Linux device.")
+
+    def _unregister(self):
+        """Delete this collector from the associated
+        LogicMonitor account"""
+        self.module.debug("Running Collector._unregister...")
+
+ if self.info is None:
+ self.module.debug("Retrieving collector information")
+ self.info = self._get()
+
+ if self.info is not None:
+ self.module.debug("Collector found")
+ self.module.debug("System changed")
+ self.change = True
+
+ if self.check_mode:
+ self.exit(changed=True)
+
+ self.module.debug("Making RPC call to 'deleteAgent'")
+ delete = json.loads(self.rpc("deleteAgent",
+ {"id": self.id}))
+
+ if delete["status"] is 200:
+ self.module.debug("RPC call succeeded")
+ return delete
+ else:
+                # The collector couldn't be unregistered
+                self.module.debug("Error unregistering collector. " +
+                                  delete["errmsg"])
+ self.fail(msg=delete["errmsg"])
+ else:
+ self.module.debug("Collector not found")
+ return None
+
+
+class Host(LogicMonitor):
+
+ def __init__(self, params, module=None):
+ """Initializor for the LogicMonitor host object"""
+ self.change = False
+ self.params = params
+ self.collector = None
+
+ LogicMonitor.__init__(self, module, **self.params)
+ self.module.debug("Instantiating Host object")
+
+ if self.params["hostname"]:
+ self.module.debug("Hostname is " + self.params["hostname"])
+ self.hostname = self.params['hostname']
+ else:
+ self.module.debug("No hostname specified. Using " + self.fqdn)
+ self.hostname = self.fqdn
+
+ if self.params["displayname"]:
+ self.module.debug("Display name is " + self.params["displayname"])
+ self.displayname = self.params['displayname']
+ else:
+ self.module.debug("No display name specified. Using " + self.fqdn)
+ self.displayname = self.fqdn
+
+        # Attempt to find host information via display name or hostname
+ self.module.debug("Attempting to find host by displayname " +
+ self.displayname)
+ info = self.get_host_by_displayname(self.displayname)
+
+ if info is not None:
+ self.module.debug("Host found by displayname")
+            # Use the host information to grab the collector description
+            # if not provided
+            if (self.params["collector"] is None and
+                    "agentDescription" in info):
+ self.module.debug("Setting collector from host response. " +
+ "Collector " + info["agentDescription"])
+ self.params["collector"] = info["agentDescription"]
+ else:
+ self.module.debug("Host not found by displayname")
+
+ # At this point, a valid collector description is required for success
+ # Check that the description exists or fail
+ if self.params["collector"]:
+ self.module.debug(
+ "Collector specified is " +
+ self.params["collector"]
+ )
+ self.collector = (self.get_collector_by_description(
+ self.params["collector"]))
+ else:
+ self.fail(msg="No collector specified.")
+
+ # If the host wasn't found via displayname, attempt by hostname
+ if info is None:
+ self.module.debug("Attempting to find host by hostname " +
+ self.hostname)
+ info = self.get_host_by_hostname(self.hostname, self.collector)
+
+ self.info = info
+ self.properties = self.params["properties"]
+ self.description = self.params["description"]
+ self.starttime = self.params["starttime"]
+ self.duration = self.params["duration"]
+ self.alertenable = self.params["alertenable"]
+ if self.params["groups"] is not None:
+ self.groups = self._strip_groups(self.params["groups"])
+ else:
+ self.groups = None
+
+ def create(self):
+ """Idemopotent function to create if missing,
+ update if changed, or skip"""
+ self.module.debug("Running Host.create...")
+
+ self.update()
+
+ def get_properties(self):
+ """Returns a hash of the properties
+ associated with this LogicMonitor host"""
+ self.module.debug("Running Host.get_properties...")
+
+ if self.info:
+ self.module.debug("Making RPC call to 'getHostProperties'")
+ properties_json = (json.loads(self.rpc("getHostProperties",
+ {'hostId': self.info["id"],
+ "filterSystemProperties": True})))
+
+ if properties_json["status"] == 200:
+ self.module.debug("RPC call succeeded")
+ return properties_json["data"]
+ else:
+ self.module.debug("Error: there was an issue retrieving the " +
+ "host properties")
+ self.module.debug(properties_json["errmsg"])
+
+ self.fail(msg=properties_json["status"])
+ else:
+ self.module.debug(
+ "Unable to find LogicMonitor host which matches " +
+ self.displayname + " (" + self.hostname + ")"
+ )
+ return None
+
+ def set_properties(self, propertyhash):
+ """update the host to have the properties
+ contained in the property hash"""
+ self.module.debug("Running Host.set_properties...")
+ self.module.debug("System changed")
+ self.change = True
+
+ if self.check_mode:
+ self.exit(changed=True)
+
+ self.module.debug("Assigning property hash to host object")
+ self.properties = propertyhash
+
+ def add(self):
+ """Add this device to monitoring
+ in your LogicMonitor account"""
+ self.module.debug("Running Host.add...")
+
+ if self.collector and not self.info:
+ self.module.debug("Host not registered. Registering.")
+ self.module.debug("System changed")
+ self.change = True
+
+ if self.check_mode:
+ self.exit(changed=True)
+
+ h = self._build_host_hash(
+ self.hostname,
+ self.displayname,
+ self.collector,
+ self.description,
+ self.groups,
+ self.properties,
+ self.alertenable)
+
+ self.module.debug("Making RPC call to 'addHost'")
+ resp = json.loads(self.rpc("addHost", h))
+
+ if resp["status"] == 200:
+ self.module.debug("RPC call succeeded")
+ return resp["data"]
+ else:
+ self.module.debug("RPC call failed")
+ self.module.debug(resp)
+ return resp["errmsg"]
+ elif self.collector is None:
+ self.fail(msg="Specified collector doesn't exist")
+ else:
+ self.module.debug("Host already registered")
+
+ def update(self):
+ """This method takes changes made to this host
+ and applies them to the corresponding host
+ in your LogicMonitor account."""
+ self.module.debug("Running Host.update...")
+
+ if self.info:
+ self.module.debug("Host already registed")
+ if self.is_changed():
+ self.module.debug("System changed")
+ self.change = True
+
+ if self.check_mode:
+ self.exit(changed=True)
+
+ h = (self._build_host_hash(
+ self.hostname,
+ self.displayname,
+ self.collector,
+ self.description,
+ self.groups,
+ self.properties,
+ self.alertenable))
+ h["id"] = self.info["id"]
+ h["opType"] = "replace"
+
+ self.module.debug("Making RPC call to 'updateHost'")
+ resp = json.loads(self.rpc("updateHost", h))
+
+ if resp["status"] == 200:
+ self.module.debug("RPC call succeeded")
+ else:
+ self.module.debug("RPC call failed")
+ self.fail(msg="Error: unable to update the host.")
+ else:
+ self.module.debug(
+ "Host properties match supplied properties. " +
+ "No changes to make."
+ )
+ return self.info
+ else:
+ self.module.debug("Host not registed. Registering")
+ self.module.debug("System changed")
+ self.change = True
+
+ if self.check_mode:
+ self.exit(changed=True)
+
+ return self.add()
+
+ def remove(self):
+ """Remove this host from your LogicMonitor account"""
+ self.module.debug("Running Host.remove...")
+
+ if self.info:
+ self.module.debug("Host registered")
+ self.module.debug("System changed")
+ self.change = True
+
+ if self.check_mode:
+ self.exit(changed=True)
+
+ self.module.debug("Making RPC call to 'deleteHost'")
+ resp = json.loads(self.rpc("deleteHost",
+ {"hostId": self.info["id"],
+ "deleteFromSystem": True,
+ "hostGroupId": 1}))
+
+ if resp["status"] == 200:
+ self.module.debug(resp)
+ self.module.debug("RPC call succeeded")
+ return resp
+ else:
+ self.module.debug("RPC call failed")
+ self.module.debug(resp)
+ self.fail(msg=resp["errmsg"])
+
+ else:
+ self.module.debug("Host not registered")
+
+ def is_changed(self):
+ """Return true if the host doesn't
+ match the LogicMonitor account"""
+ self.module.debug("Running Host.is_changed")
+
+ ignore = ['system.categories', 'snmp.version']
+
+ hostresp = self.get_host_by_displayname(self.displayname)
+
+ if hostresp is None:
+ hostresp = self.get_host_by_hostname(self.hostname, self.collector)
+
+ if hostresp:
+ self.module.debug("Comparing simple host properties")
+ if hostresp["alertEnable"] != self.alertenable:
+ return True
+
+ if hostresp["description"] != self.description:
+ return True
+
+ if hostresp["displayedAs"] != self.displayname:
+ return True
+
+ if (self.collector and
+ hasattr(self.collector, "id") and
+ hostresp["agentId"] != self.collector["id"]):
+ return True
+
+ self.module.debug("Comparing groups.")
+ if self._compare_groups(hostresp) is True:
+ return True
+
+ propresp = self.get_properties()
+
+ if propresp:
+ self.module.debug("Comparing properties.")
+ if self._compare_props(propresp, ignore) is True:
+ return True
+ else:
+ self.fail(
+ msg="Error: Unknown error retrieving host properties")
+
+ return False
+ else:
+ self.fail(msg="Error: Unknown error retrieving host information")
+
+ def sdt(self):
+ """Create a scheduled down time
+ (maintenance window) for this host"""
+ self.module.debug("Running Host.sdt...")
+ if self.info:
+ self.module.debug("System changed")
+ self.change = True
+
+ if self.check_mode:
+ self.exit(changed=True)
+
+ duration = self.duration
+ starttime = self.starttime
+ offset = starttime
+
+ if starttime:
+ self.module.debug("Start time specified")
+ start = datetime.datetime.strptime(starttime, '%Y-%m-%d %H:%M')
+ offsetstart = start
+ else:
+ self.module.debug("No start time specified. Using default.")
+ start = datetime.datetime.utcnow()
+
+ # Use user UTC offset
+ self.module.debug("Making RPC call to 'getTimeZoneSetting'")
+ accountresp = (json.loads(self.rpc("getTimeZoneSetting", {})))
+
+ if accountresp["status"] == 200:
+ self.module.debug("RPC call succeeded")
+
+ offset = accountresp["data"]["offset"]
+ offsetstart = start + datetime.timedelta(0, offset)
+ else:
+ self.fail(
+ msg="Error: Unable to retrieve timezone offset")
+
+ offsetend = offsetstart + datetime.timedelta(0, int(duration)*60)
+
+ h = {"hostId": self.info["id"],
+ "type": 1,
+ "year": offsetstart.year,
+ "month": offsetstart.month - 1,
+ "day": offsetstart.day,
+ "hour": offsetstart.hour,
+ "minute": offsetstart.minute,
+ "endYear": offsetend.year,
+ "endMonth": offsetend.month - 1,
+ "endDay": offsetend.day,
+ "endHour": offsetend.hour,
+ "endMinute": offsetend.minute}
+
+ self.module.debug("Making RPC call to 'setHostSDT'")
+ resp = (json.loads(self.rpc("setHostSDT", h)))
+
+ if resp["status"] == 200:
+ self.module.debug("RPC call succeeded")
+ return resp["data"]
+ else:
+ self.module.debug("RPC call failed")
+ self.fail(msg=resp["errmsg"])
+ else:
+ self.fail(msg="Error: Host doesn't exit.")
+
+ def site_facts(self):
+ """Output current properties information for the Host"""
+ self.module.debug("Running Host.site_facts...")
+
+ if self.info:
+ self.module.debug("Host exists")
+ props = self.get_properties()
+
+ self.output_info(props)
+ else:
+ self.fail(msg="Error: Host doesn't exit.")
+
+ def _build_host_hash(self,
+ hostname,
+ displayname,
+ collector,
+ description,
+ groups,
+ properties,
+ alertenable):
+ """Return a property formated hash for the
+ creation of a host using the rpc function"""
+ self.module.debug("Running Host._build_host_hash...")
+
+ h = {}
+ h["hostName"] = hostname
+ h["displayedAs"] = displayname
+ h["alertEnable"] = alertenable
+
+ if collector:
+ self.module.debug("Collector property exists")
+ h["agentId"] = collector["id"]
+ else:
+ self.fail(
+ msg="Error: No collector found. Unable to build host hash.")
+
+ if description:
+ h["description"] = description
+
+        if groups is not None and groups != []:
+ self.module.debug("Group property exists")
+ groupids = ""
+
+ for group in groups:
+ groupids = groupids + str(self.create_group(group)) + ","
+
+ h["hostGroupIds"] = groupids.rstrip(',')
+
+        if properties is not None and properties != {}:
+ self.module.debug("Properties hash exists")
+ propnum = 0
+ for key, value in properties.iteritems():
+ h["propName" + str(propnum)] = key
+ h["propValue" + str(propnum)] = value
+ propnum = propnum + 1
+
+ return h
+
+ def _verify_property(self, propname):
+ """Check with LogicMonitor server to
+ verify property is unchanged"""
+ self.module.debug("Running Host._verify_property...")
+
+ if self.info:
+ self.module.debug("Host is registered")
+ if propname not in self.properties:
+ self.module.debug("Property " + propname + " does not exist")
+ return False
+ else:
+ self.module.debug("Property " + propname + " exists")
+ h = {"hostId": self.info["id"],
+ "propName0": propname,
+ "propValue0": self.properties[propname]}
+
+ self.module.debug("Making RCP call to 'verifyProperties'")
+ resp = json.loads(self.rpc('verifyProperties', h))
+
+ if resp["status"] == 200:
+ self.module.debug("RPC call succeeded")
+ return resp["data"]["match"]
+ else:
+ self.fail(
+ msg="Error: unable to get verification " +
+ "from server.\n%s" % resp["errmsg"])
+ else:
+ self.fail(
+ msg="Error: Host doesn't exist. Unable to verify properties")
+
+ def _compare_groups(self, hostresp):
+ """Function to compare the host's current
+ groups against provided groups"""
+ self.module.debug("Running Host._compare_groups")
+
+ g = []
+ fullpathinids = hostresp["fullPathInIds"]
+ self.module.debug("Building list of groups")
+ for path in fullpathinids:
+ if path != []:
+ h = {'hostGroupId': path[-1]}
+
+ hgresp = json.loads(self.rpc("getHostGroup", h))
+
+ if (hgresp["status"] == 200 and
+ hgresp["data"]["appliesTo"] == ""):
+
+ g.append(path[-1])
+
+ if self.groups is not None:
+ self.module.debug("Comparing group lists")
+ for group in self.groups:
+ groupjson = self.get_group(group)
+
+ if groupjson is None:
+ self.module.debug("Group mismatch. No result.")
+ return True
+ elif groupjson['id'] not in g:
+ self.module.debug("Group mismatch. ID doesn't exist.")
+ return True
+ else:
+ g.remove(groupjson['id'])
+
+ if g != []:
+ self.module.debug("Group mismatch. New ID exists.")
+ return True
+ self.module.debug("Groups match")
+
+ def _compare_props(self, propresp, ignore):
+ """Function to compare the host's current
+ properties against provided properties"""
+ self.module.debug("Running Host._compare_props...")
+ p = {}
+
+ self.module.debug("Creating list of properties")
+ for prop in propresp:
+ if prop["name"] not in ignore:
+ if ("*******" in prop["value"] and
+ self._verify_property(prop["name"])):
+ p[prop["name"]] = self.properties[prop["name"]]
+ else:
+ p[prop["name"]] = prop["value"]
+
+ self.module.debug("Comparing properties")
+ # Iterate provided properties and compare to received properties
+ for prop in self.properties:
+ if (prop not in p or
+ p[prop] != self.properties[prop]):
+ self.module.debug("Properties mismatch")
+ return True
+ self.module.debug("Properties match")
+
+ def _strip_groups(self, groups):
+ """Function to strip whitespace from group list.
+ This function provides the user some flexibility when
+ formatting group arguments """
+ self.module.debug("Running Host._strip_groups...")
+ return map(lambda x: x.strip(), groups)
+
+
+class Datasource(LogicMonitor):
+
+ def __init__(self, params, module=None):
+ """Initializor for the LogicMonitor Datasource object"""
+ self.change = False
+ self.params = params
+
+ LogicMonitor.__init__(self, module, **params)
+ self.module.debug("Instantiating Datasource object")
+
+ self.id = self.params["id"]
+ self.starttime = self.params["starttime"]
+ self.duration = self.params["duration"]
+
+ def sdt(self):
+ """Create a scheduled down time
+ (maintenance window) for this host"""
+ self.module.debug("Running Datasource.sdt...")
+
+ self.module.debug("System changed")
+ self.change = True
+
+ if self.check_mode:
+ self.exit(changed=True)
+
+ duration = self.duration
+ starttime = self.starttime
+ offsetstart = starttime
+
+ if starttime:
+ self.module.debug("Start time specified")
+ start = datetime.datetime.strptime(starttime, '%Y-%m-%d %H:%M')
+ offsetstart = start
+ else:
+ self.module.debug("No start time specified. Using default.")
+ start = datetime.datetime.utcnow()
+
+ # Use user UTC offset
+ self.module.debug("Making RPC call to 'getTimeZoneSetting'")
+ accountresp = json.loads(self.rpc("getTimeZoneSetting", {}))
+
+ if accountresp["status"] == 200:
+ self.module.debug("RPC call succeeded")
+
+ offset = accountresp["data"]["offset"]
+ offsetstart = start + datetime.timedelta(0, offset)
+ else:
+ self.fail(msg="Error: Unable to retrieve timezone offset")
+
+ offsetend = offsetstart + datetime.timedelta(0, int(duration)*60)
+
+ h = {"hostDataSourceId": self.id,
+ "type": 1,
+ "notifyCC": True,
+ "year": offsetstart.year,
+ "month": offsetstart.month-1,
+ "day": offsetstart.day,
+ "hour": offsetstart.hour,
+ "minute": offsetstart.minute,
+ "endYear": offsetend.year,
+ "endMonth": offsetend.month-1,
+ "endDay": offsetend.day,
+ "endHour": offsetend.hour,
+ "endMinute": offsetend.minute}
+
+ self.module.debug("Making RPC call to 'setHostDataSourceSDT'")
+ resp = json.loads(self.rpc("setHostDataSourceSDT", h))
+
+ if resp["status"] == 200:
+ self.module.debug("RPC call succeeded")
+ return resp["data"]
+ else:
+ self.module.debug("RPC call failed")
+ self.fail(msg=resp["errmsg"])
+
+
+class Hostgroup(LogicMonitor):
+
+ def __init__(self, params, module=None):
+ """Initializor for the LogicMonitor host object"""
+ self.change = False
+ self.params = params
+
+ LogicMonitor.__init__(self, module, **self.params)
+ self.module.debug("Instantiating Hostgroup object")
+
+ self.fullpath = self.params["fullpath"]
+ self.info = self.get_group(self.fullpath)
+ self.properties = self.params["properties"]
+ self.description = self.params["description"]
+ self.starttime = self.params["starttime"]
+ self.duration = self.params["duration"]
+ self.alertenable = self.params["alertenable"]
+
+ def create(self):
+ """Wrapper for self.update()"""
+ self.module.debug("Running Hostgroup.create...")
+ self.update()
+
+ def get_properties(self, final=False):
+ """Returns a hash of the properties
+ associated with this LogicMonitor host"""
+ self.module.debug("Running Hostgroup.get_properties...")
+
+ if self.info:
+ self.module.debug("Group found")
+
+ self.module.debug("Making RPC call to 'getHostGroupProperties'")
+ properties_json = json.loads(self.rpc(
+ "getHostGroupProperties",
+ {'hostGroupId': self.info["id"],
+ "finalResult": final}))
+
+ if properties_json["status"] == 200:
+ self.module.debug("RPC call succeeded")
+ return properties_json["data"]
+ else:
+ self.module.debug("RPC call failed")
+ self.fail(msg=properties_json["status"])
+ else:
+ self.module.debug("Group not found")
+ return None
+
+ def set_properties(self, propertyhash):
+ """Update the host to have the properties
+ contained in the property hash"""
+ self.module.debug("Running Hostgroup.set_properties")
+
+ self.module.debug("System changed")
+ self.change = True
+
+ if self.check_mode:
+ self.exit(changed=True)
+
+ self.module.debug("Assigning property has to host object")
+ self.properties = propertyhash
+
+ def add(self):
+ """Idempotent function to ensure that the host
+ group exists in your LogicMonitor account"""
+ self.module.debug("Running Hostgroup.add")
+
+ if self.info is None:
+ self.module.debug("Group doesn't exist. Creating.")
+ self.module.debug("System changed")
+ self.change = True
+
+ if self.check_mode:
+ self.exit(changed=True)
+
+ self.create_group(self.fullpath)
+ self.info = self.get_group(self.fullpath)
+
+ self.module.debug("Group created")
+ return self.info
+ else:
+ self.module.debug("Group already exists")
+
+ def update(self):
+ """Idempotent function to ensure the host group settings
+ (alertenable, properties, etc) in the
+ LogicMonitor account match the current object."""
+ self.module.debug("Running Hostgroup.update")
+
+ if self.info:
+ if self.is_changed():
+ self.module.debug("System changed")
+ self.change = True
+
+ if self.check_mode:
+ self.exit(changed=True)
+
+ h = self._build_host_group_hash(
+ self.fullpath,
+ self.description,
+ self.properties,
+ self.alertenable)
+ h["opType"] = "replace"
+
+ if self.fullpath != "/":
+ h["id"] = self.info["id"]
+
+ self.module.debug("Making RPC call to 'updateHostGroup'")
+ resp = json.loads(self.rpc("updateHostGroup", h))
+
+ if resp["status"] == 200:
+ self.module.debug("RPC call succeeded")
+ return resp["data"]
+ else:
+ self.module.debug("RPC call failed")
+ self.fail(msg="Error: Unable to update the " +
+ "host.\n" + resp["errmsg"])
+ else:
+ self.module.debug(
+ "Group properties match supplied properties. " +
+ "No changes to make"
+ )
+ return self.info
+ else:
+ self.module.debug("Group doesn't exist. Creating.")
+
+ self.module.debug("System changed")
+ self.change = True
+
+ if self.check_mode:
+ self.exit(changed=True)
+
+ return self.add()
+
+ def remove(self):
+ """Idempotent function to ensure the host group
+ does not exist in your LogicMonitor account"""
+ self.module.debug("Running Hostgroup.remove...")
+
+ if self.info:
+ self.module.debug("Group exists")
+ self.module.debug("System changed")
+ self.change = True
+
+ if self.check_mode:
+ self.exit(changed=True)
+
+ self.module.debug("Making RPC call to 'deleteHostGroup'")
+ resp = json.loads(self.rpc("deleteHostGroup",
+ {"hgId": self.info["id"]}))
+
+ if resp["status"] == 200:
+ self.module.debug(resp)
+ self.module.debug("RPC call succeeded")
+ return resp
+ elif resp["errmsg"] == "No such group":
+ self.module.debug("Group doesn't exist")
+ else:
+ self.module.debug("RPC call failed")
+ self.module.debug(resp)
+ self.fail(msg=resp["errmsg"])
+ else:
+ self.module.debug("Group doesn't exist")
+
+ def is_changed(self):
+ """Return true if the host doesn't match
+ the LogicMonitor account"""
+ self.module.debug("Running Hostgroup.is_changed...")
+
+ ignore = []
+ group = self.get_group(self.fullpath)
+ properties = self.get_properties()
+
+ if properties is not None and group is not None:
+ self.module.debug("Comparing simple group properties")
+ if (group["alertEnable"] != self.alertenable or
+ group["description"] != self.description):
+
+ return True
+
+ p = {}
+
+ self.module.debug("Creating list of properties")
+ for prop in properties:
+ if prop["name"] not in ignore:
+ if ("*******" in prop["value"] and
+ self._verify_property(prop["name"])):
+
+ p[prop["name"]] = (
+ self.properties[prop["name"]])
+ else:
+ p[prop["name"]] = prop["value"]
+
+ self.module.debug("Comparing properties")
+ if set(p) != set(self.properties):
+ return True
+ else:
+ self.module.debug("No property information received")
+ return False
+
+    def sdt(self):
+        """Create a scheduled down time
+        (maintenance window) for this host group"""
+ self.module.debug("Running Hostgroup.sdt")
+
+ self.module.debug("System changed")
+ self.change = True
+
+ if self.check_mode:
+ self.exit(changed=True)
+
+ duration = self.duration
+ starttime = self.starttime
+ offset = starttime
+
+ if starttime:
+ self.module.debug("Start time specified")
+ start = datetime.datetime.strptime(starttime, '%Y-%m-%d %H:%M')
+ offsetstart = start
+ else:
+ self.module.debug("No start time specified. Using default.")
+ start = datetime.datetime.utcnow()
+
+ # Use user UTC offset
+ self.module.debug("Making RPC call to 'getTimeZoneSetting'")
+ accountresp = json.loads(self.rpc("getTimeZoneSetting", {}))
+
+ if accountresp["status"] == 200:
+ self.module.debug("RPC call succeeded")
+
+ offset = accountresp["data"]["offset"]
+ offsetstart = start + datetime.timedelta(0, offset)
+ else:
+ self.fail(
+ msg="Error: Unable to retrieve timezone offset")
+
+ offsetend = offsetstart + datetime.timedelta(0, int(duration)*60)
+
+ h = {"hostGroupId": self.info["id"],
+ "type": 1,
+ "year": offsetstart.year,
+ "month": offsetstart.month-1,
+ "day": offsetstart.day,
+ "hour": offsetstart.hour,
+ "minute": offsetstart.minute,
+ "endYear": offsetend.year,
+ "endMonth": offsetend.month-1,
+ "endDay": offsetend.day,
+ "endHour": offsetend.hour,
+ "endMinute": offsetend.minute}
+
+ self.module.debug("Making RPC call to setHostGroupSDT")
+ resp = json.loads(self.rpc("setHostGroupSDT", h))
+
+ if resp["status"] == 200:
+ self.module.debug("RPC call succeeded")
+ return resp["data"]
+ else:
+ self.module.debug("RPC call failed")
+ self.fail(msg=resp["errmsg"])
+
+ def site_facts(self):
+ """Output current properties information for the Hostgroup"""
+ self.module.debug("Running Hostgroup.site_facts...")
+
+ if self.info:
+ self.module.debug("Group exists")
+ props = self.get_properties(True)
+
+ self.output_info(props)
+ else:
+ self.fail(msg="Error: Group doesn't exit.")
+
+ def _build_host_group_hash(self,
+ fullpath,
+ description,
+ properties,
+ alertenable):
+ """Return a property formated hash for the
+ creation of a hostgroup using the rpc function"""
+ self.module.debug("Running Hostgroup._build_host_hash")
+
+ h = {}
+ h["alertEnable"] = alertenable
+
+ if fullpath == "/":
+ self.module.debug("Group is root")
+ h["id"] = 1
+ else:
+ self.module.debug("Determining group path")
+ parentpath, name = fullpath.rsplit('/', 1)
+ parent = self.get_group(parentpath)
+
+ h["name"] = name
+
+ if parent:
+ self.module.debug("Parent group " +
+ str(parent["id"]) + " found.")
+ h["parentID"] = parent["id"]
+ else:
+ self.module.debug("No parent group found. Using root.")
+ h["parentID"] = 1
+
+ if description:
+ self.module.debug("Description property exists")
+ h["description"] = description
+
+ if properties != {}:
+ self.module.debug("Properties hash exists")
+ propnum = 0
+ for key, value in properties.iteritems():
+ h["propName" + str(propnum)] = key
+ h["propValue" + str(propnum)] = value
+ propnum = propnum + 1
+
+ return h
+
+ def _verify_property(self, propname):
+ """Check with LogicMonitor server
+ to verify property is unchanged"""
+ self.module.debug("Running Hostgroup._verify_property")
+
+ if self.info:
+ self.module.debug("Group exists")
+ if propname not in self.properties:
+ self.module.debug("Property " + propname + " does not exist")
+ return False
+ else:
+ self.module.debug("Property " + propname + " exists")
+ h = {"hostGroupId": self.info["id"],
+ "propName0": propname,
+ "propValue0": self.properties[propname]}
+
+ self.module.debug("Making RCP call to 'verifyProperties'")
+ resp = json.loads(self.rpc('verifyProperties', h))
+
+ if resp["status"] == 200:
+ self.module.debug("RPC call succeeded")
+ return resp["data"]["match"]
+ else:
+ self.fail(
+ msg="Error: unable to get verification " +
+ "from server.\n%s" % resp["errmsg"])
+ else:
+ self.fail(
+ msg="Error: Group doesn't exist. Unable to verify properties")
+
+
+def selector(module):
+ """Figure out which object and which actions
+ to take given the right parameters"""
+
+ if module.params["target"] == "collector":
+ target = Collector(module.params, module)
+ elif module.params["target"] == "host":
+ # Make sure required parameter collector is specified
+ if ((module.params["action"] == "add" or
+ module.params["displayname"] is None) and
+ module.params["collector"] is None):
+ module.fail_json(
+ msg="Parameter 'collector' required.")
+
+ target = Host(module.params, module)
+ elif module.params["target"] == "datasource":
+ # Validate target specific required parameters
+ if module.params["id"] is not None:
+ # make sure a supported action was specified
+ if module.params["action"] == "sdt":
+ target = Datasource(module.params, module)
+ else:
+ errmsg = ("Error: Unexpected action \"" +
+ module.params["action"] + "\" was specified.")
+ module.fail_json(msg=errmsg)
+
+ elif module.params["target"] == "hostgroup":
+ # Validate target specific required parameters
+ if module.params["fullpath"] is not None:
+ target = Hostgroup(module.params, module)
+ else:
+ module.fail_json(
+ msg="Parameter 'fullpath' required for target 'hostgroup'")
+ else:
+ module.fail_json(
+ msg="Error: Unexpected target \"" + module.params["target"] +
+ "\" was specified.")
+
+ if module.params["action"].lower() == "add":
+ action = target.create
+ elif module.params["action"].lower() == "remove":
+ action = target.remove
+ elif module.params["action"].lower() == "sdt":
+ action = target.sdt
+ elif module.params["action"].lower() == "update":
+ action = target.update
+ else:
+ errmsg = ("Error: Unexpected action \"" + module.params["action"] +
+ "\" was specified.")
+ module.fail_json(msg=errmsg)
+
+ action()
+ module.exit_json(changed=target.change)
+
+
+def main():
+ TARGETS = [
+ "collector",
+ "host",
+ "datasource",
+ "hostgroup"]
+
+ ACTIONS = [
+ "add",
+ "remove",
+ "sdt",
+ "update"]
+
+ module = AnsibleModule(
+ argument_spec=dict(
+ target=dict(required=True, default=None, choices=TARGETS),
+ action=dict(required=True, default=None, choices=ACTIONS),
+ company=dict(required=True, default=None),
+ user=dict(required=True, default=None),
+ password=dict(required=True, default=None, no_log=True),
+
+ collector=dict(required=False, default=None),
+ hostname=dict(required=False, default=None),
+ displayname=dict(required=False, default=None),
+ id=dict(required=False, default=None),
+ description=dict(required=False, default=""),
+ fullpath=dict(required=False, default=None),
+ starttime=dict(required=False, default=None),
+ duration=dict(required=False, default=30),
+ properties=dict(required=False, default={}, type="dict"),
+ groups=dict(required=False, default=[], type="list"),
+ alertenable=dict(required=False, default="true", choices=BOOLEANS)
+ ),
+ supports_check_mode=True
+ )
+
+ if HAS_LIB_JSON is not True:
+ module.fail_json(msg="Unable to load JSON library")
+
+ selector(module)
+
+
+from ansible.module_utils.basic import *
+from ansible.module_utils.urls import *
+from ansible.module_utils.urls import open_url
+
+
+if __name__ == "__main__":
+ main()
diff --git a/lib/ansible/modules/extras/monitoring/logicmonitor_facts.py b/lib/ansible/modules/extras/monitoring/logicmonitor_facts.py
new file mode 100644
index 0000000000..cc91ca6122
--- /dev/null
+++ b/lib/ansible/modules/extras/monitoring/logicmonitor_facts.py
@@ -0,0 +1,632 @@
+#!/usr/bin/python
+
+"""LogicMonitor Ansible module for managing Collectors, Hosts and Hostgroups
+ Copyright (C) 2015 LogicMonitor
+
+ This program is free software; you can redistribute it and/or modify
+ it under the terms of the GNU General Public License as published by
+ the Free Software Foundation; either version 3 of the License, or
+ (at your option) any later version.
+
+ This program is distributed in the hope that it will be useful,
+ but WITHOUT ANY WARRANTY; without even the implied warranty of
+ MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+ GNU General Public License for more details.
+
+ You should have received a copy of the GNU General Public License
+ along with this program; if not, write to the Free Software Foundation,
+ Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA"""
+
+
+import socket
+import sys
+import types
+import urllib
+
+HAS_LIB_JSON = True
+try:
+ import json
+ # Detect the python-json library which is incompatible
+ # Look for simplejson if that's the case
+ try:
+ if (
+ not isinstance(json.loads, types.FunctionType) or
+ not isinstance(json.dumps, types.FunctionType)
+ ):
+ raise ImportError
+ except AttributeError:
+ raise ImportError
+except ImportError:
+ try:
+ import simplejson as json
+ except ImportError:
+ print(
+ '\n{"msg": "Error: ansible requires the stdlib json or ' +
+ 'simplejson module, neither was found!", "failed": true}'
+ )
+ HAS_LIB_JSON = False
+ except SyntaxError:
+ print(
+ '\n{"msg": "SyntaxError: probably due to installed simplejson ' +
+ 'being for a different python version", "failed": true}'
+ )
+ HAS_LIB_JSON = False
+
+
+DOCUMENTATION = '''
+---
+module: logicmonitor_facts
+short_description: Collect facts about LogicMonitor objects
+description:
+ - LogicMonitor is a hosted, full-stack, infrastructure monitoring platform.
+  - This module collects facts about hosts and host groups within your LogicMonitor account.
+version_added: "2.2"
+author: Ethan Culler-Mayeno, Jeff Wozniak
+notes:
+ - You must have an existing LogicMonitor account for this module to function.
+requirements: ["An existing LogicMonitor account", "Linux"]
+options:
+ target:
+ description:
+ - The LogicMonitor object you wish to manage.
+ required: true
+ default: null
+ choices: ['host', 'hostgroup']
+ company:
+ description:
+ - The LogicMonitor account company name. If you would log in to your account at "superheroes.logicmonitor.com" you would use "superheroes"
+ required: true
+ default: null
+ user:
+ description:
+ - A LogicMonitor user name. The module will authenticate and perform actions on behalf of this user
+ required: true
+ default: null
+ password:
+ description:
+      - The password for the chosen LogicMonitor User
+ required: true
+ default: null
+ collector:
+ description:
+ - The fully qualified domain name of a collector in your LogicMonitor account.
+ - This is optional for querying a LogicMonitor host when a displayname is specified
+ - This is required for querying a LogicMonitor host when a displayname is not specified
+ required: false
+ default: null
+ hostname:
+ description:
+ - The hostname of a host in your LogicMonitor account, or the desired hostname of a device to add into monitoring.
+ - Required for managing hosts (target=host)
+ required: false
+ default: 'hostname -f'
+ displayname:
+ description:
+ - The display name of a host in your LogicMonitor account or the desired display name of a device to add into monitoring.
+ required: false
+ default: 'hostname -f'
+ fullpath:
+ description:
+ - The fullpath of the hostgroup object you would like to manage
+ - Recommend running on a single ansible host
+ - Required for management of LogicMonitor host groups (target=hostgroup)
+ required: false
+ default: null
+...
+'''
+
+EXAMPLES = '''
+#example of querying a list of hosts
+```
+---
+- hosts: hosts
+ user: root
+ vars:
+ company: 'yourcompany'
+ user: 'Luigi'
+ password: 'ImaLuigi,number1!'
+ tasks:
+ - name: query a list of hosts
+ # All tasks should use local_action
+ local_action:
+ logicmonitor_facts:
+ target: host
+ company: '{{ company }}'
+ user: '{{ user }}'
+ password: '{{ password }}'
+```
+
+#example of querying a hostgroup
+```
+---
+- hosts: somemachine.superheroes.com
+ user: root
+ vars:
+ company: 'yourcompany'
+ user: 'mario'
+ password: 'itsame.Mario!'
+ tasks:
+ - name: query a host group
+ # All tasks should use local_action
+ local_action:
+ logicmonitor_facts:
+ target: hostgroup
+ fullpath: '/servers/production'
+ company: '{{ company }}'
+ user: '{{ user }}'
+ password: '{{ password }}'
+```
+'''
+
+
+RETURN = '''
+---
+ ansible_facts:
+ description: LogicMonitor properties set for the specified object
+ returned: success
+ type: list of dicts containing name/value pairs
+ example: >
+ {
+ "name": "dc",
+ "value": "1"
+ },
+ {
+ "name": "type",
+ "value": "prod"
+ },
+ {
+ "name": "system.categories",
+ "value": ""
+ },
+ {
+ "name": "snmp.community",
+ "value": "********"
+ }
+...
+'''
+
+
+class LogicMonitor(object):
+
+ def __init__(self, module, **params):
+ self.__version__ = "1.0-python"
+ self.module = module
+ self.module.debug("Instantiating LogicMonitor object")
+
+ self.check_mode = False
+ self.company = params["company"]
+ self.user = params["user"]
+ self.password = params["password"]
+ self.fqdn = socket.getfqdn()
+ self.lm_url = "logicmonitor.com/santaba"
+ self.__version__ = self.__version__ + "-ansible-module"
+
+ def rpc(self, action, params):
+ """Make a call to the LogicMonitor RPC library
+ and return the response"""
+ self.module.debug("Running LogicMonitor.rpc")
+
+ param_str = urllib.urlencode(params)
+ creds = urllib.urlencode(
+ {"c": self.company,
+ "u": self.user,
+ "p": self.password})
+
+ if param_str:
+ param_str = param_str + "&"
+
+ param_str = param_str + creds
+
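+        # The assembled request ends up like:
+        #   https://acme.logicmonitor.com/santaba/rpc/getHost?displayName=web1&c=acme&u=user&p=pass
+        # (company "acme" and the query values here are illustrative)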
+ try:
+ url = ("https://" + self.company + "." + self.lm_url +
+ "/rpc/" + action + "?" + param_str)
+
+ # Set custom LogicMonitor header with version
+ headers = {"X-LM-User-Agent": self.__version__}
+
+ # Set headers
+ f = open_url(url, headers=headers)
+
+ raw = f.read()
+ resp = json.loads(raw)
+ if resp["status"] == 403:
+ self.module.debug("Authentication failed.")
+ self.fail(msg="Error: " + resp["errmsg"])
+ else:
+ return raw
+ except IOError:
+ self.fail(msg="Error: Unknown exception making RPC call")
+
+ def get_collectors(self):
+ """Returns a JSON object containing a list of
+ LogicMonitor collectors"""
+ self.module.debug("Running LogicMonitor.get_collectors...")
+
+ self.module.debug("Making RPC call to 'getAgents'")
+ resp = self.rpc("getAgents", {})
+ resp_json = json.loads(resp)
+
+ if resp_json["status"] is 200:
+ self.module.debug("RPC call succeeded")
+ return resp_json["data"]
+ else:
+ self.fail(msg=resp)
+
+ def get_host_by_hostname(self, hostname, collector):
+ """Returns a host object for the host matching the
+ specified hostname"""
+ self.module.debug("Running LogicMonitor.get_host_by_hostname...")
+
+ self.module.debug("Looking for hostname " + hostname)
+ self.module.debug("Making RPC call to 'getHosts'")
+ hostlist_json = json.loads(self.rpc("getHosts", {"hostGroupId": 1}))
+
+ if collector:
+ if hostlist_json["status"] == 200:
+ self.module.debug("RPC call succeeded")
+
+ hosts = hostlist_json["data"]["hosts"]
+
+ self.module.debug(
+ "Looking for host matching: hostname " + hostname +
+ " and collector " + str(collector["id"]))
+
+ for host in hosts:
+ if (host["hostName"] == hostname and
+ host["agentId"] == collector["id"]):
+
+ self.module.debug("Host match found")
+ return host
+ self.module.debug("No host match found")
+ return None
+ else:
+ self.module.debug("RPC call failed")
+ self.module.debug(hostlist_json)
+ else:
+ self.module.debug("No collector specified")
+ return None
+
+ def get_host_by_displayname(self, displayname):
+ """Returns a host object for the host matching the
+ specified display name"""
+ self.module.debug("Running LogicMonitor.get_host_by_displayname...")
+
+ self.module.debug("Looking for displayname " + displayname)
+ self.module.debug("Making RPC call to 'getHost'")
+ host_json = (json.loads(self.rpc("getHost",
+ {"displayName": displayname})))
+
+ if host_json["status"] == 200:
+ self.module.debug("RPC call succeeded")
+ return host_json["data"]
+ else:
+ self.module.debug("RPC call failed")
+ self.module.debug(host_json)
+ return None
+
+ def get_collector_by_description(self, description):
+ """Returns a JSON collector object for the collector
+ matching the specified FQDN (description)"""
+ self.module.debug(
+ "Running LogicMonitor.get_collector_by_description..."
+ )
+
+ collector_list = self.get_collectors()
+ if collector_list is not None:
+ self.module.debug("Looking for collector with description " +
+ description)
+ for collector in collector_list:
+ if collector["description"] == description:
+ self.module.debug("Collector match found")
+ return collector
+ self.module.debug("No collector match found")
+ return None
+
+ def get_group(self, fullpath):
+ """Returns a JSON group object for the group matching the
+ specified path"""
+ self.module.debug("Running LogicMonitor.get_group...")
+
+ self.module.debug("Making RPC call to getHostGroups")
+ resp = json.loads(self.rpc("getHostGroups", {}))
+
+ if resp["status"] == 200:
+ self.module.debug("RPC called succeeded")
+ groups = resp["data"]
+
+ self.module.debug("Looking for group matching " + fullpath)
+ for group in groups:
+ if group["fullPath"] == fullpath.lstrip('/'):
+ self.module.debug("Group match found")
+ return group
+
+ self.module.debug("No group match found")
+ return None
+ else:
+ self.module.debug("RPC call failed")
+ self.module.debug(resp)
+
+ return None
+
+ def create_group(self, fullpath):
+ """Recursively create a path of host groups.
+ Returns the id of the newly created hostgroup"""
+ self.module.debug("Running LogicMonitor.create_group...")
+
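+        # e.g. create_group("/a/b/c") first ensures "/a" and then "/a/b" exist
+        # (recursing through the parent path) before creating "c", and returns
+        # the id of the deepest group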
+ res = self.get_group(fullpath)
+ if res:
+ self.module.debug("Group " + fullpath + " exists.")
+ return res["id"]
+
+ if fullpath == "/":
+ self.module.debug("Specified group is root. Doing nothing.")
+ return 1
+ else:
+ self.module.debug("Creating group named " + fullpath)
+ self.module.debug("System changed")
+ self.change = True
+
+ if self.check_mode:
+ self.exit(changed=True)
+
+ parentpath, name = fullpath.rsplit('/', 1)
+ parentgroup = self.get_group(parentpath)
+
+ parentid = 1
+
+ if parentpath == "":
+ parentid = 1
+ elif parentgroup:
+ parentid = parentgroup["id"]
+ else:
+ parentid = self.create_group(parentpath)
+
+ h = None
+
+ # Determine if we're creating a group from host or hostgroup class
+ if hasattr(self, '_build_host_group_hash'):
+ h = self._build_host_group_hash(
+ fullpath,
+ self.description,
+ self.properties,
+ self.alertenable)
+ h["name"] = name
+ h["parentId"] = parentid
+ else:
+ h = {"name": name,
+ "parentId": parentid,
+ "alertEnable": True,
+ "description": ""}
+
+ self.module.debug("Making RPC call to 'addHostGroup'")
+ resp = json.loads(
+ self.rpc("addHostGroup", h))
+
+ if resp["status"] == 200:
+ self.module.debug("RPC call succeeded")
+ return resp["data"]["id"]
+ elif resp["errmsg"] == "The record already exists":
+ self.module.debug("The hostgroup already exists")
+ group = self.get_group(fullpath)
+ return group["id"]
+ else:
+ self.module.debug("RPC call failed")
+ self.fail(
+ msg="Error: unable to create new hostgroup \"" + name +
+ "\".\n" + resp["errmsg"])
+
+ def fail(self, msg):
+ self.module.fail_json(msg=msg, changed=self.change)
+
+ def exit(self, changed):
+ self.module.debug("Changed: " + changed)
+ self.module.exit_json(changed=changed)
+
+ def output_info(self, info):
+ self.module.debug("Registering properties as Ansible facts")
+ self.module.exit_json(changed=False, ansible_facts=info)
+
+
+class Host(LogicMonitor):
+
+ def __init__(self, params, module=None):
+ """Initializor for the LogicMonitor host object"""
+ self.change = False
+ self.params = params
+ self.collector = None
+
+ LogicMonitor.__init__(self, module, **self.params)
+ self.module.debug("Instantiating Host object")
+
+ if self.params["hostname"]:
+ self.module.debug("Hostname is " + self.params["hostname"])
+ self.hostname = self.params['hostname']
+ else:
+ self.module.debug("No hostname specified. Using " + self.fqdn)
+ self.hostname = self.fqdn
+
+ if self.params["displayname"]:
+ self.module.debug("Display name is " + self.params["displayname"])
+ self.displayname = self.params['displayname']
+ else:
+ self.module.debug("No display name specified. Using " + self.fqdn)
+ self.displayname = self.fqdn
+
+        # Attempt to find host information via display name or hostname
+ self.module.debug("Attempting to find host by displayname " +
+ self.displayname)
+ info = self.get_host_by_displayname(self.displayname)
+
+ if info is not None:
+ self.module.debug("Host found by displayname")
+            # Use the host information to grab the collector description
+            # if not provided
+            if (self.params["collector"] is None and
+                    "agentDescription" in info):
+ self.module.debug("Setting collector from host response. " +
+ "Collector " + info["agentDescription"])
+ self.params["collector"] = info["agentDescription"]
+ else:
+ self.module.debug("Host not found by displayname")
+
+ # At this point, a valid collector description is required for success
+ # Check that the description exists or fail
+ if self.params["collector"]:
+ self.module.debug("Collector specified is " +
+ self.params["collector"])
+ self.collector = (self.get_collector_by_description(
+ self.params["collector"]))
+ else:
+ self.fail(msg="No collector specified.")
+
+ # If the host wasn't found via displayname, attempt by hostname
+ if info is None:
+ self.module.debug("Attempting to find host by hostname " +
+ self.hostname)
+ info = self.get_host_by_hostname(self.hostname, self.collector)
+
+ self.info = info
+
+ def get_properties(self):
+ """Returns a hash of the properties
+ associated with this LogicMonitor host"""
+ self.module.debug("Running Host.get_properties...")
+
+ if self.info:
+ self.module.debug("Making RPC call to 'getHostProperties'")
+ properties_json = (json.loads(self.rpc("getHostProperties",
+ {'hostId': self.info["id"],
+ "filterSystemProperties": True})))
+
+ if properties_json["status"] == 200:
+ self.module.debug("RPC call succeeded")
+ return properties_json["data"]
+ else:
+ self.module.debug("Error: there was an issue retrieving the " +
+ "host properties")
+ self.module.debug(properties_json["errmsg"])
+
+ self.fail(msg=properties_json["status"])
+ else:
+ self.module.debug(
+ "Unable to find LogicMonitor host which matches " +
+ self.displayname + " (" + self.hostname + ")"
+ )
+ return None
+
+ def site_facts(self):
+ """Output current properties information for the Host"""
+ self.module.debug("Running Host.site_facts...")
+
+ if self.info:
+ self.module.debug("Host exists")
+ props = self.get_properties()
+
+ self.output_info(props)
+ else:
+ self.fail(msg="Error: Host doesn't exit.")
+
+
+class Hostgroup(LogicMonitor):
+
+ def __init__(self, params, module=None):
+ """Initializor for the LogicMonitor host object"""
+ self.change = False
+ self.params = params
+
+ LogicMonitor.__init__(self, module, **self.params)
+ self.module.debug("Instantiating Hostgroup object")
+
+ self.fullpath = self.params["fullpath"]
+ self.info = self.get_group(self.fullpath)
+
+ def get_properties(self, final=False):
+ """Returns a hash of the properties
+ associated with this LogicMonitor host"""
+ self.module.debug("Running Hostgroup.get_properties...")
+
+ if self.info:
+ self.module.debug("Group found")
+
+ self.module.debug("Making RPC call to 'getHostGroupProperties'")
+ properties_json = json.loads(self.rpc(
+ "getHostGroupProperties",
+ {'hostGroupId': self.info["id"],
+ "finalResult": final}))
+
+ if properties_json["status"] == 200:
+ self.module.debug("RPC call succeeded")
+ return properties_json["data"]
+ else:
+ self.module.debug("RPC call failed")
+ self.fail(msg=properties_json["status"])
+ else:
+ self.module.debug("Group not found")
+ return None
+
+ def site_facts(self):
+ """Output current properties information for the Hostgroup"""
+ self.module.debug("Running Hostgroup.site_facts...")
+
+ if self.info:
+ self.module.debug("Group exists")
+ props = self.get_properties(True)
+
+ self.output_info(props)
+ else:
+ self.fail(msg="Error: Group doesn't exit.")
+
+
+def selector(module):
+ """Figure out which object and which actions
+ to take given the right parameters"""
+
+ if module.params["target"] == "host":
+ target = Host(module.params, module)
+ target.site_facts()
+ elif module.params["target"] == "hostgroup":
+ # Validate target specific required parameters
+ if module.params["fullpath"] is not None:
+ target = Hostgroup(module.params, module)
+ target.site_facts()
+ else:
+ module.fail_json(
+ msg="Parameter 'fullpath' required for target 'hostgroup'")
+ else:
+ module.fail_json(
+ msg="Error: Unexpected target \"" + module.params["target"] +
+ "\" was specified.")
+
+
+def main():
+ TARGETS = [
+ "host",
+ "hostgroup"]
+
+ module = AnsibleModule(
+ argument_spec=dict(
+ target=dict(required=True, default=None, choices=TARGETS),
+ company=dict(required=True, default=None),
+ user=dict(required=True, default=None),
+ password=dict(required=True, default=None, no_log=True),
+
+            collector=dict(required=False, default=None),
+ hostname=dict(required=False, default=None),
+ displayname=dict(required=False, default=None),
+ fullpath=dict(required=False, default=None)
+ ),
+ supports_check_mode=True
+ )
+
+ if HAS_LIB_JSON is not True:
+ module.fail_json(msg="Unable to load JSON library")
+
+ selector(module)
+
+from ansible.module_utils.basic import *
+from ansible.module_utils.urls import *
+from ansible.module_utils.urls import open_url
+
+if __name__ == "__main__":
+ main()
diff --git a/lib/ansible/modules/extras/monitoring/monit.py b/lib/ansible/modules/extras/monitoring/monit.py
new file mode 100644
index 0000000000..2983d5e49a
--- /dev/null
+++ b/lib/ansible/modules/extras/monitoring/monit.py
@@ -0,0 +1,185 @@
+#!/usr/bin/python
+# -*- coding: utf-8 -*-
+
+# (c) 2013, Darryl Stoflet <stoflet@gmail.com>
+#
+# This file is part of Ansible
+#
+# Ansible is free software: you can redistribute it and/or modify
+# it under the terms of the GNU General Public License as published by
+# the Free Software Foundation, either version 3 of the License, or
+# (at your option) any later version.
+#
+# Ansible is distributed in the hope that it will be useful,
+# but WITHOUT ANY WARRANTY; without even the implied warranty of
+# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+# GNU General Public License for more details.
+#
+# You should have received a copy of the GNU General Public License
+# along with Ansible. If not, see <http://www.gnu.org/licenses/>.
+#
+import time
+
+DOCUMENTATION = '''
+---
+module: monit
+short_description: Manage the state of a program monitored via Monit
+description:
+ - Manage the state of a program monitored via I(Monit)
+version_added: "1.2"
+options:
+ name:
+ description:
+ - The name of the I(monit) program/process to manage
+ required: true
+ default: null
+ state:
+ description:
+ - The state of service
+ required: true
+ default: null
+ choices: [ "present", "started", "stopped", "restarted", "monitored", "unmonitored", "reloaded" ]
+ timeout:
+ description:
+ - If there are pending actions for the service monitored by monit, then Ansible will check
+        for up to this many seconds to verify that the requested action has been performed.
+ Ansible will sleep for five seconds between each check.
+ required: false
+ default: 300
+ version_added: "2.1"
+requirements: [ ]
+author: "Darryl Stoflet (@dstoflet)"
+'''
+
+EXAMPLES = '''
+# Manage the state of program "httpd" to be in "started" state.
+- monit: name=httpd state=started
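+
+# Illustrative: also wait for any pending Monit actions to settle (up to
+# 120 seconds) before ensuring "httpd" is monitored.
+- monit: name=httpd state=monitored timeout=120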
+'''
+
+def main():
+ arg_spec = dict(
+ name=dict(required=True),
+ timeout=dict(default=300, type='int'),
+ state=dict(required=True, choices=['present', 'started', 'restarted', 'stopped', 'monitored', 'unmonitored', 'reloaded'])
+ )
+
+ module = AnsibleModule(argument_spec=arg_spec, supports_check_mode=True)
+
+ name = module.params['name']
+ state = module.params['state']
+ timeout = module.params['timeout']
+
+ MONIT = module.get_bin_path('monit', True)
+
+ def status():
+ """Return the status of the process in monit, or the empty string if not present."""
+ rc, out, err = module.run_command('%s summary' % MONIT, check_rc=True)
+ for line in out.split('\n'):
+ # Sample output lines:
+ # Process 'name' Running
+ # Process 'name' Running - restart pending
+ parts = line.split()
+ if len(parts) > 2 and parts[0].lower() == 'process' and parts[1] == "'%s'" % name:
+ return ' '.join(parts[2:]).lower()
+ else:
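+            # for/else: reached only if no 'Process' line matched the name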
+ return ''
+
+ def run_command(command):
+ """Runs a monit command, and returns the new status."""
+ module.run_command('%s %s %s' % (MONIT, command, name), check_rc=True)
+ return status()
+
+ def wait_for_monit_to_stop_pending():
+ """Fails this run if there is no status or it's pending/initalizing for timeout"""
+ timeout_time = time.time() + timeout
+ sleep_time = 5
+
+ running_status = status()
+ while running_status == '' or 'pending' in running_status or 'initializing' in running_status:
+ if time.time() >= timeout_time:
+ module.fail_json(
+                    msg='waited too long for "pending" or "initializing" status to go away ({0})'.format(
+ running_status
+ ),
+ state=state
+ )
+
+ time.sleep(sleep_time)
+ running_status = status()
+
+ if state == 'reloaded':
+ if module.check_mode:
+ module.exit_json(changed=True)
+ rc, out, err = module.run_command('%s reload' % MONIT)
+ if rc != 0:
+ module.fail_json(msg='monit reload failed', stdout=out, stderr=err)
+ wait_for_monit_to_stop_pending()
+ module.exit_json(changed=True, name=name, state=state)
+
+ present = status() != ''
+
+    if not present and state != 'present':
+ module.fail_json(msg='%s process not presently configured with monit' % name, name=name, state=state)
+
+ if state == 'present':
+ if not present:
+ if module.check_mode:
+ module.exit_json(changed=True)
+ status = run_command('reload')
+ if status == '':
+ wait_for_monit_to_stop_pending()
+ module.exit_json(changed=True, name=name, state=state)
+ module.exit_json(changed=False, name=name, state=state)
+
+ wait_for_monit_to_stop_pending()
+ running = 'running' in status()
+
+ if running and state in ['started', 'monitored']:
+ module.exit_json(changed=False, name=name, state=state)
+
+ if running and state == 'stopped':
+ if module.check_mode:
+ module.exit_json(changed=True)
+ status = run_command('stop')
+ if status in ['not monitored'] or 'stop pending' in status:
+ module.exit_json(changed=True, name=name, state=state)
+ module.fail_json(msg='%s process not stopped' % name, status=status)
+
+ if running and state == 'unmonitored':
+ if module.check_mode:
+ module.exit_json(changed=True)
+ status = run_command('unmonitor')
+ if status in ['not monitored'] or 'unmonitor pending' in status:
+ module.exit_json(changed=True, name=name, state=state)
+ module.fail_json(msg='%s process not unmonitored' % name, status=status)
+
+ elif state == 'restarted':
+ if module.check_mode:
+ module.exit_json(changed=True)
+ status = run_command('restart')
+ if status in ['initializing', 'running'] or 'restart pending' in status:
+ module.exit_json(changed=True, name=name, state=state)
+ module.fail_json(msg='%s process not restarted' % name, status=status)
+
+ elif not running and state == 'started':
+ if module.check_mode:
+ module.exit_json(changed=True)
+ status = run_command('start')
+ if status in ['initializing', 'running'] or 'start pending' in status:
+ module.exit_json(changed=True, name=name, state=state)
+ module.fail_json(msg='%s process not started' % name, status=status)
+
+ elif not running and state == 'monitored':
+ if module.check_mode:
+ module.exit_json(changed=True)
+ status = run_command('monitor')
+ if status not in ['not monitored']:
+ module.exit_json(changed=True, name=name, state=state)
+ module.fail_json(msg='%s process not monitored' % name, status=status)
+
+ module.exit_json(changed=False, name=name, state=state)
+
+# import module snippets
+from ansible.module_utils.basic import *
+
+main()
diff --git a/lib/ansible/modules/extras/monitoring/nagios.py b/lib/ansible/modules/extras/monitoring/nagios.py
new file mode 100644
index 0000000000..689e9f0903
--- /dev/null
+++ b/lib/ansible/modules/extras/monitoring/nagios.py
@@ -0,0 +1,1030 @@
+#!/usr/bin/python
+# -*- coding: utf-8 -*-
+#
+# This file is largely copied from the Nagios module included in the
+# Func project. Original copyright follows:
+#
+# func-nagios - Schedule downtime and enables/disable notifications
+# Copyright 2011, Red Hat, Inc.
+# Tim Bielawa <tbielawa@redhat.com>
+#
+# This software may be freely redistributed under the terms of the GNU
+# general public license version 2 or any later version.
+#
+# You should have received a copy of the GNU General Public License
+# along with this program. If not, see <http://www.gnu.org/licenses/>.
+
+
+DOCUMENTATION = '''
+---
+module: nagios
+short_description: Perform common tasks in Nagios related to downtime and notifications.
+description:
+ - "The M(nagios) module has two basic functions: scheduling downtime and toggling alerts for services or hosts."
+ - All actions require the I(host) parameter to be given explicitly. In playbooks you can use the C({{inventory_hostname}}) variable to refer to the host the playbook is currently running on.
+ - You can specify multiple services at once by separating them with commas, e.g., C(services=httpd,nfs,puppet).
+ - When specifying what service to handle there is a special service value, I(host), which will handle alerts/downtime for the I(host itself), e.g., C(service=host). This keyword may not be given with other services at the same time. I(Setting alerts/downtime for a host does not affect alerts/downtime for any of the services running on it.) To schedule downtime for all services on particular host use keyword "all", e.g., C(service=all).
+ - When using the M(nagios) module you will need to specify your Nagios server using the C(delegate_to) parameter.
+version_added: "0.7"
+options:
+ action:
+ description:
+ - Action to take.
+ - servicegroup options were added in 2.0.
+ - delete_downtime options were added in 2.2.
+ required: true
+ choices: [ "downtime", "delete_downtime", "enable_alerts", "disable_alerts", "silence", "unsilence",
+ "silence_nagios", "unsilence_nagios", "command", "servicegroup_service_downtime",
+ "servicegroup_host_downtime" ]
+ host:
+ description:
+ - Host to operate on in Nagios.
+ required: false
+ default: null
+ cmdfile:
+ description:
+ - Path to the nagios I(command file) (FIFO pipe).
+ Only required if auto-detection fails.
+ required: false
+ default: auto-detected
+ author:
+ description:
+ - Author to leave downtime comments as.
+ Only usable with the C(downtime) action.
+ required: false
+ default: Ansible
+ comment:
+ version_added: "2.0"
+ description:
+ - Comment for C(downtime) action.
+ required: false
+ default: Scheduling downtime
+ minutes:
+ description:
+ - Minutes to schedule downtime for.
+ - Only usable with the C(downtime) action.
+ required: false
+ default: 30
+ services:
+ description:
+ - What to manage downtime/alerts for. Separate multiple services with commas.
+ C(service) is an alias for C(services).
+ B(Required) option when using the C(downtime), C(enable_alerts), and C(disable_alerts) actions.
+ aliases: [ "service" ]
+ required: true
+ servicegroup:
+ version_added: "2.0"
+ description:
+ - The Servicegroup we want to set downtimes/alerts for.
+ B(Required) option when using the C(servicegroup_service_downtime) and C(servicegroup_host_downtime) actions.
+ command:
+ description:
+ - The raw command to send to nagios, which
+ should not include the submitted time header or the line-feed.
+ B(Required) option when using the C(command) action.
+ required: true
+
+author: "Tim Bielawa (@tbielawa)"
+'''
+
+EXAMPLES = '''
+# set 30 minutes of apache downtime
+- nagios: action=downtime minutes=30 service=httpd host={{ inventory_hostname }}
+
+# schedule an hour of HOST downtime
+- nagios: action=downtime minutes=60 service=host host={{ inventory_hostname }}
+
+# schedule an hour of HOST downtime, with a comment describing the reason
+- nagios: action=downtime minutes=60 service=host host={{ inventory_hostname }}
+ comment='This host needs disciplined'
+
+# schedule downtime for ALL services on HOST
+- nagios: action=downtime minutes=45 service=all host={{ inventory_hostname }}
+
+# schedule downtime for a few services
+- nagios: action=downtime services=frob,foobar,qeuz host={{ inventory_hostname }}
+
+# set 30 minutes downtime for all services in servicegroup foo
+- nagios: action=servicegroup_service_downtime minutes=30 servicegroup=foo host={{ inventory_hostname }}
+
+# set 30 minutes downtime for all hosts in servicegroup foo
+- nagios: action=servicegroup_host_downtime minutes=30 servicegroup=foo host={{ inventory_hostname }}
+
+# delete all downtime for a given host
+- nagios: action=delete_downtime host={{ inventory_hostname }} service=all
+
+# delete all downtime for HOST with a particular comment
+- nagios: action=delete_downtime host={{ inventory_hostname }} service=host comment="Planned maintenance"
+
+# enable SMART disk alerts
+- nagios: action=enable_alerts service=smart host={{ inventory_hostname }}
+
+# "two services at once: disable httpd and nfs alerts"
+- nagios: action=disable_alerts service=httpd,nfs host={{ inventory_hostname }}
+
+# disable HOST alerts
+- nagios: action=disable_alerts service=host host={{ inventory_hostname }}
+
+# silence ALL alerts
+- nagios: action=silence host={{ inventory_hostname }}
+
+# unsilence all alerts
+- nagios: action=unsilence host={{ inventory_hostname }}
+
+# SHUT UP NAGIOS
+- nagios: action=silence_nagios
+
+# ANNOY ME NAGIOS
+- nagios: action=unsilence_nagios
+
+# command something
+- nagios: action=command command='DISABLE_FAILURE_PREDICTION'
+'''
+
+import ConfigParser
+import types
+import time
+import os.path
+
+######################################################################
+
+
+def which_cmdfile():
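+ # nagios.cfg names the external command FIFO with a line such as
+ # (illustrative path): command_file=/var/lib/nagios3/rw/nagios.cmd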
+ locations = [
+ # rhel
+ '/etc/nagios/nagios.cfg',
+ # debian
+ '/etc/nagios3/nagios.cfg',
+ # older debian
+ '/etc/nagios2/nagios.cfg',
+ # bsd, solaris
+ '/usr/local/etc/nagios/nagios.cfg',
+ # groundwork it monitoring
+ '/usr/local/groundwork/nagios/etc/nagios.cfg',
+ # open monitoring distribution
+ '/omd/sites/oppy/tmp/nagios/nagios.cfg',
+ # ???
+ '/usr/local/nagios/etc/nagios.cfg',
+ '/usr/local/nagios/nagios.cfg',
+ '/opt/nagios/etc/nagios.cfg',
+ '/opt/nagios/nagios.cfg',
+ # icinga on debian/ubuntu
+ '/etc/icinga/icinga.cfg',
+ # icinga installed from source (default location)
+ '/usr/local/icinga/etc/icinga.cfg',
+ ]
+
+ for path in locations:
+ if os.path.exists(path):
+ for line in open(path):
+ if line.startswith('command_file'):
+ return line.split('=')[1].strip()
+
+ return None
+
+######################################################################
+
+
+def main():
+ ACTION_CHOICES = [
+ 'downtime',
+ 'delete_downtime',
+ 'silence',
+ 'unsilence',
+ 'enable_alerts',
+ 'disable_alerts',
+ 'silence_nagios',
+ 'unsilence_nagios',
+ 'command',
+ 'servicegroup_host_downtime',
+ 'servicegroup_service_downtime',
+ ]
+
+
+ module = AnsibleModule(
+ argument_spec=dict(
+ action=dict(required=True, choices=ACTION_CHOICES),
+ author=dict(default='Ansible'),
+ comment=dict(default='Scheduling downtime'),
+ host=dict(required=False, default=None),
+ servicegroup=dict(required=False, default=None),
+ minutes=dict(default=30),
+ cmdfile=dict(default=which_cmdfile()),
+ services=dict(default=None, aliases=['service']),
+ command=dict(required=False, default=None),
+ )
+ )
+
+ action = module.params['action']
+ host = module.params['host']
+ servicegroup = module.params['servicegroup']
+ minutes = module.params['minutes']
+ services = module.params['services']
+ cmdfile = module.params['cmdfile']
+ command = module.params['command']
+
+ ##################################################################
+ # Required args per action:
+ # downtime = (minutes, service, host)
+ # (un)silence = (host)
+ # (enable/disable)_alerts = (service, host)
+ # command = command
+ #
+ # AnsibleModule will verify most stuff, we need to verify
+ # 'minutes' and 'service' manually.
+
+ ##################################################################
+ if action not in ['command', 'silence_nagios', 'unsilence_nagios']:
+ if not host:
+ module.fail_json(msg='no host specified for action requiring one')
+ ######################################################################
+ if action == 'downtime':
+ # Make sure there's an actual service selected
+ if not services:
+ module.fail_json(msg='no service selected to set downtime for')
+ # Make sure minutes is a number
+ try:
+ m = int(minutes)
+ if not isinstance(m, types.IntType):
+ module.fail_json(msg='minutes must be a number')
+ except Exception:
+ module.fail_json(msg='invalid entry for minutes')
+
+ ######################################################################
+ if action == 'delete_downtime':
+ # Make sure there's an actual service selected
+ if not services:
+ module.fail_json(msg='no service selected to set downtime for')
+
+ ######################################################################
+
+ if action in ['servicegroup_service_downtime', 'servicegroup_host_downtime']:
+ # Make sure there's an actual servicegroup selected
+ if not servicegroup:
+ module.fail_json(msg='no servicegroup selected to set downtime for')
+ # Make sure minutes is a number
+ try:
+ m = int(minutes)
+ if not isinstance(m, types.IntType):
+ module.fail_json(msg='minutes must be a number')
+ except Exception:
+ module.fail_json(msg='invalid entry for minutes')
+
+ ##################################################################
+ if action in ['enable_alerts', 'disable_alerts']:
+ if not services:
+ module.fail_json(msg='a service is required when setting alerts')
+
+ if action in ['command']:
+ if not command:
+ module.fail_json(msg='no command passed for command action')
+ ##################################################################
+ if not cmdfile:
+ module.fail_json(msg='unable to locate nagios.cfg')
+
+ ##################################################################
+ ansible_nagios = Nagios(module, **module.params)
+ if module.check_mode:
+ module.exit_json(changed=True)
+ else:
+ ansible_nagios.act()
+ ##################################################################
+
+
+######################################################################
+class Nagios(object):
+ """
+ Perform common tasks in Nagios related to downtime and
+ notifications.
+
+ The complete set of external commands Nagios handles is documented
+ on their website:
+
+ http://old.nagios.org/developerinfo/externalcommands/commandlist.php
+
+ Note that in the case of `schedule_svc_downtime`,
+ `enable_svc_notifications`, and `disable_svc_notifications`, the
+ service argument should be passed as a list.
+ """
+
+ def __init__(self, module, **kwargs):
+ self.module = module
+ self.action = kwargs['action']
+ self.author = kwargs['author']
+ self.comment = kwargs['comment']
+ self.host = kwargs['host']
+ self.servicegroup = kwargs['servicegroup']
+ self.minutes = int(kwargs['minutes'])
+ self.cmdfile = kwargs['cmdfile']
+ self.command = kwargs['command']
+
+ if (kwargs['services'] is None) or (kwargs['services'] == 'host') or (kwargs['services'] == 'all'):
+ self.services = kwargs['services']
+ else:
+ self.services = kwargs['services'].split(',')
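+ # e.g. services='httpd,nfs' becomes ['httpd', 'nfs'] (illustrative input)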
+
+ self.command_results = []
+
+ def _now(self):
+ """
+ The time in seconds since 12:00:00AM Jan 1, 1970
+ """
+
+ return int(time.time())
+
+ def _write_command(self, cmd):
+ """
+ Write the given command to the Nagios command file
+ """
+
+ try:
+ fp = open(self.cmdfile, 'w')
+ fp.write(cmd)
+ fp.flush()
+ fp.close()
+ self.command_results.append(cmd.strip())
+ # report success so callers that inspect the return value behave correctly
+ return True
+ except IOError:
+ self.module.fail_json(msg='unable to write to nagios command file',
+ cmdfile=self.cmdfile)
+
+ def _fmt_dt_str(self, cmd, host, duration, author=None,
+ comment=None, start=None,
+ svc=None, fixed=1, trigger=0):
+ """
+ Format an external-command downtime string.
+
+ cmd - Nagios command ID
+ host - Host schedule downtime on
+ duration - Minutes to schedule downtime for
+ author - Name to file the downtime as
+ comment - Reason for running this command (upgrade, reboot, etc)
+ start - Start of downtime in seconds since 12:00AM Jan 1 1970
+ Default is to use the entry time (now)
+ svc - Service to schedule downtime for, omit when for host downtime
+ fixed - Start now if 1, start when a problem is detected if 0
+ trigger - Optional ID of event to start downtime from. Leave as 0 for
+ fixed downtime.
+
+ Syntax: [submitted] COMMAND;<host_name>;[<service_description>]
+ <start_time>;<end_time>;<fixed>;<trigger_id>;<duration>;<author>;
+ <comment>
+ """
+
+ entry_time = self._now()
+ if start is None:
+ start = entry_time
+
+ hdr = "[%s] %s;%s;" % (entry_time, cmd, host)
+ duration_s = (duration * 60)
+ end = start + duration_s
+
+ if not author:
+ author = self.author
+
+ if not comment:
+ comment = self.comment
+
+ if svc is not None:
+ dt_args = [svc, str(start), str(end), str(fixed), str(trigger),
+ str(duration_s), author, comment]
+ else:
+ # Downtime for a host if no svc specified
+ dt_args = [str(start), str(end), str(fixed), str(trigger),
+ str(duration_s), author, comment]
+
+ dt_arg_str = ";".join(dt_args)
+ dt_str = hdr + dt_arg_str + "\n"
+
+ return dt_str
+
+ def _fmt_dt_del_str(self, cmd, host, svc=None, start=None, comment=None):
+ """
+ Format an external-command downtime deletion string.
+
+ cmd - Nagios command ID
+ host - Host to remove scheduled downtime from
+ comment - Reason downtime was added (upgrade, reboot, etc)
+ start - Start of downtime in seconds since 12:00AM Jan 1 1970
+ svc - Service to remove downtime for, omit to remove all downtime for the host
+
+ Syntax: [submitted] COMMAND;<host_name>;
+ [<service_description>];[<start_time>];[<comment>]
+ """
+
+ entry_time = self._now()
+ hdr = "[%s] %s;%s;" % (entry_time, cmd, host)
+
+ if comment is None:
+ comment = self.comment
+
+ dt_del_args = []
+ if svc is not None:
+ dt_del_args.append(svc)
+ else:
+ dt_del_args.append('')
+
+ if start is not None:
+ dt_del_args.append(str(start))
+ else:
+ dt_del_args.append('')
+
+ if comment is not None:
+ dt_del_args.append(comment)
+ else:
+ dt_del_args.append('')
+
+ dt_del_arg_str = ";".join(dt_del_args)
+ dt_del_str = hdr + dt_del_arg_str + "\n"
+
+ return dt_del_str
+
+ def _fmt_notif_str(self, cmd, host=None, svc=None):
+ """
+ Format an external-command notification string.
+
+ cmd - Nagios command ID.
+ host - Host to en/disable notifications on. A value is not required
+ for global downtime
+ svc - Service to schedule downtime for. A value is not required
+ for host downtime.
+
+ Syntax: [submitted] COMMAND;<host_name>[;<service_description>]
+ """
+
+ entry_time = self._now()
+ notif_str = "[%s] %s" % (entry_time, cmd)
+ if host is not None:
+ notif_str += ";%s" % host
+
+ if svc is not None:
+ notif_str += ";%s" % svc
+
+ notif_str += "\n"
+
+ return notif_str
+
+ def schedule_svc_downtime(self, host, services=None, minutes=30):
+ """
+ This command is used to schedule downtime for a particular
+ service.
+
+ During the specified downtime, Nagios will not send
+ notifications out about the service.
+
+ Syntax: SCHEDULE_SVC_DOWNTIME;<host_name>;<service_description>
+ <start_time>;<end_time>;<fixed>;<trigger_id>;<duration>;<author>;
+ <comment>
+ """
+
+ cmd = "SCHEDULE_SVC_DOWNTIME"
+
+ if services is None:
+ services = []
+
+ for service in services:
+ dt_cmd_str = self._fmt_dt_str(cmd, host, minutes, svc=service)
+ self._write_command(dt_cmd_str)
+
+ def schedule_host_downtime(self, host, minutes=30):
+ """
+ This command is used to schedule downtime for a particular
+ host.
+
+ During the specified downtime, Nagios will not send
+ notifications out about the host.
+
+ Syntax: SCHEDULE_HOST_DOWNTIME;<host_name>;<start_time>;<end_time>;
+ <fixed>;<trigger_id>;<duration>;<author>;<comment>
+ """
+
+ cmd = "SCHEDULE_HOST_DOWNTIME"
+ dt_cmd_str = self._fmt_dt_str(cmd, host, minutes)
+ self._write_command(dt_cmd_str)
+
+ def schedule_host_svc_downtime(self, host, minutes=30):
+ """
+ This command is used to schedule downtime for
+ all services associated with a particular host.
+
+ During the specified downtime, Nagios will not send
+ notifications out about the host.
+
+ SCHEDULE_HOST_SVC_DOWNTIME;<host_name>;<start_time>;<end_time>;
+ <fixed>;<trigger_id>;<duration>;<author>;<comment>
+ """
+
+ cmd = "SCHEDULE_HOST_SVC_DOWNTIME"
+ dt_cmd_str = self._fmt_dt_str(cmd, host, minutes)
+ self._write_command(dt_cmd_str)
+
+ def delete_host_downtime(self, host, services=None, comment=None):
+ """
+ This command is used to remove scheduled downtime for a particular
+ host.
+
+ Syntax: DEL_DOWNTIME_BY_HOST_NAME;<host_name>;
+ [<service_description>];[<start_time>];[<comment>]
+ """
+
+ cmd = "DEL_DOWNTIME_BY_HOST_NAME"
+
+ if services is None:
+ dt_del_cmd_str = self._fmt_dt_del_str(cmd, host, comment=comment)
+ self._write_command(dt_del_cmd_str)
+ else:
+ for service in services:
+ dt_del_cmd_str = self._fmt_dt_del_str(cmd, host, svc=service, comment=comment)
+ self._write_command(dt_del_cmd_str)
+
+
+ def schedule_hostgroup_host_downtime(self, hostgroup, minutes=30):
+ """
+ This command is used to schedule downtime for all hosts in a
+ particular hostgroup.
+
+ During the specified downtime, Nagios will not send
+ notifications out about the hosts.
+
+ Syntax: SCHEDULE_HOSTGROUP_HOST_DOWNTIME;<hostgroup_name>;<start_time>;
+ <end_time>;<fixed>;<trigger_id>;<duration>;<author>;<comment>
+ """
+
+ cmd = "SCHEDULE_HOSTGROUP_HOST_DOWNTIME"
+ dt_cmd_str = self._fmt_dt_str(cmd, hostgroup, minutes)
+ self._write_command(dt_cmd_str)
+
+ def schedule_hostgroup_svc_downtime(self, hostgroup, minutes=30):
+ """
+ This command is used to schedule downtime for all services in
+ a particular hostgroup.
+
+ During the specified downtime, Nagios will not send
+ notifications out about the services.
+
+ Note that scheduling downtime for services does not
+ automatically schedule downtime for the hosts those services
+ are associated with.
+
+ Syntax: SCHEDULE_HOSTGROUP_SVC_DOWNTIME;<hostgroup_name>;<start_time>;
+ <end_time>;<fixed>;<trigger_id>;<duration>;<author>;<comment>
+ """
+
+ cmd = "SCHEDULE_HOSTGROUP_SVC_DOWNTIME"
+ dt_cmd_str = self._fmt_dt_str(cmd, hostgroup, minutes)
+ self._write_command(dt_cmd_str)
+
+ def schedule_servicegroup_host_downtime(self, servicegroup, minutes=30):
+ """
+ This command is used to schedule downtime for all hosts in a
+ particular servicegroup.
+
+ During the specified downtime, Nagios will not send
+ notifications out about the hosts.
+
+ Syntax: SCHEDULE_SERVICEGROUP_HOST_DOWNTIME;<servicegroup_name>;
+ <start_time>;<end_time>;<fixed>;<trigger_id>;<duration>;<author>;
+ <comment>
+ """
+
+ cmd = "SCHEDULE_SERVICEGROUP_HOST_DOWNTIME"
+ dt_cmd_str = self._fmt_dt_str(cmd, servicegroup, minutes)
+ self._write_command(dt_cmd_str)
+
+ def schedule_servicegroup_svc_downtime(self, servicegroup, minutes=30):
+ """
+ This command is used to schedule downtime for all services in
+ a particular servicegroup.
+
+ During the specified downtime, Nagios will not send
+ notifications out about the services.
+
+ Note that scheduling downtime for services does not
+ automatically schedule downtime for the hosts those services
+ are associated with.
+
+ Syntax: SCHEDULE_SERVICEGROUP_SVC_DOWNTIME;<servicegroup_name>;
+ <start_time>;<end_time>;<fixed>;<trigger_id>;<duration>;<author>;
+ <comment>
+ """
+
+ cmd = "SCHEDULE_SERVICEGROUP_SVC_DOWNTIME"
+ dt_cmd_str = self._fmt_dt_str(cmd, servicegroup, minutes)
+ self._write_command(dt_cmd_str)
+
+ def disable_host_svc_notifications(self, host):
+ """
+ This command is used to prevent notifications from being sent
+ out for all services on the specified host.
+
+ Note that this command does not disable notifications from
+ being sent out about the host.
+
+ Syntax: DISABLE_HOST_SVC_NOTIFICATIONS;<host_name>
+ """
+
+ cmd = "DISABLE_HOST_SVC_NOTIFICATIONS"
+ notif_str = self._fmt_notif_str(cmd, host)
+ self._write_command(notif_str)
+
+ def disable_host_notifications(self, host):
+ """
+ This command is used to prevent notifications from being sent
+ out for the specified host.
+
+ Note that this command does not disable notifications for
+ services associated with this host.
+
+ Syntax: DISABLE_HOST_NOTIFICATIONS;<host_name>
+ """
+
+ cmd = "DISABLE_HOST_NOTIFICATIONS"
+ notif_str = self._fmt_notif_str(cmd, host)
+ self._write_command(notif_str)
+
+ def disable_svc_notifications(self, host, services=None):
+ """
+ This command is used to prevent notifications from being sent
+ out for the specified service.
+
+ Note that this command does not disable notifications from
+ being sent out about the host.
+
+ Syntax: DISABLE_SVC_NOTIFICATIONS;<host_name>;<service_description>
+ """
+
+ cmd = "DISABLE_SVC_NOTIFICATIONS"
+
+ if services is None:
+ services = []
+
+ for service in services:
+ notif_str = self._fmt_notif_str(cmd, host, svc=service)
+ self._write_command(notif_str)
+
+ def disable_servicegroup_host_notifications(self, servicegroup):
+ """
+ This command is used to prevent notifications from being sent
+ out for all hosts in the specified servicegroup.
+
+ Note that this command does not disable notifications for
+ services associated with hosts in this service group.
+
+ Syntax: DISABLE_SERVICEGROUP_HOST_NOTIFICATIONS;<servicegroup_name>
+ """
+
+ cmd = "DISABLE_SERVICEGROUP_HOST_NOTIFICATIONS"
+ notif_str = self._fmt_notif_str(cmd, servicegroup)
+ self._write_command(notif_str)
+
+ def disable_servicegroup_svc_notifications(self, servicegroup):
+ """
+ This command is used to prevent notifications from being sent
+ out for all services in the specified servicegroup.
+
+ Note that this does not prevent notifications from being sent
+ out about the hosts in this servicegroup.
+
+ Syntax: DISABLE_SERVICEGROUP_SVC_NOTIFICATIONS;<servicegroup_name>
+ """
+
+ cmd = "DISABLE_SERVICEGROUP_SVC_NOTIFICATIONS"
+ notif_str = self._fmt_notif_str(cmd, servicegroup)
+ self._write_command(notif_str)
+
+ def disable_hostgroup_host_notifications(self, hostgroup):
+ """
+ Disables notifications for all hosts in a particular
+ hostgroup.
+
+ Note that this does not disable notifications for the services
+ associated with the hosts in the hostgroup - see the
+ DISABLE_HOSTGROUP_SVC_NOTIFICATIONS command for that.
+
+ Syntax: DISABLE_HOSTGROUP_HOST_NOTIFICATIONS;<hostgroup_name>
+ """
+
+ cmd = "DISABLE_HOSTGROUP_HOST_NOTIFICATIONS"
+ notif_str = self._fmt_notif_str(cmd, hostgroup)
+ self._write_command(notif_str)
+
+ def disable_hostgroup_svc_notifications(self, hostgroup):
+ """
+ Disables notifications for all services associated with hosts
+ in a particular hostgroup.
+
+ Note that this does not disable notifications for the hosts in
+ the hostgroup - see the DISABLE_HOSTGROUP_HOST_NOTIFICATIONS
+ command for that.
+
+ Syntax: DISABLE_HOSTGROUP_SVC_NOTIFICATIONS;<hostgroup_name>
+ """
+
+ cmd = "DISABLE_HOSTGROUP_SVC_NOTIFICATIONS"
+ notif_str = self._fmt_notif_str(cmd, hostgroup)
+ self._write_command(notif_str)
+
+ def enable_host_notifications(self, host):
+ """
+ Enables notifications for a particular host.
+
+ Note that this command does not enable notifications for
+ services associated with this host.
+
+ Syntax: ENABLE_HOST_NOTIFICATIONS;<host_name>
+ """
+
+ cmd = "ENABLE_HOST_NOTIFICATIONS"
+ notif_str = self._fmt_notif_str(cmd, host)
+ self._write_command(notif_str)
+
+ def enable_host_svc_notifications(self, host):
+ """
+ Enables notifications for all services on the specified host.
+
+ Note that this does not enable notifications for the host.
+
+ Syntax: ENABLE_HOST_SVC_NOTIFICATIONS;<host_name>
+ """
+
+ cmd = "ENABLE_HOST_SVC_NOTIFICATIONS"
+ notif_str = self._fmt_notif_str(cmd, host)
+ nagios_return = self._write_command(notif_str)
+
+ if nagios_return:
+ return notif_str
+ else:
+ return "Fail: could not write to the command file"
+
+ def enable_svc_notifications(self, host, services=None):
+ """
+ Enables notifications for a particular service.
+
+ Note that this does not enable notifications for the host.
+
+ Syntax: ENABLE_SVC_NOTIFICATIONS;<host_name>;<service_description>
+ """
+
+ cmd = "ENABLE_SVC_NOTIFICATIONS"
+
+ if services is None:
+ services = []
+
+ nagios_return = True
+ return_str_list = []
+ for service in services:
+ notif_str = self._fmt_notif_str(cmd, host, svc=service)
+ nagios_return = self._write_command(notif_str) and nagios_return
+ return_str_list.append(notif_str)
+
+ if nagios_return:
+ return return_str_list
+ else:
+ return "Fail: could not write to the command file"
+
+ def enable_hostgroup_host_notifications(self, hostgroup):
+ """
+ Enables notifications for all hosts in a particular hostgroup.
+
+ Note that this command does not enable notifications for
+ services associated with the hosts in this hostgroup.
+
+ Syntax: ENABLE_HOSTGROUP_HOST_NOTIFICATIONS;<hostgroup_name>
+ """
+
+ cmd = "ENABLE_HOSTGROUP_HOST_NOTIFICATIONS"
+ notif_str = self._fmt_notif_str(cmd, hostgroup)
+ nagios_return = self._write_command(notif_str)
+
+ if nagios_return:
+ return notif_str
+ else:
+ return "Fail: could not write to the command file"
+
+ def enable_hostgroup_svc_notifications(self, hostgroup):
+ """
+ Enables notifications for all services that are associated
+ with hosts in a particular hostgroup.
+
+ Note that this does not enable notifications for the hosts in
+ this hostgroup.
+
+ Syntax: ENABLE_HOSTGROUP_SVC_NOTIFICATIONS;<hostgroup_name>
+ """
+
+ cmd = "ENABLE_HOSTGROUP_SVC_NOTIFICATIONS"
+ notif_str = self._fmt_notif_str(cmd, hostgroup)
+ nagios_return = self._write_command(notif_str)
+
+ if nagios_return:
+ return notif_str
+ else:
+ return "Fail: could not write to the command file"
+
+ def enable_servicegroup_host_notifications(self, servicegroup):
+ """
+ Enables notifications for all hosts that have services that
+ are members of a particular servicegroup.
+
+ Note that this command does not enable notifications for
+ services associated with the hosts in this servicegroup.
+
+ Syntax: ENABLE_SERVICEGROUP_HOST_NOTIFICATIONS;<servicegroup_name>
+ """
+
+ cmd = "ENABLE_SERVICEGROUP_HOST_NOTIFICATIONS"
+ notif_str = self._fmt_notif_str(cmd, servicegroup)
+ nagios_return = self._write_command(notif_str)
+
+ if nagios_return:
+ return notif_str
+ else:
+ return "Fail: could not write to the command file"
+
+ def enable_servicegroup_svc_notifications(self, servicegroup):
+ """
+ Enables notifications for all services that are members of a
+ particular servicegroup.
+
+ Note that this does not enable notifications for the hosts in
+ this servicegroup.
+
+ Syntax: ENABLE_SERVICEGROUP_SVC_NOTIFICATIONS;<servicegroup_name>
+ """
+
+ cmd = "ENABLE_SERVICEGROUP_SVC_NOTIFICATIONS"
+ notif_str = self._fmt_notif_str(cmd, servicegroup)
+ nagios_return = self._write_command(notif_str)
+
+ if nagios_return:
+ return notif_str
+ else:
+ return "Fail: could not write to the command file"
+
+ def silence_host(self, host):
+ """
+ This command is used to prevent notifications from being sent
+ out for the host and all services on the specified host.
+
+ This is equivalent to calling disable_host_svc_notifications
+ and disable_host_notifications.
+
+ Syntax: DISABLE_HOST_SVC_NOTIFICATIONS;<host_name>
+ Syntax: DISABLE_HOST_NOTIFICATIONS;<host_name>
+ """
+
+ cmd = [
+ "DISABLE_HOST_SVC_NOTIFICATIONS",
+ "DISABLE_HOST_NOTIFICATIONS"
+ ]
+ nagios_return = True
+ return_str_list = []
+ for c in cmd:
+ notif_str = self._fmt_notif_str(c, host)
+ nagios_return = self._write_command(notif_str) and nagios_return
+ return_str_list.append(notif_str)
+
+ if nagios_return:
+ return return_str_list
+ else:
+ return "Fail: could not write to the command file"
+
+ def unsilence_host(self, host):
+ """
+ This command is used to enable notifications for the host and
+ all services on the specified host.
+
+ This is equivalent to calling enable_host_svc_notifications
+ and enable_host_notifications.
+
+ Syntax: ENABLE_HOST_SVC_NOTIFICATIONS;<host_name>
+ Syntax: ENABLE_HOST_NOTIFICATIONS;<host_name>
+ """
+
+ cmd = [
+ "ENABLE_HOST_SVC_NOTIFICATIONS",
+ "ENABLE_HOST_NOTIFICATIONS"
+ ]
+ nagios_return = True
+ return_str_list = []
+ for c in cmd:
+ notif_str = self._fmt_notif_str(c, host)
+ nagios_return = self._write_command(notif_str) and nagios_return
+ return_str_list.append(notif_str)
+
+ if nagios_return:
+ return return_str_list
+ else:
+ return "Fail: could not write to the command file"
+
+ def silence_nagios(self):
+ """
+ This command is used to disable notifications for all hosts and services
+ in nagios.
+
+ This is a 'SHUT UP, NAGIOS' command
+ """
+ cmd = 'DISABLE_NOTIFICATIONS'
+ self._write_command(self._fmt_notif_str(cmd))
+
+ def unsilence_nagios(self):
+ """
+ This command is used to enable notifications for all hosts and services
+ in nagios.
+
+ This is an 'OK, NAGIOS, GO' command
+ """
+ cmd = 'ENABLE_NOTIFICATIONS'
+ self._write_command(self._fmt_notif_str(cmd))
+
+ def nagios_cmd(self, cmd):
+ """
+ This sends an arbitrary command to nagios
+
+ It prepends the submitted time and appends a \n
+
+ You just have to provide the properly formatted command
+ """
+
+ pre = '[%s]' % int(time.time())
+
+ post = '\n'
+ cmdstr = '%s %s%s' % (pre, cmd, post)
+ self._write_command(cmdstr)
+
+ def act(self):
+ """
+ Figure out what action was requested from ansible, and carry it out.
+ """
+ # host or service downtime?
+ if self.action == 'downtime':
+ if self.services == 'host':
+ self.schedule_host_downtime(self.host, self.minutes)
+ elif self.services == 'all':
+ self.schedule_host_svc_downtime(self.host, self.minutes)
+ else:
+ self.schedule_svc_downtime(self.host,
+ services=self.services,
+ minutes=self.minutes)
+
+ elif self.action == 'delete_downtime':
+ if self.services == 'host':
+ self.delete_host_downtime(self.host)
+ elif self.services == 'all':
+ self.delete_host_downtime(self.host, comment='')
+ else:
+ self.delete_host_downtime(self.host, services=self.services)
+
+ elif self.action == "servicegroup_host_downtime":
+ if self.servicegroup:
+ self.schedule_servicegroup_host_downtime(servicegroup=self.servicegroup, minutes=self.minutes)
+ elif self.action == "servicegroup_service_downtime":
+ if self.servicegroup:
+ self.schedule_servicegroup_svc_downtime(servicegroup=self.servicegroup, minutes=self.minutes)
+
+ # toggle the host AND service alerts
+ elif self.action == 'silence':
+ self.silence_host(self.host)
+
+ elif self.action == 'unsilence':
+ self.unsilence_host(self.host)
+
+ # toggle host/svc alerts
+ elif self.action == 'enable_alerts':
+ if self.services == 'host':
+ self.enable_host_notifications(self.host)
+ elif self.services == 'all':
+ self.enable_host_svc_notifications(self.host)
+ else:
+ self.enable_svc_notifications(self.host,
+ services=self.services)
+
+ elif self.action == 'disable_alerts':
+ if self.services == 'host':
+ self.disable_host_notifications(self.host)
+ elif self.services == 'all':
+ self.disable_host_svc_notifications(self.host)
+ else:
+ self.disable_svc_notifications(self.host,
+ services=self.services)
+ elif self.action == 'silence_nagios':
+ self.silence_nagios()
+
+ elif self.action == 'unsilence_nagios':
+ self.unsilence_nagios()
+
+ elif self.action == 'command':
+ self.nagios_cmd(self.command)
+
+ # unknown action
+ else:
+ self.module.fail_json(msg="unknown action specified: '%s'" % \
+ self.action)
+
+ self.module.exit_json(nagios_commands=self.command_results,
+ changed=True)
+
+######################################################################
+# import module snippets
+from ansible.module_utils.basic import *
+main()
diff --git a/lib/ansible/modules/extras/monitoring/newrelic_deployment.py b/lib/ansible/modules/extras/monitoring/newrelic_deployment.py
new file mode 100644
index 0000000000..3d9bc6c0ec
--- /dev/null
+++ b/lib/ansible/modules/extras/monitoring/newrelic_deployment.py
@@ -0,0 +1,147 @@
+#!/usr/bin/python
+# -*- coding: utf-8 -*-
+
+# Copyright 2013 Matt Coddington <coddington@gmail.com>
+#
+# This file is part of Ansible
+#
+# Ansible is free software: you can redistribute it and/or modify
+# it under the terms of the GNU General Public License as published by
+# the Free Software Foundation, either version 3 of the License, or
+# (at your option) any later version.
+#
+# Ansible is distributed in the hope that it will be useful,
+# but WITHOUT ANY WARRANTY; without even the implied warranty of
+# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+# GNU General Public License for more details.
+#
+# You should have received a copy of the GNU General Public License
+# along with Ansible. If not, see <http://www.gnu.org/licenses/>.
+
+DOCUMENTATION = '''
+---
+module: newrelic_deployment
+version_added: "1.2"
+author: "Matt Coddington (@mcodd)"
+short_description: Notify newrelic about app deployments
+description:
+ - Notify newrelic about app deployments (see https://docs.newrelic.com/docs/apm/new-relic-apm/maintenance/deployment-notifications#api)
+options:
+ token:
+ description:
+ - API token, to place in the x-api-key header.
+ required: true
+ app_name:
+ description:
+ - (one of app_name or application_id is required) The value of app_name in the newrelic.yml file used by the application
+ required: false
+ application_id:
+ description:
+ - (one of app_name or application_id is required) The application id, found in the URL when viewing the application in RPM
+ required: false
+ changelog:
+ description:
+ - A list of changes for this deployment
+ required: false
+ description:
+ description:
+ - Text annotation for the deployment - notes for you
+ required: false
+ revision:
+ description:
+ - A revision number (e.g., git commit SHA)
+ required: false
+ user:
+ description:
+ - The name of the user/process that triggered this deployment
+ required: false
+ appname:
+ description:
+ - Name of the application
+ required: false
+ environment:
+ description:
+ - The environment for this deployment
+ required: false
+ validate_certs:
+ description:
+ - If C(no), SSL certificates will not be validated. This should only be used
+ on personally controlled sites using self-signed certificates.
+ required: false
+ default: 'yes'
+ choices: ['yes', 'no']
+ version_added: 1.5.1
+
+requirements: []
+'''
+
+EXAMPLES = '''
+- newrelic_deployment: token=AAAAAA
+ app_name=myapp
+ user='ansible deployment'
+ revision=1.0
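+
+# Notify using an application ID instead of the app name
+# (12345 is an illustrative ID taken from the application's RPM URL)
+- newrelic_deployment: token=AAAAAA
+                       application_id=12345
+                       revision=1.0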
+'''
+
+import urllib
+
+# ===========================================
+# Module execution.
+#
+
+def main():
+
+ module = AnsibleModule(
+ argument_spec=dict(
+ token=dict(required=True),
+ app_name=dict(required=False),
+ application_id=dict(required=False),
+ changelog=dict(required=False),
+ description=dict(required=False),
+ revision=dict(required=False),
+ user=dict(required=False),
+ appname=dict(required=False),
+ environment=dict(required=False),
+ validate_certs=dict(default='yes', type='bool'),
+ ),
+ required_one_of=[['app_name', 'application_id']],
+ supports_check_mode=True
+ )
+
+ # build list of params
+ params = {}
+ if module.params["app_name"] and module.params["application_id"]:
+ module.fail_json(msg="only one of 'app_name' or 'application_id' can be set")
+
+ if module.params["app_name"]:
+ params["app_name"] = module.params["app_name"]
+ elif module.params["application_id"]:
+ params["application_id"] = module.params["application_id"]
+ else:
+ module.fail_json(msg="you must set one of 'app_name' or 'application_id'")
+
+ for item in [ "changelog", "description", "revision", "user", "appname", "environment" ]:
+ if module.params[item]:
+ params[item] = module.params[item]
+
+ # If we're in check mode, just exit pretending like we succeeded
+ if module.check_mode:
+ module.exit_json(changed=True)
+
+ # Send the data to NewRelic
+ url = "https://rpm.newrelic.com/deployments.xml"
+ data = urllib.urlencode(params)
+ headers = {
+ 'x-api-key': module.params["token"],
+ }
+ response, info = fetch_url(module, url, data=data, headers=headers)
+ if info['status'] in (200, 201):
+ module.exit_json(changed=True)
+ else:
+ module.fail_json(msg="unable to update newrelic: %s" % info['msg'])
+
+# import module snippets
+from ansible.module_utils.basic import *
+from ansible.module_utils.urls import *
+
+main()
+
diff --git a/lib/ansible/modules/extras/monitoring/pagerduty.py b/lib/ansible/modules/extras/monitoring/pagerduty.py
new file mode 100644
index 0000000000..99a9be8a04
--- /dev/null
+++ b/lib/ansible/modules/extras/monitoring/pagerduty.py
@@ -0,0 +1,299 @@
+#!/usr/bin/python
+# -*- coding: utf-8 -*-
+#
+# This file is part of Ansible
+#
+# Ansible is free software: you can redistribute it and/or modify
+# it under the terms of the GNU General Public License as published by
+# the Free Software Foundation, either version 3 of the License, or
+# (at your option) any later version.
+#
+# Ansible is distributed in the hope that it will be useful,
+# but WITHOUT ANY WARRANTY; without even the implied warranty of
+# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+# GNU General Public License for more details.
+#
+# You should have received a copy of the GNU General Public License
+# along with Ansible. If not, see <http://www.gnu.org/licenses/>.
+
+DOCUMENTATION = '''
+
+module: pagerduty
+short_description: Create PagerDuty maintenance windows
+description:
+ - This module will let you create PagerDuty maintenance windows
+version_added: "1.2"
+author:
+ - "Andrew Newdigate (@suprememoocow)"
+ - "Dylan Silva (@thaumos)"
+ - "Justin Johns"
+ - "Bruce Pennypacker"
+requirements:
+ - PagerDuty API access
+options:
+ state:
+ description:
+ - Create a maintenance window or get a list of ongoing windows.
+ required: true
+ default: null
+ choices: [ "running", "started", "ongoing", "absent" ]
+ aliases: []
+ name:
+ description:
+ - PagerDuty unique subdomain.
+ required: true
+ default: null
+ choices: []
+ aliases: []
+ user:
+ description:
+ - PagerDuty user ID.
+ required: true
+ default: null
+ choices: []
+ aliases: []
+ passwd:
+ description:
+ - PagerDuty user password.
+ required: true
+ default: null
+ choices: []
+ aliases: []
+ token:
+ description:
+ - A PagerDuty token, generated on the PagerDuty site. Can be used instead of
+ a user/passwd combination.
+ required: true
+ default: null
+ choices: []
+ aliases: []
+ version_added: '1.8'
+ requester_id:
+ description:
+ - ID of user making the request. Only needed when using a token and creating a maintenance_window.
+ required: true
+ default: null
+ choices: []
+ aliases: []
+ version_added: '1.8'
+ service:
+ description:
+ - A comma separated list of PagerDuty service IDs.
+ required: false
+ default: null
+ choices: []
+ aliases: [ services ]
+ hours:
+ description:
+ - Length of maintenance window in hours.
+ required: false
+ default: 1
+ choices: []
+ aliases: []
+ minutes:
+ description:
+ - Maintenance window in minutes (this is added to the hours).
+ required: false
+ default: 0
+ choices: []
+ aliases: []
+ version_added: '1.8'
+ desc:
+ description:
+ - Short description of maintenance window.
+ required: false
+ default: Created by Ansible
+ choices: []
+ aliases: []
+ validate_certs:
+ description:
+ - If C(no), SSL certificates will not be validated. This should only be used
+ on personally controlled sites using self-signed certificates.
+ required: false
+ default: 'yes'
+ choices: ['yes', 'no']
+ version_added: 1.5.1
+'''
+
+EXAMPLES = '''
+# List ongoing maintenance windows using a user/passwd
+- pagerduty: name=companyabc user=example@example.com passwd=password123 state=ongoing
+
+# List ongoing maintenance windows using a token
+- pagerduty: name=companyabc token=xxxxxxxxxxxxxx state=ongoing
+
+# Create a 1 hour maintenance window for service FOO123, using a user/passwd
+- pagerduty: name=companyabc
+ user=example@example.com
+ passwd=password123
+ state=running
+ service=FOO123
+
+# Create a 5 minute maintenance window for service FOO123, using a token
+- pagerduty: name=companyabc
+ token=xxxxxxxxxxxxxx
+ hours=0
+ minutes=5
+ state=running
+ service=FOO123
+
+
+# Create a 4 hour maintenance window for service FOO123 with the description "deployment".
+- pagerduty: name=companyabc
+ user=example@example.com
+ passwd=password123
+ state=running
+ service=FOO123
+ hours=4
+ desc=deployment
+ register: pd_window
+
+# Delete the previous maintenance window
+- pagerduty: name=companyabc
+ user=example@example.com
+ passwd=password123
+ state=absent
+ service={{ pd_window.result.maintenance_window.id }}
+'''
+
+import datetime
+import base64
+
+def auth_header(user, passwd, token):
+ if token:
+ return "Token token=%s" % token
+
+ auth = base64.encodestring('%s:%s' % (user, passwd)).replace('\n', '')
+ return "Basic %s" % auth
+
+def ongoing(module, name, user, passwd, token):
+ url = "https://" + name + ".pagerduty.com/api/v1/maintenance_windows/ongoing"
+ headers = {"Authorization": auth_header(user, passwd, token)}
+
+ response, info = fetch_url(module, url, headers=headers)
+ if info['status'] != 200:
+ module.fail_json(msg="failed to lookup the ongoing window: %s" % info['msg'])
+
+ try:
+ json_out = json.loads(response.read())
+ except Exception:
+ json_out = ""
+
+ return False, json_out, False
+
+
+def create(module, name, user, passwd, token, requester_id, service, hours, minutes, desc):
+ now = datetime.datetime.utcnow()
+ later = now + datetime.timedelta(hours=int(hours), minutes=int(minutes))
+ start = now.strftime("%Y-%m-%dT%H:%M:%SZ")
+ end = later.strftime("%Y-%m-%dT%H:%M:%SZ")
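+ # e.g. start='2016-10-03T13:19:01Z' (UTC, ISO 8601; illustrative timestamp)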
+
+ url = "https://" + name + ".pagerduty.com/api/v1/maintenance_windows"
+ headers = {
+ 'Authorization': auth_header(user, passwd, token),
+ 'Content-Type' : 'application/json',
+ }
+ request_data = {'maintenance_window': {'start_time': start, 'end_time': end, 'description': desc, 'service_ids': service}}
+
+ if requester_id:
+ request_data['requester_id'] = requester_id
+ else:
+ if token:
+ module.fail_json(msg="requester_id is required when using a token")
+
+ data = json.dumps(request_data)
+ response, info = fetch_url(module, url, data=data, headers=headers, method='POST')
+ if info['status'] != 200:
+ module.fail_json(msg="failed to create the window: %s" % info['msg'])
+
+ try:
+ json_out = json.loads(response.read())
+ except Exception:
+ json_out = ""
+
+ return False, json_out, True
+
+def absent(module, name, user, passwd, token, requester_id, service):
+ url = "https://" + name + ".pagerduty.com/api/v1/maintenance_windows/" + service[0]
+ headers = {
+ 'Authorization': auth_header(user, passwd, token),
+ 'Content-Type' : 'application/json',
+ }
+ request_data = {}
+
+ if requester_id:
+ request_data['requester_id'] = requester_id
+ else:
+ if token:
+ module.fail_json(msg="requester_id is required when using a token")
+
+ data = json.dumps(request_data)
+ response, info = fetch_url(module, url, data=data, headers=headers, method='DELETE')
+ if info['status'] != 200:
+ module.fail_json(msg="failed to delete the window: %s" % info['msg'])
+
+ try:
+ json_out = json.loads(response.read())
+ except Exception:
+ json_out = ""
+
+ return False, json_out, True
+
+
+def main():
+
+ module = AnsibleModule(
+ argument_spec=dict(
+ state=dict(required=True, choices=['running', 'started', 'ongoing', 'absent']),
+ name=dict(required=True),
+ user=dict(required=False),
+ passwd=dict(required=False),
+ token=dict(required=False),
+ service=dict(required=False, type='list', aliases=["services"]),
+ requester_id=dict(required=False),
+ hours=dict(default='1', required=False),
+ minutes=dict(default='0', required=False),
+ desc=dict(default='Created by Ansible', required=False),
+ validate_certs=dict(default='yes', type='bool'),
+ )
+ )
+
+ state = module.params['state']
+ name = module.params['name']
+ user = module.params['user']
+ passwd = module.params['passwd']
+ token = module.params['token']
+ service = module.params['service']
+ hours = module.params['hours']
+ minutes = module.params['minutes']
+ desc = module.params['desc']
+ requester_id = module.params['requester_id']
+
+ if not token and not (user and passwd):
+ module.fail_json(msg="you must specify either a token or both user and passwd")
+
+ if state == "running" or state == "started":
+ if not service:
+ module.fail_json(msg="service not specified")
+ (rc, out, changed) = create(module, name, user, passwd, token, requester_id, service, hours, minutes, desc)
+ if rc == 0:
+ changed = True
+
+ if state == "ongoing":
+ (rc, out, changed) = ongoing(module, name, user, passwd, token)
+
+ if state == "absent":
+ (rc, out, changed) = absent(module, name, user, passwd, token, requester_id, service)
+
+ if rc != 0:
+ module.fail_json(msg="failed", result=out)
+
+
+ module.exit_json(msg="success", result=out, changed=changed)
+
+# import module snippets
+from ansible.module_utils.basic import *
+from ansible.module_utils.urls import *
+
+main()
diff --git a/lib/ansible/modules/extras/monitoring/pagerduty_alert.py b/lib/ansible/modules/extras/monitoring/pagerduty_alert.py
new file mode 100644
index 0000000000..e2d127f015
--- /dev/null
+++ b/lib/ansible/modules/extras/monitoring/pagerduty_alert.py
@@ -0,0 +1,213 @@
+#!/usr/bin/python
+# -*- coding: utf-8 -*-
+#
+# This file is part of Ansible
+#
+# Ansible is free software: you can redistribute it and/or modify
+# it under the terms of the GNU General Public License as published by
+# the Free Software Foundation, either version 3 of the License, or
+# (at your option) any later version.
+#
+# Ansible is distributed in the hope that it will be useful,
+# but WITHOUT ANY WARRANTY; without even the implied warranty of
+# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+# GNU General Public License for more details.
+#
+# You should have received a copy of the GNU General Public License
+# along with Ansible. If not, see <http://www.gnu.org/licenses/>.
+
+DOCUMENTATION = '''
+
+module: pagerduty_alert
+short_description: Trigger, acknowledge or resolve PagerDuty incidents
+description:
+ - This module will let you trigger, acknowledge or resolve a PagerDuty incident by sending events
+version_added: "1.9"
+author:
+ - "Amanpreet Singh (@aps-sids)"
+requirements:
+ - PagerDuty API access
+options:
+ name:
+ description:
+ - PagerDuty unique subdomain.
+ required: true
+ service_key:
+ description:
+ - The GUID of one of your "Generic API" services.
+ - This is the "service key" listed on a Generic API's service detail page.
+ required: true
+ state:
+ description:
+ - Type of event to be sent.
+ required: true
+ choices:
+ - 'triggered'
+ - 'acknowledged'
+ - 'resolved'
+ api_key:
+ description:
+ - The pagerduty API key (readonly access), generated on the pagerduty site.
+ required: true
+ desc:
+ description:
+ - For C(triggered) I(state) - Required. Short description of the problem that led to this trigger. This field (or a truncated version) will be used when generating phone calls, SMS messages and alert emails. It will also appear on the incidents tables in the PagerDuty UI. The maximum length is 1024 characters.
+ - For C(acknowledged) or C(resolved) I(state) - Text that will appear in the incident's log associated with this event.
+ required: false
+ default: Created via Ansible
+ incident_key:
+ description:
+ - Identifies the incident to which this I(state) should be applied.
+ - For C(triggered) I(state) - If there's no open (i.e. unresolved) incident with this key, a new one will be created. If there's already an open incident with a matching key, this event will be appended to that incident's log. The event key provides an easy way to "de-dup" problem reports.
+ - For C(acknowledged) or C(resolved) I(state) - This should be the incident_key you received back when the incident was first opened by a trigger event. Acknowledge events referencing resolved or nonexistent incidents will be discarded.
+ required: false
+ client:
+ description:
+ - The name of the monitoring client that is triggering this event.
+ required: false
+ client_url:
+ description:
+ - The URL of the monitoring client that is triggering this event.
+ required: false
+'''
+
+EXAMPLES = '''
+# Trigger an incident with just the basic options
+- pagerduty_alert:
+ name=companyabc
+ service_key=xxx
+ api_key=yourapikey
+ state=triggered
+ desc="problem that led to this trigger"
+
+# Trigger an incident with more options
+- pagerduty_alert:
+ service_key=xxx
+ api_key=yourapikey
+ state=triggered
+ desc="problem that led to this trigger"
+ incident_key=somekey
+ client="Sample Monitoring Service"
+ client_url=http://service.example.com
+
+# Acknowledge an incident based on incident_key
+- pagerduty_alert:
+ service_key=xxx
+ api_key=yourapikey
+ state=acknowledged
+ incident_key=somekey
+ desc="some text for incident's log"
+
+# Resolve an incident based on incident_key
+- pagerduty_alert:
+ service_key=xxx
+ api_key=yourapikey
+ state=resolved
+ incident_key=somekey
+ desc="some text for incident's log"
+'''
+
+
+def check(module, name, state, service_key, api_key, incident_key=None):
+ url = "https://%s.pagerduty.com/api/v1/incidents" % name
+ headers = {
+ "Content-type": "application/json",
+ "Authorization": "Token token=%s" % api_key
+ }
+
+ data = {
+ "service_key": service_key,
+ "incident_key": incident_key,
+ "sort_by": "incident_number:desc"
+ }
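+
+ # Newest-first sorting means incidents[0] below is the most recent incident
+ # matching the service/incident filters above.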
+
+ response, info = fetch_url(module, url, method='get',
+ headers=headers, data=json.dumps(data))
+
+ if info['status'] != 200:
+ module.fail_json(msg="failed to check current incident status."
+ "Reason: %s" % info['msg'])
+ json_out = json.loads(response.read())["incidents"][0]
+
+ if state != json_out["status"]:
+ return json_out, True
+ return json_out, False
+
+
+def send_event(module, service_key, event_type, desc,
+ incident_key=None, client=None, client_url=None):
+ url = "https://events.pagerduty.com/generic/2010-04-15/create_event.json"
+ headers = {
+ "Content-type": "application/json"
+ }
+
+ data = {
+ "service_key": service_key,
+ "event_type": event_type,
+ "incident_key": incident_key,
+ "description": desc,
+ "client": client,
+ "client_url": client_url
+ }
+
+ response, info = fetch_url(module, url, method='post',
+ headers=headers, data=json.dumps(data))
+ if info['status'] != 200:
+ module.fail_json(msg="failed to %s. Reason: %s" %
+ (event_type, info['msg']))
+ json_out = json.loads(response.read())
+ return json_out
+
+
+def main():
+ module = AnsibleModule(
+ argument_spec=dict(
+ name=dict(required=True),
+ service_key=dict(required=True),
+ api_key=dict(required=True),
+ state=dict(required=True,
+ choices=['triggered', 'acknowledged', 'resolved']),
+ client=dict(required=False, default=None),
+ client_url=dict(required=False, default=None),
+ desc=dict(required=False, default='Created via Ansible'),
+ incident_key=dict(required=False, default=None)
+ ),
+ supports_check_mode=True
+ )
+
+ name = module.params['name']
+ service_key = module.params['service_key']
+ api_key = module.params['api_key']
+ state = module.params['state']
+ client = module.params['client']
+ client_url = module.params['client_url']
+ desc = module.params['desc']
+ incident_key = module.params['incident_key']
+
+ state_event_dict = {
+ 'triggered': 'trigger',
+ 'acknowledged': 'acknowledge',
+ 'resolved': 'resolve'
+ }
+
+ event_type = state_event_dict[state]
+
+ if event_type != 'trigger' and incident_key is None:
+ module.fail_json(msg="incident_key is required for "
+ "acknowledge or resolve events")
+
+ out, changed = check(module, name, state,
+ service_key, api_key, incident_key)
+
+ if not module.check_mode and changed is True:
+ out = send_event(module, service_key, event_type, desc,
+ incident_key, client, client_url)
+
+ module.exit_json(result=out, changed=changed)
+
+# import module snippets
+from ansible.module_utils.basic import *
+from ansible.module_utils.urls import *
+
+if __name__ == '__main__':
+ main()
diff --git a/lib/ansible/modules/extras/monitoring/pingdom.py b/lib/ansible/modules/extras/monitoring/pingdom.py
new file mode 100644
index 0000000000..4346e8ca6f
--- /dev/null
+++ b/lib/ansible/modules/extras/monitoring/pingdom.py
@@ -0,0 +1,152 @@
+#!/usr/bin/python
+# -*- coding: utf-8 -*-
+#
+# This file is part of Ansible
+#
+# Ansible is free software: you can redistribute it and/or modify
+# it under the terms of the GNU General Public License as published by
+# the Free Software Foundation, either version 3 of the License, or
+# (at your option) any later version.
+#
+# Ansible is distributed in the hope that it will be useful,
+# but WITHOUT ANY WARRANTY; without even the implied warranty of
+# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+# GNU General Public License for more details.
+#
+# You should have received a copy of the GNU General Public License
+# along with Ansible. If not, see <http://www.gnu.org/licenses/>.
+DOCUMENTATION = '''
+
+module: pingdom
+short_description: Pause/unpause Pingdom alerts
+description:
+ - This module will let you pause/unpause Pingdom alerts
+version_added: "1.2"
+author:
+ - "Dylan Silva (@thaumos)"
+ - "Justin Johns"
+requirements:
+ - "This pingdom python library: https://github.com/mbabineau/pingdom-python"
+options:
+ state:
+ description:
+ - Define whether or not the check should be running or paused.
+ required: true
+ default: null
+ choices: [ "running", "paused" ]
+ aliases: []
+ checkid:
+ description:
+ - Pingdom ID of the check.
+ required: true
+ default: null
+ choices: []
+ aliases: []
+ uid:
+ description:
+ - Pingdom user ID.
+ required: true
+ default: null
+ choices: []
+ aliases: []
+ passwd:
+ description:
+ - Pingdom user password.
+ required: true
+ default: null
+ choices: []
+ aliases: []
+ key:
+ description:
+ - Pingdom API key.
+ required: true
+ default: null
+ choices: []
+ aliases: []
+notes:
+ - This module does not yet have support to add/remove checks.
+'''
+
+EXAMPLES = '''
+# Pause the check with the ID of 12345.
+- pingdom: uid=example@example.com
+ passwd=password123
+ key=apipassword123
+ checkid=12345
+ state=paused
+
+# Unpause the check with the ID of 12345.
+- pingdom: uid=example@example.com
+ passwd=password123
+ key=apipassword123
+ checkid=12345
+ state=running
+'''
+
+try:
+ import pingdom
+ HAS_PINGDOM = True
+except ImportError:
+ HAS_PINGDOM = False
+
+
+
+def pause(checkid, uid, passwd, key):
+
+ c = pingdom.PingdomConnection(uid, passwd, key)
+ c.modify_check(checkid, paused=True)
+ check = c.get_check(checkid)
+ name = check.name
+ result = check.status
+ #if result != "paused": # api output buggy - accept raw exception for now
+ # return (True, name, result)
+ return (False, name, result)
+
+
+def unpause(checkid, uid, passwd, key):
+
+ c = pingdom.PingdomConnection(uid, passwd, key)
+ c.modify_check(checkid, paused=False)
+ check = c.get_check(checkid)
+ name = check.name
+ result = check.status
+ #if result != "up": # api output buggy - accept raw exception for now
+ # return (True, name, result)
+ return (False, name, result)
+
+
+def main():
+
+ module = AnsibleModule(
+ argument_spec=dict(
+ state=dict(required=True, choices=['running', 'paused', 'started', 'stopped']),
+ checkid=dict(required=True),
+ uid=dict(required=True),
+ passwd=dict(required=True),
+ key=dict(required=True)
+ )
+ )
+
+ if not HAS_PINGDOM:
+ module.fail_json(msg="Missing required pingdom module (check docs)")
+
+ checkid = module.params['checkid']
+ state = module.params['state']
+ uid = module.params['uid']
+ passwd = module.params['passwd']
+ key = module.params['key']
+
+ if (state == "paused" or state == "stopped"):
+ (rc, name, result) = pause(checkid, uid, passwd, key)
+
+ if (state == "running" or state == "started"):
+ (rc, name, result) = unpause(checkid, uid, passwd, key)
+
+ if rc != 0:
+ module.fail_json(checkid=checkid, name=name, status=result)
+
+ module.exit_json(checkid=checkid, name=name, status=result)
+
+# import module snippets
+from ansible.module_utils.basic import *
+main()
diff --git a/lib/ansible/modules/extras/monitoring/rollbar_deployment.py b/lib/ansible/modules/extras/monitoring/rollbar_deployment.py
new file mode 100644
index 0000000000..060193b78a
--- /dev/null
+++ b/lib/ansible/modules/extras/monitoring/rollbar_deployment.py
@@ -0,0 +1,134 @@
+#!/usr/bin/python
+# -*- coding: utf-8 -*-
+
+# Copyright 2014, Max Riveiro, <kavu13@gmail.com>
+#
+# This file is part of Ansible
+#
+# Ansible is free software: you can redistribute it and/or modify
+# it under the terms of the GNU General Public License as published by
+# the Free Software Foundation, either version 3 of the License, or
+# (at your option) any later version.
+#
+# Ansible is distributed in the hope that it will be useful,
+# but WITHOUT ANY WARRANTY; without even the implied warranty of
+# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+# GNU General Public License for more details.
+#
+# You should have received a copy of the GNU General Public License
+# along with Ansible. If not, see <http://www.gnu.org/licenses/>.
+
+DOCUMENTATION = '''
+---
+module: rollbar_deployment
+version_added: 1.6
+author: "Max Riveiro (@kavu)"
+short_description: Notify Rollbar about app deployments
+description:
+ - Notify Rollbar about app deployments
+ (see https://rollbar.com/docs/deploys_other/)
+options:
+ token:
+ description:
+ - Your project access token.
+ required: true
+ environment:
+ description:
+ - Name of the environment being deployed, e.g. 'production'.
+ required: true
+ revision:
+ description:
+ - Revision number/sha being deployed.
+ required: true
+ user:
+ description:
+ - User who deployed.
+ required: false
+ rollbar_user:
+ description:
+ - Rollbar username of the user who deployed.
+ required: false
+ comment:
+ description:
+ - Deploy comment (e.g. what is being deployed).
+ required: false
+ url:
+ description:
+ - Optional URL to submit the notification to.
+ required: false
+ default: 'https://api.rollbar.com/api/1/deploy/'
+ validate_certs:
+ description:
+ - If C(no), SSL certificates for the target url will not be validated.
+ This should only be used on personally controlled sites using
+ self-signed certificates.
+ required: false
+ default: 'yes'
+ choices: ['yes', 'no']
+'''
+
+EXAMPLES = '''
+- rollbar_deployment: token=AAAAAA
+ environment='staging'
+ user='ansible'
+                      revision=4.2
+                      rollbar_user='admin'
+ comment='Test Deploy'
+'''
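+
+# Roughly what the module does under the hood (a sketch using the placeholder
+# values from the example above; the module itself sends the request through
+# fetch_url below):
+#
+#   import urllib, urllib2
+#   data = urllib.urlencode({'access_token': 'AAAAAA',
+#                            'environment': 'staging',
+#                            'revision': '4.2'})
+#   urllib2.urlopen('https://api.rollbar.com/api/1/deploy/', data)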
+
+import urllib
+
+def main():
+
+ module = AnsibleModule(
+ argument_spec=dict(
+ token=dict(required=True),
+ environment=dict(required=True),
+ revision=dict(required=True),
+ user=dict(required=False),
+ rollbar_user=dict(required=False),
+ comment=dict(required=False),
+ url=dict(
+ required=False,
+ default='https://api.rollbar.com/api/1/deploy/'
+ ),
+ validate_certs=dict(default='yes', type='bool'),
+ ),
+ supports_check_mode=True
+ )
+
+ if module.check_mode:
+ module.exit_json(changed=True)
+
+ params = dict(
+ access_token=module.params['token'],
+ environment=module.params['environment'],
+ revision=module.params['revision']
+ )
+
+ if module.params['user']:
+ params['local_username'] = module.params['user']
+
+ if module.params['rollbar_user']:
+ params['rollbar_username'] = module.params['rollbar_user']
+
+ if module.params['comment']:
+ params['comment'] = module.params['comment']
+
+ url = module.params.get('url')
+
+ try:
+ data = urllib.urlencode(params)
+ response, info = fetch_url(module, url, data=data)
+ except Exception, e:
+ module.fail_json(msg='Unable to notify Rollbar: %s' % e)
+ else:
+ if info['status'] == 200:
+ module.exit_json(changed=True)
+ else:
+ module.fail_json(msg='HTTP result code: %d connecting to %s' % (info['status'], url))
+
+from ansible.module_utils.basic import *
+from ansible.module_utils.urls import *
+
+main()
diff --git a/lib/ansible/modules/extras/monitoring/sensu_check.py b/lib/ansible/modules/extras/monitoring/sensu_check.py
new file mode 100644
index 0000000000..7cf3850966
--- /dev/null
+++ b/lib/ansible/modules/extras/monitoring/sensu_check.py
@@ -0,0 +1,384 @@
+#!/usr/bin/python
+# -*- coding: utf-8 -*-
+
+# (c) 2014, Anders Ingemann <aim@secoya.dk>
+#
+# This file is part of Ansible
+#
+# Ansible is free software: you can redistribute it and/or modify
+# it under the terms of the GNU General Public License as published by
+# the Free Software Foundation, either version 3 of the License, or
+# (at your option) any later version.
+#
+# Ansible is distributed in the hope that it will be useful,
+# but WITHOUT ANY WARRANTY; without even the implied warranty of
+# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+# GNU General Public License for more details.
+#
+# You should have received a copy of the GNU General Public License
+# along with Ansible. If not, see <http://www.gnu.org/licenses/>.
+#
+
+DOCUMENTATION = '''
+---
+module: sensu_check
+short_description: Manage Sensu checks
+version_added: 2.0
+description:
+ - Manage the checks that should be run on a machine by I(Sensu).
+ - Most options do not have a default and will not be added to the check definition unless specified.
+  - All defaults except I(path), I(state), I(backup) and I(metric) are not enforced by this module;
+  - they are simply documented for your convenience.
+options:
+ name:
+ description:
+ - The name of the check
+ - This is the key that is used to determine whether a check exists
+ required: true
+ state:
+ description:
+ - Whether the check should be present or not
+ choices: [ 'present', 'absent' ]
+ required: false
+ default: present
+ path:
+ description:
+ - Path to the json file of the check to be added/removed.
+ - Will be created if it does not exist (unless I(state=absent)).
+ - The parent folders need to exist when I(state=present), otherwise an error will be thrown
+ required: false
+ default: /etc/sensu/conf.d/checks.json
+ backup:
+ description:
+ - Create a backup file (if yes), including the timestamp information so
+ - you can get the original file back if you somehow clobbered it incorrectly.
+ choices: [ 'yes', 'no' ]
+ required: false
+ default: no
+ command:
+ description:
+ - Path to the sensu check to run (not required when I(state=absent))
+ required: true
+ handlers:
+ description:
+ - List of handlers to notify when the check fails
+ required: false
+ default: []
+ subscribers:
+ description:
+ - List of subscribers/channels this check should run for
+ - See sensu_subscribers to subscribe a machine to a channel
+ required: false
+ default: []
+ interval:
+ description:
+ - Check interval in seconds
+ required: false
+ default: null
+ timeout:
+ description:
+ - Timeout for the check
+ required: false
+ default: 10
+ handle:
+ description:
+ - Whether the check should be handled or not
+ choices: [ 'yes', 'no' ]
+ required: false
+ default: yes
+ subdue_begin:
+ description:
+ - When to disable handling of check failures
+ required: false
+ default: null
+ subdue_end:
+ description:
+ - When to enable handling of check failures
+ required: false
+ default: null
+ dependencies:
+ description:
+ - Other checks this check depends on, if dependencies fail,
+ - handling of this check will be disabled
+ required: false
+ default: []
+ metric:
+ description:
+ - Whether the check is a metric
+ choices: [ 'yes', 'no' ]
+ required: false
+ default: no
+ standalone:
+ description:
+ - Whether the check should be scheduled by the sensu client or server
+ - This option obviates the need for specifying the I(subscribers) option
+ choices: [ 'yes', 'no' ]
+ required: false
+ default: no
+ publish:
+ description:
+ - Whether the check should be scheduled at all.
+ - You can still issue it via the sensu api
+ choices: [ 'yes', 'no' ]
+ required: false
+ default: yes
+ occurrences:
+ description:
+ - Number of event occurrences before the handler should take action
+ required: false
+ default: 1
+ refresh:
+ description:
+ - Number of seconds handlers should wait before taking second action
+ required: false
+ default: null
+ aggregate:
+ description:
+ - Classifies the check as an aggregate check,
+ - making it available via the aggregate API
+ choices: [ 'yes', 'no' ]
+ required: false
+ default: no
+ low_flap_threshold:
+ description:
+      - The low threshold for flap detection
+ required: false
+ default: null
+ high_flap_threshold:
+ description:
+      - The high threshold for flap detection
+ required: false
+ default: null
+ custom:
+ version_added: "2.1"
+ description:
+ - A hash/dictionary of custom parameters for mixing to the configuration.
+      - You can't override other module parameters using this
+ required: false
+ default: {}
+ source:
+ version_added: "2.1"
+ description:
+ - The check source, used to create a JIT Sensu client for an external resource (e.g. a network switch).
+ required: false
+ default: null
+requirements: [ ]
+author: "Anders Ingemann (@andsens)"
+'''
+
+EXAMPLES = '''
+# Fetch metrics about the CPU load every 60 seconds,
+# the sensu server has a handler called 'relay' which forwards stats to graphite
+- name: get cpu metrics
+ sensu_check: name=cpu_load
+ command=/etc/sensu/plugins/system/cpu-mpstat-metrics.rb
+ metric=yes handlers=relay subscribers=common interval=60
+
+# Check whether nginx is running
+- name: check nginx process
+ sensu_check: name=nginx_running
+ command='/etc/sensu/plugins/processes/check-procs.rb -f /var/run/nginx.pid'
+ handlers=default subscribers=nginx interval=60
+
+# Stop monitoring the disk capacity.
+# Note that the check will still show up in the sensu dashboard,
+# to remove it completely you need to issue a DELETE request to the sensu api.
+- name: check disk
+ sensu_check: name=check_disk_capacity state=absent
+'''
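+
+# For orientation, the first example above leaves a check definition of
+# roughly this shape in /etc/sensu/conf.d/checks.json (a sketch, not literal
+# module output):
+#
+#   {
+#     "checks": {
+#       "cpu_load": {
+#         "command": "/etc/sensu/plugins/system/cpu-mpstat-metrics.rb",
+#         "handlers": ["relay"],
+#         "subscribers": ["common"],
+#         "interval": 60,
+#         "type": "metric"
+#       }
+#     }
+#   }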
+
+try:
+ import json
+except ImportError:
+ try:
+ import simplejson as json
+ except ImportError:
+ # Let snippet from module_utils/basic.py return a proper error in this case
+ pass
+
+
+def sensu_check(module, path, name, state='present', backup=False):
+ changed = False
+ reasons = []
+
+ stream = None
+ try:
+ try:
+ stream = open(path, 'r')
+ config = json.load(stream)
+ except IOError, e:
+            if e.errno == 2:  # File not found, non-fatal
+ if state == 'absent':
+ reasons.append('file did not exist and state is `absent\'')
+ return changed, reasons
+ config = {}
+ else:
+ module.fail_json(msg=str(e))
+ except ValueError:
+ msg = '{path} contains invalid JSON'.format(path=path)
+ module.fail_json(msg=msg)
+ finally:
+ if stream:
+ stream.close()
+
+ if 'checks' not in config:
+ if state == 'absent':
+ reasons.append('`checks\' section did not exist and state is `absent\'')
+ return changed, reasons
+ config['checks'] = {}
+ changed = True
+ reasons.append('`checks\' section did not exist')
+
+ if state == 'absent':
+ if name in config['checks']:
+ del config['checks'][name]
+ changed = True
+ reasons.append('check was present and state is `absent\'')
+
+ if state == 'present':
+ if name not in config['checks']:
+ check = {}
+ config['checks'][name] = check
+ changed = True
+ reasons.append('check was absent and state is `present\'')
+ else:
+ check = config['checks'][name]
+ simple_opts = ['command',
+ 'handlers',
+ 'subscribers',
+ 'interval',
+ 'timeout',
+ 'handle',
+ 'dependencies',
+ 'standalone',
+ 'publish',
+ 'occurrences',
+ 'refresh',
+ 'aggregate',
+ 'low_flap_threshold',
+ 'high_flap_threshold',
+ 'source',
+ ]
+ for opt in simple_opts:
+ if module.params[opt] is not None:
+ if opt not in check or check[opt] != module.params[opt]:
+ check[opt] = module.params[opt]
+ changed = True
+ reasons.append('`{opt}\' did not exist or was different'.format(opt=opt))
+ else:
+ if opt in check:
+ del check[opt]
+ changed = True
+ reasons.append('`{opt}\' was removed'.format(opt=opt))
+
+ if module.params['custom']:
+        # Make sure custom params do not shadow standard module options
+        custom_params = module.params['custom']
+        overwritten_fields = set(custom_params.keys()) & set(simple_opts + ['type', 'subdue', 'subdue_begin', 'subdue_end'])
+        if overwritten_fields:
+            msg = 'You can\'t overwrite standard module parameters via "custom". You are trying to overwrite: {opt}'.format(opt=list(overwritten_fields))
+ module.fail_json(msg=msg)
+
+ for k,v in custom_params.items():
+ if k in config['checks'][name]:
+ if not config['checks'][name][k] == v:
+ changed = True
+ reasons.append('`custom param {opt}\' was changed'.format(opt=k))
+ else:
+ changed = True
+ reasons.append('`custom param {opt}\' was added'.format(opt=k))
+ check[k] = v
+ simple_opts += custom_params.keys()
+
+ # Remove obsolete custom params
+ for opt in set(config['checks'][name].keys()) - set(simple_opts + ['type','subdue','subdue_begin','subdue_end']):
+ changed = True
+ reasons.append('`custom param {opt}\' was deleted'.format(opt=opt))
+ del check[opt]
+
+ if module.params['metric']:
+ if 'type' not in check or check['type'] != 'metric':
+ check['type'] = 'metric'
+ changed = True
+ reasons.append('`type\' was not defined or not `metric\'')
+ if not module.params['metric'] and 'type' in check:
+ del check['type']
+ changed = True
+ reasons.append('`type\' was defined')
+
+ if module.params['subdue_begin'] is not None and module.params['subdue_end'] is not None:
+ subdue = {'begin': module.params['subdue_begin'],
+ 'end': module.params['subdue_end'],
+ }
+ if 'subdue' not in check or check['subdue'] != subdue:
+ check['subdue'] = subdue
+ changed = True
+ reasons.append('`subdue\' did not exist or was different')
+ else:
+ if 'subdue' in check:
+ del check['subdue']
+ changed = True
+ reasons.append('`subdue\' was removed')
+
+ if changed and not module.check_mode:
+ if backup:
+ module.backup_local(path)
+ try:
+ try:
+ stream = open(path, 'w')
+ stream.write(json.dumps(config, indent=2) + '\n')
+ except IOError, e:
+ module.fail_json(msg=str(e))
+ finally:
+ if stream:
+ stream.close()
+
+ return changed, reasons
+
+
+def main():
+
+ arg_spec = {'name': {'type': 'str', 'required': True},
+ 'path': {'type': 'str', 'default': '/etc/sensu/conf.d/checks.json'},
+ 'state': {'type': 'str', 'default': 'present', 'choices': ['present', 'absent']},
+ 'backup': {'type': 'bool', 'default': 'no'},
+ 'command': {'type': 'str'},
+ 'handlers': {'type': 'list'},
+ 'subscribers': {'type': 'list'},
+ 'interval': {'type': 'int'},
+ 'timeout': {'type': 'int'},
+ 'handle': {'type': 'bool'},
+ 'subdue_begin': {'type': 'str'},
+ 'subdue_end': {'type': 'str'},
+ 'dependencies': {'type': 'list'},
+ 'metric': {'type': 'bool', 'default': 'no'},
+ 'standalone': {'type': 'bool'},
+ 'publish': {'type': 'bool'},
+ 'occurrences': {'type': 'int'},
+ 'refresh': {'type': 'int'},
+ 'aggregate': {'type': 'bool'},
+ 'low_flap_threshold': {'type': 'int'},
+ 'high_flap_threshold': {'type': 'int'},
+ 'custom': {'type': 'dict'},
+ 'source': {'type': 'str'},
+ }
+
+ required_together = [['subdue_begin', 'subdue_end']]
+
+ module = AnsibleModule(argument_spec=arg_spec,
+ required_together=required_together,
+ supports_check_mode=True)
+ if module.params['state'] != 'absent' and module.params['command'] is None:
+ module.fail_json(msg="missing required arguments: %s" % ",".join(['command']))
+
+ path = module.params['path']
+ name = module.params['name']
+ state = module.params['state']
+ backup = module.params['backup']
+
+ changed, reasons = sensu_check(module, path, name, state, backup)
+
+ module.exit_json(path=path, changed=changed, msg='OK', name=name, reasons=reasons)
+
+from ansible.module_utils.basic import *
+main()
diff --git a/lib/ansible/modules/extras/monitoring/sensu_subscription.py b/lib/ansible/modules/extras/monitoring/sensu_subscription.py
new file mode 100644
index 0000000000..192b474ee4
--- /dev/null
+++ b/lib/ansible/modules/extras/monitoring/sensu_subscription.py
@@ -0,0 +1,161 @@
+#!/usr/bin/python
+# -*- coding: utf-8 -*-
+
+# (c) 2014, Anders Ingemann <aim@secoya.dk>
+#
+# This file is part of Ansible
+#
+# Ansible is free software: you can redistribute it and/or modify
+# it under the terms of the GNU General Public License as published by
+# the Free Software Foundation, either version 3 of the License, or
+# (at your option) any later version.
+#
+# Ansible is distributed in the hope that it will be useful,
+# but WITHOUT ANY WARRANTY; without even the implied warranty of
+# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+# GNU General Public License for more details.
+#
+# You should have received a copy of the GNU General Public License
+# along with Ansible. If not, see <http://www.gnu.org/licenses/>.
+#
+
+DOCUMENTATION = '''
+---
+module: sensu_subscription
+short_description: Manage Sensu subscriptions
+version_added: 2.2
+description:
+ - Manage which I(sensu channels) a machine should subscribe to
+options:
+ name:
+ description:
+ - The name of the channel
+ required: true
+ state:
+ description:
+ - Whether the machine should subscribe or unsubscribe from the channel
+ choices: [ 'present', 'absent' ]
+ required: false
+ default: present
+ path:
+ description:
+ - Path to the subscriptions json file
+ required: false
+ default: /etc/sensu/conf.d/subscriptions.json
+ backup:
+ description:
+ - Create a backup file (if yes), including the timestamp information so you
+ - can get the original file back if you somehow clobbered it incorrectly.
+ choices: [ 'yes', 'no' ]
+ required: false
+ default: no
+requirements: [ ]
+author: Anders Ingemann
+'''
+
+RETURN = '''
+reasons:
+    description: the reasons why the module changed or did not change something
+ returned: success
+ type: list
+ sample: ["channel subscription was absent and state is `present'"]
+'''
+
+EXAMPLES = '''
+# Subscribe to the nginx channel
+- name: subscribe to nginx checks
+ sensu_subscription: name=nginx
+
+# Unsubscribe from the common checks channel
+- name: unsubscribe from common checks
+ sensu_subscription: name=common state=absent
+'''
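+
+# For orientation, subscribing to `nginx' as above results in a file of
+# roughly this shape at /etc/sensu/conf.d/subscriptions.json:
+#
+#   {
+#     "client": {
+#       "subscriptions": ["nginx"]
+#     }
+#   }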
+
+
+def sensu_subscription(module, path, name, state='present', backup=False):
+ changed = False
+ reasons = []
+
+ try:
+ import json
+ except ImportError:
+ import simplejson as json
+
+ try:
+ config = json.load(open(path))
+ except IOError:
+ e = get_exception()
+        if e.errno == 2:  # File not found, non-fatal
+ if state == 'absent':
+ reasons.append('file did not exist and state is `absent\'')
+ return changed, reasons
+ config = {}
+ else:
+ module.fail_json(msg=str(e))
+ except ValueError:
+ msg = '{path} contains invalid JSON'.format(path=path)
+ module.fail_json(msg=msg)
+
+ if 'client' not in config:
+ if state == 'absent':
+ reasons.append('`client\' did not exist and state is `absent\'')
+ return changed, reasons
+ config['client'] = {}
+ changed = True
+ reasons.append('`client\' did not exist')
+
+ if 'subscriptions' not in config['client']:
+ if state == 'absent':
+ reasons.append('`client.subscriptions\' did not exist and state is `absent\'')
+            return changed, reasons
+ config['client']['subscriptions'] = []
+ changed = True
+ reasons.append('`client.subscriptions\' did not exist')
+
+ if name not in config['client']['subscriptions']:
+ if state == 'absent':
+ reasons.append('channel subscription was absent')
+            return changed, reasons
+ config['client']['subscriptions'].append(name)
+ changed = True
+ reasons.append('channel subscription was absent and state is `present\'')
+ else:
+ if state == 'absent':
+ config['client']['subscriptions'].remove(name)
+ changed = True
+ reasons.append('channel subscription was present and state is `absent\'')
+
+ if changed and not module.check_mode:
+ if backup:
+ module.backup_local(path)
+ try:
+ open(path, 'w').write(json.dumps(config, indent=2) + '\n')
+ except IOError:
+ e = get_exception()
+ module.fail_json(msg='Failed to write to file %s: %s' % (path, str(e)))
+
+ return changed, reasons
+
+
+def main():
+ arg_spec = {'name': {'type': 'str', 'required': True},
+ 'path': {'type': 'str', 'default': '/etc/sensu/conf.d/subscriptions.json'},
+ 'state': {'type': 'str', 'default': 'present', 'choices': ['present', 'absent']},
+                'backup': {'type': 'bool', 'default': 'no'},
+ }
+
+ module = AnsibleModule(argument_spec=arg_spec,
+ supports_check_mode=True)
+
+ path = module.params['path']
+ name = module.params['name']
+ state = module.params['state']
+ backup = module.params['backup']
+
+ changed, reasons = sensu_subscription(module, path, name, state, backup)
+
+ module.exit_json(path=path, name=name, changed=changed, msg='OK', reasons=reasons)
+
+from ansible.module_utils.basic import *
+if __name__ == '__main__':
+ main()
diff --git a/lib/ansible/modules/extras/monitoring/stackdriver.py b/lib/ansible/modules/extras/monitoring/stackdriver.py
new file mode 100644
index 0000000000..25af77ec26
--- /dev/null
+++ b/lib/ansible/modules/extras/monitoring/stackdriver.py
@@ -0,0 +1,216 @@
+#!/usr/bin/python
+# -*- coding: utf-8 -*-
+#
+# This file is part of Ansible
+#
+# Ansible is free software: you can redistribute it and/or modify
+# it under the terms of the GNU General Public License as published by
+# the Free Software Foundation, either version 3 of the License, or
+# (at your option) any later version.
+#
+# Ansible is distributed in the hope that it will be useful,
+# but WITHOUT ANY WARRANTY; without even the implied warranty of
+# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+# GNU General Public License for more details.
+#
+# You should have received a copy of the GNU General Public License
+# along with Ansible. If not, see <http://www.gnu.org/licenses/>.
+DOCUMENTATION = '''
+
+module: stackdriver
+short_description: Send code deploy and annotation events to Stackdriver
+description:
+ - Send code deploy and annotation events to Stackdriver
+version_added: "1.6"
+author: "Ben Whaley (@bwhaley)"
+options:
+ key:
+ description:
+ - API key.
+ required: true
+ default: null
+ event:
+ description:
+ - The type of event to send, either annotation or deploy
+ choices: ['annotation', 'deploy']
+ required: false
+ default: null
+ revision_id:
+ description:
+ - The revision of the code that was deployed. Required for deploy events
+ required: false
+ default: null
+ deployed_by:
+ description:
+ - The person or robot responsible for deploying the code
+ required: false
+ default: "Ansible"
+ deployed_to:
+ description:
+ - "The environment code was deployed to. (ie: development, staging, production)"
+ required: false
+ default: null
+ repository:
+ description:
+ - The repository (or project) deployed
+ required: false
+ default: null
+ msg:
+ description:
+      - The contents of the annotation message, in plain text. Limited to 256 characters. Required for annotation events.
+ required: false
+ default: null
+ annotated_by:
+ description:
+ - The person or robot who the annotation should be attributed to.
+ required: false
+ default: "Ansible"
+ level:
+ description:
+      - One of INFO/WARN/ERROR; defaults to INFO if not supplied. May affect display.
+ choices: ['INFO', 'WARN', 'ERROR']
+ required: false
+ default: 'INFO'
+ instance_id:
+ description:
+      - ID of an EC2 instance that this event should be attached to, which will limit the contexts where this event is shown
+ required: false
+ default: null
+ event_epoch:
+ description:
+ - "Unix timestamp of where the event should appear in the timeline, defaults to now. Be careful with this."
+ required: false
+ default: null
+'''
+
+EXAMPLES = '''
+- stackdriver: key=AAAAAA event=deploy deployed_to=production deployed_by=leeroyjenkins repository=MyWebApp revision_id=abcd123
+
+- stackdriver: key=AAAAAA event=annotation msg="Greetings from Ansible" annotated_by=leeroyjenkins level=WARN instance_id=i-abcd1234
+'''
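+
+# For reference, the first example above amounts to a request of roughly this
+# shape (a sketch; the key and field values are the placeholders used above):
+#
+#   POST https://event-gateway.stackdriver.com/v1/deployevent
+#   Content-Type: application/json
+#   x-stackdriver-apikey: AAAAAA
+#
+#   {"revision_id": "abcd123", "deployed_by": "leeroyjenkins",
+#    "deployed_to": "production", "repository": "MyWebApp"}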
+
+# ===========================================
+# Stackdriver module specific support methods.
+#
+
+try:
+ import json
+except ImportError:
+ try:
+ import simplejson as json
+ except ImportError:
+ # Let snippet from module_utils/basic.py return a proper error in this case
+ pass
+
+
+def send_deploy_event(module, key, revision_id, deployed_by='Ansible', deployed_to=None, repository=None):
+ """Send a deploy event to Stackdriver"""
+ deploy_api = "https://event-gateway.stackdriver.com/v1/deployevent"
+
+ params = {}
+ params['revision_id'] = revision_id
+ params['deployed_by'] = deployed_by
+ if deployed_to:
+ params['deployed_to'] = deployed_to
+ if repository:
+ params['repository'] = repository
+
+ return do_send_request(module, deploy_api, params, key)
+
+def send_annotation_event(module, key, msg, annotated_by='Ansible', level=None, instance_id=None, event_epoch=None):
+ """Send an annotation event to Stackdriver"""
+ annotation_api = "https://event-gateway.stackdriver.com/v1/annotationevent"
+
+ params = {}
+ params['message'] = msg
+ if annotated_by:
+ params['annotated_by'] = annotated_by
+ if level:
+ params['level'] = level
+ if instance_id:
+ params['instance_id'] = instance_id
+ if event_epoch:
+ params['event_epoch'] = event_epoch
+
+ return do_send_request(module, annotation_api, params, key)
+
+def do_send_request(module, url, params, key):
+ data = json.dumps(params)
+ headers = {
+ 'Content-Type': 'application/json',
+ 'x-stackdriver-apikey': key
+ }
+ response, info = fetch_url(module, url, headers=headers, data=data, method='POST')
+ if info['status'] != 200:
+ module.fail_json(msg="Unable to send msg: %s" % info['msg'])
+
+
+# ===========================================
+# Module execution.
+#
+
+def main():
+
+ module = AnsibleModule(
+ argument_spec=dict(
+ key=dict(required=True),
+ event=dict(required=True, choices=['deploy', 'annotation']),
+ msg=dict(),
+ revision_id=dict(),
+ annotated_by=dict(default='Ansible'),
+ level=dict(default='INFO', choices=['INFO', 'WARN', 'ERROR']),
+ instance_id=dict(),
+ event_epoch=dict(),
+ deployed_by=dict(default='Ansible'),
+ deployed_to=dict(),
+ repository=dict(),
+ ),
+ supports_check_mode=True
+ )
+
+ key = module.params["key"]
+ event = module.params["event"]
+
+ # Annotation params
+ msg = module.params["msg"]
+ annotated_by = module.params["annotated_by"]
+ level = module.params["level"]
+ instance_id = module.params["instance_id"]
+ event_epoch = module.params["event_epoch"]
+
+ # Deploy params
+ revision_id = module.params["revision_id"]
+ deployed_by = module.params["deployed_by"]
+ deployed_to = module.params["deployed_to"]
+ repository = module.params["repository"]
+
+ ##################################################################
+ # deploy requires revision_id
+ # annotation requires msg
+ # We verify these manually
+ ##################################################################
+
+ if event == 'deploy':
+ if not revision_id:
+ module.fail_json(msg="revision_id required for deploy events")
+ try:
+ send_deploy_event(module, key, revision_id, deployed_by, deployed_to, repository)
+ except Exception, e:
+ module.fail_json(msg="unable to sent deploy event: %s" % e)
+
+ if event == 'annotation':
+ if not msg:
+ module.fail_json(msg="msg required for annotation events")
+ try:
+ send_annotation_event(module, key, msg, annotated_by, level, instance_id, event_epoch)
+ except Exception, e:
+ module.fail_json(msg="unable to sent annotation event: %s" % e)
+
+ changed = True
+ module.exit_json(changed=changed, deployed_by=deployed_by)
+
+# import module snippets
+from ansible.module_utils.basic import *
+from ansible.module_utils.urls import *
+
+main()
diff --git a/lib/ansible/modules/extras/monitoring/statusio_maintenance.py b/lib/ansible/modules/extras/monitoring/statusio_maintenance.py
new file mode 100644
index 0000000000..c2b93db5c9
--- /dev/null
+++ b/lib/ansible/modules/extras/monitoring/statusio_maintenance.py
@@ -0,0 +1,480 @@
+#!/usr/bin/python
+# -*- coding: utf-8 -*-
+
+# (c) 2015, Benjamin Copeland (@bhcopeland) <ben@copeland.me.uk>
+#
+# This file is part of Ansible
+#
+# Ansible is free software: you can redistribute it and/or modify
+# it under the terms of the GNU General Public License as published by
+# the Free Software Foundation, either version 3 of the License, or
+# (at your option) any later version.
+#
+# Ansible is distributed in the hope that it will be useful,
+# but WITHOUT ANY WARRANTY; without even the implied warranty of
+# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+# GNU General Public License for more details.
+#
+# You should have received a copy of the GNU General Public License
+# along with Ansible. If not, see <http://www.gnu.org/licenses/>.
+
+DOCUMENTATION = '''
+
+module: statusio_maintenance
+short_description: Create maintenance windows for your status.io dashboard
+description:
+ - Creates a maintenance window for status.io
+ - Deletes a maintenance window for status.io
+notes:
+ - You can use the apiary API url (http://docs.statusio.apiary.io/) to
+ capture API traffic
+    - Use start_date and start_time with minutes to set a future maintenance window
+version_added: "2.2"
+author: Benjamin Copeland (@bhcopeland) <ben@copeland.me.uk>
+options:
+ title:
+ description:
+ - A descriptive title for the maintenance window
+ required: false
+ default: "A new maintenance window"
+ desc:
+ description:
+ - Message describing the maintenance window
+ required: false
+ default: "Created by Ansible"
+ state:
+ description:
+            - Desired state of the maintenance window.
+ required: false
+ default: "present"
+ choices: ["present", "absent"]
+ api_id:
+ description:
+ - Your unique API ID from status.io
+ required: true
+ api_key:
+ description:
+ - Your unique API Key from status.io
+ required: true
+ statuspage:
+ description:
+ - Your unique StatusPage ID from status.io
+ required: true
+ url:
+ description:
+ - Status.io API URL. A private apiary can be used instead.
+ required: false
+ default: "https://api.status.io"
+ components:
+ description:
+ - The given name of your component (server name)
+ required: false
+ aliases: ['component']
+ default: None
+ containers:
+ description:
+ - The given name of your container (data center)
+ required: false
+ aliases: ['container']
+ default: None
+ all_infrastructure_affected:
+ description:
+ - If it affects all components and containers
+ required: false
+ default: false
+ automation:
+ description:
+ - Automatically start and end the maintenance window
+ required: false
+ default: false
+ maintenance_notify_now:
+ description:
+ - Notify subscribers now
+ required: false
+ default: false
+ maintenance_notify_72_hr:
+ description:
+ - Notify subscribers 72 hours before maintenance start time
+ required: false
+ default: false
+ maintenance_notify_24_hr:
+ description:
+ - Notify subscribers 24 hours before maintenance start time
+ required: false
+ default: false
+ maintenance_notify_1_hr:
+ description:
+ - Notify subscribers 1 hour before maintenance start time
+ required: false
+ default: false
+ maintenance_id:
+ description:
+ - The maintenance id number when deleting a maintenance window
+ required: false
+ default: None
+ minutes:
+ description:
+            - The length of time in minutes that the maintenance will run
+              (starting from playbook runtime)
+ required: false
+ default: 10
+ start_date:
+ description:
+ - Date maintenance is expected to start (Month/Day/Year) (UTC)
+ - End Date is worked out from start_date + minutes
+ required: false
+ default: None
+ start_time:
+ description:
+ - Time maintenance is expected to start (Hour:Minutes) (UTC)
+ - End Time is worked out from start_time + minutes
+ required: false
+ default: None
+'''
+
+EXAMPLES = '''
+# Create a maintenance window for 10 minutes on server1.example.com, with
+# automation to stop the maintenance.
+- statusio_maintenance:
+ title: "Router Upgrade from ansible"
+ desc: "Performing a Router Upgrade"
+ components: "server1.example.com"
+ api_id: "api_id"
+ api_key: "api_key"
+ statuspage: "statuspage_id"
+ maintenance_notify_1_hr: true
+ automation: true
+
+# Create a maintenance window for 60 minutes on multiple hosts
+- name: "Create maintenance window for server1 and server2"
+ local_action:
+ module: statusio_maintenance
+ title: "Routine maintenance"
+ desc: "Some security updates"
+ components:
+ - "server1.example.com
+ - "server2.example.com"
+ minutes: "60"
+ api_id: "api_id"
+ api_key: "api_key"
+ statuspage: "statuspage_id"
+ maintenance_notify_1_hr: true
+ automation: true
+
+# Create a future maintenance window for 24 hours to all hosts inside the
+# Primary Data Center
+- statusio_maintenance:
+ title: Data center downtime
+    desc: Performing an upgrade to our data center
+ components: "Primary Data Center"
+ api_id: "api_id"
+ api_key: "api_key"
+ statuspage: "statuspage_id"
+ start_date: "01/01/2016"
+ start_time: "12:00"
+ minutes: 1440
+
+# Delete a maintenance window
+- statusio_maintenance:
+ title: "Remove a maintenance window"
+ maintenance_id: "561f90faf74bc94a4700087b"
+ statuspage: "statuspage_id"
+ api_id: "api_id"
+ api_key: "api_key"
+ state: absent
+
+'''
+# TODO: Add RETURN documentation.
+RETURN = ''' # '''
+
+import datetime
+
+
+def get_api_auth_headers(api_id, api_key, url, statuspage):
+
+ headers = {
+ "x-api-id": api_id,
+ "x-api-key": api_key,
+ "Content-Type": "application/json"
+ }
+
+ try:
+ response = open_url(
+ url + "/v2/component/list/" + statuspage, headers=headers)
+ data = json.loads(response.read())
+ if data['status']['message'] == 'Authentication failed':
+ return 1, None, None, "Authentication failed: " \
+ "Check api_id/api_key and statuspage id."
+ else:
+ auth_headers = headers
+ auth_content = data
+    except Exception:
+        e = get_exception()
+        return 1, None, None, str(e)
+ return 0, auth_headers, auth_content, None
+
+
+def get_component_ids(auth_content, components):
+ host_ids = []
+ lower_components = [x.lower() for x in components]
+ for result in auth_content["result"]:
+ if result['name'].lower() in lower_components:
+ data = {
+ "component_id": result["_id"],
+ "container_id": result["containers"][0]["_id"]
+ }
+ host_ids.append(data)
+ lower_components.remove(result['name'].lower())
+ if len(lower_components):
+ # items not found in the api
+ return 1, None, lower_components
+ return 0, host_ids, None
+
+
+def get_container_ids(auth_content, containers):
+ host_ids = []
+ lower_containers = [x.lower() for x in containers]
+ for result in auth_content["result"]:
+ if result["containers"][0]["name"].lower() in lower_containers:
+ data = {
+ "component_id": result["_id"],
+ "container_id": result["containers"][0]["_id"]
+ }
+ host_ids.append(data)
+ lower_containers.remove(result["containers"][0]["name"].lower())
+
+ if len(lower_containers):
+ # items not found in the api
+ return 1, None, lower_containers
+ return 0, host_ids, None
+
+
+def get_date_time(start_date, start_time, minutes):
+ returned_date = []
+ if start_date and start_time:
+ try:
+ datetime.datetime.strptime(start_date, '%m/%d/%Y')
+ returned_date.append(start_date)
+ except (NameError, ValueError):
+ return 1, None, "Not a valid start_date format."
+ try:
+ datetime.datetime.strptime(start_time, '%H:%M')
+ returned_date.append(start_time)
+ except (NameError, ValueError):
+ return 1, None, "Not a valid start_time format."
+ try:
+ # Work out end date/time based on minutes
+ date_time_start = datetime.datetime.strptime(
+ start_time + start_date, '%H:%M%m/%d/%Y')
+ delta = date_time_start + datetime.timedelta(minutes=minutes)
+ returned_date.append(delta.strftime("%m/%d/%Y"))
+ returned_date.append(delta.strftime("%H:%M"))
+ except (NameError, ValueError):
+ return 1, None, "Couldn't work out a valid date"
+ else:
+ now = datetime.datetime.utcnow()
+ delta = now + datetime.timedelta(minutes=minutes)
+ # start_date
+ returned_date.append(now.strftime("%m/%d/%Y"))
+ returned_date.append(now.strftime("%H:%M"))
+ # end_date
+ returned_date.append(delta.strftime("%m/%d/%Y"))
+ returned_date.append(delta.strftime("%H:%M"))
+ return 0, returned_date, None
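+
+# For example, get_date_time('01/01/2016', '12:00', 1440) returns
+# (0, ['01/01/2016', '12:00', '01/02/2016', '12:00'], None): the end
+# date/time is simply the start plus `minutes'.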
+
+
+def create_maintenance(auth_headers, url, statuspage, host_ids,
+ all_infrastructure_affected, automation, title, desc,
+ returned_date, maintenance_notify_now,
+ maintenance_notify_72_hr, maintenance_notify_24_hr,
+ maintenance_notify_1_hr):
+ returned_dates = [[x] for x in returned_date]
+ component_id = []
+ container_id = []
+ for val in host_ids:
+ component_id.append(val['component_id'])
+ container_id.append(val['container_id'])
+ try:
+ values = json.dumps({
+ "statuspage_id": statuspage,
+ "components": component_id,
+ "containers": container_id,
+ "all_infrastructure_affected":
+ str(int(all_infrastructure_affected)),
+ "automation": str(int(automation)),
+ "maintenance_name": title,
+ "maintenance_details": desc,
+ "date_planned_start": returned_dates[0],
+ "time_planned_start": returned_dates[1],
+ "date_planned_end": returned_dates[2],
+ "time_planned_end": returned_dates[3],
+ "maintenance_notify_now": str(int(maintenance_notify_now)),
+ "maintenance_notify_72_hr": str(int(maintenance_notify_72_hr)),
+ "maintenance_notify_24_hr": str(int(maintenance_notify_24_hr)),
+ "maintenance_notify_1_hr": str(int(maintenance_notify_1_hr))
+ })
+ response = open_url(
+ url + "/v2/maintenance/schedule", data=values,
+ headers=auth_headers)
+ data = json.loads(response.read())
+
+ if data["status"]["error"] == "yes":
+ return 1, None, data["status"]["message"]
+ except Exception:
+ e = get_exception()
+ return 1, None, str(e)
+ return 0, None, None
+
+
+def delete_maintenance(auth_headers, url, statuspage, maintenance_id):
+ try:
+ values = json.dumps({
+ "statuspage_id": statuspage,
+ "maintenance_id": maintenance_id,
+ })
+ response = open_url(
+ url=url + "/v2/maintenance/delete",
+ data=values,
+ headers=auth_headers)
+ data = json.loads(response.read())
+ if data["status"]["error"] == "yes":
+ return 1, None, "Invalid maintenance_id"
+ except Exception:
+ e = get_exception()
+ return 1, None, str(e)
+ return 0, None, None
+
+
+def main():
+ module = AnsibleModule(
+ argument_spec=dict(
+ api_id=dict(required=True),
+ api_key=dict(required=True, no_log=True),
+ statuspage=dict(required=True),
+ state=dict(required=False, default='present',
+ choices=['present', 'absent']),
+ url=dict(default='https://api.status.io', required=False),
+ components=dict(type='list', required=False, default=None,
+ aliases=['component']),
+ containers=dict(type='list', required=False, default=None,
+ aliases=['container']),
+ all_infrastructure_affected=dict(type='bool', default=False,
+ required=False),
+ automation=dict(type='bool', default=False, required=False),
+ title=dict(required=False, default='A new maintenance window'),
+ desc=dict(required=False, default='Created by Ansible'),
+ minutes=dict(type='int', required=False, default=10),
+ maintenance_notify_now=dict(type='bool', default=False,
+ required=False),
+ maintenance_notify_72_hr=dict(type='bool', default=False,
+ required=False),
+ maintenance_notify_24_hr=dict(type='bool', default=False,
+ required=False),
+ maintenance_notify_1_hr=dict(type='bool', default=False,
+ required=False),
+ maintenance_id=dict(required=False, default=None),
+ start_date=dict(default=None, required=False),
+ start_time=dict(default=None, required=False)
+ ),
+ supports_check_mode=True,
+ )
+
+ api_id = module.params['api_id']
+ api_key = module.params['api_key']
+ statuspage = module.params['statuspage']
+ state = module.params['state']
+ url = module.params['url']
+ components = module.params['components']
+ containers = module.params['containers']
+ all_infrastructure_affected = module.params['all_infrastructure_affected']
+ automation = module.params['automation']
+ title = module.params['title']
+ desc = module.params['desc']
+ minutes = module.params['minutes']
+ maintenance_notify_now = module.params['maintenance_notify_now']
+ maintenance_notify_72_hr = module.params['maintenance_notify_72_hr']
+ maintenance_notify_24_hr = module.params['maintenance_notify_24_hr']
+ maintenance_notify_1_hr = module.params['maintenance_notify_1_hr']
+ maintenance_id = module.params['maintenance_id']
+ start_date = module.params['start_date']
+ start_time = module.params['start_time']
+
+ if state == "present":
+
+ if api_id and api_key:
+ (rc, auth_headers, auth_content, error) = \
+ get_api_auth_headers(api_id, api_key, url, statuspage)
+ if rc != 0:
+ module.fail_json(msg="Failed to get auth keys: %s" % error)
+ else:
+ auth_headers = {}
+ auth_content = {}
+
+        if minutes or (start_time and start_date):
+ (rc, returned_date, error) = get_date_time(
+ start_date, start_time, minutes)
+ if rc != 0:
+ module.fail_json(msg="Failed to set date/time: %s" % error)
+
+ if not components and not containers:
+ return module.fail_json(msg="A Component or Container must be "
+ "defined")
+ elif components and containers:
+ return module.fail_json(msg="Components and containers cannot "
+ "be used together")
+ else:
+ if components:
+ (rc, host_ids, error) = get_component_ids(auth_content,
+ components)
+ if rc != 0:
+ module.fail_json(msg="Failed to find component %s" % error)
+
+ if containers:
+ (rc, host_ids, error) = get_container_ids(auth_content,
+ containers)
+ if rc != 0:
+ module.fail_json(msg="Failed to find container %s" % error)
+
+ if module.check_mode:
+ module.exit_json(changed=True)
+ else:
+ (rc, _, error) = create_maintenance(
+ auth_headers, url, statuspage, host_ids,
+ all_infrastructure_affected, automation,
+ title, desc, returned_date, maintenance_notify_now,
+ maintenance_notify_72_hr, maintenance_notify_24_hr,
+ maintenance_notify_1_hr)
+ if rc == 0:
+ module.exit_json(changed=True, result="Successfully created "
+ "maintenance")
+ else:
+ module.fail_json(msg="Failed to create maintenance: %s"
+ % error)
+
+ if state == "absent":
+
+ if api_id and api_key:
+ (rc, auth_headers, auth_content, error) = \
+ get_api_auth_headers(api_id, api_key, url, statuspage)
+ if rc != 0:
+ module.fail_json(msg="Failed to get auth keys: %s" % error)
+ else:
+ auth_headers = {}
+
+ if module.check_mode:
+ module.exit_json(changed=True)
+ else:
+ (rc, _, error) = delete_maintenance(
+ auth_headers, url, statuspage, maintenance_id)
+ if rc == 0:
+ module.exit_json(
+ changed=True,
+ result="Successfully deleted maintenance"
+ )
+ else:
+ module.fail_json(
+ msg="Failed to delete maintenance: %s" % error)
+
+from ansible.module_utils.basic import *
+from ansible.module_utils.urls import *
+if __name__ == '__main__':
+ main()
diff --git a/lib/ansible/modules/extras/monitoring/uptimerobot.py b/lib/ansible/modules/extras/monitoring/uptimerobot.py
new file mode 100644
index 0000000000..65d963cda6
--- /dev/null
+++ b/lib/ansible/modules/extras/monitoring/uptimerobot.py
@@ -0,0 +1,168 @@
+#!/usr/bin/python
+# -*- coding: utf-8 -*-
+#
+# This file is part of Ansible
+#
+# Ansible is free software: you can redistribute it and/or modify
+# it under the terms of the GNU General Public License as published by
+# the Free Software Foundation, either version 3 of the License, or
+# (at your option) any later version.
+#
+# Ansible is distributed in the hope that it will be useful,
+# but WITHOUT ANY WARRANTY; without even the implied warranty of
+# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+# GNU General Public License for more details.
+#
+# You should have received a copy of the GNU General Public License
+# along with Ansible. If not, see <http://www.gnu.org/licenses/>.
+DOCUMENTATION = '''
+
+module: uptimerobot
+short_description: Pause and start Uptime Robot monitoring
+description:
+ - This module will let you start and pause Uptime Robot Monitoring
+author: "Nate Kingsley (@nate-kingsley)"
+version_added: "1.9"
+requirements:
+ - Valid Uptime Robot API Key
+options:
+ state:
+ description:
+ - Define whether or not the monitor should be running or paused.
+ required: true
+ default: null
+ choices: [ "started", "paused" ]
+ aliases: []
+ monitorid:
+ description:
+ - ID of the monitor to check.
+ required: true
+ default: null
+ choices: []
+ aliases: []
+ apikey:
+ description:
+ - Uptime Robot API key.
+ required: true
+ default: null
+ choices: []
+ aliases: []
+notes:
+ - Support for adding and removing monitors and alert contacts has not yet been implemented.
+'''
+
+EXAMPLES = '''
+# Pause the monitor with an ID of 12345.
+- uptimerobot: monitorid=12345
+ apikey=12345-1234512345
+ state=paused
+
+# Start the monitor with an ID of 12345.
+- uptimerobot: monitorid=12345
+ apikey=12345-1234512345
+ state=started
+
+'''
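+
+# For reference, pausing a monitor as above boils down to a GET request of
+# roughly this shape (a sketch; the key and id are the placeholders above):
+#
+#   http://api.uptimerobot.com/editMonitor?apiKey=12345-1234512345
+#     &monitorID=12345&monitorStatus=0&format=json&noJsonCallback=1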
+
+try:
+ import json
+except ImportError:
+ try:
+ import simplejson as json
+ except ImportError:
+ # Let snippet from module_utils/basic.py return a proper error in this case
+ pass
+
+import urllib
+import time
+
+API_BASE = "http://api.uptimerobot.com/"
+
+API_ACTIONS = dict(
+ status='getMonitors?',
+ editMonitor='editMonitor?'
+)
+
+API_FORMAT = 'json'
+API_NOJSONCALLBACK = 1
+CHANGED_STATE = False
+SUPPORTS_CHECK_MODE = False
+
+
+def checkID(module, params):
+
+ data = urllib.urlencode(params)
+ full_uri = API_BASE + API_ACTIONS['status'] + data
+ req, info = fetch_url(module, full_uri)
+ result = req.read()
+ jsonresult = json.loads(result)
+ req.close()
+ return jsonresult
+
+
+def startMonitor(module, params):
+
+ params['monitorStatus'] = 1
+ data = urllib.urlencode(params)
+ full_uri = API_BASE + API_ACTIONS['editMonitor'] + data
+ req, info = fetch_url(module, full_uri)
+ result = req.read()
+ jsonresult = json.loads(result)
+ req.close()
+ return jsonresult['stat']
+
+
+def pauseMonitor(module, params):
+
+ params['monitorStatus'] = 0
+ data = urllib.urlencode(params)
+ full_uri = API_BASE + API_ACTIONS['editMonitor'] + data
+ req, info = fetch_url(module, full_uri)
+ result = req.read()
+ jsonresult = json.loads(result)
+ req.close()
+ return jsonresult['stat']
+
+
+def main():
+
+ module = AnsibleModule(
+        argument_spec=dict(
+            state=dict(required=True, choices=['started', 'paused']),
+            apikey=dict(required=True),
+            monitorid=dict(required=True)
+ ),
+ supports_check_mode=SUPPORTS_CHECK_MODE
+ )
+
+ params = dict(
+ apiKey=module.params['apikey'],
+ monitors=module.params['monitorid'],
+ monitorID=module.params['monitorid'],
+ format=API_FORMAT,
+ noJsonCallback=API_NOJSONCALLBACK
+ )
+
+ check_result = checkID(module, params)
+
+ if check_result['stat'] != "ok":
+ module.fail_json(
+ msg="failed",
+ result=check_result['message']
+ )
+
+ if module.params['state'] == 'started':
+ monitor_result = startMonitor(module, params)
+ else:
+ monitor_result = pauseMonitor(module, params)
+
+ module.exit_json(
+ msg="success",
+ result=monitor_result
+ )
+
+
+from ansible.module_utils.basic import *
+from ansible.module_utils.urls import *
+if __name__ == '__main__':
+ main()
diff --git a/lib/ansible/modules/extras/monitoring/zabbix_group.py b/lib/ansible/modules/extras/monitoring/zabbix_group.py
new file mode 100644
index 0000000000..a19c49794f
--- /dev/null
+++ b/lib/ansible/modules/extras/monitoring/zabbix_group.py
@@ -0,0 +1,225 @@
+#!/usr/bin/python
+# -*- coding: utf-8 -*-
+#
+# (c) 2013-2014, Epic Games, Inc.
+#
+# This file is part of Ansible
+#
+# Ansible is free software: you can redistribute it and/or modify
+# it under the terms of the GNU General Public License as published by
+# the Free Software Foundation, either version 3 of the License, or
+# (at your option) any later version.
+#
+# Ansible is distributed in the hope that it will be useful,
+# but WITHOUT ANY WARRANTY; without even the implied warranty of
+# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+# GNU General Public License for more details.
+#
+# You should have received a copy of the GNU General Public License
+# along with Ansible. If not, see <http://www.gnu.org/licenses/>.
+
+
+DOCUMENTATION = '''
+---
+module: zabbix_group
+short_description: Create/delete Zabbix host groups
+description:
+ - Create host groups if they do not exist.
+ - Delete existing host groups if they exist.
+version_added: "1.8"
+author:
+ - "(@cove)"
+ - "Tony Minfei Ding"
+ - "Harrison Gu (@harrisongu)"
+requirements:
+ - "python >= 2.6"
+ - zabbix-api
+options:
+ server_url:
+ description:
+ - Url of Zabbix server, with protocol (http or https).
+ C(url) is an alias for C(server_url).
+ required: true
+ aliases: [ "url" ]
+ login_user:
+ description:
+ - Zabbix user name.
+ required: true
+ login_password:
+ description:
+ - Zabbix user password.
+ required: true
+ http_login_user:
+ description:
+ - Basic Auth login
+ required: false
+ default: None
+ version_added: "2.1"
+ http_login_password:
+ description:
+ - Basic Auth password
+ required: false
+ default: None
+ version_added: "2.1"
+ state:
+ description:
+ - Create or delete host group.
+ required: false
+ default: "present"
+ choices: [ "present", "absent" ]
+ timeout:
+ description:
+            - The timeout of API request (seconds).
+ default: 10
+ host_groups:
+ description:
+ - List of host groups to create or delete.
+ required: true
+ aliases: [ "host_group" ]
+notes:
+ - Too many concurrent updates to the same group may cause Zabbix to return errors, see examples for a workaround if needed.
+'''
+
+EXAMPLES = '''
+# Base create host groups example
+- name: Create host groups
+ local_action:
+ module: zabbix_group
+ server_url: http://monitor.example.com
+ login_user: username
+ login_password: password
+ state: present
+ host_groups:
+ - Example group1
+ - Example group2
+
+# Limit the Zabbix group creation to one host since Zabbix can return an error when doing concurrent updates
+- name: Create host groups
+ local_action:
+ module: zabbix_group
+ server_url: http://monitor.example.com
+ login_user: username
+ login_password: password
+ state: present
+ host_groups:
+ - Example group1
+ - Example group2
+ when: inventory_hostname==groups['group_name'][0]
+'''
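+
+# For reference, a sketch of the zabbix-api calls this module wraps (server
+# and credentials are the placeholders from the examples above):
+#
+#   from zabbix_api import ZabbixAPI
+#   zbx = ZabbixAPI('http://monitor.example.com', timeout=10)
+#   zbx.login('username', 'password')
+#   zbx.hostgroup.create({'name': 'Example group1'})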
+
+try:
+ from zabbix_api import ZabbixAPI, ZabbixAPISubClass
+ from zabbix_api import Already_Exists
+
+ HAS_ZABBIX_API = True
+except ImportError:
+ HAS_ZABBIX_API = False
+
+
+class HostGroup(object):
+ def __init__(self, module, zbx):
+ self._module = module
+ self._zapi = zbx
+
+ # create host group(s) if not exists
+ def create_host_group(self, group_names):
+ try:
+ group_add_list = []
+ for group_name in group_names:
+ result = self._zapi.hostgroup.get({'filter': {'name': group_name}})
+ if not result:
+ try:
+ if self._module.check_mode:
+ self._module.exit_json(changed=True)
+ self._zapi.hostgroup.create({'name': group_name})
+ group_add_list.append(group_name)
+ except Already_Exists:
+ return group_add_list
+ return group_add_list
+ except Exception, e:
+ self._module.fail_json(msg="Failed to create host group(s): %s" % e)
+
+ # delete host group(s)
+ def delete_host_group(self, group_ids):
+ try:
+ if self._module.check_mode:
+ self._module.exit_json(changed=True)
+ self._zapi.hostgroup.delete(group_ids)
+ except Exception, e:
+ self._module.fail_json(msg="Failed to delete host group(s), Exception: %s" % e)
+
+ # get group ids by name
+ def get_group_ids(self, host_groups):
+ group_ids = []
+
+ group_list = self._zapi.hostgroup.get({'output': 'extend', 'filter': {'name': host_groups}})
+ for group in group_list:
+ group_id = group['groupid']
+ group_ids.append(group_id)
+ return group_ids, group_list
+
+
+def main():
+ module = AnsibleModule(
+ argument_spec=dict(
+ server_url=dict(type='str', required=True, aliases=['url']),
+ login_user=dict(type='str', required=True),
+ login_password=dict(type='str', required=True, no_log=True),
+ http_login_user=dict(type='str',required=False, default=None),
+ http_login_password=dict(type='str',required=False, default=None, no_log=True),
+ host_groups=dict(type='list', required=True, aliases=['host_group']),
+ state=dict(default="present", choices=['present','absent']),
+ timeout=dict(type='int', default=10)
+ ),
+ supports_check_mode=True
+ )
+
+ if not HAS_ZABBIX_API:
+ module.fail_json(msg="Missing requried zabbix-api module (check docs or install with: pip install zabbix-api)")
+
+ server_url = module.params['server_url']
+ login_user = module.params['login_user']
+ login_password = module.params['login_password']
+ http_login_user = module.params['http_login_user']
+ http_login_password = module.params['http_login_password']
+ host_groups = module.params['host_groups']
+ state = module.params['state']
+ timeout = module.params['timeout']
+
+ zbx = None
+
+ # login to zabbix
+ try:
+ zbx = ZabbixAPI(server_url, timeout=timeout, user=http_login_user, passwd=http_login_password)
+ zbx.login(login_user, login_password)
+ except Exception, e:
+ module.fail_json(msg="Failed to connect to Zabbix server: %s" % e)
+
+ hostGroup = HostGroup(module, zbx)
+
+ group_ids = []
+ group_list = []
+ if host_groups:
+ group_ids, group_list = hostGroup.get_group_ids(host_groups)
+
+ if state == "absent":
+ # delete host groups
+ if group_ids:
+ delete_group_names = []
+ hostGroup.delete_host_group(group_ids)
+ for group in group_list:
+ delete_group_names.append(group['name'])
+ module.exit_json(changed=True,
+ result="Successfully deleted host group(s): %s." % ",".join(delete_group_names))
+ else:
+ module.exit_json(changed=False, result="No host group(s) to delete.")
+ else:
+ # create host groups
+ group_add_list = hostGroup.create_host_group(host_groups)
+ if len(group_add_list) > 0:
+ module.exit_json(changed=True, result="Successfully created host group(s): %s" % group_add_list)
+ else:
+ module.exit_json(changed=False)
+
+from ansible.module_utils.basic import *
+main()
diff --git a/lib/ansible/modules/extras/monitoring/zabbix_host.py b/lib/ansible/modules/extras/monitoring/zabbix_host.py
new file mode 100644
index 0000000000..20d8b6e21f
--- /dev/null
+++ b/lib/ansible/modules/extras/monitoring/zabbix_host.py
@@ -0,0 +1,562 @@
+#!/usr/bin/python
+# -*- coding: utf-8 -*-
+
+# (c) 2013-2014, Epic Games, Inc.
+#
+# This file is part of Ansible
+#
+# Ansible is free software: you can redistribute it and/or modify
+# it under the terms of the GNU General Public License as published by
+# the Free Software Foundation, either version 3 of the License, or
+# (at your option) any later version.
+#
+# Ansible is distributed in the hope that it will be useful,
+# but WITHOUT ANY WARRANTY; without even the implied warranty of
+# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+# GNU General Public License for more details.
+#
+# You should have received a copy of the GNU General Public License
+# along with Ansible. If not, see <http://www.gnu.org/licenses/>.
+#
+
+DOCUMENTATION = '''
+---
+module: zabbix_host
+short_description: Create/update/delete Zabbix hosts
+description:
+ - This module allows you to create, modify and delete Zabbix host entries and associated group and template data.
+version_added: "2.0"
+author:
+ - "(@cove)"
+ - "Tony Minfei Ding"
+ - "Harrison Gu (@harrisongu)"
+requirements:
+ - "python >= 2.6"
+ - zabbix-api
+options:
+ server_url:
+ description:
+ - Url of Zabbix server, with protocol (http or https).
+ required: true
+ aliases: [ "url" ]
+ login_user:
+ description:
+ - Zabbix user name, used to authenticate against the server.
+ required: true
+ login_password:
+ description:
+ - Zabbix user password.
+ required: true
+ http_login_user:
+ description:
+ - Basic Auth login
+ required: false
+ default: None
+ version_added: "2.1"
+ http_login_password:
+ description:
+ - Basic Auth password
+ required: false
+ default: None
+ version_added: "2.1"
+ host_name:
+ description:
+ - Name of the host in Zabbix.
+ - host_name is the unique identifier used and cannot be updated using this module.
+ required: true
+ host_groups:
+ description:
+ - List of host groups the host is part of.
+ required: false
+ link_templates:
+ description:
+ - List of templates linked to the host.
+ required: false
+ default: None
+ inventory_mode:
+ description:
+ - Configure the inventory mode.
+ choices: ['automatic', 'manual', 'disabled']
+ required: false
+ default: None
+ version_added: '2.1'
+ status:
+ description:
+ - Monitoring status of the host.
+ required: false
+ choices: ['enabled', 'disabled']
+ default: "enabled"
+ state:
+ description:
+ - State of the host.
+            - On C(present), it will create the host if it does not exist, or update it if the associated data differs.
+            - On C(absent), it will remove the host if it exists.
+ required: false
+ choices: ['present', 'absent']
+ default: "present"
+ timeout:
+ description:
+ - The timeout of API request (seconds).
+ default: 10
+ proxy:
+ description:
+ - The name of the Zabbix Proxy to be used
+ default: None
+ interfaces:
+ description:
+ - List of interfaces to be created for the host (see example below).
+ - 'Available values are: dns, ip, main, port, type and useip.'
+            - Please review the interface documentation for more information on the supported properties.
+ - 'https://www.zabbix.com/documentation/2.0/manual/appendix/api/hostinterface/definitions#host_interface'
+ required: false
+ default: []
+ force:
+ description:
+ - Overwrite the host configuration, even if already present
+ required: false
+ default: "yes"
+ choices: [ "yes", "no" ]
+ version_added: "2.0"
+'''
+
+EXAMPLES = '''
+- name: Create a new host or update an existing host's info
+ local_action:
+ module: zabbix_host
+ server_url: http://monitor.example.com
+ login_user: username
+ login_password: password
+ host_name: ExampleHost
+ host_groups:
+ - Example group1
+ - Example group2
+ link_templates:
+ - Example template1
+ - Example template2
+ status: enabled
+ state: present
+ inventory_mode: automatic
+ interfaces:
+ - type: 1
+ main: 1
+ useip: 1
+ ip: 10.xx.xx.xx
+ dns: ""
+ port: 10050
+ - type: 4
+ main: 1
+ useip: 1
+ ip: 10.xx.xx.xx
+ dns: ""
+ port: 12345
+ proxy: a.zabbix.proxy
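+
+# A minimal companion sketch (not part of the upstream examples): removing the
+# same host only needs the connection parameters, host_name and state: absent.
+# All values below are placeholders.
+- name: Remove a host
+  local_action:
+    module: zabbix_host
+    server_url: http://monitor.example.com
+    login_user: username
+    login_password: password
+    host_name: ExampleHost
+    state: absent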
+'''
+
+import logging
+import copy
+
+try:
+ from zabbix_api import ZabbixAPI, ZabbixAPISubClass
+
+ HAS_ZABBIX_API = True
+except ImportError:
+ HAS_ZABBIX_API = False
+
+
+# Extend the ZabbixAPI
+# Since the zabbix-api python module is too old (version 1.0, with no newer release so far),
+# it does not support the 'hostinterface' API calls,
+# so we have to subclass ZabbixAPI to add 'hostinterface' support.
+class ZabbixAPIExtends(ZabbixAPI):
+ hostinterface = None
+
+ def __init__(self, server, timeout, user, passwd, **kwargs):
+ ZabbixAPI.__init__(self, server, timeout=timeout, user=user, passwd=passwd)
+ self.hostinterface = ZabbixAPISubClass(self, dict({"prefix": "hostinterface"}, **kwargs))
+
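+# Usage note (an assumption based on how ZabbixAPISubClass proxies calls):
+# after construction, zbx.hostinterface.<method> maps to the JSON-RPC call
+# 'hostinterface.<method>', e.g. zbx.hostinterface.get({'hostids': host_id}).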
+
+class Host(object):
+ def __init__(self, module, zbx):
+ self._module = module
+ self._zapi = zbx
+
+    # check whether the host exists
+ def is_host_exist(self, host_name):
+ result = self._zapi.host.get({'filter': {'host': host_name}})
+ return result
+
+ # check if host group exists
+ def check_host_group_exist(self, group_names):
+ for group_name in group_names:
+ result = self._zapi.hostgroup.get({'filter': {'name': group_name}})
+ if not result:
+ self._module.fail_json(msg="Hostgroup not found: %s" % group_name)
+ return True
+
+ def get_template_ids(self, template_list):
+ template_ids = []
+ if template_list is None or len(template_list) == 0:
+ return template_ids
+ for template in template_list:
+            matching_templates = self._zapi.template.get({'output': 'extend', 'filter': {'host': template}})
+            if len(matching_templates) < 1:
+                self._module.fail_json(msg="Template not found: %s" % template)
+            else:
+                template_id = matching_templates[0]['templateid']
+ template_ids.append(template_id)
+ return template_ids
+
+ def add_host(self, host_name, group_ids, status, interfaces, proxy_id):
+ try:
+ if self._module.check_mode:
+ self._module.exit_json(changed=True)
+ parameters = {'host': host_name, 'interfaces': interfaces, 'groups': group_ids, 'status': status}
+ if proxy_id:
+ parameters['proxy_hostid'] = proxy_id
+ host_list = self._zapi.host.create(parameters)
+ if len(host_list) >= 1:
+ return host_list['hostids'][0]
+        except Exception as e:
+ self._module.fail_json(msg="Failed to create host %s: %s" % (host_name, e))
+
+ def update_host(self, host_name, group_ids, status, host_id, interfaces, exist_interface_list, proxy_id):
+ try:
+ if self._module.check_mode:
+ self._module.exit_json(changed=True)
+ parameters = {'hostid': host_id, 'groups': group_ids, 'status': status}
+ if proxy_id:
+ parameters['proxy_hostid'] = proxy_id
+ self._zapi.host.update(parameters)
+ interface_list_copy = exist_interface_list
+ if interfaces:
+ for interface in interfaces:
+ flag = False
+ interface_str = interface
+ for exist_interface in exist_interface_list:
+ interface_type = interface['type']
+ exist_interface_type = int(exist_interface['type'])
+ if interface_type == exist_interface_type:
+ # update
+ interface_str['interfaceid'] = exist_interface['interfaceid']
+ self._zapi.hostinterface.update(interface_str)
+ flag = True
+ interface_list_copy.remove(exist_interface)
+ break
+ if not flag:
+ # add
+ interface_str['hostid'] = host_id
+ self._zapi.hostinterface.create(interface_str)
+ # remove
+ remove_interface_ids = []
+ for remove_interface in interface_list_copy:
+ interface_id = remove_interface['interfaceid']
+ remove_interface_ids.append(interface_id)
+ if len(remove_interface_ids) > 0:
+ self._zapi.hostinterface.delete(remove_interface_ids)
+        except Exception as e:
+ self._module.fail_json(msg="Failed to update host %s: %s" % (host_name, e))
+
+ def delete_host(self, host_id, host_name):
+ try:
+ if self._module.check_mode:
+ self._module.exit_json(changed=True)
+ self._zapi.host.delete([host_id])
+        except Exception as e:
+ self._module.fail_json(msg="Failed to delete host %s: %s" % (host_name, e))
+
+ # get host by host name
+ def get_host_by_host_name(self, host_name):
+ host_list = self._zapi.host.get({'output': 'extend', 'filter': {'host': [host_name]}})
+ if len(host_list) < 1:
+ self._module.fail_json(msg="Host not found: %s" % host_name)
+ else:
+ return host_list[0]
+
+ # get proxyid by proxy name
+ def get_proxyid_by_proxy_name(self, proxy_name):
+ proxy_list = self._zapi.proxy.get({'output': 'extend', 'filter': {'host': [proxy_name]}})
+ if len(proxy_list) < 1:
+ self._module.fail_json(msg="Proxy not found: %s" % proxy_name)
+ else:
+ return proxy_list[0]['proxyid']
+
+ # get group ids by group names
+ def get_group_ids_by_group_names(self, group_names):
+ group_ids = []
+ if self.check_host_group_exist(group_names):
+ group_list = self._zapi.hostgroup.get({'output': 'extend', 'filter': {'name': group_names}})
+ for group in group_list:
+ group_id = group['groupid']
+ group_ids.append({'groupid': group_id})
+ return group_ids
+
+ # get host templates by host id
+ def get_host_templates_by_host_id(self, host_id):
+ template_ids = []
+ template_list = self._zapi.template.get({'output': 'extend', 'hostids': host_id})
+ for template in template_list:
+ template_ids.append(template['templateid'])
+ return template_ids
+
+ # get host groups by host id
+ def get_host_groups_by_host_id(self, host_id):
+ exist_host_groups = []
+ host_groups_list = self._zapi.hostgroup.get({'output': 'extend', 'hostids': host_id})
+
+ if len(host_groups_list) >= 1:
+ for host_groups_name in host_groups_list:
+ exist_host_groups.append(host_groups_name['name'])
+ return exist_host_groups
+
+    # check whether the existing interfaces match the given interfaces
+ def check_interface_properties(self, exist_interface_list, interfaces):
+ interfaces_port_list = []
+
+ if interfaces is not None:
+ if len(interfaces) >= 1:
+ for interface in interfaces:
+ interfaces_port_list.append(int(interface['port']))
+
+ exist_interface_ports = []
+ if len(exist_interface_list) >= 1:
+ for exist_interface in exist_interface_list:
+ exist_interface_ports.append(int(exist_interface['port']))
+
+ if set(interfaces_port_list) != set(exist_interface_ports):
+ return True
+
+        for exist_interface in exist_interface_list:
+            exist_interface_port = int(exist_interface['port'])
+            for interface in interfaces:
+                interface_port = int(interface['port'])
+                if interface_port == exist_interface_port:
+ for key in interface.keys():
+ if str(exist_interface[key]) != str(interface[key]):
+ return True
+
+ return False
+
+    # get the status of a host object
+ def get_host_status_by_host(self, host):
+ return host['status']
+
+ # check all the properties before link or clear template
+ def check_all_properties(self, host_id, host_groups, status, interfaces, template_ids,
+ exist_interfaces, host, proxy_id):
+ # get the existing host's groups
+ exist_host_groups = self.get_host_groups_by_host_id(host_id)
+ if set(host_groups) != set(exist_host_groups):
+ return True
+
+ # get the existing status
+ exist_status = self.get_host_status_by_host(host)
+ if int(status) != int(exist_status):
+ return True
+
+        # check whether the existing interfaces match the given interfaces
+ if self.check_interface_properties(exist_interfaces, interfaces):
+ return True
+
+ # get the existing templates
+ exist_template_ids = self.get_host_templates_by_host_id(host_id)
+ if set(list(template_ids)) != set(exist_template_ids):
+ return True
+
+ if proxy_id is not None:
+ if host['proxy_hostid'] != proxy_id:
+ return True
+
+ return False
+
+ # link or clear template of the host
+ def link_or_clear_template(self, host_id, template_id_list):
+ # get host's exist template ids
+ exist_template_id_list = self.get_host_templates_by_host_id(host_id)
+
+ exist_template_ids = set(exist_template_id_list)
+ template_ids = set(template_id_list)
+ template_id_list = list(template_ids)
+
+ # get unlink and clear templates
+ templates_clear = exist_template_ids.difference(template_ids)
+ templates_clear_list = list(templates_clear)
+ request_str = {'hostid': host_id, 'templates': template_id_list, 'templates_clear': templates_clear_list}
+ try:
+ if self._module.check_mode:
+ self._module.exit_json(changed=True)
+ self._zapi.host.update(request_str)
+        except Exception as e:
+ self._module.fail_json(msg="Failed to link template to host: %s" % e)
+
+ # Update the host inventory_mode
+ def update_inventory_mode(self, host_id, inventory_mode):
+
+ # nothing was set, do nothing
+ if not inventory_mode:
+ return
+
+ if inventory_mode == "automatic":
+ inventory_mode = int(1)
+ elif inventory_mode == "manual":
+ inventory_mode = int(0)
+ elif inventory_mode == "disabled":
+ inventory_mode = int(-1)
+
+ # watch for - https://support.zabbix.com/browse/ZBX-6033
+ request_str = {'hostid': host_id, 'inventory_mode': inventory_mode}
+ try:
+ if self._module.check_mode:
+ self._module.exit_json(changed=True)
+ self._zapi.host.update(request_str)
+        except Exception as e:
+ self._module.fail_json(msg="Failed to set inventory_mode to host: %s" % e)
+
+def main():
+ module = AnsibleModule(
+ argument_spec=dict(
+ server_url=dict(type='str', required=True, aliases=['url']),
+ login_user=dict(type='str', required=True),
+ login_password=dict(type='str', required=True, no_log=True),
+ host_name=dict(type='str', required=True),
+ http_login_user=dict(type='str', required=False, default=None),
+ http_login_password=dict(type='str', required=False, default=None, no_log=True),
+ host_groups=dict(type='list', required=False),
+ link_templates=dict(type='list', required=False),
+ status=dict(default="enabled", choices=['enabled', 'disabled']),
+ state=dict(default="present", choices=['present', 'absent']),
+ inventory_mode=dict(required=False, choices=['automatic', 'manual', 'disabled']),
+ timeout=dict(type='int', default=10),
+ interfaces=dict(type='list', required=False),
+ force=dict(type='bool', default=True),
+ proxy=dict(type='str', required=False)
+ ),
+ supports_check_mode=True
+ )
+
+ if not HAS_ZABBIX_API:
+ module.fail_json(msg="Missing requried zabbix-api module (check docs or install with: pip install zabbix-api)")
+
+ server_url = module.params['server_url']
+ login_user = module.params['login_user']
+ login_password = module.params['login_password']
+ http_login_user = module.params['http_login_user']
+ http_login_password = module.params['http_login_password']
+ host_name = module.params['host_name']
+ host_groups = module.params['host_groups']
+ link_templates = module.params['link_templates']
+ inventory_mode = module.params['inventory_mode']
+ status = module.params['status']
+ state = module.params['state']
+ timeout = module.params['timeout']
+ interfaces = module.params['interfaces']
+ force = module.params['force']
+ proxy = module.params['proxy']
+
+ # convert enabled to 0; disabled to 1
+ status = 1 if status == "disabled" else 0
+
+ zbx = None
+ # login to zabbix
+ try:
+ zbx = ZabbixAPIExtends(server_url, timeout=timeout, user=http_login_user, passwd=http_login_password)
+ zbx.login(login_user, login_password)
+    except Exception as e:
+ module.fail_json(msg="Failed to connect to Zabbix server: %s" % e)
+
+ host = Host(module, zbx)
+
+ template_ids = []
+ if link_templates:
+ template_ids = host.get_template_ids(link_templates)
+
+ group_ids = []
+
+ if host_groups:
+ group_ids = host.get_group_ids_by_group_names(host_groups)
+
+ ip = ""
+ if interfaces:
+ for interface in interfaces:
+ if interface['type'] == 1:
+ ip = interface['ip']
+
+ # check if host exist
+ is_host_exist = host.is_host_exist(host_name)
+
+ if is_host_exist:
+ # Use proxy specified, or set to None when updating host
+ if proxy:
+ proxy_id = host.get_proxyid_by_proxy_name(proxy)
+ else:
+ proxy_id = None
+
+ # get host id by host name
+ zabbix_host_obj = host.get_host_by_host_name(host_name)
+ host_id = zabbix_host_obj['hostid']
+
+ if state == "absent":
+ # remove host
+ host.delete_host(host_id, host_name)
+ module.exit_json(changed=True, result="Successfully delete host %s" % host_name)
+ else:
+ if not group_ids:
+ module.fail_json(msg="Specify at least one group for updating host '%s'." % host_name)
+
+ if not force:
+                module.fail_json(msg="Host is present; cannot update configuration without force")
+
+ # get exist host's interfaces
+ exist_interfaces = host._zapi.hostinterface.get({'output': 'extend', 'hostids': host_id})
+ exist_interfaces_copy = copy.deepcopy(exist_interfaces)
+
+ # update host
+ interfaces_len = len(interfaces) if interfaces else 0
+
+ if len(exist_interfaces) > interfaces_len:
+ if host.check_all_properties(host_id, host_groups, status, interfaces, template_ids,
+ exist_interfaces, zabbix_host_obj, proxy_id):
+ host.link_or_clear_template(host_id, template_ids)
+ host.update_host(host_name, group_ids, status, host_id,
+ interfaces, exist_interfaces, proxy_id)
+                    module.exit_json(changed=True,
+                                     result="Successfully updated host %s (%s) and linked templates '%s'"
+                                            % (host_name, ip, link_templates))
+ else:
+ module.exit_json(changed=False)
+ else:
+ if host.check_all_properties(host_id, host_groups, status, interfaces, template_ids,
+ exist_interfaces_copy, zabbix_host_obj, proxy_id):
+ host.update_host(host_name, group_ids, status, host_id, interfaces, exist_interfaces, proxy_id)
+ host.link_or_clear_template(host_id, template_ids)
+ host.update_inventory_mode(host_id, inventory_mode)
+                    module.exit_json(changed=True,
+                                     result="Successfully updated host %s (%s) and linked templates '%s'"
+                                            % (host_name, ip, link_templates))
+ else:
+ module.exit_json(changed=False)
+ else:
+ if state == "absent":
+ # the host is already deleted.
+ module.exit_json(changed=False)
+
+ # Use proxy specified, or set to 0 when adding new host
+ if proxy:
+ proxy_id = host.get_proxyid_by_proxy_name(proxy)
+ else:
+ proxy_id = 0
+
+ if not group_ids:
+ module.fail_json(msg="Specify at least one group for creating host '%s'." % host_name)
+
+        if not interfaces:
+ module.fail_json(msg="Specify at least one interface for creating host '%s'." % host_name)
+
+ # create host
+ host_id = host.add_host(host_name, group_ids, status, interfaces, proxy_id)
+ host.link_or_clear_template(host_id, template_ids)
+ host.update_inventory_mode(host_id, inventory_mode)
+ module.exit_json(changed=True, result="Successfully added host %s (%s) and linked with template '%s'" % (
+ host_name, ip, link_templates))
+
+from ansible.module_utils.basic import *
+
+if __name__ == '__main__':
+    main()
diff --git a/lib/ansible/modules/extras/monitoring/zabbix_hostmacro.py b/lib/ansible/modules/extras/monitoring/zabbix_hostmacro.py
new file mode 100644
index 0000000000..c0e3f8c228
--- /dev/null
+++ b/lib/ansible/modules/extras/monitoring/zabbix_hostmacro.py
@@ -0,0 +1,243 @@
+#!/usr/bin/python
+# -*- coding: utf-8 -*-
+
+# (c) 2013-2014, Epic Games, Inc.
+#
+# This file is part of Ansible
+#
+# Ansible is free software: you can redistribute it and/or modify
+# it under the terms of the GNU General Public License as published by
+# the Free Software Foundation, either version 3 of the License, or
+# (at your option) any later version.
+#
+# Ansible is distributed in the hope that it will be useful,
+# but WITHOUT ANY WARRANTY; without even the implied warranty of
+# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+# GNU General Public License for more details.
+#
+# You should have received a copy of the GNU General Public License
+# along with Ansible. If not, see <http://www.gnu.org/licenses/>.
+#
+
+DOCUMENTATION = '''
+---
+module: zabbix_hostmacro
+short_description: Create/update/delete Zabbix host macros
+description:
+    - Manages Zabbix host macros; it can create, update, or delete them.
+version_added: "2.0"
+author:
+ - "(@cave)"
+ - Dean Hailin Song
+requirements:
+ - "python >= 2.6"
+ - zabbix-api
+options:
+ server_url:
+ description:
+            - URL of the Zabbix server, with protocol (http or https).
+ required: true
+ aliases: [ "url" ]
+ login_user:
+ description:
+ - Zabbix user name.
+ required: true
+ login_password:
+ description:
+ - Zabbix user password.
+ required: true
+ http_login_user:
+ description:
+ - Basic Auth login
+ required: false
+ default: None
+ version_added: "2.1"
+ http_login_password:
+ description:
+ - Basic Auth password
+ required: false
+ default: None
+ version_added: "2.1"
+ host_name:
+ description:
+ - Name of the host.
+ required: true
+ macro_name:
+ description:
+ - Name of the host macro.
+ required: true
+ macro_value:
+ description:
+ - Value of the host macro.
+ required: true
+ state:
+ description:
+ - State of the macro.
+            - On C(present), it will create the macro if it does not exist, or update it if the associated data differs.
+            - On C(absent), it will remove the macro if it exists.
+ required: false
+ choices: ['present', 'absent']
+ default: "present"
+ timeout:
+ description:
+ - The timeout of API request (seconds).
+ default: 10
+'''
+
+EXAMPLES = '''
+- name: Create a new host macro or update an existing macro's value
+ local_action:
+ module: zabbix_hostmacro
+ server_url: http://monitor.example.com
+ login_user: username
+ login_password: password
+ host_name: ExampleHost
+    macro_name: Example macro
+    macro_value: Example value
+ state: present
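+
+# A hedged extra sketch (not in the upstream docs): deleting the macro again.
+# Only the connection parameters, host_name, macro_name and state: absent
+# matter here; macro_value is still required by the module, so a dummy
+# placeholder value is passed.
+- name: Delete a host macro
+  local_action:
+    module: zabbix_hostmacro
+    server_url: http://monitor.example.com
+    login_user: username
+    login_password: password
+    host_name: ExampleHost
+    macro_name: Example macro
+    macro_value: unused
+    state: absent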
+'''
+
+import logging
+import copy
+
+try:
+ from zabbix_api import ZabbixAPI, ZabbixAPISubClass
+
+ HAS_ZABBIX_API = True
+except ImportError:
+ HAS_ZABBIX_API = False
+
+
+# Extend the ZabbixAPI
+# Since the zabbix-api python module is too old (version 1.0, with no newer release so far).
+class ZabbixAPIExtends(ZabbixAPI):
+ def __init__(self, server, timeout, user, passwd, **kwargs):
+ ZabbixAPI.__init__(self, server, timeout=timeout, user=user, passwd=passwd)
+
+
+class HostMacro(object):
+ def __init__(self, module, zbx):
+ self._module = module
+ self._zapi = zbx
+
+ # get host id by host name
+ def get_host_id(self, host_name):
+ try:
+ host_list = self._zapi.host.get({'output': 'extend', 'filter': {'host': host_name}})
+ if len(host_list) < 1:
+ self._module.fail_json(msg="Host not found: %s" % host_name)
+ else:
+ host_id = host_list[0]['hostid']
+ return host_id
+        except Exception as e:
+ self._module.fail_json(msg="Failed to get the host %s id: %s." % (host_name, e))
+
+ # get host macro
+ def get_host_macro(self, macro_name, host_id):
+ try:
+ host_macro_list = self._zapi.usermacro.get(
+ {"output": "extend", "selectSteps": "extend", 'hostids': [host_id], 'filter': {'macro': '{$' + macro_name + '}'}})
+ if len(host_macro_list) > 0:
+ return host_macro_list[0]
+ return None
+        except Exception as e:
+ self._module.fail_json(msg="Failed to get host macro %s: %s" % (macro_name, e))
+
+ # create host macro
+ def create_host_macro(self, macro_name, macro_value, host_id):
+ try:
+ if self._module.check_mode:
+ self._module.exit_json(changed=True)
+ self._zapi.usermacro.create({'hostid': host_id, 'macro': '{$' + macro_name + '}', 'value': macro_value})
+ self._module.exit_json(changed=True, result="Successfully added host macro %s " % macro_name)
+        except Exception as e:
+ self._module.fail_json(msg="Failed to create host macro %s: %s" % (macro_name, e))
+
+ # update host macro
+ def update_host_macro(self, host_macro_obj, macro_name, macro_value):
+ host_macro_id = host_macro_obj['hostmacroid']
+ if host_macro_obj['macro'] == '{$'+macro_name+'}' and host_macro_obj['value'] == macro_value:
+ self._module.exit_json(changed=False, result="Host macro %s already up to date" % macro_name)
+ try:
+ if self._module.check_mode:
+ self._module.exit_json(changed=True)
+ self._zapi.usermacro.update({'hostmacroid': host_macro_id, 'value': macro_value})
+ self._module.exit_json(changed=True, result="Successfully updated host macro %s " % macro_name)
+        except Exception as e:
+            self._module.fail_json(msg="Failed to update host macro %s: %s" % (macro_name, e))
+
+ # delete host macro
+ def delete_host_macro(self, host_macro_obj, macro_name):
+ host_macro_id = host_macro_obj['hostmacroid']
+ try:
+ if self._module.check_mode:
+ self._module.exit_json(changed=True)
+ self._zapi.usermacro.delete([host_macro_id])
+ self._module.exit_json(changed=True, result="Successfully deleted host macro %s " % macro_name)
+        except Exception as e:
+ self._module.fail_json(msg="Failed to delete host macro %s: %s" % (macro_name, e))
+
+def main():
+ module = AnsibleModule(
+ argument_spec=dict(
+ server_url=dict(type='str', required=True, aliases=['url']),
+ login_user=dict(type='str', required=True),
+ login_password=dict(type='str', required=True, no_log=True),
+ http_login_user=dict(type='str', required=False, default=None),
+ http_login_password=dict(type='str', required=False, default=None, no_log=True),
+ host_name=dict(type='str', required=True),
+ macro_name=dict(type='str', required=True),
+ macro_value=dict(type='str', required=True),
+ state=dict(default="present", choices=['present', 'absent']),
+ timeout=dict(type='int', default=10)
+ ),
+ supports_check_mode=True
+ )
+
+ if not HAS_ZABBIX_API:
+ module.fail_json(msg="Missing requried zabbix-api module (check docs or install with: pip install zabbix-api)")
+
+ server_url = module.params['server_url']
+ login_user = module.params['login_user']
+ login_password = module.params['login_password']
+ http_login_user = module.params['http_login_user']
+ http_login_password = module.params['http_login_password']
+ host_name = module.params['host_name']
+ macro_name = (module.params['macro_name']).upper()
+ macro_value = module.params['macro_value']
+ state = module.params['state']
+ timeout = module.params['timeout']
+
+ zbx = None
+ # login to zabbix
+ try:
+ zbx = ZabbixAPIExtends(server_url, timeout=timeout, user=http_login_user, passwd=http_login_password)
+ zbx.login(login_user, login_password)
+    except Exception as e:
+ module.fail_json(msg="Failed to connect to Zabbix server: %s" % e)
+
+ host_macro_class_obj = HostMacro(module, zbx)
+
+ changed = False
+
+ if host_name:
+ host_id = host_macro_class_obj.get_host_id(host_name)
+ host_macro_obj = host_macro_class_obj.get_host_macro(macro_name, host_id)
+
+ if state == 'absent':
+ if not host_macro_obj:
+ module.exit_json(changed=False, msg="Host Macro %s does not exist" % macro_name)
+ else:
+ # delete a macro
+ host_macro_class_obj.delete_host_macro(host_macro_obj, macro_name)
+ else:
+ if not host_macro_obj:
+ # create host macro
+ host_macro_class_obj.create_host_macro(macro_name, macro_value, host_id)
+ else:
+ # update host macro
+ host_macro_class_obj.update_host_macro(host_macro_obj, macro_name, macro_value)
+
+from ansible.module_utils.basic import *
+
+if __name__ == '__main__':
+    main()
+
diff --git a/lib/ansible/modules/extras/monitoring/zabbix_maintenance.py b/lib/ansible/modules/extras/monitoring/zabbix_maintenance.py
new file mode 100644
index 0000000000..89f792ce5d
--- /dev/null
+++ b/lib/ansible/modules/extras/monitoring/zabbix_maintenance.py
@@ -0,0 +1,377 @@
+#!/usr/bin/python
+# -*- coding: utf-8 -*-
+
+# (c) 2013, Alexander Bulimov <lazywolf0@gmail.com>
+#
+# This file is part of Ansible
+#
+# Ansible is free software: you can redistribute it and/or modify
+# it under the terms of the GNU General Public License as published by
+# the Free Software Foundation, either version 3 of the License, or
+# (at your option) any later version.
+#
+# Ansible is distributed in the hope that it will be useful,
+# but WITHOUT ANY WARRANTY; without even the implied warranty of
+# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+# GNU General Public License for more details.
+#
+# You should have received a copy of the GNU General Public License
+# along with Ansible. If not, see <http://www.gnu.org/licenses/>.
+
+
+DOCUMENTATION = '''
+
+module: zabbix_maintenance
+short_description: Create Zabbix maintenance windows
+description:
+ - This module will let you create Zabbix maintenance windows.
+version_added: "1.8"
+author: "Alexander Bulimov (@abulimov)"
+requirements:
+ - "python >= 2.6"
+ - zabbix-api
+options:
+ state:
+ description:
+ - Create or remove a maintenance window.
+ required: false
+ default: present
+ choices: [ "present", "absent" ]
+ server_url:
+ description:
+            - URL of the Zabbix server, with protocol (http or https).
+ C(url) is an alias for C(server_url).
+ required: true
+ default: null
+ aliases: [ "url" ]
+ login_user:
+ description:
+ - Zabbix user name.
+ required: true
+ login_password:
+ description:
+ - Zabbix user password.
+ required: true
+ http_login_user:
+ description:
+ - Basic Auth login
+ required: false
+ default: None
+ version_added: "2.1"
+ http_login_password:
+ description:
+ - Basic Auth password
+ required: false
+ default: None
+ version_added: "2.1"
+ host_names:
+ description:
+ - Hosts to manage maintenance window for.
+ Separate multiple hosts with commas.
+ C(host_name) is an alias for C(host_names).
+ B(Required) option when C(state) is I(present)
+        and no C(host_groups) is specified.
+ required: false
+ default: null
+ aliases: [ "host_name" ]
+ host_groups:
+ description:
+ - Host groups to manage maintenance window for.
+ Separate multiple groups with commas.
+ C(host_group) is an alias for C(host_groups).
+ B(Required) option when C(state) is I(present)
+        and no C(host_names) is specified.
+ required: false
+ default: null
+ aliases: [ "host_group" ]
+ minutes:
+ description:
+ - Length of maintenance window in minutes.
+ required: false
+ default: 10
+ name:
+ description:
+ - Unique name of maintenance window.
+ required: true
+ desc:
+ description:
+ - Short description of maintenance window.
+        required: false
+ default: Created by Ansible
+ collect_data:
+ description:
+ - Type of maintenance. With data collection, or without.
+ required: false
+ default: "true"
+ timeout:
+ description:
+ - The timeout of API request (seconds).
+ default: 10
+ version_added: "2.1"
+ required: false
+notes:
+ - Useful for setting hosts in maintenance mode before big update,
+ and removing maintenance window after update.
+    - The module creates a maintenance window from now() to now() + minutes,
+      so if the Zabbix server's time and the host's time are not synchronized,
+      you will get unexpected results.
+ - Install required module with 'pip install zabbix-api' command.
+    - Existence is checked only by the maintenance window name.
+'''
+
+EXAMPLES = '''
+# Create maintenance window named "Update of www1"
+# for host www1.example.com for 90 minutes
+- zabbix_maintenance: name="Update of www1"
+ host_name=www1.example.com
+ state=present
+ minutes=90
+ server_url=https://monitoring.example.com
+ login_user=ansible
+ login_password=pAsSwOrD
+
+# Create maintenance window named "Mass update"
+# for host www1.example.com and host groups Office and Dev
+- zabbix_maintenance: name="Update of www1"
+ host_name=www1.example.com
+ host_groups=Office,Dev
+ state=present
+ server_url=https://monitoring.example.com
+ login_user=ansible
+ login_password=pAsSwOrD
+
+# Create maintenance window named "update"
+# for hosts www1.example.com and db1.example.com and without data collection.
+- zabbix_maintenance: name=update
+ host_names=www1.example.com,db1.example.com
+ state=present
+ collect_data=false
+ server_url=https://monitoring.example.com
+ login_user=ansible
+ login_password=pAsSwOrD
+
+# Remove maintenance window named "Test1"
+- zabbix_maintenance: name=Test1
+ state=absent
+ server_url=https://monitoring.example.com
+ login_user=ansible
+ login_password=pAsSwOrD
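+
+# Illustrative sketch only: the same removal through an HTTP Basic Auth
+# frontend, using the http_login_user/http_login_password options documented
+# above (credentials are placeholders).
+- zabbix_maintenance: name=Test1
+                      state=absent
+                      server_url=https://monitoring.example.com
+                      login_user=ansible
+                      login_password=pAsSwOrD
+                      http_login_user=basicuser
+                      http_login_password=basicpass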
+'''
+
+import datetime
+import time
+
+try:
+ from zabbix_api import ZabbixAPI
+ HAS_ZABBIX_API = True
+except ImportError:
+ HAS_ZABBIX_API = False
+
+
+def create_maintenance(zbx, group_ids, host_ids, start_time, maintenance_type, period, name, desc):
+ end_time = start_time + period
+ try:
+ zbx.maintenance.create(
+ {
+ "groupids": group_ids,
+ "hostids": host_ids,
+ "name": name,
+ "maintenance_type": maintenance_type,
+ "active_since": str(start_time),
+ "active_till": str(end_time),
+ "description": desc,
+ "timeperiods": [{
+ "timeperiod_type": "0",
+ "start_date": str(start_time),
+ "period": str(period),
+ }]
+ }
+ )
+ except BaseException as e:
+ return 1, None, str(e)
+ return 0, None, None
+
+
+def get_maintenance_id(zbx, name):
+ try:
+ result = zbx.maintenance.get(
+ {
+ "filter":
+ {
+ "name": name,
+ }
+ }
+ )
+ except BaseException as e:
+ return 1, None, str(e)
+
+ maintenance_ids = []
+ for res in result:
+ maintenance_ids.append(res["maintenanceid"])
+
+ return 0, maintenance_ids, None
+
+
+def delete_maintenance(zbx, maintenance_id):
+ try:
+ zbx.maintenance.delete(maintenance_id)
+ except BaseException as e:
+ return 1, None, str(e)
+ return 0, None, None
+
+
+def get_group_ids(zbx, host_groups):
+ group_ids = []
+ for group in host_groups:
+ try:
+ result = zbx.hostgroup.get(
+ {
+ "output": "extend",
+ "filter":
+ {
+ "name": group
+ }
+ }
+ )
+ except BaseException as e:
+ return 1, None, str(e)
+
+ if not result:
+ return 1, None, "Group id for group %s not found" % group
+
+ group_ids.append(result[0]["groupid"])
+
+ return 0, group_ids, None
+
+
+def get_host_ids(zbx, host_names):
+ host_ids = []
+ for host in host_names:
+ try:
+ result = zbx.host.get(
+ {
+ "output": "extend",
+ "filter":
+ {
+ "name": host
+ }
+ }
+ )
+ except BaseException as e:
+ return 1, None, str(e)
+
+ if not result:
+ return 1, None, "Host id for host %s not found" % host
+
+ host_ids.append(result[0]["hostid"])
+
+ return 0, host_ids, None
+
+
+def main():
+ module = AnsibleModule(
+ argument_spec=dict(
+ state=dict(required=False, default='present', choices=['present', 'absent']),
+            server_url=dict(type='str', required=True, aliases=['url']),
+ host_names=dict(type='list', required=False, default=None, aliases=['host_name']),
+ minutes=dict(type='int', required=False, default=10),
+ host_groups=dict(type='list', required=False, default=None, aliases=['host_group']),
+ login_user=dict(type='str', required=True),
+ login_password=dict(type='str', required=True, no_log=True),
+ http_login_user=dict(type='str', required=False, default=None),
+ http_login_password=dict(type='str', required=False, default=None, no_log=True),
+ name=dict(type='str', required=True),
+ desc=dict(type='str', required=False, default="Created by Ansible"),
+ collect_data=dict(type='bool', required=False, default=True),
+ timeout=dict(type='int', default=10),
+ ),
+ supports_check_mode=True,
+ )
+
+ if not HAS_ZABBIX_API:
+ module.fail_json(msg="Missing requried zabbix-api module (check docs or install with: pip install zabbix-api)")
+
+ host_names = module.params['host_names']
+ host_groups = module.params['host_groups']
+ state = module.params['state']
+ login_user = module.params['login_user']
+ login_password = module.params['login_password']
+ http_login_user = module.params['http_login_user']
+ http_login_password = module.params['http_login_password']
+ minutes = module.params['minutes']
+ name = module.params['name']
+ desc = module.params['desc']
+ server_url = module.params['server_url']
+ collect_data = module.params['collect_data']
+ timeout = module.params['timeout']
+
+ if collect_data:
+ maintenance_type = 0
+ else:
+ maintenance_type = 1
+
+ try:
+ zbx = ZabbixAPI(server_url, timeout=timeout, user=http_login_user, passwd=http_login_password)
+ zbx.login(login_user, login_password)
+ except BaseException as e:
+ module.fail_json(msg="Failed to connect to Zabbix server: %s" % e)
+
+ changed = False
+
+ if state == "present":
+
+ now = datetime.datetime.now()
+        start_time = int(time.mktime(now.timetuple()))  # whole-second epoch timestamp
+ period = 60 * int(minutes) # N * 60 seconds
+
+ if host_groups:
+ (rc, group_ids, error) = get_group_ids(zbx, host_groups)
+ if rc != 0:
+ module.fail_json(msg="Failed to get group_ids: %s" % error)
+ else:
+ group_ids = []
+
+ if host_names:
+ (rc, host_ids, error) = get_host_ids(zbx, host_names)
+ if rc != 0:
+ module.fail_json(msg="Failed to get host_ids: %s" % error)
+ else:
+ host_ids = []
+
+ (rc, maintenance, error) = get_maintenance_id(zbx, name)
+ if rc != 0:
+ module.fail_json(msg="Failed to check maintenance %s existance: %s" % (name, error))
+
+ if not maintenance:
+ if not host_names and not host_groups:
+ module.fail_json(msg="At least one host_name or host_group must be defined for each created maintenance.")
+
+ if module.check_mode:
+ changed = True
+ else:
+ (rc, _, error) = create_maintenance(zbx, group_ids, host_ids, start_time, maintenance_type, period, name, desc)
+ if rc == 0:
+ changed = True
+ else:
+ module.fail_json(msg="Failed to create maintenance: %s" % error)
+
+ if state == "absent":
+
+ (rc, maintenance, error) = get_maintenance_id(zbx, name)
+ if rc != 0:
+ module.fail_json(msg="Failed to check maintenance %s existance: %s" % (name, error))
+
+ if maintenance:
+ if module.check_mode:
+ changed = True
+ else:
+ (rc, _, error) = delete_maintenance(zbx, maintenance)
+ if rc == 0:
+ changed = True
+ else:
+ module.fail_json(msg="Failed to remove maintenance: %s" % error)
+
+ module.exit_json(changed=changed)
+
+from ansible.module_utils.basic import *
+
+if __name__ == '__main__':
+    main()
diff --git a/lib/ansible/modules/extras/monitoring/zabbix_screen.py b/lib/ansible/modules/extras/monitoring/zabbix_screen.py
new file mode 100644
index 0000000000..ffdcb21b5f
--- /dev/null
+++ b/lib/ansible/modules/extras/monitoring/zabbix_screen.py
@@ -0,0 +1,435 @@
+#!/usr/bin/python
+# -*- coding: utf-8 -*-
+
+# (c) 2013-2014, Epic Games, Inc.
+#
+# This file is part of Ansible
+#
+# Ansible is free software: you can redistribute it and/or modify
+# it under the terms of the GNU General Public License as published by
+# the Free Software Foundation, either version 3 of the License, or
+# (at your option) any later version.
+#
+# Ansible is distributed in the hope that it will be useful,
+# but WITHOUT ANY WARRANTY; without even the implied warranty of
+# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+# GNU General Public License for more details.
+#
+# You should have received a copy of the GNU General Public License
+# along with Ansible. If not, see <http://www.gnu.org/licenses/>.
+#
+
+
+DOCUMENTATION = '''
+---
+module: zabbix_screen
+short_description: Create/update/delete Zabbix screens
+description:
+ - This module allows you to create, modify and delete Zabbix screens and associated graph data.
+version_added: "2.0"
+author:
+ - "(@cove)"
+ - "Tony Minfei Ding"
+ - "Harrison Gu (@harrisongu)"
+requirements:
+ - "python >= 2.6"
+ - zabbix-api
+options:
+ server_url:
+ description:
+            - URL of the Zabbix server, with protocol (http or https).
+ required: true
+ aliases: [ "url" ]
+ login_user:
+ description:
+ - Zabbix user name.
+ required: true
+ login_password:
+ description:
+ - Zabbix user password.
+ required: true
+ http_login_user:
+ description:
+ - Basic Auth login
+ required: false
+ default: None
+ version_added: "2.1"
+ http_login_password:
+ description:
+ - Basic Auth password
+ required: false
+ default: None
+ version_added: "2.1"
+ timeout:
+ description:
+ - The timeout of API request (seconds).
+ default: 10
+ screens:
+ description:
+            - List of screens to be created/updated/deleted (see example).
+            - If a screen has already been added, its name won't be updated.
+            - When creating or updating screen(s), C(screen_name) and C(host_group) are required.
+ - When deleting screen(s), the C(screen_name) is required.
+ - 'The available states are: C(present) (default) and C(absent). If the screen(s) already exists, and the state is not C(absent), the screen(s) will just be updated as needed.'
+ required: true
+notes:
+ - Too many concurrent updates to the same screen may cause Zabbix to return errors, see examples for a workaround if needed.
+'''
+
+EXAMPLES = '''
+# Create/update a screen.
+- name: Create a new screen or update an existing screen's items
+ local_action:
+ module: zabbix_screen
+ server_url: http://monitor.example.com
+ login_user: username
+ login_password: password
+ screens:
+ - screen_name: ExampleScreen1
+ host_group: Example group1
+ state: present
+ graph_names:
+ - Example graph1
+ - Example graph2
+ graph_width: 200
+ graph_height: 100
+
+# Create/update multi-screen
+- name: Create two of new screens or update the existing screens' items
+ local_action:
+ module: zabbix_screen
+ server_url: http://monitor.example.com
+ login_user: username
+ login_password: password
+ screens:
+ - screen_name: ExampleScreen1
+ host_group: Example group1
+ state: present
+ graph_names:
+ - Example graph1
+ - Example graph2
+ graph_width: 200
+ graph_height: 100
+ - screen_name: ExampleScreen2
+ host_group: Example group2
+ state: present
+ graph_names:
+ - Example graph1
+ - Example graph2
+ graph_width: 200
+ graph_height: 100
+
+# Limit Zabbix screen creation to one host, since Zabbix can return an error on concurrent updates
+- name: Create a new screen or update an existing screen's items
+ local_action:
+ module: zabbix_screen
+ server_url: http://monitor.example.com
+ login_user: username
+ login_password: password
+ state: present
+ screens:
+ - screen_name: ExampleScreen
+ host_group: Example group
+ state: present
+ graph_names:
+ - Example graph1
+ - Example graph2
+ graph_width: 200
+ graph_height: 100
+ when: inventory_hostname==groups['group_name'][0]
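+
+# A minimal sketch (not from the upstream examples): deleting a screen only
+# needs its screen_name plus state: absent inside the screens list.
+- name: Delete an existing screen
+  local_action:
+    module: zabbix_screen
+    server_url: http://monitor.example.com
+    login_user: username
+    login_password: password
+    screens:
+      - screen_name: ExampleScreen1
+        state: absent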
+'''
+
+try:
+ from zabbix_api import ZabbixAPI, ZabbixAPISubClass
+ from zabbix_api import ZabbixAPIException
+ from zabbix_api import Already_Exists
+ HAS_ZABBIX_API = True
+except ImportError:
+ HAS_ZABBIX_API = False
+
+
+# Extend the ZabbixAPI
+# Since the zabbix-api python module is too old (version 1.0, with no newer release so far), it doesn't support the 'screenitem' API call,
+# so we have to subclass ZabbixAPI to add 'screenitem' support.
+class ZabbixAPIExtends(ZabbixAPI):
+ screenitem = None
+
+ def __init__(self, server, timeout, user, passwd, **kwargs):
+ ZabbixAPI.__init__(self, server, timeout=timeout, user=user, passwd=passwd)
+ self.screenitem = ZabbixAPISubClass(self, dict({"prefix": "screenitem"}, **kwargs))
+
+
+class Screen(object):
+ def __init__(self, module, zbx):
+ self._module = module
+ self._zapi = zbx
+
+ # get group id by group name
+ def get_host_group_id(self, group_name):
+ if group_name == "":
+ self._module.fail_json(msg="group_name is required")
+ hostGroup_list = self._zapi.hostgroup.get({'output': 'extend', 'filter': {'name': group_name}})
+ if len(hostGroup_list) < 1:
+ self._module.fail_json(msg="Host group not found: %s" % group_name)
+ else:
+ hostGroup_id = hostGroup_list[0]['groupid']
+ return hostGroup_id
+
+ # get monitored host_id by host_group_id
+ def get_host_ids_by_group_id(self, group_id):
+ host_list = self._zapi.host.get({'output': 'extend', 'groupids': group_id, 'monitored_hosts': 1})
+ if len(host_list) < 1:
+ self._module.fail_json(msg="No host in the group.")
+ else:
+ host_ids = []
+ for i in host_list:
+ host_id = i['hostid']
+ host_ids.append(host_id)
+ return host_ids
+
+ # get screen
+ def get_screen_id(self, screen_name):
+ if screen_name == "":
+ self._module.fail_json(msg="screen_name is required")
+ try:
+ screen_id_list = self._zapi.screen.get({'output': 'extend', 'search': {"name": screen_name}})
+ if len(screen_id_list) >= 1:
+ screen_id = screen_id_list[0]['screenid']
+ return screen_id
+ return None
+ except Exception as e:
+ self._module.fail_json(msg="Failed to get screen %s from Zabbix: %s" % (screen_name, e))
+
+ # create screen
+ def create_screen(self, screen_name, h_size, v_size):
+ try:
+ if self._module.check_mode:
+ self._module.exit_json(changed=True)
+ screen = self._zapi.screen.create({'name': screen_name, 'hsize': h_size, 'vsize': v_size})
+ return screen['screenids'][0]
+ except Exception as e:
+ self._module.fail_json(msg="Failed to create screen %s: %s" % (screen_name, e))
+
+ # update screen
+ def update_screen(self, screen_id, screen_name, h_size, v_size):
+ try:
+ if self._module.check_mode:
+ self._module.exit_json(changed=True)
+ self._zapi.screen.update({'screenid': screen_id, 'hsize': h_size, 'vsize': v_size})
+ except Exception as e:
+ self._module.fail_json(msg="Failed to update screen %s: %s" % (screen_name, e))
+
+ # delete screen
+ def delete_screen(self, screen_id, screen_name):
+ try:
+ if self._module.check_mode:
+ self._module.exit_json(changed=True)
+ self._zapi.screen.delete([screen_id])
+ except Exception as e:
+ self._module.fail_json(msg="Failed to delete screen %s: %s" % (screen_name, e))
+
+ # get graph ids
+ def get_graph_ids(self, hosts, graph_name_list):
+ graph_id_lists = []
+ vsize = 1
+ for host in hosts:
+ graph_id_list = self.get_graphs_by_host_id(graph_name_list, host)
+ size = len(graph_id_list)
+ if size > 0:
+ graph_id_lists.extend(graph_id_list)
+ if vsize < size:
+ vsize = size
+ return graph_id_lists, vsize
+
+ # getGraphs
+ def get_graphs_by_host_id(self, graph_name_list, host_id):
+ graph_ids = []
+ for graph_name in graph_name_list:
+ graphs_list = self._zapi.graph.get({'output': 'extend', 'search': {'name': graph_name}, 'hostids': host_id})
+ graph_id_list = []
+ if len(graphs_list) > 0:
+ for graph in graphs_list:
+ graph_id = graph['graphid']
+ graph_id_list.append(graph_id)
+ if len(graph_id_list) > 0:
+ graph_ids.extend(graph_id_list)
+ return graph_ids
+
+ # get screen items
+ def get_screen_items(self, screen_id):
+ screen_item_list = self._zapi.screenitem.get({'output': 'extend', 'screenids': screen_id})
+ return screen_item_list
+
+ # delete screen items
+ def delete_screen_items(self, screen_id, screen_item_id_list):
+ try:
+ if len(screen_item_id_list) == 0:
+ return True
+ screen_item_list = self.get_screen_items(screen_id)
+ if len(screen_item_list) > 0:
+ if self._module.check_mode:
+ self._module.exit_json(changed=True)
+ self._zapi.screenitem.delete(screen_item_id_list)
+ return True
+ return False
+ except ZabbixAPIException:
+ pass
+
+ # get screen's hsize and vsize
+ def get_hsize_vsize(self, hosts, v_size):
+ h_size = len(hosts)
+ if h_size == 1:
+ if v_size == 1:
+ h_size = 1
+ elif v_size in range(2, 9):
+ h_size = 2
+ else:
+ h_size = 3
+            v_size = (v_size - 1) // h_size + 1
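+            # e.g. a single host with 10 graphs: h_size becomes 3 and
+            # v_size becomes (10 - 1) // 3 + 1 == 4, i.e. a 3 x 4 grid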
+ return h_size, v_size
+
+ # create screen_items
+ def create_screen_items(self, screen_id, hosts, graph_name_list, width, height, h_size):
+ if len(hosts) < 4:
+ if width is None or width < 0:
+ width = 500
+ else:
+ if width is None or width < 0:
+ width = 200
+ if height is None or height < 0:
+ height = 100
+
+ try:
+            # with only one host, spread its graphs across h_size columns instead of a single row
+ if len(hosts) == 1:
+ graph_id_list = self.get_graphs_by_host_id(graph_name_list, hosts[0])
+ for i, graph_id in enumerate(graph_id_list):
+ if graph_id is not None:
+ self._zapi.screenitem.create({'screenid': screen_id, 'resourcetype': 0, 'resourceid': graph_id,
+ 'width': width, 'height': height,
+                                                      'x': i % h_size, 'y': i // h_size, 'colspan': 1, 'rowspan': 1,
+ 'elements': 0, 'valign': 0, 'halign': 0,
+ 'style': 0, 'dynamic': 0, 'sort_triggers': 0})
+ else:
+ for i, host in enumerate(hosts):
+ graph_id_list = self.get_graphs_by_host_id(graph_name_list, host)
+ for j, graph_id in enumerate(graph_id_list):
+ if graph_id is not None:
+ self._zapi.screenitem.create({'screenid': screen_id, 'resourcetype': 0, 'resourceid': graph_id,
+ 'width': width, 'height': height,
+ 'x': i, 'y': j, 'colspan': 1, 'rowspan': 1,
+ 'elements': 0, 'valign': 0, 'halign': 0,
+ 'style': 0, 'dynamic': 0, 'sort_triggers': 0})
+ except Already_Exists:
+ pass
+
+
+def main():
+ module = AnsibleModule(
+ argument_spec=dict(
+ server_url=dict(type='str', required=True, aliases=['url']),
+ login_user=dict(type='str', required=True),
+ login_password=dict(type='str', required=True, no_log=True),
+ http_login_user=dict(type='str', required=False, default=None),
+ http_login_password=dict(type='str', required=False, default=None, no_log=True),
+ timeout=dict(type='int', default=10),
+ screens=dict(type='list', required=True)
+ ),
+ supports_check_mode=True
+ )
+
+ if not HAS_ZABBIX_API:
+ module.fail_json(msg="Missing requried zabbix-api module (check docs or install with: pip install zabbix-api)")
+
+ server_url = module.params['server_url']
+ login_user = module.params['login_user']
+ login_password = module.params['login_password']
+ http_login_user = module.params['http_login_user']
+ http_login_password = module.params['http_login_password']
+ timeout = module.params['timeout']
+ screens = module.params['screens']
+
+ zbx = None
+ # login to zabbix
+ try:
+ zbx = ZabbixAPIExtends(server_url, timeout=timeout, user=http_login_user, passwd=http_login_password)
+ zbx.login(login_user, login_password)
+    except Exception as e:
+ module.fail_json(msg="Failed to connect to Zabbix server: %s" % e)
+
+ screen = Screen(module, zbx)
+ created_screens = []
+ changed_screens = []
+ deleted_screens = []
+
+ for zabbix_screen in screens:
+ screen_name = zabbix_screen['screen_name']
+ screen_id = screen.get_screen_id(screen_name)
+ state = "absent" if "state" in zabbix_screen and zabbix_screen['state'] == "absent" else "present"
+
+ if state == "absent":
+ if screen_id:
+ screen_item_list = screen.get_screen_items(screen_id)
+ screen_item_id_list = []
+ for screen_item in screen_item_list:
+ screen_item_id = screen_item['screenitemid']
+ screen_item_id_list.append(screen_item_id)
+ screen.delete_screen_items(screen_id, screen_item_id_list)
+ screen.delete_screen(screen_id, screen_name)
+
+ deleted_screens.append(screen_name)
+ else:
+ host_group = zabbix_screen['host_group']
+ graph_names = zabbix_screen['graph_names']
+ graph_width = None
+ if 'graph_width' in zabbix_screen:
+ graph_width = zabbix_screen['graph_width']
+ graph_height = None
+ if 'graph_height' in zabbix_screen:
+ graph_height = zabbix_screen['graph_height']
+ host_group_id = screen.get_host_group_id(host_group)
+ hosts = screen.get_host_ids_by_group_id(host_group_id)
+
+ screen_item_id_list = []
+ resource_id_list = []
+
+ graph_ids, v_size = screen.get_graph_ids(hosts, graph_names)
+ h_size, v_size = screen.get_hsize_vsize(hosts, v_size)
+
+ if not screen_id:
+ # create screen
+ screen_id = screen.create_screen(screen_name, h_size, v_size)
+ screen.create_screen_items(screen_id, hosts, graph_names, graph_width, graph_height, h_size)
+ created_screens.append(screen_name)
+ else:
+ screen_item_list = screen.get_screen_items(screen_id)
+
+ for screen_item in screen_item_list:
+ screen_item_id = screen_item['screenitemid']
+ resource_id = screen_item['resourceid']
+ screen_item_id_list.append(screen_item_id)
+ resource_id_list.append(resource_id)
+
+ # when the screen items changed, then update
+ if graph_ids != resource_id_list:
+ deleted = screen.delete_screen_items(screen_id, screen_item_id_list)
+ if deleted:
+ screen.update_screen(screen_id, screen_name, h_size, v_size)
+ screen.create_screen_items(screen_id, hosts, graph_names, graph_width, graph_height, h_size)
+ changed_screens.append(screen_name)
+
+ if created_screens and changed_screens:
+ module.exit_json(changed=True, result="Successfully created screen(s): %s, and updated screen(s): %s" % (",".join(created_screens), ",".join(changed_screens)))
+ elif created_screens:
+ module.exit_json(changed=True, result="Successfully created screen(s): %s" % ",".join(created_screens))
+ elif changed_screens:
+ module.exit_json(changed=True, result="Successfully updated screen(s): %s" % ",".join(changed_screens))
+ elif deleted_screens:
+ module.exit_json(changed=True, result="Successfully deleted screen(s): %s" % ",".join(deleted_screens))
+ else:
+ module.exit_json(changed=False)
+
+from ansible.module_utils.basic import *
+
+if __name__ == '__main__':
+ main()
diff --git a/lib/ansible/modules/extras/network/__init__.py b/lib/ansible/modules/extras/network/__init__.py
new file mode 100644
index 0000000000..e69de29bb2
--- /dev/null
+++ b/lib/ansible/modules/extras/network/__init__.py
diff --git a/lib/ansible/modules/extras/network/a10/__init__.py b/lib/ansible/modules/extras/network/a10/__init__.py
new file mode 100644
index 0000000000..e69de29bb2
--- /dev/null
+++ b/lib/ansible/modules/extras/network/a10/__init__.py
diff --git a/lib/ansible/modules/extras/network/a10/a10_server.py b/lib/ansible/modules/extras/network/a10/a10_server.py
new file mode 100644
index 0000000000..d06a2a661a
--- /dev/null
+++ b/lib/ansible/modules/extras/network/a10/a10_server.py
@@ -0,0 +1,294 @@
+#!/usr/bin/python
+# -*- coding: utf-8 -*-
+
+"""
+Ansible module to manage A10 Networks slb server objects
+(c) 2014, Mischa Peters <mpeters@a10networks.com>
+
+This file is part of Ansible
+
+Ansible is free software: you can redistribute it and/or modify
+it under the terms of the GNU General Public License as published by
+the Free Software Foundation, either version 3 of the License, or
+(at your option) any later version.
+
+Ansible is distributed in the hope that it will be useful,
+but WITHOUT ANY WARRANTY; without even the implied warranty of
+MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+GNU General Public License for more details.
+
+You should have received a copy of the GNU General Public License
+along with Ansible. If not, see <http://www.gnu.org/licenses/>.
+"""
+
+DOCUMENTATION = '''
+---
+module: a10_server
+version_added: 1.8
+short_description: Manage A10 Networks AX/SoftAX/Thunder/vThunder devices
+description:
+ - Manage slb server objects on A10 Networks devices via aXAPI
+author: "Mischa Peters (@mischapeters)"
+notes:
+ - Requires A10 Networks aXAPI 2.1
+options:
+ host:
+ description:
+ - hostname or ip of your A10 Networks device
+ required: true
+ username:
+ description:
+ - admin account of your A10 Networks device
+ required: true
+ aliases: ['user', 'admin']
+ password:
+ description:
+ - admin password of your A10 Networks device
+ required: true
+ aliases: ['pass', 'pwd']
+ server_name:
+ description:
+ - slb server name
+ required: true
+ aliases: ['server']
+ server_ip:
+ description:
+ - slb server IP address
+ required: false
+ default: null
+ aliases: ['ip', 'address']
+ server_status:
+ description:
+ - slb virtual server status
+ required: false
+ default: enabled
+ aliases: ['status']
+ choices: ['enabled', 'disabled']
+ server_ports:
+ description:
+ - A list of ports to create for the server. Each list item should be a
+ dictionary which specifies the C(port:) and C(protocol:), but can also optionally
+ specify the C(status:). See the examples below for details. This parameter is
+ required when C(state) is C(present).
+ required: false
+ default: null
+ state:
+ description:
+ - create, update or remove slb server
+ required: false
+ default: present
+ choices: ['present', 'absent']
+ write_config:
+ description:
+ - If C(yes), any changes will cause a write of the running configuration
+ to non-volatile memory. This will save I(all) configuration changes,
+ including those that may have been made manually or through other modules,
+ so care should be taken when specifying C(yes).
+ required: false
+ version_added: 2.2
+ default: "no"
+ choices: ["yes", "no"]
+ validate_certs:
+ description:
+ - If C(no), SSL certificates will not be validated. This should only be used
+ on personally controlled devices using self-signed certificates.
+ required: false
+ version_added: 2.2
+ default: 'yes'
+ choices: ['yes', 'no']
+
+'''
+
+EXAMPLES = '''
+# Create a new server
+- a10_server:
+ host: a10.mydomain.com
+ username: myadmin
+ password: mypassword
+ server: test
+ server_ip: 1.1.1.100
+ server_ports:
+ - port_num: 8080
+ protocol: tcp
+ - port_num: 8443
+ protocol: TCP
+
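+# A hedged extra sketch (not in the upstream docs): removing the server
+# created above; only the connection parameters, server name and state matter.
+- a10_server:
+    host: a10.mydomain.com
+    username: myadmin
+    password: mypassword
+    server: test
+    state: absent
+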
+'''
+
+VALID_PORT_FIELDS = ['port_num', 'protocol', 'status']
+
+def validate_ports(module, ports):
+ for item in ports:
+ for key in item:
+ if key not in VALID_PORT_FIELDS:
+ module.fail_json(msg="invalid port field (%s), must be one of: %s" % (key, ','.join(VALID_PORT_FIELDS)))
+
+ # validate the port number is present and an integer
+ if 'port_num' in item:
+ try:
+ item['port_num'] = int(item['port_num'])
+            except (ValueError, TypeError):
+ module.fail_json(msg="port_num entries in the port definitions must be integers")
+ else:
+ module.fail_json(msg="port definitions must define the port_num field")
+
+ # validate the port protocol is present, and convert it to
+ # the internal API integer value (and validate it)
+ if 'protocol' in item:
+ protocol = axapi_get_port_protocol(item['protocol'])
+ if not protocol:
+ module.fail_json(msg="invalid port protocol, must be one of: %s" % ','.join(AXAPI_PORT_PROTOCOLS))
+ else:
+ item['protocol'] = protocol
+ else:
+ module.fail_json(msg="port definitions must define the port protocol (%s)" % ','.join(AXAPI_PORT_PROTOCOLS))
+
+ # convert the status to the internal API integer value
+ if 'status' in item:
+ item['status'] = axapi_enabled_disabled(item['status'])
+ else:
+ item['status'] = 1
+
+
+def main():
+ argument_spec = a10_argument_spec()
+ argument_spec.update(url_argument_spec())
+ argument_spec.update(
+ dict(
+ state=dict(type='str', default='present', choices=['present', 'absent']),
+ server_name=dict(type='str', aliases=['server'], required=True),
+ server_ip=dict(type='str', aliases=['ip', 'address']),
+ server_status=dict(type='str', default='enabled', aliases=['status'], choices=['enabled', 'disabled']),
+ server_ports=dict(type='list', aliases=['port'], default=[]),
+ )
+ )
+
+ module = AnsibleModule(
+ argument_spec=argument_spec,
+ supports_check_mode=False
+ )
+
+ host = module.params['host']
+ username = module.params['username']
+ password = module.params['password']
+ state = module.params['state']
+ write_config = module.params['write_config']
+ slb_server = module.params['server_name']
+ slb_server_ip = module.params['server_ip']
+ slb_server_status = module.params['server_status']
+ slb_server_ports = module.params['server_ports']
+
+ if slb_server is None:
+ module.fail_json(msg='server_name is required')
+
+ axapi_base_url = 'https://%s/services/rest/V2.1/?format=json' % host
+ session_url = axapi_authenticate(module, axapi_base_url, username, password)
+
+ # validate the ports data structure
+ validate_ports(module, slb_server_ports)
+
+ json_post = {
+ 'server': {
+ 'name': slb_server,
+ }
+ }
+
+ # add optional module parameters
+ if slb_server_ip:
+ json_post['server']['host'] = slb_server_ip
+
+ if slb_server_ports:
+ json_post['server']['port_list'] = slb_server_ports
+
+ if slb_server_status:
+ json_post['server']['status'] = axapi_enabled_disabled(slb_server_status)
+
+ slb_server_data = axapi_call(module, session_url + '&method=slb.server.search', json.dumps({'name': slb_server}))
+ slb_server_exists = not axapi_failure(slb_server_data)
+
+ changed = False
+ if state == 'present':
+ if not slb_server_exists:
+ if not slb_server_ip:
+ module.fail_json(msg='you must specify an IP address when creating a server')
+
+ result = axapi_call(module, session_url + '&method=slb.server.create', json.dumps(json_post))
+ if axapi_failure(result):
+ module.fail_json(msg="failed to create the server: %s" % result['response']['err']['msg'])
+ changed = True
+ else:
+ def port_needs_update(src_ports, dst_ports):
+ '''
+                Check whether every port definition in src_ports also exists
+                in dst_ports with identical fields. Returns True if any port
+                is missing or differs, otherwise False.
+ '''
+ for src_port in src_ports:
+ found = False
+ different = False
+ for dst_port in dst_ports:
+ if src_port['port_num'] == dst_port['port_num']:
+ found = True
+ for valid_field in VALID_PORT_FIELDS:
+ if src_port[valid_field] != dst_port[valid_field]:
+ different = True
+ break
+ if found or different:
+ break
+ if not found or different:
+ return True
+ # every port from the src exists in the dst, and none of them were different
+ return False
+
+ def status_needs_update(current_status, new_status):
+ '''
+ Check to determine if we want to change the status of a server.
+ If there is a difference between the current status of the server and
+ the desired status, return true, otherwise false.
+ '''
+ if current_status != new_status:
+ return True
+ return False
+
+ defined_ports = slb_server_data.get('server', {}).get('port_list', [])
+ current_status = slb_server_data.get('server', {}).get('status')
+
+ # we check for a needed update several ways
+ # - in case ports are missing from the ones specified by the user
+ # - in case ports are missing from those on the device
+            # - in case we are changing the status of a server
+            if (port_needs_update(defined_ports, slb_server_ports) or
+                    port_needs_update(slb_server_ports, defined_ports) or
+                    status_needs_update(current_status, axapi_enabled_disabled(slb_server_status))):
+ result = axapi_call(module, session_url + '&method=slb.server.update', json.dumps(json_post))
+ if axapi_failure(result):
+ module.fail_json(msg="failed to update the server: %s" % result['response']['err']['msg'])
+ changed = True
+
+        # if we changed things, get the full info regarding
+        # the server for the return data below
+ if changed:
+ result = axapi_call(module, session_url + '&method=slb.server.search', json.dumps({'name': slb_server}))
+ else:
+ result = slb_server_data
+ elif state == 'absent':
+ if slb_server_exists:
+ result = axapi_call(module, session_url + '&method=slb.server.delete', json.dumps({'name': slb_server}))
+ changed = True
+ else:
+ result = dict(msg="the server was not present")
+
+ # if the config has changed, save the config unless otherwise requested
+ if changed and write_config:
+ write_result = axapi_call(module, session_url + '&method=system.action.write_memory')
+ if axapi_failure(write_result):
+ module.fail_json(msg="failed to save the configuration: %s" % write_result['response']['err']['msg'])
+
+ # log out of the session nicely and exit
+ axapi_call(module, session_url + '&method=session.close')
+ module.exit_json(changed=changed, content=result)
+
+# standard ansible module imports
+from ansible.module_utils.basic import *
+from ansible.module_utils.urls import *
+from ansible.module_utils.a10 import *
+
+if __name__ == '__main__':
+    main()
diff --git a/lib/ansible/modules/extras/network/a10/a10_service_group.py b/lib/ansible/modules/extras/network/a10/a10_service_group.py
new file mode 100644
index 0000000000..af664084b6
--- /dev/null
+++ b/lib/ansible/modules/extras/network/a10/a10_service_group.py
@@ -0,0 +1,341 @@
+#!/usr/bin/python
+# -*- coding: utf-8 -*-
+
+"""
+Ansible module to manage A10 Networks slb service-group objects
+(c) 2014, Mischa Peters <mpeters@a10networks.com>
+
+This file is part of Ansible
+
+Ansible is free software: you can redistribute it and/or modify
+it under the terms of the GNU General Public License as published by
+the Free Software Foundation, either version 3 of the License, or
+(at your option) any later version.
+
+Ansible is distributed in the hope that it will be useful,
+but WITHOUT ANY WARRANTY; without even the implied warranty of
+MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+GNU General Public License for more details.
+
+You should have received a copy of the GNU General Public License
+along with Ansible. If not, see <http://www.gnu.org/licenses/>.
+"""
+
+DOCUMENTATION = '''
+---
+module: a10_service_group
+version_added: 1.8
+short_description: Manage A10 Networks devices' service groups
+description:
+ - Manage slb service-group objects on A10 Networks devices via aXAPI
+author: "Mischa Peters (@mischapeters)"
+notes:
+ - Requires A10 Networks aXAPI 2.1
+    - When a server doesn't exist and is added to the service-group, the server will be created
+options:
+ host:
+ description:
+ - hostname or ip of your A10 Networks device
+ required: true
+ default: null
+ aliases: []
+ choices: []
+ username:
+ description:
+ - admin account of your A10 Networks device
+ required: true
+ default: null
+ aliases: ['user', 'admin']
+ choices: []
+ password:
+ description:
+ - admin password of your A10 Networks device
+ required: true
+ default: null
+ aliases: ['pass', 'pwd']
+ choices: []
+ service_group:
+ description:
+ - slb service-group name
+ required: true
+ default: null
+ aliases: ['service', 'pool', 'group']
+ choices: []
+ service_group_protocol:
+ description:
+ - slb service-group protocol
+ required: false
+ default: tcp
+ aliases: ['proto', 'protocol']
+ choices: ['tcp', 'udp']
+ service_group_method:
+ description:
+ - slb service-group loadbalancing method
+ required: false
+ default: round-robin
+ aliases: ['method']
+ choices: ['round-robin', 'weighted-rr', 'least-connection', 'weighted-least-connection', 'service-least-connection', 'service-weighted-least-connection', 'fastest-response', 'least-request', 'round-robin-strict', 'src-ip-only-hash', 'src-ip-hash']
+ servers:
+ description:
+ - A list of servers to add to the service group. Each list item should be a
+ dictionary which specifies the C(server:) and C(port:), but can also optionally
+ specify the C(status:). See the examples below for details.
+ required: false
+ default: null
+ aliases: []
+ choices: []
+ write_config:
+ description:
+ - If C(yes), any changes will cause a write of the running configuration
+ to non-volatile memory. This will save I(all) configuration changes,
+ including those that may have been made manually or through other modules,
+ so care should be taken when specifying C(yes).
+ required: false
+ default: "no"
+ choices: ["yes", "no"]
+ validate_certs:
+ description:
+ - If C(no), SSL certificates will not be validated. This should only be used
+ on personally controlled devices using self-signed certificates.
+ required: false
+ default: 'yes'
+ choices: ['yes', 'no']
+
+'''
+
+EXAMPLES = '''
+# Create a new service-group
+- a10_service_group:
+ host: a10.mydomain.com
+ username: myadmin
+ password: mypassword
+ service_group: sg-80-tcp
+ servers:
+ - server: foo1.mydomain.com
+ port: 8080
+ - server: foo2.mydomain.com
+ port: 8080
+ - server: foo3.mydomain.com
+ port: 8080
+ - server: foo4.mydomain.com
+ port: 8080
+ status: disabled
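+
+# Remove the same service-group again (a minimal sketch; assumes the
+# credentials above are still valid)
+- a10_service_group:
+    host: a10.mydomain.com
+    username: myadmin
+    password: mypassword
+    service_group: sg-80-tcp
+    state: absent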
+
+'''
+
+VALID_SERVICE_GROUP_FIELDS = ['name', 'protocol', 'lb_method']
+VALID_SERVER_FIELDS = ['server', 'port', 'status']
+
+def validate_servers(module, servers):
+ for item in servers:
+ for key in item:
+ if key not in VALID_SERVER_FIELDS:
+ module.fail_json(msg="invalid server field (%s), must be one of: %s" % (key, ','.join(VALID_SERVER_FIELDS)))
+
+ # validate the server name is present
+ if 'server' not in item:
+ module.fail_json(msg="server definitions must define the server field")
+
+ # validate the port number is present and an integer
+ if 'port' in item:
+ try:
+ item['port'] = int(item['port'])
+ except:
+ module.fail_json(msg="server port definitions must be integers")
+ else:
+ module.fail_json(msg="server definitions must define the port field")
+
+ # convert the status to the internal API integer value
+ if 'status' in item:
+ item['status'] = axapi_enabled_disabled(item['status'])
+ else:
+ item['status'] = 1
+
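+# A valid servers entry looks like the following (illustrative values; the
+# field names are the VALID_SERVER_FIELDS checked above):
+#
+#   {'server': 'foo1.mydomain.com', 'port': 8080, 'status': 'enabled'}
+#
+# validate_servers() coerces 'port' to an int and rewrites 'status' to the
+# internal aXAPI integer value.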
+
+def main():
+ argument_spec = a10_argument_spec()
+ argument_spec.update(url_argument_spec())
+ argument_spec.update(
+ dict(
+ state=dict(type='str', default='present', choices=['present', 'absent']),
+ service_group=dict(type='str', aliases=['service', 'pool', 'group'], required=True),
+ service_group_protocol=dict(type='str', default='tcp', aliases=['proto', 'protocol'], choices=['tcp', 'udp']),
+ service_group_method=dict(type='str', default='round-robin',
+ aliases=['method'],
+ choices=['round-robin',
+ 'weighted-rr',
+ 'least-connection',
+ 'weighted-least-connection',
+ 'service-least-connection',
+ 'service-weighted-least-connection',
+ 'fastest-response',
+ 'least-request',
+ 'round-robin-strict',
+ 'src-ip-only-hash',
+ 'src-ip-hash']),
+ servers=dict(type='list', aliases=['server', 'member'], default=[]),
+ )
+ )
+
+ module = AnsibleModule(
+ argument_spec=argument_spec,
+ supports_check_mode=False
+ )
+
+ host = module.params['host']
+ username = module.params['username']
+ password = module.params['password']
+ state = module.params['state']
+ write_config = module.params['write_config']
+ slb_service_group = module.params['service_group']
+ slb_service_group_proto = module.params['service_group_protocol']
+ slb_service_group_method = module.params['service_group_method']
+ slb_servers = module.params['servers']
+
+ if slb_service_group is None:
+ module.fail_json(msg='service_group is required')
+
+ axapi_base_url = 'https://' + host + '/services/rest/V2.1/?format=json'
+ load_balancing_methods = {'round-robin': 0,
+ 'weighted-rr': 1,
+ 'least-connection': 2,
+ 'weighted-least-connection': 3,
+ 'service-least-connection': 4,
+ 'service-weighted-least-connection': 5,
+ 'fastest-response': 6,
+ 'least-request': 7,
+ 'round-robin-strict': 8,
+ 'src-ip-only-hash': 14,
+ 'src-ip-hash': 15}
+
+ if not slb_service_group_proto or slb_service_group_proto.lower() == 'tcp':
+ protocol = 2
+ else:
+ protocol = 3
+
+ # validate the server data list structure
+ validate_servers(module, slb_servers)
+
+ json_post = {
+ 'service_group': {
+ 'name': slb_service_group,
+ 'protocol': protocol,
+ 'lb_method': load_balancing_methods[slb_service_group_method],
+ }
+ }
+
+ # first we authenticate to get a session id
+ session_url = axapi_authenticate(module, axapi_base_url, username, password)
+
+ # then we check to see if the specified group exists
+ slb_result = axapi_call(module, session_url + '&method=slb.service_group.search', json.dumps({'name': slb_service_group}))
+ slb_service_group_exist = not axapi_failure(slb_result)
+
+ changed = False
+ if state == 'present':
+ # before creating/updating we need to validate that servers
+ # defined in the servers list exist to prevent errors
+ checked_servers = []
+ for server in slb_servers:
+ result = axapi_call(module, session_url + '&method=slb.server.search', json.dumps({'name': server['server']}))
+ if axapi_failure(result):
+ module.fail_json(msg="the server %s specified in the servers list does not exist" % server['server'])
+ checked_servers.append(server['server'])
+
+ if not slb_service_group_exist:
+ result = axapi_call(module, session_url + '&method=slb.service_group.create', json.dumps(json_post))
+ if axapi_failure(result):
+ module.fail_json(msg=result['response']['err']['msg'])
+ changed = True
+ else:
+ # check to see if the service group definition without the
+ # server members is different, and update that individually
+ # if it needs it
+ do_update = False
+ for field in VALID_SERVICE_GROUP_FIELDS:
+ if json_post['service_group'][field] != slb_result['service_group'][field]:
+ do_update = True
+ break
+
+ if do_update:
+ result = axapi_call(module, session_url + '&method=slb.service_group.update', json.dumps(json_post))
+ if axapi_failure(result):
+ module.fail_json(msg=result['response']['err']['msg'])
+ changed = True
+
+ # next we pull the defined list of servers out of the returned
+ # results to make it a bit easier to iterate over
+ defined_servers = slb_result.get('service_group', {}).get('member_list', [])
+
+ # next we add/update new member servers from the user-specified
+ # list if they're different or not on the target device
+ for server in slb_servers:
+ found = False
+ different = False
+ for def_server in defined_servers:
+ if server['server'] == def_server['server']:
+ found = True
+ for valid_field in VALID_SERVER_FIELDS:
+ if server[valid_field] != def_server[valid_field]:
+ different = True
+ break
+ if found or different:
+ break
+ # add or update as required
+ server_data = {
+ "name": slb_service_group,
+ "member": server,
+ }
+ if not found:
+ result = axapi_call(module, session_url + '&method=slb.service_group.member.create', json.dumps(server_data))
+ changed = True
+ elif different:
+ result = axapi_call(module, session_url + '&method=slb.service_group.member.update', json.dumps(server_data))
+ changed = True
+
+ # finally, remove any servers that are on the target
+ # device but were not specified in the list given
+ for server in defined_servers:
+ found = False
+ for slb_server in slb_servers:
+ if server['server'] == slb_server['server']:
+ found = True
+ break
+ # remove if not found
+ server_data = {
+ "name": slb_service_group,
+ "member": server,
+ }
+ if not found:
+ result = axapi_call(module, session_url + '&method=slb.service_group.member.delete', json.dumps(server_data))
+ changed = True
+
+ # if we changed things, get the full info regarding
+ # the service group for the return data below
+ if changed:
+ result = axapi_call(module, session_url + '&method=slb.service_group.search', json.dumps({'name': slb_service_group}))
+ else:
+ result = slb_result
+ elif state == 'absent':
+ if slb_service_group_exist:
+ result = axapi_call(module, session_url + '&method=slb.service_group.delete', json.dumps({'name': slb_service_group}))
+ changed = True
+ else:
+ result = dict(msg="the service group was not present")
+
+ # if the config has changed, save the config unless otherwise requested
+ if changed and write_config:
+ write_result = axapi_call(module, session_url + '&method=system.action.write_memory')
+ if axapi_failure(write_result):
+ module.fail_json(msg="failed to save the configuration: %s" % write_result['response']['err']['msg'])
+
+ # log out of the session nicely and exit
+ axapi_call(module, session_url + '&method=session.close')
+ module.exit_json(changed=changed, content=result)
+
+# standard ansible module imports
+from ansible.module_utils.basic import *
+from ansible.module_utils.urls import *
+from ansible.module_utils.a10 import *
+
+if __name__ == '__main__':
+    main()
diff --git a/lib/ansible/modules/extras/network/a10/a10_virtual_server.py b/lib/ansible/modules/extras/network/a10/a10_virtual_server.py
new file mode 100644
index 0000000000..1a04f1a175
--- /dev/null
+++ b/lib/ansible/modules/extras/network/a10/a10_virtual_server.py
@@ -0,0 +1,297 @@
+#!/usr/bin/python
+# -*- coding: utf-8 -*-
+
+"""
+Ansible module to manage A10 Networks slb virtual server objects
+(c) 2014, Mischa Peters <mpeters@a10networks.com>
+
+This file is part of Ansible
+
+Ansible is free software: you can redistribute it and/or modify
+it under the terms of the GNU General Public License as published by
+the Free Software Foundation, either version 3 of the License, or
+(at your option) any later version.
+
+Ansible is distributed in the hope that it will be useful,
+but WITHOUT ANY WARRANTY; without even the implied warranty of
+MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+GNU General Public License for more details.
+
+You should have received a copy of the GNU General Public License
+along with Ansible. If not, see <http://www.gnu.org/licenses/>.
+"""
+
+DOCUMENTATION = '''
+---
+module: a10_virtual_server
+version_added: 1.8
+short_description: Manage A10 Networks devices' virtual servers
+description:
+ - Manage slb virtual server objects on A10 Networks devices via aXAPI
+author: "Mischa Peters (@mischapeters)"
+notes:
+ - Requires A10 Networks aXAPI 2.1
+requirements: []
+options:
+ host:
+ description:
+ - hostname or ip of your A10 Networks device
+ required: true
+ default: null
+ aliases: []
+ choices: []
+ username:
+ description:
+ - admin account of your A10 Networks device
+ required: true
+ default: null
+ aliases: ['user', 'admin']
+ choices: []
+ password:
+ description:
+ - admin password of your A10 Networks device
+ required: true
+ default: null
+ aliases: ['pass', 'pwd']
+ choices: []
+ virtual_server:
+ description:
+ - slb virtual server name
+ required: true
+ default: null
+ aliases: ['vip', 'virtual']
+ choices: []
+ virtual_server_ip:
+ description:
+ - slb virtual server ip address
+ required: false
+ default: null
+ aliases: ['ip', 'address']
+ choices: []
+ virtual_server_status:
+ description:
+ - slb virtual server status
+ required: false
+    default: enabled
+ aliases: ['status']
+ choices: ['enabled', 'disabled']
+ virtual_server_ports:
+ description:
+ - A list of ports to create for the virtual server. Each list item should be a
+        dictionary which specifies the C(port:) and C(protocol:), but can also optionally
+ specify the C(service_group:) as well as the C(status:). See the examples
+ below for details. This parameter is required when C(state) is C(present).
+ required: false
+ write_config:
+ description:
+ - If C(yes), any changes will cause a write of the running configuration
+ to non-volatile memory. This will save I(all) configuration changes,
+ including those that may have been made manually or through other modules,
+ so care should be taken when specifying C(yes).
+ required: false
+ default: "no"
+ choices: ["yes", "no"]
+ validate_certs:
+ description:
+ - If C(no), SSL certificates will not be validated. This should only be used
+ on personally controlled devices using self-signed certificates.
+ required: false
+ default: 'yes'
+ choices: ['yes', 'no']
+
+'''
+
+EXAMPLES = '''
+# Create a new virtual server
+- a10_virtual_server:
+ host: a10.mydomain.com
+ username: myadmin
+ password: mypassword
+ virtual_server: vserver1
+ virtual_server_ip: 1.1.1.1
+ virtual_server_ports:
+ - port: 80
+        protocol: tcp
+        service_group: sg-80-tcp
+      - port: 443
+        protocol: https
+ service_group: sg-443-https
+ - port: 8080
+ protocol: http
+ status: disabled
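+
+# Remove the virtual server again (a minimal sketch; virtual_server_ip and
+# virtual_server_ports are required by the argument spec even when absent)
+- a10_virtual_server:
+    host: a10.mydomain.com
+    username: myadmin
+    password: mypassword
+    virtual_server: vserver1
+    virtual_server_ip: 1.1.1.1
+    virtual_server_ports: []
+    state: absent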
+
+'''
+
+VALID_PORT_FIELDS = ['port', 'protocol', 'service_group', 'status']
+
+def validate_ports(module, ports):
+ for item in ports:
+ for key in item:
+ if key not in VALID_PORT_FIELDS:
+ module.fail_json(msg="invalid port field (%s), must be one of: %s" % (key, ','.join(VALID_PORT_FIELDS)))
+
+ # validate the port number is present and an integer
+ if 'port' in item:
+ try:
+ item['port'] = int(item['port'])
+ except:
+ module.fail_json(msg="port definitions must be integers")
+ else:
+ module.fail_json(msg="port definitions must define the port field")
+
+ # validate the port protocol is present, and convert it to
+ # the internal API integer value (and validate it)
+ if 'protocol' in item:
+ protocol = axapi_get_vport_protocol(item['protocol'])
+ if not protocol:
+ module.fail_json(msg="invalid port protocol, must be one of: %s" % ','.join(AXAPI_VPORT_PROTOCOLS))
+ else:
+ item['protocol'] = protocol
+ else:
+ module.fail_json(msg="port definitions must define the port protocol (%s)" % ','.join(AXAPI_VPORT_PROTOCOLS))
+
+ # convert the status to the internal API integer value
+ if 'status' in item:
+ item['status'] = axapi_enabled_disabled(item['status'])
+ else:
+ item['status'] = 1
+
+ # ensure the service_group field is at least present
+ if 'service_group' not in item:
+ item['service_group'] = ''
+
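+# A valid virtual_server_ports entry looks like the following (illustrative
+# values; the field names are the VALID_PORT_FIELDS above):
+#
+#   {'port': 80, 'protocol': 'tcp', 'service_group': 'sg-80-tcp', 'status': 'enabled'}
+#
+# validate_ports() rewrites 'protocol' and 'status' to internal aXAPI
+# integers and fills in an empty 'service_group' when one is not given.
+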
+def main():
+ argument_spec = a10_argument_spec()
+ argument_spec.update(url_argument_spec())
+ argument_spec.update(
+ dict(
+ state=dict(type='str', default='present', choices=['present', 'absent']),
+ virtual_server=dict(type='str', aliases=['vip', 'virtual'], required=True),
+ virtual_server_ip=dict(type='str', aliases=['ip', 'address'], required=True),
+ virtual_server_status=dict(type='str', default='enabled', aliases=['status'], choices=['enabled', 'disabled']),
+ virtual_server_ports=dict(type='list', required=True),
+ )
+ )
+
+ module = AnsibleModule(
+ argument_spec=argument_spec,
+ supports_check_mode=False
+ )
+
+ host = module.params['host']
+ username = module.params['username']
+ password = module.params['password']
+ state = module.params['state']
+ write_config = module.params['write_config']
+ slb_virtual = module.params['virtual_server']
+ slb_virtual_ip = module.params['virtual_server_ip']
+ slb_virtual_status = module.params['virtual_server_status']
+ slb_virtual_ports = module.params['virtual_server_ports']
+
+ if slb_virtual is None:
+ module.fail_json(msg='virtual_server is required')
+
+ validate_ports(module, slb_virtual_ports)
+
+ axapi_base_url = 'https://%s/services/rest/V2.1/?format=json' % host
+ session_url = axapi_authenticate(module, axapi_base_url, username, password)
+
+ slb_virtual_data = axapi_call(module, session_url + '&method=slb.virtual_server.search', json.dumps({'name': slb_virtual}))
+ slb_virtual_exists = not axapi_failure(slb_virtual_data)
+
+ changed = False
+ if state == 'present':
+ json_post = {
+ 'virtual_server': {
+ 'name': slb_virtual,
+ 'address': slb_virtual_ip,
+ 'status': axapi_enabled_disabled(slb_virtual_status),
+ 'vport_list': slb_virtual_ports,
+ }
+ }
+
+        # before creating/updating we need to validate that any
+        # service groups defined in the ports list exist, since
+        # the API will still create port definitions for them
+        # while indicating a failure occurred
+ checked_service_groups = []
+ for port in slb_virtual_ports:
+ if 'service_group' in port and port['service_group'] not in checked_service_groups:
+ # skip blank service group entries
+ if port['service_group'] == '':
+ continue
+ result = axapi_call(module, session_url + '&method=slb.service_group.search', json.dumps({'name': port['service_group']}))
+ if axapi_failure(result):
+ module.fail_json(msg="the service group %s specified in the ports list does not exist" % port['service_group'])
+ checked_service_groups.append(port['service_group'])
+
+ if not slb_virtual_exists:
+ result = axapi_call(module, session_url + '&method=slb.virtual_server.create', json.dumps(json_post))
+ if axapi_failure(result):
+ module.fail_json(msg="failed to create the virtual server: %s" % result['response']['err']['msg'])
+ changed = True
+ else:
+ def needs_update(src_ports, dst_ports):
+ '''
+                Checks whether the port definitions in the src_ports array
+                are missing from or different from those in dst_ports. If
+                there is a difference, this function returns true, otherwise false.
+ '''
+ for src_port in src_ports:
+ found = False
+ different = False
+ for dst_port in dst_ports:
+ if src_port['port'] == dst_port['port']:
+ found = True
+ for valid_field in VALID_PORT_FIELDS:
+ if src_port[valid_field] != dst_port[valid_field]:
+ different = True
+ break
+ if found or different:
+ break
+ if not found or different:
+ return True
+ # every port from the src exists in the dst, and none of them were different
+ return False
+
+ defined_ports = slb_virtual_data.get('virtual_server', {}).get('vport_list', [])
+
+ # we check for a needed update both ways, in case ports
+ # are missing from either the ones specified by the user
+ # or from those on the device
+ if needs_update(defined_ports, slb_virtual_ports) or needs_update(slb_virtual_ports, defined_ports):
+ result = axapi_call(module, session_url + '&method=slb.virtual_server.update', json.dumps(json_post))
+ if axapi_failure(result):
+                    module.fail_json(msg="failed to update the virtual server: %s" % result['response']['err']['msg'])
+ changed = True
+
+        # if we changed things, get the full info regarding
+        # the virtual server for the return data below
+ if changed:
+ result = axapi_call(module, session_url + '&method=slb.virtual_server.search', json.dumps({'name': slb_virtual}))
+ else:
+ result = slb_virtual_data
+ elif state == 'absent':
+ if slb_virtual_exists:
+ result = axapi_call(module, session_url + '&method=slb.virtual_server.delete', json.dumps({'name': slb_virtual}))
+ changed = True
+ else:
+ result = dict(msg="the virtual server was not present")
+
+ # if the config has changed, save the config unless otherwise requested
+ if changed and write_config:
+ write_result = axapi_call(module, session_url + '&method=system.action.write_memory')
+ if axapi_failure(write_result):
+ module.fail_json(msg="failed to save the configuration: %s" % write_result['response']['err']['msg'])
+
+ # log out of the session nicely and exit
+ axapi_call(module, session_url + '&method=session.close')
+ module.exit_json(changed=changed, content=result)
+
+# standard ansible module imports
+from ansible.module_utils.basic import *
+from ansible.module_utils.urls import *
+from ansible.module_utils.a10 import *
+if __name__ == '__main__':
+ main()
+
diff --git a/lib/ansible/modules/extras/network/asa/__init__.py b/lib/ansible/modules/extras/network/asa/__init__.py
new file mode 100644
index 0000000000..e69de29bb2
--- /dev/null
+++ b/lib/ansible/modules/extras/network/asa/__init__.py
diff --git a/lib/ansible/modules/extras/network/asa/asa_acl.py b/lib/ansible/modules/extras/network/asa/asa_acl.py
new file mode 100644
index 0000000000..55d3fd58cd
--- /dev/null
+++ b/lib/ansible/modules/extras/network/asa/asa_acl.py
@@ -0,0 +1,230 @@
+#!/usr/bin/python
+#
+# This file is part of Ansible
+#
+# Ansible is free software: you can redistribute it and/or modify
+# it under the terms of the GNU General Public License as published by
+# the Free Software Foundation, either version 3 of the License, or
+# (at your option) any later version.
+#
+# Ansible is distributed in the hope that it will be useful,
+# but WITHOUT ANY WARRANTY; without even the implied warranty of
+# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+# GNU General Public License for more details.
+#
+# You should have received a copy of the GNU General Public License
+# along with Ansible. If not, see <http://www.gnu.org/licenses/>.
+#
+
+DOCUMENTATION = """
+---
+module: asa_acl
+version_added: "2.2"
+author: "Patrick Ogenstad (@ogenstad)"
+short_description: Manage access-lists on a Cisco ASA
+description:
+ - This module allows you to work with access-lists on a Cisco ASA device.
+extends_documentation_fragment: asa
+options:
+ lines:
+ description:
+ - The ordered set of commands that should be configured in the
+ section. The commands must be the exact same commands as found
+ in the device running-config. Be sure to note the configuration
+ command syntax as some commands are automatically modified by the
+ device config parser.
+ required: true
+ before:
+ description:
+ - The ordered set of commands to push on to the command stack if
+ a change needs to be made. This allows the playbook designer
+ the opportunity to perform configuration commands prior to pushing
+ any changes without affecting how the set of commands are matched
+ against the system.
+ required: false
+ default: null
+ after:
+ description:
+ - The ordered set of commands to append to the end of the command
+        stack if a change needs to be made. Just like with I(before) this
+ allows the playbook designer to append a set of commands to be
+ executed after the command set.
+ required: false
+ default: null
+ match:
+ description:
+ - Instructs the module on the way to perform the matching of
+ the set of commands against the current device config. If
+ match is set to I(line), commands are matched line by line. If
+ match is set to I(strict), command lines are matched with respect
+ to position. Finally if match is set to I(exact), command lines
+ must be an equal match.
+ required: false
+ default: line
+ choices: ['line', 'strict', 'exact']
+ replace:
+ description:
+ - Instructs the module on the way to perform the configuration
+ on the device. If the replace argument is set to I(line) then
+ the modified lines are pushed to the device in configuration
+ mode. If the replace argument is set to I(block) then the entire
+ command block is pushed to the device in configuration mode if any
+ line is not correct.
+ required: false
+ default: line
+ choices: ['line', 'block']
+ force:
+ description:
+      - The force argument instructs the module to not consider the
+        current device's running-config. When set to true, this will
+        cause the module to push the contents of I(lines) into the device
+        without first checking if already configured.
+ required: false
+ default: false
+ choices: ['yes', 'no']
+ config:
+ description:
+ - The module, by default, will connect to the remote device and
+ retrieve the current running-config to use as a base for comparing
+ against the contents of source. There are times when it is not
+ desirable to have the task get the current running-config for
+ every task in a playbook. The I(config) argument allows the
+        implementer to pass in the configuration to use as the base
+        config for comparison.
+ required: false
+ default: null
+"""
+
+EXAMPLES = """
+# Note: examples below use the following provider dict to handle
+# transport and authentication to the node.
+vars:
+ cli:
+ host: "{{ inventory_hostname }}"
+ username: cisco
+ password: cisco
+ transport: cli
+ authorize: yes
+ auth_pass: cisco
+
+- asa_acl:
+ lines:
+ - access-list ACL-ANSIBLE extended permit tcp any any eq 82
+ - access-list ACL-ANSIBLE extended permit tcp any any eq www
+ - access-list ACL-ANSIBLE extended permit tcp any any eq 97
+ - access-list ACL-ANSIBLE extended permit tcp any any eq 98
+ - access-list ACL-ANSIBLE extended permit tcp any any eq 99
+ before: clear configure access-list ACL-ANSIBLE
+ match: strict
+ replace: block
+ provider: "{{ cli }}"
+
+- asa_acl:
+ lines:
+ - access-list ACL-OUTSIDE extended permit tcp any any eq www
+ - access-list ACL-OUTSIDE extended permit tcp any any eq https
+ context: customer_a
+ provider: "{{ cli }}"
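+
+# Push an ACL without comparing against the running-config first
+# (a sketch of the force option; use with care)
+- asa_acl:
+    lines:
+      - access-list ACL-OUTSIDE extended permit tcp any any eq www
+    force: yes
+    provider: "{{ cli }}"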
+"""
+
+RETURN = """
+updates:
+ description: The set of commands that will be pushed to the remote device
+ returned: always
+ type: list
+ sample: ['...', '...']
+
+responses:
+ description: The set of responses from issuing the commands on the device
+  returned: when not check_mode
+ type: list
+ sample: ['...', '...']
+"""
+import ansible.module_utils.asa
+
+from ansible.module_utils.network import NetworkModule
+from ansible.module_utils.netcfg import NetworkConfig, dumps
+
+
+def get_config(module, acl_name):
+ contents = module.params['config']
+ if not contents:
+ contents = module.config.get_config()
+
+ filtered_config = list()
+ for item in contents.split('\n'):
+ if item.startswith('access-list %s' % acl_name):
+ filtered_config.append(item)
+
+    # return plain text; main() wraps it in a NetworkConfig for comparison
+    return '\n'.join(filtered_config)
+
+def parse_acl_name(module):
+ first_line = True
+ for line in module.params['lines']:
+ ace = line.split()
+ if ace[0] != 'access-list':
+            module.fail_json(msg='All lines/commands must begin with "access-list"; %s is not permitted' % ace[0])
+ if len(ace) <= 1:
+ module.fail_json(msg='All lines/commands must contain the name of the access-list')
+ if first_line:
+ acl_name = ace[1]
+ else:
+ if acl_name != ace[1]:
+                module.fail_json(msg='All lines/commands must use the same access-list; %s is not %s' % (ace[1], acl_name))
+ first_line = False
+
+ return acl_name
+
+def main():
+
+ argument_spec = dict(
+ lines=dict(aliases=['commands'], required=True, type='list'),
+ before=dict(type='list'),
+ after=dict(type='list'),
+ match=dict(default='line', choices=['line', 'strict', 'exact']),
+ replace=dict(default='line', choices=['line', 'block']),
+ force=dict(default=False, type='bool'),
+ config=dict()
+ )
+
+ module = NetworkModule(argument_spec=argument_spec,
+ supports_check_mode=True)
+
+ lines = module.params['lines']
+
+ before = module.params['before']
+ after = module.params['after']
+
+ match = module.params['match']
+ replace = module.params['replace']
+
+ result = dict(changed=False)
+
+ candidate = NetworkConfig(indent=1)
+ candidate.add(lines)
+
+ acl_name = parse_acl_name(module)
+
+ if not module.params['force']:
+ contents = get_config(module, acl_name)
+ config = NetworkConfig(indent=1, contents=contents)
+
+ commands = candidate.difference(config)
+ commands = dumps(commands, 'commands').split('\n')
+ commands = [str(c) for c in commands if c]
+ else:
+ commands = str(candidate).split('\n')
+
+ if commands:
+ if not module.check_mode:
+ response = module.config(commands)
+ result['responses'] = response
+ result['changed'] = True
+
+ result['updates'] = commands
+
+ module.exit_json(**result)
+
+
+if __name__ == '__main__':
+ main()
diff --git a/lib/ansible/modules/extras/network/asa/asa_command.py b/lib/ansible/modules/extras/network/asa/asa_command.py
new file mode 100644
index 0000000000..9d013ebd19
--- /dev/null
+++ b/lib/ansible/modules/extras/network/asa/asa_command.py
@@ -0,0 +1,224 @@
+#!/usr/bin/python
+#
+# This file is part of Ansible
+#
+# Ansible is free software: you can redistribute it and/or modify
+# it under the terms of the GNU General Public License as published by
+# the Free Software Foundation, either version 3 of the License, or
+# (at your option) any later version.
+#
+# Ansible is distributed in the hope that it will be useful,
+# but WITHOUT ANY WARRANTY; without even the implied warranty of
+# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+# GNU General Public License for more details.
+#
+# You should have received a copy of the GNU General Public License
+# along with Ansible. If not, see <http://www.gnu.org/licenses/>.
+#
+
+
+DOCUMENTATION = """
+---
+module: asa_command
+version_added: "2.2"
+author: "Peter Sprygada (@privateip), Patrick Ogenstad (@ogenstad)"
+short_description: Run arbitrary commands on Cisco ASA devices.
+description:
+ - Sends arbitrary commands to an ASA node and returns the results
+ read from the device. The M(asa_command) module includes an
+ argument that will cause the module to wait for a specific condition
+ before returning or timing out if the condition is not met.
+extends_documentation_fragment: asa
+options:
+ commands:
+ description:
+ - List of commands to send to the remote device over the
+ configured provider. The resulting output from the command
+        is returned. If the I(wait_for) argument is provided, the
+        module does not return until the condition is satisfied or
+        the number of retries has expired.
+ required: true
+ wait_for:
+ description:
+ - List of conditions to evaluate against the output of the
+ command. The task will wait for each condition to be true
+ before moving forward. If the conditional is not true
+ within the configured number of retries, the task fails.
+ See examples.
+ required: false
+ default: null
+ aliases: ['waitfor']
+ match:
+ description:
+ - The I(match) argument is used in conjunction with the
+ I(wait_for) argument to specify the match policy. Valid
+ values are C(all) or C(any). If the value is set to C(all)
+ then all conditionals in the wait_for must be satisfied. If
+ the value is set to C(any) then only one of the values must be
+ satisfied.
+ required: false
+ default: all
+ choices: ['any', 'all']
+ retries:
+ description:
+      - Specifies the number of retries a command should be tried
+ before it is considered failed. The command is run on the
+ target device every retry and evaluated against the
+ I(wait_for) conditions.
+ required: false
+ default: 10
+ interval:
+ description:
+ - Configures the interval in seconds to wait between retries
+ of the command. If the command does not pass the specified
+ conditions, the interval indicates how long to wait before
+ trying the command again.
+ required: false
+ default: 1
+"""
+
+EXAMPLES = """
+# Note: examples below use the following provider dict to handle
+# transport and authentication to the node.
+vars:
+ cli:
+ host: "{{ inventory_hostname }}"
+ username: cisco
+ password: cisco
+ authorize: yes
+ auth_pass: cisco
+ transport: cli
+
+
+- asa_command:
+ commands:
+ - show version
+ provider: "{{ cli }}"
+
+- asa_command:
+ commands:
+ - show asp drop
+ - show memory
+ provider: "{{ cli }}"
+
+- asa_command:
+ commands:
+ - show version
+ provider: "{{ cli }}"
+ context: system
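+
+# Wait on a condition before returning (a sketch; the wait_for expression
+# shown uses the generic conditional form and the matched string is illustrative)
+- asa_command:
+    commands:
+      - show version
+    wait_for:
+      - "result[0] contains Cisco"
+    retries: 5
+    interval: 2
+    provider: "{{ cli }}"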
+"""
+
+RETURN = """
+stdout:
+ description: the set of responses from the commands
+ returned: always
+ type: list
+ sample: ['...', '...']
+
+stdout_lines:
+ description: The value of stdout split into a list
+ returned: always
+ type: list
+ sample: [['...', '...'], ['...'], ['...']]
+
+failed_conditions:
+ description: the conditionals that failed
+  returned: failed
+ type: list
+ sample: ['...', '...']
+"""
+from ansible.module_utils.basic import get_exception
+from ansible.module_utils.netcli import CommandRunner
+from ansible.module_utils.netcli import AddCommandError, FailedConditionsError
+from ansible.module_utils.asa import NetworkModule, NetworkError
+
+VALID_KEYS = ['command', 'prompt', 'response']
+
+def to_lines(stdout):
+ for item in stdout:
+ if isinstance(item, basestring):
+ item = str(item).split('\n')
+ yield item
+
+def parse_commands(module):
+ for cmd in module.params['commands']:
+ if isinstance(cmd, basestring):
+ cmd = dict(command=cmd, output=None)
+ elif 'command' not in cmd:
+ module.fail_json(msg='command keyword argument is required')
+ elif not set(cmd.keys()).issubset(VALID_KEYS):
+ module.fail_json(msg='unknown keyword specified')
+ yield cmd
+
+def main():
+ spec = dict(
+ # { command: <str>, prompt: <str>, response: <str> }
+ commands=dict(type='list', required=True),
+
+ wait_for=dict(type='list', aliases=['waitfor']),
+ match=dict(default='all', choices=['all', 'any']),
+
+ retries=dict(default=10, type='int'),
+ interval=dict(default=1, type='int')
+ )
+
+ module = NetworkModule(argument_spec=spec,
+ connect_on_load=False,
+ supports_check_mode=True)
+
+ commands = list(parse_commands(module))
+ conditionals = module.params['wait_for'] or list()
+
+ warnings = list()
+
+ runner = CommandRunner(module)
+
+ for cmd in commands:
+ if module.check_mode and not cmd['command'].startswith('show'):
+ warnings.append('only show commands are supported when using '
+ 'check mode, not executing `%s`' % cmd['command'])
+ else:
+ if cmd['command'].startswith('conf'):
+ module.fail_json(msg='asa_command does not support running '
+ 'config mode commands. Please use '
+ 'asa_config instead')
+ try:
+ runner.add_command(**cmd)
+ except AddCommandError:
+ exc = get_exception()
+ warnings.append('duplicate command detected: %s' % cmd)
+
+ for item in conditionals:
+ runner.add_conditional(item)
+
+ runner.retries = module.params['retries']
+ runner.interval = module.params['interval']
+ runner.match = module.params['match']
+
+ try:
+ runner.run()
+ except FailedConditionsError:
+ exc = get_exception()
+ module.fail_json(msg=str(exc), failed_conditions=exc.failed_conditions)
+ except NetworkError:
+ exc = get_exception()
+ module.fail_json(msg=str(exc))
+
+ result = dict(changed=False, stdout=list())
+
+ for cmd in commands:
+ try:
+ output = runner.get_command(cmd['command'])
+ except ValueError:
+ output = 'command not executed due to check_mode, see warnings'
+ result['stdout'].append(output)
+
+ result['warnings'] = warnings
+ result['stdout_lines'] = list(to_lines(result['stdout']))
+
+ module.exit_json(**result)
+
+
+if __name__ == '__main__':
+ main()
+
diff --git a/lib/ansible/modules/extras/network/asa/asa_config.py b/lib/ansible/modules/extras/network/asa/asa_config.py
new file mode 100644
index 0000000000..e90f5fbfaa
--- /dev/null
+++ b/lib/ansible/modules/extras/network/asa/asa_config.py
@@ -0,0 +1,327 @@
+#!/usr/bin/python
+#
+# This file is part of Ansible
+#
+# Ansible is free software: you can redistribute it and/or modify
+# it under the terms of the GNU General Public License as published by
+# the Free Software Foundation, either version 3 of the License, or
+# (at your option) any later version.
+#
+# Ansible is distributed in the hope that it will be useful,
+# but WITHOUT ANY WARRANTY; without even the implied warranty of
+# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+# GNU General Public License for more details.
+#
+# You should have received a copy of the GNU General Public License
+# along with Ansible. If not, see <http://www.gnu.org/licenses/>.
+#
+
+DOCUMENTATION = """
+---
+module: asa_config
+version_added: "2.2"
+author: "Peter Sprygada (@privateip), Patrick Ogenstad (@ogenstad)"
+short_description: Manage Cisco ASA configuration sections
+description:
+ - Cisco ASA configurations use a simple block indent file syntax
+ for segmenting configuration into sections. This module provides
+ an implementation for working with ASA configuration sections in
+ a deterministic way.
+extends_documentation_fragment: asa
+options:
+ lines:
+ description:
+ - The ordered set of commands that should be configured in the
+ section. The commands must be the exact same commands as found
+ in the device running-config. Be sure to note the configuration
+ command syntax as some commands are automatically modified by the
+ device config parser.
+ required: false
+ default: null
+ aliases: ['commands']
+ parents:
+ description:
+ - The ordered set of parents that uniquely identify the section
+ the commands should be checked against. If the parents argument
+ is omitted, the commands are checked against the set of top
+ level or global commands.
+ required: false
+ default: null
+ src:
+ description:
+ - Specifies the source path to the file that contains the configuration
+ or configuration template to load. The path to the source file can
+ either be the full path on the Ansible control host or a relative
+ path from the playbook or role root directory. This argument is mutually
+ exclusive with I(lines).
+ required: false
+ default: null
+ before:
+ description:
+ - The ordered set of commands to push on to the command stack if
+ a change needs to be made. This allows the playbook designer
+ the opportunity to perform configuration commands prior to pushing
+ any changes without affecting how the set of commands are matched
+        against the system.
+ required: false
+ default: null
+ after:
+ description:
+ - The ordered set of commands to append to the end of the command
+ stack if a change needs to be made. Just like with I(before) this
+ allows the playbook designer to append a set of commands to be
+ executed after the command set.
+ required: false
+ default: null
+ match:
+ description:
+ - Instructs the module on the way to perform the matching of
+ the set of commands against the current device config. If
+ match is set to I(line), commands are matched line by line. If
+ match is set to I(strict), command lines are matched with respect
+ to position. If match is set to I(exact), command lines
+ must be an equal match. Finally, if match is set to I(none), the
+ module will not attempt to compare the source configuration with
+ the running configuration on the remote device.
+ required: false
+ default: line
+ choices: ['line', 'strict', 'exact', 'none']
+ replace:
+ description:
+ - Instructs the module on the way to perform the configuration
+ on the device. If the replace argument is set to I(line) then
+ the modified lines are pushed to the device in configuration
+ mode. If the replace argument is set to I(block) then the entire
+ command block is pushed to the device in configuration mode if any
+ line is not correct
+ required: false
+ default: line
+ choices: ['line', 'block']
+ update:
+ description:
+ - The I(update) argument controls how the configuration statements
+ are processed on the remote device. Valid choices for the I(update)
+ argument are I(merge) and I(check). When the argument is set to
+ I(merge), the configuration changes are merged with the current
+ device running configuration. When the argument is set to I(check)
+ the configuration updates are determined but not actually configured
+ on the remote device.
+ required: false
+ default: merge
+ choices: ['merge', 'check']
+ commit:
+ description:
+ - This argument specifies the update method to use when applying the
+ configuration changes to the remote node. If the value is set to
+        I(merge) the configuration updates are merged with the
+        running-config. If the value is set to I(check), no changes are
+        made to the remote host.
+ required: false
+ default: merge
+ choices: ['merge', 'check']
+ backup:
+ description:
+ - This argument will cause the module to create a full backup of
+ the current C(running-config) from the remote device before any
+ changes are made. The backup file is written to the C(backup)
+ folder in the playbook root directory. If the directory does not
+ exist, it is created.
+ required: false
+ default: no
+ choices: ['yes', 'no']
+ config:
+ description:
+ - The C(config) argument allows the playbook designer to supply
+ the base configuration to be used to validate configuration
+ changes necessary. If this argument is provided, the module
+ will not download the running-config from the remote node.
+ required: false
+ default: null
+ default:
+ description:
+ - This argument specifies whether or not to collect all defaults
+ when getting the remote device running config. When enabled,
+ the module will get the current config by issuing the command
+ C(show running-config all).
+ required: false
+ default: no
+ choices: ['yes', 'no']
+ save:
+ description:
+      - The C(save) argument instructs the module to save the
+        running-config to the startup-config at the conclusion of the
+        module running. If check mode is specified, this argument is ignored.
+ required: false
+ default: no
+ choices: ['yes', 'no']
+"""
+
+EXAMPLES = """
+# Note: examples below use the following provider dict to handle
+# transport and authentication to the node.
+vars:
+ cli:
+ host: "{{ inventory_hostname }}"
+ username: cisco
+ password: cisco
+ authorize: yes
+ auth_pass: cisco
+ transport: cli
+
+- asa_config:
+ lines:
+ - network-object host 10.80.30.18
+ - network-object host 10.80.30.19
+ - network-object host 10.80.30.20
+ parents: ['object-group network OG-MONITORED-SERVERS']
+ provider: "{{ cli }}"
+
+- asa_config:
+ host: "{{ inventory_hostname }}"
+ lines:
+ - message-length maximum client auto
+ - message-length maximum 512
+ match: line
+ parents: ['policy-map type inspect dns PM-DNS', 'parameters']
+ authorize: yes
+ auth_pass: cisco
+ username: admin
+ password: cisco
+ context: ansible
+
+- asa_config:
+ show_command: 'more system:running-config'
+ lines:
+ - ikev1 pre-shared-key MyS3cretVPNK3y
+ parents: tunnel-group 1.1.1.1 ipsec-attributes
+ provider: "{{ cli }}"
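+
+# Back up the running-config before pushing a change (a sketch; per the
+# backup option the backup file is written to the playbook's backup/ directory)
+- asa_config:
+    lines:
+      - logging buffered informational
+    backup: yes
+    provider: "{{ cli }}"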
+
+"""
+
+RETURN = """
+updates:
+ description: The set of commands that will be pushed to the remote device
+ returned: always
+ type: list
+ sample: ['...', '...']
+backup_path:
+ description: The full path to the backup file
+ returned: when backup is yes
+ type: path
+ sample: /playbooks/ansible/backup/asa_config.2016-07-16@22:28:34
+responses:
+ description: The set of responses from issuing the commands on the device
+ returned: when not check_mode
+ type: list
+ sample: ['...', '...']
+"""
+import re
+
+import ansible.module_utils.asa
+
+from ansible.module_utils.basic import get_exception
+from ansible.module_utils.network import NetworkModule, NetworkError
+from ansible.module_utils.netcfg import NetworkConfig, dumps
+
+def get_config(module):
+ contents = module.params['config']
+ if not contents:
+ defaults = module.params['default']
+ contents = module.config.get_config(include_defaults=defaults)
+ return NetworkConfig(indent=1, contents=contents)
+
+def get_candidate(module):
+ candidate = NetworkConfig(indent=1)
+ if module.params['src']:
+ candidate.load(module.params['src'])
+ elif module.params['lines']:
+ parents = module.params['parents'] or list()
+ candidate.add(module.params['lines'], parents=parents)
+ return candidate
+
+def run(module, result):
+ match = module.params['match']
+ replace = module.params['replace']
+ path = module.params['parents']
+
+ candidate = get_candidate(module)
+
+ if match != 'none':
+ config = get_config(module)
+ configobjs = candidate.difference(config, path=path, match=match,
+ replace=replace)
+ else:
+ configobjs = candidate.items
+
+ if configobjs:
+ commands = dumps(configobjs, 'commands').split('\n')
+
+ if module.params['lines']:
+ if module.params['before']:
+ commands[:0] = module.params['before']
+
+ if module.params['after']:
+ commands.extend(module.params['after'])
+
+ result['updates'] = commands
+
+ # send the configuration commands to the device and merge
+ # them with the current running config
+ if not module.check_mode:
+ module.config.load_config(commands)
+ result['changed'] = True
+
+ if module.params['save']:
+ if not module.check_mode:
+ module.config.save_config()
+ result['changed'] = True
+
+def main():
+ """ main entry point for module execution
+ """
+ argument_spec = dict(
+ src=dict(type='path'),
+
+ lines=dict(aliases=['commands'], type='list'),
+ parents=dict(type='list'),
+
+ before=dict(type='list'),
+ after=dict(type='list'),
+
+ match=dict(default='line', choices=['line', 'strict', 'exact', 'none']),
+ replace=dict(default='line', choices=['line', 'block']),
+
+ config=dict(),
+ default=dict(type='bool', default=False),
+
+ backup=dict(type='bool', default=False),
+ save=dict(type='bool', default=False),
+ )
+
+ mutually_exclusive = [('lines', 'src')]
+
+ required_if = [('match', 'strict', ['lines']),
+ ('match', 'exact', ['lines']),
+ ('replace', 'block', ['lines'])]
+
+ module = NetworkModule(argument_spec=argument_spec,
+ connect_on_load=False,
+ mutually_exclusive=mutually_exclusive,
+ required_if=required_if,
+ supports_check_mode=True)
+
+ result = dict(changed=False)
+
+ if module.params['backup']:
+ result['__backup__'] = module.config.get_config()
+
+ try:
+ run(module, result)
+ except NetworkError:
+ exc = get_exception()
+ module.fail_json(msg=str(exc), **exc.kwargs)
+
+ module.exit_json(**result)
+
+if __name__ == '__main__':
+ main()
diff --git a/lib/ansible/modules/extras/network/citrix/__init__.py b/lib/ansible/modules/extras/network/citrix/__init__.py
new file mode 100644
index 0000000000..e69de29bb2
--- /dev/null
+++ b/lib/ansible/modules/extras/network/citrix/__init__.py
diff --git a/lib/ansible/modules/extras/network/citrix/netscaler.py b/lib/ansible/modules/extras/network/citrix/netscaler.py
new file mode 100644
index 0000000000..384a625bdc
--- /dev/null
+++ b/lib/ansible/modules/extras/network/citrix/netscaler.py
@@ -0,0 +1,189 @@
+#!/usr/bin/python
+# -*- coding: utf-8 -*-
+
+"""
+Ansible module to manage Citrix NetScaler entities
+(c) 2013, Nandor Sivok <nandor@gawker.com>
+
+This file is part of Ansible
+
+Ansible is free software: you can redistribute it and/or modify
+it under the terms of the GNU General Public License as published by
+the Free Software Foundation, either version 3 of the License, or
+(at your option) any later version.
+
+Ansible is distributed in the hope that it will be useful,
+but WITHOUT ANY WARRANTY; without even the implied warranty of
+MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+GNU General Public License for more details.
+
+You should have received a copy of the GNU General Public License
+along with Ansible. If not, see <http://www.gnu.org/licenses/>.
+"""
+
+DOCUMENTATION = '''
+---
+module: netscaler
+version_added: "1.1"
+short_description: Manages Citrix NetScaler entities
+description:
+ - Manages Citrix NetScaler server and service entities.
+options:
+ nsc_host:
+ description:
+ - hostname or ip of your netscaler
+ required: true
+ default: null
+ aliases: []
+ nsc_protocol:
+ description:
+ - protocol used to access netscaler
+ required: false
+ default: https
+ aliases: []
+ user:
+ description:
+ - username
+ required: true
+ default: null
+ aliases: []
+ password:
+ description:
+ - password
+ required: true
+ default: null
+ aliases: []
+ action:
+ description:
+ - the action you want to perform on the entity
+ required: false
+    default: enable
+ choices: ["enable", "disable"]
+ aliases: []
+  name:
+    description:
+      - name of the entity (defaults to the hostname of the machine running the module)
+    required: false
+    default: hostname
+    aliases: []
+ type:
+ description:
+ - type of the entity
+ required: false
+ default: server
+ choices: ["server", "service"]
+ aliases: []
+ validate_certs:
+ description:
+ - If C(no), SSL certificates for the target url will not be validated. This should only be used
+ on personally controlled sites using self-signed certificates.
+ required: false
+ default: 'yes'
+ choices: ['yes', 'no']
+
+requirements: []
+author: "Nandor Sivok (@dominis)"
+'''
+
+EXAMPLES = '''
+# Disable the server
+ansible host -m netscaler -a "nsc_host=nsc.example.com user=apiuser password=apipass"
+
+# Enable the server
+ansible host -m netscaler -a "nsc_host=nsc.example.com user=apiuser password=apipass action=enable"
+
+# Disable the service local:8080
+ansible host -m netscaler -a "nsc_host=nsc.example.com user=apiuser password=apipass name=local:8080 type=service action=disable"
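+
+# Enable the service local:8080 (illustrative; mirrors the disable example above)
+ansible host -m netscaler -a "nsc_host=nsc.example.com user=apiuser password=apipass name=local:8080 type=service action=enable"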
+'''
+
+
+import base64
+import socket
+import urllib
+
+class netscaler(object):
+
+ _nitro_base_url = '/nitro/v1/'
+
+ def __init__(self, module):
+ self.module = module
+
+    def http_request(self, api_endpoint, data_json=None):
+        request_url = self._nsc_protocol + '://' + self._nsc_host + self._nitro_base_url + api_endpoint
+
+        # avoid a mutable default argument; fall back to an empty dict
+        data_json = urllib.urlencode(data_json or {})
+        if not len(data_json):
+            data_json = None
+
+ auth = base64.encodestring('%s:%s' % (self._nsc_user, self._nsc_pass)).replace('\n', '').strip()
+ headers = {
+ 'Authorization': 'Basic %s' % auth,
+ 'Content-Type' : 'application/x-www-form-urlencoded',
+ }
+
+ response, info = fetch_url(self.module, request_url, data=data_json, headers=headers)
+
+ return json.load(response)
+
+ def prepare_request(self, action):
+ resp = self.http_request(
+ 'config',
+ {
+ "object":
+ {
+ "params": {"action": action},
+ self._type: {"name": self._name}
+ }
+ }
+ )
+
+ return resp
+
+
+def core(module):
+ n = netscaler(module)
+ n._nsc_host = module.params.get('nsc_host')
+ n._nsc_user = module.params.get('user')
+ n._nsc_pass = module.params.get('password')
+ n._nsc_protocol = module.params.get('nsc_protocol')
+ n._name = module.params.get('name')
+ n._type = module.params.get('type')
+ action = module.params.get('action')
+
+ r = n.prepare_request(action)
+
+ return r['errorcode'], r
+
+
+def main():
+
+ module = AnsibleModule(
+ argument_spec = dict(
+ nsc_host = dict(required=True),
+ nsc_protocol = dict(default='https'),
+ user = dict(required=True),
+ password = dict(required=True),
+ action = dict(default='enable', choices=['enable','disable']),
+ name = dict(default=socket.gethostname()),
+ type = dict(default='server', choices=['service', 'server']),
+ validate_certs=dict(default='yes', type='bool'),
+ )
+ )
+
+ rc = 0
+ try:
+ rc, result = core(module)
+ except Exception, e:
+ module.fail_json(msg=str(e))
+
+ if rc != 0:
+ module.fail_json(rc=rc, msg=result)
+ else:
+ result['changed'] = True
+ module.exit_json(**result)
+
+
+# import module snippets
+from ansible.module_utils.basic import *
+from ansible.module_utils.urls import *
+if __name__ == '__main__':
+    main()
diff --git a/lib/ansible/modules/extras/network/cloudflare_dns.py b/lib/ansible/modules/extras/network/cloudflare_dns.py
new file mode 100644
index 0000000000..71cfab22a4
--- /dev/null
+++ b/lib/ansible/modules/extras/network/cloudflare_dns.py
@@ -0,0 +1,644 @@
+#!/usr/bin/python
+# -*- coding: utf-8 -*-
+
+# (c) 2016 Michael Gruener <michael.gruener@chaosmoon.net>
+#
+# This file is part of Ansible
+#
+# Ansible is free software: you can redistribute it and/or modify
+# it under the terms of the GNU General Public License as published by
+# the Free Software Foundation, either version 3 of the License, or
+# (at your option) any later version.
+#
+# Ansible is distributed in the hope that it will be useful,
+# but WITHOUT ANY WARRANTY; without even the implied warranty of
+# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+# GNU General Public License for more details.
+#
+# You should have received a copy of the GNU General Public License
+# along with Ansible. If not, see <http://www.gnu.org/licenses/>.
+
+try:
+ import json
+except ImportError:
+ try:
+ import simplejson as json
+ except ImportError:
+ # Let snippet from module_utils/basic.py return a proper error in this case
+ pass
+import urllib
+
+DOCUMENTATION = '''
+---
+module: cloudflare_dns
+author: "Michael Gruener (@mgruener)"
+requirements:
+ - "python >= 2.6"
+version_added: "2.1"
+short_description: manage Cloudflare DNS records
+description:
+ - "Manages dns records via the Cloudflare API, see the docs: U(https://api.cloudflare.com/)"
+options:
+ account_api_token:
+ description:
+ - "Account API token. You can obtain your API key from the bottom of the Cloudflare 'My Account' page, found here: U(https://www.cloudflare.com/a/account)"
+ required: true
+ account_email:
+ description:
+ - "Account email."
+ required: true
+ port:
+ description: Service port. Required for C(type=SRV)
+ required: false
+ default: null
+ priority:
+ description: Record priority. Required for C(type=MX) and C(type=SRV)
+ required: false
+ default: "1"
+ proto:
+ description: Service protocol. Required for C(type=SRV)
+ required: false
+ choices: [ 'tcp', 'udp' ]
+ default: null
+ record:
+ description:
+ - Record to add. Required if C(state=present). Default is C(@) (e.g. the zone name)
+ required: false
+ default: "@"
+ aliases: [ "name" ]
+ service:
+ description: Record service. Required for C(type=SRV)
+ required: false
+ default: null
+ solo:
+ description:
+ - Whether the record should be the only one for that record type and record name. Only use with C(state=present)
+ - This will delete all other records with the same record name and type.
+ required: false
+ default: null
+ state:
+ description:
+ - Whether the record(s) should exist or not
+ required: false
+ choices: [ 'present', 'absent' ]
+ default: present
+ timeout:
+ description:
+ - Timeout for Cloudflare API calls
+ required: false
+ default: 30
+ ttl:
+ description:
+ - The TTL to give the new record. Must be between 120 and 2,147,483,647 seconds, or 1 for automatic.
+ required: false
+ default: 1 (automatic)
+ type:
+ description:
+ - The type of DNS record to create. Required if C(state=present)
+ required: false
+ choices: [ 'A', 'AAAA', 'CNAME', 'TXT', 'SRV', 'MX', 'NS', 'SPF' ]
+ default: null
+ value:
+ description:
+ - The record value. Required for C(state=present)
+ required: false
+ default: null
+ aliases: [ "content" ]
+ weight:
+ description: Service weight. Required for C(type=SRV)
+ required: false
+ default: "1"
+ zone:
+ description:
+ - The name of the Zone to work with (e.g. "example.com"). The Zone must already exist.
+ required: true
+ aliases: ["domain"]
+'''
+
+EXAMPLES = '''
+# create a test.my.com A record to point to 127.0.0.1
+- cloudflare_dns:
+ zone: my.com
+ record: test
+ type: A
+ value: 127.0.0.1
+ account_email: test@example.com
+ account_api_token: dummyapitoken
+ register: record
+
+# create a my.com CNAME record to example.com
+- cloudflare_dns:
+ zone: my.com
+ type: CNAME
+ value: example.com
+ state: present
+ account_email: test@example.com
+ account_api_token: dummyapitoken
+
+# change its TTL
+- cloudflare_dns:
+ zone: my.com
+ type: CNAME
+ value: example.com
+ ttl: 600
+ state: present
+ account_email: test@example.com
+ account_api_token: dummyapitoken
+
+# and delete the record
+- cloudflare_dns:
+ zone: my.com
+ type: CNAME
+ value: example.com
+ state: absent
+ account_email: test@example.com
+ account_api_token: dummyapitoken
+
+# create TXT record "test.my.com" with value "unique value"
+# delete all other TXT records named "test.my.com"
+- cloudflare_dns:
+ domain: my.com
+ record: test
+ type: TXT
+ value: unique value
+ state: present
+ solo: true
+ account_email: test@example.com
+ account_api_token: dummyapitoken
+
+# create a SRV record _foo._tcp.my.com
+- cloudflare_dns:
+ domain: my.com
+ service: foo
+ proto: tcp
+ port: 3500
+ priority: 10
+ weight: 20
+ type: SRV
+ value: fooserver.my.com
+'''
+
+RETURN = '''
+record:
+ description: dictionary containing the record data
+ returned: success, except on record deletion
+ type: dictionary
+ contains:
+ content:
+ description: the record content (details depend on record type)
+ returned: success
+ type: string
+ sample: 192.0.2.91
+ created_on:
+ description: the record creation date
+ returned: success
+ type: string
+ sample: 2016-03-25T19:09:42.516553Z
+ data:
+ description: additional record data
+ returned: success, if type is SRV
+ type: dictionary
+ sample: {
+ name: "jabber",
+ port: 8080,
+ priority: 10,
+ proto: "_tcp",
+ service: "_xmpp",
+ target: "jabberhost.sample.com",
+ weight: 5,
+ }
+ id:
+ description: the record id
+ returned: success
+ type: string
+ sample: f9efb0549e96abcb750de63b38c9576e
+ locked:
+ description: No documentation available
+ returned: success
+ type: boolean
+ sample: False
+ meta:
+ description: No documentation available
+ returned: success
+ type: dictionary
+ sample: { auto_added: false }
+ modified_on:
+ description: record modification date
+ returned: success
+ type: string
+ sample: 2016-03-25T19:09:42.516553Z
+ name:
+ description: the record name as FQDN (including _service and _proto for SRV)
+ returned: success
+ type: string
+ sample: www.sample.com
+ priority:
+ description: priority of the MX record
+ returned: success, if type is MX
+ type: int
+ sample: 10
+ proxiable:
+ description: whether this record can be proxied through cloudflare
+ returned: success
+ type: boolean
+ sample: False
+ proxied:
+ description: whether the record is proxied through cloudflare
+ returned: success
+ type: boolean
+ sample: False
+ ttl:
+ description: the time-to-live for the record
+ returned: success
+ type: int
+ sample: 300
+ type:
+ description: the record type
+ returned: success
+ type: string
+ sample: A
+ zone_id:
+ description: the id of the zone containing the record
+ returned: success
+ type: string
+ sample: abcede0bf9f0066f94029d2e6b73856a
+ zone_name:
+ description: the name of the zone containing the record
+ returned: success
+ type: string
+ sample: sample.com
+'''
+
+class CloudflareAPI(object):
+
+ cf_api_endpoint = 'https://api.cloudflare.com/client/v4'
+ changed = False
+
+ def __init__(self, module):
+ self.module = module
+ self.account_api_token = module.params['account_api_token']
+ self.account_email = module.params['account_email']
+ self.port = module.params['port']
+ self.priority = module.params['priority']
+ self.proto = module.params['proto']
+ self.record = module.params['record']
+ self.service = module.params['service']
+ self.is_solo = module.params['solo']
+ self.state = module.params['state']
+ self.timeout = module.params['timeout']
+ self.ttl = module.params['ttl']
+ self.type = module.params['type']
+ self.value = module.params['value']
+ self.weight = module.params['weight']
+ self.zone = module.params['zone']
+
+ if self.record == '@':
+ self.record = self.zone
+
+ if (self.type in ['CNAME','NS','MX','SRV']) and (self.value is not None):
+ self.value = self.value.rstrip('.')
+
+ if (self.type == 'SRV'):
+ if (self.proto is not None) and (not self.proto.startswith('_')):
+ self.proto = '_' + self.proto
+ if (self.service is not None) and (not self.service.startswith('_')):
+ self.service = '_' + self.service
+
+ if not self.record.endswith(self.zone):
+ self.record = self.record + '.' + self.zone
+
+ def _cf_simple_api_call(self,api_call,method='GET',payload=None):
+ headers = { 'X-Auth-Email': self.account_email,
+ 'X-Auth-Key': self.account_api_token,
+ 'Content-Type': 'application/json' }
+ data = None
+ if payload:
+ try:
+ data = json.dumps(payload)
+            except Exception as e:
+ self.module.fail_json(msg="Failed to encode payload as JSON: {0}".format(e))
+
+ resp, info = fetch_url(self.module,
+ self.cf_api_endpoint + api_call,
+ headers=headers,
+ data=data,
+ method=method,
+ timeout=self.timeout)
+
+ if info['status'] not in [200,304,400,401,403,429,405,415]:
+ self.module.fail_json(msg="Failed API call {0}; got unexpected HTTP code {1}".format(api_call,info['status']))
+
+        error_msg = ''
+        if info['status'] == 401:
+            # Unauthorized
+            error_msg = "API user does not have permission; Status: {0}; Method: {1}; Call: {2}".format(info['status'],method,api_call)
+        elif info['status'] == 403:
+            # Forbidden
+            error_msg = "API request not authenticated; Status: {0}; Method: {1}; Call: {2}".format(info['status'],method,api_call)
+        elif info['status'] == 429:
+            # Too many requests
+            error_msg = "API client is rate limited; Status: {0}; Method: {1}; Call: {2}".format(info['status'],method,api_call)
+        elif info['status'] == 405:
+            # Method not allowed
+            error_msg = "API incorrect HTTP method provided; Status: {0}; Method: {1}; Call: {2}".format(info['status'],method,api_call)
+        elif info['status'] == 415:
+            # Unsupported Media Type
+            error_msg = "API request is not valid JSON; Status: {0}; Method: {1}; Call: {2}".format(info['status'],method,api_call)
+        elif info['status'] == 400:
+            # Bad Request
+            error_msg = "API bad request; Status: {0}; Method: {1}; Call: {2}".format(info['status'],method,api_call)
+
+        result = None
+        content = None
+        try:
+            content = resp.read()
+        except AttributeError:
+            if info['body']:
+                content = info['body']
+            else:
+                error_msg += "; The API response was empty"
+
+        if content:
+            try:
+                result = json.loads(content)
+            except ValueError:
+                # the stdlib json module signals invalid JSON with ValueError
+                error_msg += "; Failed to parse API response: {0}".format(content)
+
+ # received an error status but no data with details on what failed
+ if (info['status'] not in [200,304]) and (result is None):
+ self.module.fail_json(msg=error_msg)
+
+ if not result['success']:
+ error_msg += "; Error details: "
+ for error in result['errors']:
+ error_msg += "code: {0}, error: {1}; ".format(error['code'],error['message'])
+ if 'error_chain' in error:
+ for chain_error in error['error_chain']:
+ error_msg += "code: {0}, error: {1}; ".format(chain_error['code'],chain_error['message'])
+ self.module.fail_json(msg=error_msg)
+
+ return result, info['status']
+
+ def _cf_api_call(self,api_call,method='GET',payload=None):
+ result, status = self._cf_simple_api_call(api_call,method,payload)
+
+ data = result['result']
+
+        if 'result_info' in result:
+            pagination = result['result_info']
+            if pagination['total_pages'] > 1:
+                next_page = int(pagination['page']) + 1
+                # strip "page" parameter from call parameters (if there are any)
+                if '?' in api_call:
+                    raw_api_call,query = api_call.split('?',1)
+                    parameters = [param for param in query.split('&') if not param.startswith('page')]
+                else:
+                    raw_api_call = api_call
+                    parameters = []
+                # request the remaining pages, rebuilding the query string
+                # with the correct page number on every iteration
+                while next_page <= pagination['total_pages']:
+                    paged_call = raw_api_call + '?' + '&'.join(['page={0}'.format(next_page)] + parameters)
+                    result, status = self._cf_simple_api_call(paged_call,method,payload)
+                    data += result['result']
+                    next_page += 1
+
+ return data, status
+
+ def _get_zone_id(self,zone=None):
+ if not zone:
+ zone = self.zone
+
+ zones = self.get_zones(zone)
+ if len(zones) > 1:
+ self.module.fail_json(msg="More than one zone matches {0}".format(zone))
+
+ if len(zones) < 1:
+ self.module.fail_json(msg="No zone found with name {0}".format(zone))
+
+ return zones[0]['id']
+
+ def get_zones(self,name=None):
+ if not name:
+ name = self.zone
+ param = ''
+ if name:
+ param = '?' + urllib.urlencode({'name' : name})
+ zones,status = self._cf_api_call('/zones' + param)
+ return zones
+
+ def get_dns_records(self,zone_name=None,type=None,record=None,value=''):
+ if not zone_name:
+ zone_name = self.zone
+ if not type:
+ type = self.type
+ if not record:
+ record = self.record
+        # an explicit value of None overrides the user-set module value,
+        # i.e. do not filter on record content at all
+ if (not value) and (value is not None):
+ value = self.value
+
+ zone_id = self._get_zone_id()
+ api_call = '/zones/{0}/dns_records'.format(zone_id)
+ query = {}
+ if type:
+ query['type'] = type
+ if record:
+ query['name'] = record
+ if value:
+ query['content'] = value
+ if query:
+ api_call += '?' + urllib.urlencode(query)
+
+ records,status = self._cf_api_call(api_call)
+ return records
+
+ def delete_dns_records(self,**kwargs):
+ params = {}
+ for param in ['port','proto','service','solo','type','record','value','weight','zone']:
+ if param in kwargs:
+ params[param] = kwargs[param]
+ else:
+ params[param] = getattr(self,param)
+
+ records = []
+ content = params['value']
+ search_record = params['record']
+ if params['type'] == 'SRV':
+ content = str(params['weight']) + '\t' + str(params['port']) + '\t' + params['value']
+ search_record = params['service'] + '.' + params['proto'] + '.' + params['record']
+ if params['solo']:
+ search_value = None
+ else:
+ search_value = content
+
+ records = self.get_dns_records(params['zone'],params['type'],search_record,search_value)
+
+        for rr in records:
+            if params['solo']:
+                # with solo, keep the exactly matching record and delete
+                # every other record of the same name and type
+                if (rr['type'] == params['type']) and (rr['name'] == search_record) and (rr['content'] == content):
+                    continue
+            self.changed = True
+            if not self.module.check_mode:
+                result, info = self._cf_api_call('/zones/{0}/dns_records/{1}'.format(rr['zone_id'],rr['id']),'DELETE')
+        return self.changed
+
+ def ensure_dns_record(self,**kwargs):
+ params = {}
+ for param in ['port','priority','proto','service','ttl','type','record','value','weight','zone']:
+ if param in kwargs:
+ params[param] = kwargs[param]
+ else:
+ params[param] = getattr(self,param)
+
+ search_value = params['value']
+ search_record = params['record']
+ new_record = None
+ if (params['type'] is None) or (params['record'] is None):
+ self.module.fail_json(msg="You must provide a type and a record to create a new record")
+
+ if (params['type'] in [ 'A','AAAA','CNAME','TXT','MX','NS','SPF']):
+ if not params['value']:
+ self.module.fail_json(msg="You must provide a non-empty value to create this record type")
+
+ # there can only be one CNAME per record
+ # ignoring the value when searching for existing
+ # CNAME records allows us to update the value if it
+ # changes
+ if params['type'] == 'CNAME':
+ search_value = None
+
+ new_record = {
+ "type": params['type'],
+ "name": params['record'],
+ "content": params['value'],
+ "ttl": params['ttl']
+ }
+
+ if params['type'] == 'MX':
+ for attr in [params['priority'],params['value']]:
+ if (attr is None) or (attr == ''):
+ self.module.fail_json(msg="You must provide priority and a value to create this record type")
+ new_record = {
+ "type": params['type'],
+ "name": params['record'],
+ "content": params['value'],
+ "priority": params['priority'],
+ "ttl": params['ttl']
+ }
+
+ if params['type'] == 'SRV':
+ for attr in [params['port'],params['priority'],params['proto'],params['service'],params['weight'],params['value']]:
+ if (attr is None) or (attr == ''):
+ self.module.fail_json(msg="You must provide port, priority, proto, service, weight and a value to create this record type")
+ srv_data = {
+ "target": params['value'],
+ "port": params['port'],
+ "weight": params['weight'],
+ "priority": params['priority'],
+ "name": params['record'][:-len('.' + params['zone'])],
+ "proto": params['proto'],
+ "service": params['service']
+ }
+ new_record = { "type": params['type'], "ttl": params['ttl'], 'data': srv_data }
+ search_value = str(params['weight']) + '\t' + str(params['port']) + '\t' + params['value']
+ search_record = params['service'] + '.' + params['proto'] + '.' + params['record']
+
+ zone_id = self._get_zone_id(params['zone'])
+ records = self.get_dns_records(params['zone'],params['type'],search_record,search_value)
+        # in theory this should be impossible as Cloudflare does not allow
+        # the creation of duplicate records, but let's cover it anyway
+ if len(records) > 1:
+ self.module.fail_json(msg="More than one record already exists for the given attributes. That should be impossible, please open an issue!")
+ # record already exists, check if it must be updated
+ if len(records) == 1:
+ cur_record = records[0]
+ do_update = False
+ if (params['ttl'] is not None) and (cur_record['ttl'] != params['ttl'] ):
+ do_update = True
+ if (params['priority'] is not None) and ('priority' in cur_record) and (cur_record['priority'] != params['priority']):
+ do_update = True
+            if ('data' in new_record) and ('data' in cur_record):
+                if cur_record['data'] != new_record['data']:
+                    do_update = True
+            if (params['type'] == 'CNAME') and (cur_record['content'] != new_record['content']):
+                do_update = True
+            if do_update:
+                # in check mode no API call is made, so there is no result
+                result = None
+                if not self.module.check_mode:
+                    result, info = self._cf_api_call('/zones/{0}/dns_records/{1}'.format(zone_id,records[0]['id']),'PUT',new_record)
+                self.changed = True
+                return result,self.changed
+            else:
+                return records,self.changed
+        result = None
+        if not self.module.check_mode:
+            result, info = self._cf_api_call('/zones/{0}/dns_records'.format(zone_id),'POST',new_record)
+        self.changed = True
+        return result,self.changed
+
+def main():
+ module = AnsibleModule(
+ argument_spec = dict(
+ account_api_token = dict(required=True, no_log=True, type='str'),
+ account_email = dict(required=True, type='str'),
+ port = dict(required=False, default=None, type='int'),
+ priority = dict(required=False, default=1, type='int'),
+ proto = dict(required=False, default=None, choices=[ 'tcp', 'udp' ], type='str'),
+ record = dict(required=False, default='@', aliases=['name'], type='str'),
+ service = dict(required=False, default=None, type='str'),
+ solo = dict(required=False, default=None, type='bool'),
+ state = dict(required=False, default='present', choices=['present', 'absent'], type='str'),
+ timeout = dict(required=False, default=30, type='int'),
+ ttl = dict(required=False, default=1, type='int'),
+ type = dict(required=False, default=None, choices=[ 'A', 'AAAA', 'CNAME', 'TXT', 'SRV', 'MX', 'NS', 'SPF' ], type='str'),
+ value = dict(required=False, default=None, aliases=['content'], type='str'),
+ weight = dict(required=False, default=1, type='int'),
+ zone = dict(required=True, default=None, aliases=['domain'], type='str'),
+ ),
+ supports_check_mode = True,
+ required_if = ([
+ ('state','present',['record','type']),
+ ('type','MX',['priority','value']),
+ ('type','SRV',['port','priority','proto','service','value','weight']),
+ ('type','A',['value']),
+ ('type','AAAA',['value']),
+ ('type','CNAME',['value']),
+ ('type','TXT',['value']),
+ ('type','NS',['value']),
+ ('type','SPF',['value'])
+ ]
+ ),
+ required_one_of = (
+ [['record','value','type']]
+ )
+ )
+
+ changed = False
+ cf_api = CloudflareAPI(module)
+
+ # sanity checks
+ if cf_api.is_solo and cf_api.state == 'absent':
+ module.fail_json(msg="solo=true can only be used with state=present")
+
+ # perform add, delete or update (only the TTL can be updated) of one or
+ # more records
+ if cf_api.state == 'present':
+ # delete all records matching record name + type
+ if cf_api.is_solo:
+ changed = cf_api.delete_dns_records(solo=cf_api.is_solo)
+ result,changed = cf_api.ensure_dns_record()
+ if isinstance(result,list):
+ module.exit_json(changed=changed,result={'record': result[0]})
+ else:
+ module.exit_json(changed=changed,result={'record': result})
+ else:
+ # force solo to False, just to be sure
+ changed = cf_api.delete_dns_records(solo=False)
+ module.exit_json(changed=changed)
+
+# import module snippets
+from ansible.module_utils.basic import *
+from ansible.module_utils.urls import *
+
+if __name__ == '__main__':
+ main()
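Cloudflare's v4 API paginates list responses through a result_info block, and _cf_api_call above collects all pages by re-issuing the call with an incrementing page query parameter. The same idea as a standalone sketch (using the requests library for brevity; the module itself goes through Ansible's fetch_url):

    import requests

    CF_API = 'https://api.cloudflare.com/client/v4'

    def fetch_all(api_call, headers, params=None):
        # walk every page of a Cloudflare v4 list endpoint
        params = dict(params or {})
        data, page, total_pages = [], 1, 1
        while page <= total_pages:
            params['page'] = page
            body = requests.get(CF_API + api_call, headers=headers, params=params).json()
            data += body['result']
            total_pages = body.get('result_info', {}).get('total_pages', 1)
            page += 1
        return data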
diff --git a/lib/ansible/modules/extras/network/dnsimple.py b/lib/ansible/modules/extras/network/dnsimple.py
new file mode 100644
index 0000000000..48b0003cb4
--- /dev/null
+++ b/lib/ansible/modules/extras/network/dnsimple.py
@@ -0,0 +1,305 @@
+#!/usr/bin/python
+# This file is part of Ansible
+#
+# Ansible is free software: you can redistribute it and/or modify
+# it under the terms of the GNU General Public License as published by
+# the Free Software Foundation, either version 3 of the License, or
+# (at your option) any later version.
+#
+# Ansible is distributed in the hope that it will be useful,
+# but WITHOUT ANY WARRANTY; without even the implied warranty of
+# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+# GNU General Public License for more details.
+#
+# You should have received a copy of the GNU General Public License
+# along with Ansible. If not, see <http://www.gnu.org/licenses/>.
+
+DOCUMENTATION = '''
+---
+module: dnsimple
+version_added: "1.6"
+short_description: Interface with dnsimple.com (a DNS hosting service).
+description:
+ - "Manages domains and records via the DNSimple API, see the docs: U(http://developer.dnsimple.com/)"
+options:
+ account_email:
+ description:
+ - "Account email. If omitted, the env variables DNSIMPLE_EMAIL and DNSIMPLE_API_TOKEN will be looked for. If those aren't found, a C(.dnsimple) file will be looked for, see: U(https://github.com/mikemaccana/dnsimple-python#getting-started)"
+ required: false
+ default: null
+
+ account_api_token:
+ description:
+ - Account API token. See I(account_email) for info.
+ required: false
+ default: null
+
+ domain:
+ description:
+ - Domain to work with. Can be the domain name (e.g. "mydomain.com") or the numeric ID of the domain in DNSimple. If omitted, a list of domains will be returned.
+ - If domain is present but the domain doesn't exist, it will be created.
+ required: false
+ default: null
+
+ record:
+ description:
+      - Record to add. If blank, a record for the domain itself will be created. Supports the wildcard (*)
+ required: false
+ default: null
+
+ record_ids:
+ description:
+      - List of record IDs to ensure either exist or do not exist
+ required: false
+ default: null
+
+ type:
+ description:
+ - The type of DNS record to create
+ required: false
+ choices: [ 'A', 'ALIAS', 'CNAME', 'MX', 'SPF', 'URL', 'TXT', 'NS', 'SRV', 'NAPTR', 'PTR', 'AAAA', 'SSHFP', 'HINFO', 'POOL' ]
+ default: null
+
+ ttl:
+ description:
+ - The TTL to give the new record
+ required: false
+ default: 3600 (one hour)
+
+ value:
+ description:
+ - Record value
+ - "Must be specified when trying to ensure a record exists"
+ required: false
+ default: null
+
+ priority:
+ description:
+ - Record priority
+ required: false
+ default: null
+
+ state:
+ description:
+ - whether the record should exist or not
+ required: false
+ choices: [ 'present', 'absent' ]
+ default: null
+
+ solo:
+ description:
+ - Whether the record should be the only one for that record type and record name. Only use with state=present on a record
+ required: false
+ default: null
+
+requirements: [ dnsimple ]
+author: "Alex Coomans (@drcapulet)"
+'''
+
+EXAMPLES = '''
+# authenticate using email and API token
+- local_action: dnsimple account_email=test@example.com account_api_token=dummyapitoken
+
+# fetch all domains
+- local_action: dnsimple
+ register: domains
+
+# fetch my.com domain records
+- local_action: dnsimple domain=my.com state=present
+ register: records
+
+# delete a domain
+- local_action: dnsimple domain=my.com state=absent
+
+# create a test.my.com A record to point to 127.0.0.1
+- local_action: dnsimple domain=my.com record=test type=A value=127.0.0.1
+ register: record
+
+# and then delete it
+- local_action: dnsimple domain=my.com record_ids={{ record['id'] }} state=absent
+
+# create a my.com CNAME record to example.com
+- local_action: dnsimple domain=my.com record= type=CNAME value=example.com state=present
+
+# change its TTL
+- local_action: dnsimple domain=my.com record= type=CNAME value=example.com ttl=600 state=present
+
+# and delete the record
+- local_action: dnsimple domain=my.com record= type=CNAME value=example.com state=absent
+
+'''
+
+import os
+try:
+ from dnsimple import DNSimple
+ from dnsimple.dnsimple import DNSimpleException
+ HAS_DNSIMPLE = True
+except ImportError:
+ HAS_DNSIMPLE = False
+
+def main():
+ module = AnsibleModule(
+ argument_spec = dict(
+ account_email = dict(required=False),
+ account_api_token = dict(required=False, no_log=True),
+ domain = dict(required=False),
+ record = dict(required=False),
+ record_ids = dict(required=False, type='list'),
+ type = dict(required=False, choices=['A', 'ALIAS', 'CNAME', 'MX', 'SPF', 'URL', 'TXT', 'NS', 'SRV', 'NAPTR', 'PTR', 'AAAA', 'SSHFP', 'HINFO', 'POOL']),
+ ttl = dict(required=False, default=3600, type='int'),
+ value = dict(required=False),
+ priority = dict(required=False, type='int'),
+ state = dict(required=False, choices=['present', 'absent']),
+ solo = dict(required=False, type='bool'),
+ ),
+        required_together = [
+            ['record', 'value']
+        ],
+ supports_check_mode = True,
+ )
+
+ if not HAS_DNSIMPLE:
+ module.fail_json(msg="dnsimple required for this module")
+
+ account_email = module.params.get('account_email')
+ account_api_token = module.params.get('account_api_token')
+ domain = module.params.get('domain')
+ record = module.params.get('record')
+ record_ids = module.params.get('record_ids')
+ record_type = module.params.get('type')
+ ttl = module.params.get('ttl')
+ value = module.params.get('value')
+ priority = module.params.get('priority')
+ state = module.params.get('state')
+ is_solo = module.params.get('solo')
+
+ if account_email and account_api_token:
+ client = DNSimple(email=account_email, api_token=account_api_token)
+ elif os.environ.get('DNSIMPLE_EMAIL') and os.environ.get('DNSIMPLE_API_TOKEN'):
+ client = DNSimple(email=os.environ.get('DNSIMPLE_EMAIL'), api_token=os.environ.get('DNSIMPLE_API_TOKEN'))
+ else:
+ client = DNSimple()
+
+ try:
+ # Let's figure out what operation we want to do
+
+ # No domain, return a list
+ if not domain:
+ domains = client.domains()
+ module.exit_json(changed=False, result=[d['domain'] for d in domains])
+
+ # Domain & No record
+ if domain and record is None and not record_ids:
+ domains = [d['domain'] for d in client.domains()]
+ if domain.isdigit():
+ dr = next((d for d in domains if d['id'] == int(domain)), None)
+ else:
+ dr = next((d for d in domains if d['name'] == domain), None)
+ if state == 'present':
+ if dr:
+ module.exit_json(changed=False, result=dr)
+ else:
+ if module.check_mode:
+ module.exit_json(changed=True)
+ else:
+ module.exit_json(changed=True, result=client.add_domain(domain)['domain'])
+ elif state == 'absent':
+ if dr:
+ if not module.check_mode:
+ client.delete(domain)
+ module.exit_json(changed=True)
+ else:
+ module.exit_json(changed=False)
+ else:
+ module.fail_json(msg="'%s' is an unknown value for the state argument" % state)
+
+ # need the not none check since record could be an empty string
+ if domain and record is not None:
+ records = [r['record'] for r in client.records(str(domain))]
+
+ if not record_type:
+ module.fail_json(msg="Missing the record type")
+
+ if not value:
+ module.fail_json(msg="Missing the record value")
+
+ rr = next((r for r in records if r['name'] == record and r['record_type'] == record_type and r['content'] == value), None)
+
+ if state == 'present':
+ changed = False
+ if is_solo:
+ # delete any records that have the same name and record type
+ same_type = [r['id'] for r in records if r['name'] == record and r['record_type'] == record_type]
+ if rr:
+ same_type = [rid for rid in same_type if rid != rr['id']]
+ if same_type:
+ if not module.check_mode:
+ for rid in same_type:
+ client.delete_record(str(domain), rid)
+ changed = True
+ if rr:
+ # check if we need to update
+ if rr['ttl'] != ttl or rr['prio'] != priority:
+ data = {}
+ if ttl: data['ttl'] = ttl
+ if priority: data['prio'] = priority
+ if module.check_mode:
+ module.exit_json(changed=True)
+ else:
+ module.exit_json(changed=True, result=client.update_record(str(domain), str(rr['id']), data)['record'])
+ else:
+ module.exit_json(changed=changed, result=rr)
+ else:
+ # create it
+ data = {
+ 'name': record,
+ 'record_type': record_type,
+ 'content': value,
+ }
+ if ttl: data['ttl'] = ttl
+ if priority: data['prio'] = priority
+ if module.check_mode:
+ module.exit_json(changed=True)
+ else:
+ module.exit_json(changed=True, result=client.add_record(str(domain), data)['record'])
+ elif state == 'absent':
+ if rr:
+ if not module.check_mode:
+ client.delete_record(str(domain), rr['id'])
+ module.exit_json(changed=True)
+ else:
+ module.exit_json(changed=False)
+ else:
+ module.fail_json(msg="'%s' is an unknown value for the state argument" % state)
+
+ # Make sure these record_ids either all exist or none
+ if domain and record_ids:
+ current_records = [str(r['record']['id']) for r in client.records(str(domain))]
+ wanted_records = [str(r) for r in record_ids]
+ if state == 'present':
+ difference = list(set(wanted_records) - set(current_records))
+ if difference:
+ module.fail_json(msg="Missing the following records: %s" % difference)
+ else:
+ module.exit_json(changed=False)
+ elif state == 'absent':
+ difference = list(set(wanted_records) & set(current_records))
+ if difference:
+ if not module.check_mode:
+ for rid in difference:
+ client.delete_record(str(domain), rid)
+ module.exit_json(changed=True)
+ else:
+ module.exit_json(changed=False)
+ else:
+ module.fail_json(msg="'%s' is an unknown value for the state argument" % state)
+
+    except DNSimpleException as e:
+ module.fail_json(msg="Unable to contact DNSimple: %s" % e.message)
+
+ module.fail_json(msg="Unknown what you wanted me to do")
+
+# import module snippets
+from ansible.module_utils.basic import *
+
+main()
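The dnsimple module drives the dnsimple-python client directly; all of its API access goes through the handful of methods seen above (domains(), records(), add_record(), delete_record() and friends). A short interactive sketch using only those calls, with dummy credentials:

    from dnsimple import DNSimple

    client = DNSimple(email='test@example.com', api_token='dummyapitoken')

    # list domains; each entry is wrapped in a 'domain' key, as the module expects
    for d in client.domains():
        print('%s' % d['domain']['name'])

    # list records for one domain; entries are wrapped in a 'record' key
    for r in client.records('my.com'):
        print('%s %s %s' % (r['record']['name'], r['record']['record_type'],
                            r['record']['content']))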
diff --git a/lib/ansible/modules/extras/network/dnsmadeeasy.py b/lib/ansible/modules/extras/network/dnsmadeeasy.py
new file mode 100644
index 0000000000..4578b5298b
--- /dev/null
+++ b/lib/ansible/modules/extras/network/dnsmadeeasy.py
@@ -0,0 +1,368 @@
+#!/usr/bin/python
+# This file is part of Ansible
+#
+# Ansible is free software: you can redistribute it and/or modify
+# it under the terms of the GNU General Public License as published by
+# the Free Software Foundation, either version 3 of the License, or
+# (at your option) any later version.
+#
+# Ansible is distributed in the hope that it will be useful,
+# but WITHOUT ANY WARRANTY; without even the implied warranty of
+# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+# GNU General Public License for more details.
+#
+# You should have received a copy of the GNU General Public License
+# along with Ansible. If not, see <http://www.gnu.org/licenses/>.
+
+DOCUMENTATION = '''
+---
+module: dnsmadeeasy
+version_added: "1.3"
+short_description: Interface with dnsmadeeasy.com (a DNS hosting service).
+description:
+ - "Manages DNS records via the v2 REST API of the DNS Made Easy service. It handles records only; there is no manipulation of domains or monitor/account support yet. See: U(https://www.dnsmadeeasy.com/integration/restapi/)"
+options:
+ account_key:
+ description:
+ - Account API Key.
+ required: true
+ default: null
+
+ account_secret:
+ description:
+ - Account Secret Key.
+ required: true
+ default: null
+
+ domain:
+ description:
+ - Domain to work with. Can be the domain name (e.g. "mydomain.com") or the numeric ID of the domain in DNS Made Easy (e.g. "839989") for faster resolution.
+ required: true
+ default: null
+
+ record_name:
+ description:
+      - Record name to get/create/delete/update. If record_name is not specified, all records for the domain will be returned in "result" regardless of the state argument.
+ required: false
+ default: null
+
+ record_type:
+ description:
+ - Record type.
+ required: false
+ choices: [ 'A', 'AAAA', 'CNAME', 'HTTPRED', 'MX', 'NS', 'PTR', 'SRV', 'TXT' ]
+ default: null
+
+ record_value:
+ description:
+ - "Record value. HTTPRED: <redirection URL>, MX: <priority> <target name>, NS: <name server>, PTR: <target name>, SRV: <priority> <weight> <port> <target name>, TXT: <text value>"
+ - "If record_value is not specified; no changes will be made and the record will be returned in 'result' (in other words, this module can be used to fetch a record's current id, type, and ttl)"
+ required: false
+ default: null
+
+ record_ttl:
+ description:
+ - record's "Time to live". Number of seconds the record remains cached in DNS servers.
+ required: false
+ default: 1800
+
+ state:
+ description:
+ - whether the record should exist or not
+ required: true
+ choices: [ 'present', 'absent' ]
+ default: null
+
+ validate_certs:
+ description:
+ - If C(no), SSL certificates will not be validated. This should only be used
+ on personally controlled sites using self-signed certificates.
+ required: false
+ default: 'yes'
+ choices: ['yes', 'no']
+ version_added: 1.5.1
+
+notes:
+ - The DNS Made Easy service requires that machines interacting with the API have the proper time and timezone set. Be sure you are within a few seconds of actual time by using NTP.
+  - This module returns record(s) in the "result" element when 'state' is set to 'present'. This value can be registered and used in your playbooks.
+
+requirements: [ hashlib, hmac ]
+author: "Brice Burgess (@briceburg)"
+'''
+
+EXAMPLES = '''
+# fetch my.com domain records
+- dnsmadeeasy: account_key=key account_secret=secret domain=my.com state=present
+ register: response
+
+# create / ensure the presence of a record
+- dnsmadeeasy: account_key=key account_secret=secret domain=my.com state=present record_name="test" record_type="A" record_value="127.0.0.1"
+
+# update the previously created record
+- dnsmadeeasy: account_key=key account_secret=secret domain=my.com state=present record_name="test" record_value="192.0.2.23"
+
+# fetch a specific record
+- dnsmadeeasy: account_key=key account_secret=secret domain=my.com state=present record_name="test"
+ register: response
+
+# delete a record / ensure it is absent
+- dnsmadeeasy: account_key=key account_secret=secret domain=my.com state=absent record_name="test"
+'''
+
+# ============================================
+# DNSMadeEasy module specific support methods.
+#
+
+import urllib
+
+IMPORT_ERROR = None
+try:
+ import json
+ from time import strftime, gmtime
+ import hashlib
+ import hmac
+except ImportError as e:
+ IMPORT_ERROR = str(e)
+
+class DME2:
+
+ def __init__(self, apikey, secret, domain, module):
+ self.module = module
+
+ self.api = apikey
+ self.secret = secret
+ self.baseurl = 'https://api.dnsmadeeasy.com/V2.0/'
+ self.domain = str(domain)
+ self.domain_map = None # ["domain_name"] => ID
+ self.record_map = None # ["record_name"] => ID
+ self.records = None # ["record_ID"] => <record>
+ self.all_records = None
+
+ # Lookup the domain ID if passed as a domain name vs. ID
+ if not self.domain.isdigit():
+ self.domain = self.getDomainByName(self.domain)['id']
+
+ self.record_url = 'dns/managed/' + str(self.domain) + '/records'
+
+ def _headers(self):
+ currTime = self._get_date()
+ hashstring = self._create_hash(currTime)
+ headers = {'x-dnsme-apiKey': self.api,
+ 'x-dnsme-hmac': hashstring,
+ 'x-dnsme-requestDate': currTime,
+ 'content-type': 'application/json'}
+ return headers
+
+ def _get_date(self):
+ return strftime("%a, %d %b %Y %H:%M:%S GMT", gmtime())
+
+ def _create_hash(self, rightnow):
+ return hmac.new(self.secret.encode(), rightnow.encode(), hashlib.sha1).hexdigest()
+
+ def query(self, resource, method, data=None):
+ url = self.baseurl + resource
+ if data and not isinstance(data, basestring):
+ data = urllib.urlencode(data)
+
+ response, info = fetch_url(self.module, url, data=data, method=method, headers=self._headers())
+ if info['status'] not in (200, 201, 204):
+ self.module.fail_json(msg="%s returned %s, with body: %s" % (url, info['status'], info['msg']))
+
+ try:
+ return json.load(response)
+        except Exception as e:
+ return {}
+
+ def getDomain(self, domain_id):
+ if not self.domain_map:
+ self._instMap('domain')
+
+ return self.domains.get(domain_id, False)
+
+ def getDomainByName(self, domain_name):
+ if not self.domain_map:
+ self._instMap('domain')
+
+ return self.getDomain(self.domain_map.get(domain_name, 0))
+
+ def getDomains(self):
+ return self.query('dns/managed', 'GET')['data']
+
+ def getRecord(self, record_id):
+ if not self.record_map:
+ self._instMap('record')
+
+ return self.records.get(record_id, False)
+
+ # Try to find a single record matching this one.
+ # How we do this depends on the type of record. For instance, there
+ # can be several MX records for a single record_name while there can
+ # only be a single CNAME for a particular record_name. Note also that
+ # there can be several records with different types for a single name.
+ def getMatchingRecord(self, record_name, record_type, record_value):
+ # Get all the records if not already cached
+ if not self.all_records:
+ self.all_records = self.getRecords()
+
+ if record_type in ["A", "AAAA", "CNAME", "HTTPRED", "PTR"]:
+ for result in self.all_records:
+ if result['name'] == record_name and result['type'] == record_type:
+ return result
+ return False
+ elif record_type in ["MX", "NS", "TXT", "SRV"]:
+ for result in self.all_records:
+ if record_type == "MX":
+ value = record_value.split(" ")[1]
+ elif record_type == "SRV":
+ value = record_value.split(" ")[3]
+ else:
+ value = record_value
+ if result['name'] == record_name and result['type'] == record_type and result['value'] == value:
+ return result
+ return False
+ else:
+ raise Exception('record_type not yet supported')
+
+ def getRecords(self):
+ return self.query(self.record_url, 'GET')['data']
+
+ def _instMap(self, type):
+ #@TODO cache this call so it's executed only once per ansible execution
+ map = {}
+ results = {}
+
+ # iterate over e.g. self.getDomains() || self.getRecords()
+ for result in getattr(self, 'get' + type.title() + 's')():
+
+ map[result['name']] = result['id']
+ results[result['id']] = result
+
+ # e.g. self.domain_map || self.record_map
+ setattr(self, type + '_map', map)
+ setattr(self, type + 's', results) # e.g. self.domains || self.records
+
+ def prepareRecord(self, data):
+ return json.dumps(data, separators=(',', ':'))
+
+ def createRecord(self, data):
+        #@TODO update the cache w/ resultant record + id when implemented
+ return self.query(self.record_url, 'POST', data)
+
+ def updateRecord(self, record_id, data):
+        #@TODO update the cache w/ resultant record + id when implemented
+ return self.query(self.record_url + '/' + str(record_id), 'PUT', data)
+
+ def deleteRecord(self, record_id):
+        #@TODO remove record from the cache when implemented
+ return self.query(self.record_url + '/' + str(record_id), 'DELETE')
+
+
+# ===========================================
+# Module execution.
+#
+
+def main():
+
+ module = AnsibleModule(
+ argument_spec=dict(
+ account_key=dict(required=True),
+ account_secret=dict(required=True, no_log=True),
+ domain=dict(required=True),
+ state=dict(required=True, choices=['present', 'absent']),
+ record_name=dict(required=False),
+ record_type=dict(required=False, choices=[
+ 'A', 'AAAA', 'CNAME', 'HTTPRED', 'MX', 'NS', 'PTR', 'SRV', 'TXT']),
+ record_value=dict(required=False),
+ record_ttl=dict(required=False, default=1800, type='int'),
+ validate_certs = dict(default='yes', type='bool'),
+ ),
+ required_together=(
+ ['record_value', 'record_ttl', 'record_type']
+ )
+ )
+
+ if IMPORT_ERROR:
+ module.fail_json(msg="Import Error: " + IMPORT_ERROR)
+
+    DME = DME2(module.params["account_key"], module.params["account_secret"],
+               module.params["domain"], module)
+ state = module.params["state"]
+ record_name = module.params["record_name"]
+ record_type = module.params["record_type"]
+ record_value = module.params["record_value"]
+
+ # Follow Keyword Controlled Behavior
+ if record_name is None:
+ domain_records = DME.getRecords()
+ if not domain_records:
+ module.fail_json(
+ msg="The requested domain name is not accessible with this api_key; try using its ID if known.")
+ module.exit_json(changed=False, result=domain_records)
+
+ # Fetch existing record + Build new one
+ current_record = DME.getMatchingRecord(record_name, record_type, record_value)
+ new_record = {'name': record_name}
+ for i in ["record_value", "record_type", "record_ttl"]:
+        if module.params[i] is not None:
+ new_record[i[len("record_"):]] = module.params[i]
+ # Special handling for mx record
+ if new_record["type"] == "MX":
+ new_record["mxLevel"] = new_record["value"].split(" ")[0]
+ new_record["value"] = new_record["value"].split(" ")[1]
+
+ # Special handling for SRV records
+ if new_record["type"] == "SRV":
+ new_record["priority"] = new_record["value"].split(" ")[0]
+ new_record["weight"] = new_record["value"].split(" ")[1]
+ new_record["port"] = new_record["value"].split(" ")[2]
+ new_record["value"] = new_record["value"].split(" ")[3]
+
+ # Compare new record against existing one
+ changed = False
+ if current_record:
+ for i in new_record:
+ if str(current_record[i]) != str(new_record[i]):
+ changed = True
+ new_record['id'] = str(current_record['id'])
+
+ # Follow Keyword Controlled Behavior
+ if state == 'present':
+ # return the record if no value is specified
+ if not "value" in new_record:
+ if not current_record:
+ module.fail_json(
+ msg="A record with name '%s' does not exist for domain '%s.'" % (record_name, module.params['domain']))
+ module.exit_json(changed=False, result=current_record)
+
+ # create record as it does not exist
+ if not current_record:
+ record = DME.createRecord(DME.prepareRecord(new_record))
+ module.exit_json(changed=True, result=record)
+
+ # update the record
+ if changed:
+ DME.updateRecord(
+ current_record['id'], DME.prepareRecord(new_record))
+ module.exit_json(changed=True, result=new_record)
+
+ # return the record (no changes)
+ module.exit_json(changed=False, result=current_record)
+
+ elif state == 'absent':
+ # delete the record if it exists
+ if current_record:
+ DME.deleteRecord(current_record['id'])
+ module.exit_json(changed=True)
+
+ # record does not exist, return w/o change.
+ module.exit_json(changed=False)
+
+ else:
+ module.fail_json(
+ msg="'%s' is an unknown value for the state argument" % state)
+
+# import module snippets
+from ansible.module_utils.basic import *
+from ansible.module_utils.urls import *
+
+main()
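The _headers()/_create_hash() pair above implements DNS Made Easy's request signing: every call carries the current HTTP date plus an HMAC-SHA1 of that exact date string, keyed with the account secret. Extracted into a standalone helper (same scheme as the module uses):

    import hashlib
    import hmac
    from time import gmtime, strftime

    def dme_headers(api_key, secret):
        # sign the request date with the account secret, as DNS Made Easy expects;
        # the clock must be accurate (see the module notes about NTP)
        request_date = strftime("%a, %d %b %Y %H:%M:%S GMT", gmtime())
        signature = hmac.new(secret.encode(), request_date.encode(),
                             hashlib.sha1).hexdigest()
        return {'x-dnsme-apiKey': api_key,
                'x-dnsme-hmac': signature,
                'x-dnsme-requestDate': request_date,
                'content-type': 'application/json'}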
diff --git a/lib/ansible/modules/extras/network/exoscale/__init__.py b/lib/ansible/modules/extras/network/exoscale/__init__.py
new file mode 100644
index 0000000000..e69de29bb2
--- /dev/null
+++ b/lib/ansible/modules/extras/network/exoscale/__init__.py
diff --git a/lib/ansible/modules/extras/network/exoscale/exo_dns_domain.py b/lib/ansible/modules/extras/network/exoscale/exo_dns_domain.py
new file mode 100644
index 0000000000..d886728bea
--- /dev/null
+++ b/lib/ansible/modules/extras/network/exoscale/exo_dns_domain.py
@@ -0,0 +1,255 @@
+#!/usr/bin/python
+# -*- coding: utf-8 -*-
+#
+# (c) 2016, René Moser <mail@renemoser.net>
+#
+# This file is part of Ansible
+#
+# Ansible is free software: you can redistribute it and/or modify
+# it under the terms of the GNU General Public License as published by
+# the Free Software Foundation, either version 3 of the License, or
+# (at your option) any later version.
+#
+# Ansible is distributed in the hope that it will be useful,
+# but WITHOUT ANY WARRANTY; without even the implied warranty of
+# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+# GNU General Public License for more details.
+#
+# You should have received a copy of the GNU General Public License
+# along with Ansible. If not, see <http://www.gnu.org/licenses/>.
+
+DOCUMENTATION = '''
+---
+module: exo_dns_domain
+short_description: Manages domains on the Exoscale DNS API.
+description:
+    - Create and remove domains.
+version_added: "2.2"
+author: "René Moser (@resmo)"
+options:
+ name:
+ description:
+            - Name of the domain.
+ required: true
+ state:
+ description:
+ - State of the resource.
+ required: false
+ default: 'present'
+ choices: [ 'present', 'absent' ]
+ api_key:
+ description:
+ - API key of the Exoscale DNS API.
+ required: false
+ default: null
+ api_secret:
+ description:
+ - Secret key of the Exoscale DNS API.
+ required: false
+ default: null
+ api_timeout:
+ description:
+ - HTTP timeout to Exoscale DNS API.
+ required: false
+ default: 10
+ api_region:
+ description:
+            - Name of the ini section in the C(cloudstack.ini) file.
+ required: false
+ default: cloudstack
+ validate_certs:
+ description:
+ - Validate SSL certs of the Exoscale DNS API.
+ required: false
+ default: true
+requirements:
+ - "python >= 2.6"
+notes:
+    - As Exoscale DNS uses the same API key and secret for all services, we reuse the config used for Exoscale Compute based on CloudStack.
+      The config is read from several locations, in the following order.
+      The C(CLOUDSTACK_KEY), C(CLOUDSTACK_SECRET) environment variables.
+      A C(CLOUDSTACK_CONFIG) environment variable pointing to an C(.ini) file.
+      A C(cloudstack.ini) file in the current working directory.
+      A C(.cloudstack.ini) file in the user's home directory.
+      Optionally multiple credentials and endpoints can be specified using ini sections in C(cloudstack.ini).
+      Use the argument C(api_region) to select the section name, default section is C(cloudstack).
+    - More information about Exoscale DNS can be found at U(https://community.exoscale.ch/documentation/dns/).
+    - This module supports check mode and diff.
+'''
+
+EXAMPLES = '''
+# Create a domain.
+- local_action:
+ module: exo_dns_domain
+ name: example.com
+
+# Remove a domain.
+- local_action:
+ module: exo_dns_domain
+ name: example.com
+ state: absent
+'''
+
+RETURN = '''
+---
+exo_dns_domain:
+ description: API domain results
+ returned: success
+ type: dictionary
+ contains:
+ account_id:
+ description: Your account ID
+ returned: success
+ type: int
+ sample: 34569
+ auto_renew:
+ description: Whether domain is auto renewed or not
+ returned: success
+ type: bool
+ sample: false
+ created_at:
+ description: When the domain was created
+ returned: success
+ type: string
+ sample: "2016-08-12T15:24:23.989Z"
+ expires_on:
+ description: When the domain expires
+ returned: success
+ type: string
+ sample: "2016-08-12T15:24:23.989Z"
+ id:
+ description: ID of the domain
+ returned: success
+ type: int
+ sample: "2016-08-12T15:24:23.989Z"
+ lockable:
+ description: Whether the domain is lockable or not
+ returned: success
+ type: bool
+ sample: true
+ name:
+ description: Domain name
+ returned: success
+ type: string
+ sample: example.com
+ record_count:
+ description: Number of records related to this domain
+ returned: success
+ type: int
+ sample: 5
+ registrant_id:
+ description: ID of the registrant
+ returned: success
+ type: int
+ sample: null
+ service_count:
+ description: Number of services
+ returned: success
+ type: int
+ sample: 0
+ state:
+ description: State of the domain
+ returned: success
+ type: string
+ sample: "hosted"
+ token:
+ description: Token
+ returned: success
+ type: string
+ sample: "r4NzTRp6opIeFKfaFYvOd6MlhGyD07jl"
+ unicode_name:
+ description: Domain name as unicode
+ returned: success
+ type: string
+ sample: "example.com"
+ updated_at:
+ description: When the domain was updated last.
+ returned: success
+ type: string
+ sample: "2016-08-12T15:24:23.989Z"
+ user_id:
+ description: ID of the user
+ returned: success
+ type: int
+ sample: null
+ whois_protected:
+            description: Whether the whois is protected or not
+ returned: success
+ type: bool
+ sample: false
+'''
+
+# import exoscale common
+from ansible.module_utils.exoscale import *
+
+
+class ExoDnsDomain(ExoDns):
+
+ def __init__(self, module):
+ super(ExoDnsDomain, self).__init__(module)
+ self.name = self.module.params.get('name').lower()
+
+ def get_domain(self):
+ domains = self.api_query("/domains", "GET")
+ for z in domains:
+ if z['domain']['name'].lower() == self.name:
+ return z
+ return None
+
+ def present_domain(self):
+ domain = self.get_domain()
+ data = {
+ 'domain': {
+ 'name': self.name,
+ }
+ }
+ if not domain:
+ self.result['diff']['after'] = data['domain']
+ self.result['changed'] = True
+ if not self.module.check_mode:
+ domain = self.api_query("/domains", "POST", data)
+ return domain
+
+ def absent_domain(self):
+ domain = self.get_domain()
+ if domain:
+ self.result['diff']['before'] = domain
+ self.result['changed'] = True
+ if not self.module.check_mode:
+ self.api_query("/domains/%s" % domain['domain']['name'], "DELETE")
+ return domain
+
+ def get_result(self, resource):
+ if resource:
+ self.result['exo_dns_domain'] = resource['domain']
+ return self.result
+
+
+def main():
+ argument_spec = exo_dns_argument_spec()
+ argument_spec.update(dict(
+ name=dict(required=True),
+ state=dict(choices=['present', 'absent'], default='present'),
+ ))
+
+ module = AnsibleModule(
+ argument_spec=argument_spec,
+ required_together=exo_dns_required_together(),
+ supports_check_mode=True
+ )
+
+ exo_dns_domain = ExoDnsDomain(module)
+ if module.params.get('state') == "present":
+ resource = exo_dns_domain.present_domain()
+ else:
+ resource = exo_dns_domain.absent_domain()
+ result = exo_dns_domain.get_result(resource)
+
+ module.exit_json(**result)
+
+# import module snippets
+from ansible.module_utils.basic import *
+
+if __name__ == '__main__':
+ main()
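The notes above describe the credential lookup order the Exoscale modules inherit from the CloudStack utilities: environment variables first, then ini files. A rough sketch of that resolution; the real logic lives in ansible.module_utils.exoscale, and the 'key'/'secret' option names follow the usual cloudstack.ini convention and are an assumption here:

    import os
    try:
        import configparser                  # Python 3
    except ImportError:
        import ConfigParser as configparser  # Python 2

    def read_exoscale_creds(region='cloudstack'):
        # 1. environment variables win
        key = os.environ.get('CLOUDSTACK_KEY')
        secret = os.environ.get('CLOUDSTACK_SECRET')
        if key and secret:
            return key, secret
        # 2. otherwise fall back to ini files, in the documented order
        paths = [os.environ.get('CLOUDSTACK_CONFIG'),
                 'cloudstack.ini',
                 os.path.expanduser('~/.cloudstack.ini')]
        cfg = configparser.ConfigParser()
        cfg.read([p for p in paths if p])
        return cfg.get(region, 'key'), cfg.get(region, 'secret')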
diff --git a/lib/ansible/modules/extras/network/exoscale/exo_dns_record.py b/lib/ansible/modules/extras/network/exoscale/exo_dns_record.py
new file mode 100644
index 0000000000..6395990639
--- /dev/null
+++ b/lib/ansible/modules/extras/network/exoscale/exo_dns_record.py
@@ -0,0 +1,391 @@
+#!/usr/bin/python
+# -*- coding: utf-8 -*-
+#
+# (c) 2016, René Moser <mail@renemoser.net>
+#
+# This file is part of Ansible
+#
+# Ansible is free software: you can redistribute it and/or modify
+# it under the terms of the GNU General Public License as published by
+# the Free Software Foundation, either version 3 of the License, or
+# (at your option) any later version.
+#
+# Ansible is distributed in the hope that it will be useful,
+# but WITHOUT ANY WARRANTY; without even the implied warranty of
+# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+# GNU General Public License for more details.
+#
+# You should have received a copy of the GNU General Public License
+# along with Ansible. If not, see <http://www.gnu.org/licenses/>.
+
+DOCUMENTATION = '''
+---
+module: exo_dns_record
+short_description: Manages DNS records on Exoscale DNS.
+description:
+ - Create, update and delete records.
+version_added: "2.2"
+author: "René Moser (@resmo)"
+options:
+ name:
+ description:
+ - Name of the record.
+ required: false
+ default: ""
+ domain:
+ description:
+ - Domain the record is related to.
+ required: true
+ record_type:
+ description:
+ - Type of the record.
+ required: false
+ default: A
+ choices: ['A', 'ALIAS', 'CNAME', 'MX', 'SPF', 'URL', 'TXT', 'NS', 'SRV', 'NAPTR', 'PTR', 'AAAA', 'SSHFP', 'HINFO', 'POOL']
+ aliases: ['rtype', 'type']
+ content:
+ description:
+ - Content of the record.
+ - Required if C(state=present) or C(name="")
+ required: false
+ default: null
+ aliases: ['value', 'address']
+ ttl:
+ description:
+ - TTL of the record in seconds.
+ required: false
+ default: 3600
+ prio:
+ description:
+ - Priority of the record.
+ required: false
+ default: null
+ aliases: ['priority']
+    multiple:
+        description:
+            - Whether there can be more than one record with a similar C(name).
+            - Only allowed with C(record_type=A).
+            - C(content) will not be updated, as it is used as the key to find the record.
+        required: false
+        default: null
+ state:
+ description:
+ - State of the record.
+ required: false
+ default: 'present'
+ choices: [ 'present', 'absent' ]
+ api_key:
+ description:
+ - API key of the Exoscale DNS API.
+ required: false
+ default: null
+ api_secret:
+ description:
+ - Secret key of the Exoscale DNS API.
+ required: false
+ default: null
+ api_timeout:
+ description:
+ - HTTP timeout to Exoscale DNS API.
+ required: false
+ default: 10
+ api_region:
+ description:
+            - Name of the ini section in the C(cloudstack.ini) file.
+ required: false
+ default: cloudstack
+ validate_certs:
+ description:
+ - Validate SSL certs of the Exoscale DNS API.
+ required: false
+ default: true
+requirements:
+ - "python >= 2.6"
+notes:
+    - As Exoscale DNS uses the same API key and secret for all services, we reuse the config used for Exoscale Compute based on CloudStack.
+      The config is read from several locations, in the following order.
+      The C(CLOUDSTACK_KEY), C(CLOUDSTACK_SECRET) environment variables.
+      A C(CLOUDSTACK_CONFIG) environment variable pointing to an C(.ini) file.
+      A C(cloudstack.ini) file in the current working directory.
+      A C(.cloudstack.ini) file in the user's home directory.
+      Optionally multiple credentials and endpoints can be specified using ini sections in C(cloudstack.ini).
+      Use the argument C(api_region) to select the section name, default section is C(cloudstack).
+    - Multiple A records with the same name are only supported with C(multiple=yes); otherwise the module fails if more than one record matches.
+    - More information about Exoscale DNS can be found at U(https://community.exoscale.ch/documentation/dns/).
+    - This module supports check mode and diff.
+'''
+
+EXAMPLES = '''
+# Create or update an A record.
+- local_action:
+ module: exo_dns_record
+ name: web-vm-1
+ domain: example.com
+ content: 1.2.3.4
+
+# Update an existing A record with a new IP.
+- local_action:
+ module: exo_dns_record
+ name: web-vm-1
+ domain: example.com
+ content: 1.2.3.5
+
+# Create another A record with same name.
+- local_action:
+ module: exo_dns_record
+ name: web-vm-1
+ domain: example.com
+ content: 1.2.3.6
+ multiple: yes
+
+# Create or update a CNAME record.
+- local_action:
+ module: exo_dns_record
+ name: www
+ domain: example.com
+ record_type: CNAME
+ content: web-vm-1
+
+# Create or update a MX record.
+- local_action:
+ module: exo_dns_record
+ domain: example.com
+ record_type: MX
+ content: mx1.example.com
+ prio: 10
+
+# delete a MX record.
+- local_action:
+ module: exo_dns_record
+ domain: example.com
+ record_type: MX
+ content: mx1.example.com
+ state: absent
+
+# Remove a record.
+- local_action:
+ module: exo_dns_record
+ name: www
+ domain: example.com
+ state: absent
+'''
+
+RETURN = '''
+---
+exo_dns_record:
+ description: API record results
+ returned: success
+ type: dictionary
+ contains:
+ content:
+ description: value of the record
+ returned: success
+ type: string
+ sample: 1.2.3.4
+ created_at:
+ description: When the record was created
+ returned: success
+ type: string
+ sample: "2016-08-12T15:24:23.989Z"
+ domain:
+ description: Name of the domain
+ returned: success
+ type: string
+ sample: example.com
+ domain_id:
+ description: ID of the domain
+ returned: success
+ type: int
+ sample: 254324
+ id:
+ description: ID of the record
+ returned: success
+ type: int
+ sample: 254324
+ name:
+ description: name of the record
+ returned: success
+ type: string
+ sample: www
+ parent_id:
+ description: ID of the parent
+ returned: success
+ type: int
+ sample: null
+ prio:
+ description: Priority of the record
+ returned: success
+ type: int
+ sample: 10
+ record_type:
+            description: Type of the record
+ returned: success
+ type: string
+ sample: A
+ system_record:
+ description: Whether the record is a system record or not
+ returned: success
+ type: bool
+ sample: false
+ ttl:
+ description: Time to live of the record
+ returned: success
+ type: int
+ sample: 3600
+ updated_at:
+ description: When the record was updated
+ returned: success
+ type: string
+ sample: "2016-08-12T15:24:23.989Z"
+'''
+
+# import exoscale common
+from ansible.module_utils.exoscale import *
+
+
+class ExoDnsRecord(ExoDns):
+
+ def __init__(self, module):
+ super(ExoDnsRecord, self).__init__(module)
+
+ self.content = self.module.params.get('content')
+ if self.content:
+ self.content = self.content.lower()
+
+ self.domain = self.module.params.get('domain').lower()
+ self.name = self.module.params.get('name').lower()
+ if self.name == self.domain:
+ self.name = ""
+
+ self.multiple = self.module.params.get('multiple')
+ self.record_type = self.module.params.get('record_type')
+ if self.multiple and self.record_type != 'A':
+ self.module.fail_json("Multiple is only usable with record_type A")
+
+
+ def _create_record(self, record):
+ self.result['changed'] = True
+ data = {
+ 'record': {
+ 'name': self.name,
+ 'record_type': self.record_type,
+ 'content': self.content,
+ 'ttl': self.module.params.get('ttl'),
+ 'prio': self.module.params.get('prio'),
+ }
+ }
+ self.result['diff']['after'] = data['record']
+ if not self.module.check_mode:
+ record = self.api_query("/domains/%s/records" % self.domain, "POST", data)
+ return record
+
+ def _update_record(self, record):
+ data = {
+ 'record': {
+ 'name': self.name,
+ 'content': self.content,
+ 'ttl': self.module.params.get('ttl'),
+ 'prio': self.module.params.get('prio'),
+ }
+ }
+ if self.has_changed(data['record'], record['record']):
+ self.result['changed'] = True
+ if not self.module.check_mode:
+ record = self.api_query("/domains/%s/records/%s" % (self.domain, record['record']['id']), "PUT", data)
+ return record
+
+ def get_record(self):
+ domain = self.module.params.get('domain')
+ records = self.api_query("/domains/%s/records" % domain, "GET")
+
+ record = None
+ for r in records:
+ found_record = None
+ if r['record']['record_type'] == self.record_type:
+ r_name = r['record']['name'].lower()
+ r_content = r['record']['content'].lower()
+
+ # there are multiple A records but we found an exact match
+ if self.multiple and self.name == r_name and self.content == r_content:
+ record = r
+ break
+
+            # We do not expect to find more than one record with that content
+ if not self.multiple and not self.name and self.content == r_content:
+ found_record = r
+
+            # We do not expect to find more than one record with that name
+ elif not self.multiple and self.name and self.name == r_name:
+ found_record = r
+
+ if record and found_record:
+ self.module.fail_json(msg="More than one record with your params. Use multiple=yes for more than one A record.")
+ if found_record:
+ record = found_record
+ return record
+
+    def present_record(self):
+        record = self.get_record()
+        if not record:
+            record = self._create_record(record)
+        else:
+            record = self._update_record(record)
+        return record
+
+ def absent_record(self):
+ record = self.get_record()
+ if record:
+ self.result['diff']['before'] = record
+ self.result['changed'] = True
+ if not self.module.check_mode:
+ self.api_query("/domains/%s/records/%s" % (self.domain, record['record']['id']), "DELETE")
+ return record
+
+ def get_result(self, resource):
+ if resource:
+ self.result['exo_dns_record'] = resource['record']
+ self.result['exo_dns_record']['domain'] = self.domain
+ return self.result
+
+
+def main():
+ argument_spec = exo_dns_argument_spec()
+ argument_spec.update(dict(
+ name=dict(default=""),
+ record_type=dict(choices=['A', 'ALIAS', 'CNAME', 'MX', 'SPF', 'URL', 'TXT', 'NS', 'SRV', 'NAPTR', 'PTR', 'AAAA', 'SSHFP', 'HINFO', 'POOL'], aliases=['rtype', 'type'], default='A'),
+ content=dict(aliases=['value', 'address']),
+        multiple=dict(type='bool', default=False),
+ ttl=dict(type='int', default=3600),
+ prio=dict(type='int', aliases=['priority']),
+ domain=dict(required=True),
+ state=dict(choices=['present', 'absent'], default='present'),
+ ))
+
+ module = AnsibleModule(
+ argument_spec=argument_spec,
+ required_together=exo_dns_required_together(),
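+        # content must be supplied when state is present, and also when name
+        # is left empty (i.e. when targeting the domain apex)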
+ required_if=[
+ ['state', 'present', ['content']],
+ ['name', '', ['content']],
+ ],
+ required_one_of=[
+ ['content', 'name'],
+ ],
+ supports_check_mode=True,
+ )
+
+ exo_dns_record = ExoDnsRecord(module)
+ if module.params.get('state') == "present":
+ resource = exo_dns_record.present_record()
+ else:
+ resource = exo_dns_record.absent_record()
+
+ result = exo_dns_record.get_result(resource)
+ module.exit_json(**result)
+
+# import module snippets
+from ansible.module_utils.basic import *
+
+if __name__ == '__main__':
+ main()
diff --git a/lib/ansible/modules/extras/network/f5/__init__.py b/lib/ansible/modules/extras/network/f5/__init__.py
new file mode 100644
index 0000000000..e69de29bb2
--- /dev/null
+++ b/lib/ansible/modules/extras/network/f5/__init__.py
diff --git a/lib/ansible/modules/extras/network/f5/bigip_device_dns.py b/lib/ansible/modules/extras/network/f5/bigip_device_dns.py
new file mode 100644
index 0000000000..c469fc4bff
--- /dev/null
+++ b/lib/ansible/modules/extras/network/f5/bigip_device_dns.py
@@ -0,0 +1,397 @@
+#!/usr/bin/python
+# -*- coding: utf-8 -*-
+#
+# This file is part of Ansible
+#
+# Ansible is free software: you can redistribute it and/or modify
+# it under the terms of the GNU General Public License as published by
+# the Free Software Foundation, either version 3 of the License, or
+# (at your option) any later version.
+#
+# Ansible is distributed in the hope that it will be useful,
+# but WITHOUT ANY WARRANTY; without even the implied warranty of
+# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+# GNU General Public License for more details.
+#
+# You should have received a copy of the GNU General Public License
+# along with Ansible. If not, see <http://www.gnu.org/licenses/>.
+
+DOCUMENTATION = '''
+---
+module: bigip_device_dns
+short_description: Manage BIG-IP device DNS settings
+description:
+ - Manage BIG-IP device DNS settings
+version_added: "2.2"
+options:
+ cache:
+ description:
+ - Specifies whether the system caches DNS lookups or performs the
+ operation each time a lookup is needed. Please note that this applies
+ only to Access Policy Manager features, such as ACLs, web application
+ rewrites, and authentication.
+ required: false
+ default: disable
+ choices:
+ - enable
+ - disable
+ name_servers:
+ description:
+      - A list of name servers that the system uses to validate DNS lookups
+ forwarders:
+ description:
+ - A list of BIND servers that the system can use to perform DNS lookups
+ search:
+ description:
+ - A list of domains that the system searches for local domain lookups,
+ to resolve local host names.
+ ip_version:
+ description:
+      - Specifies whether DNS lookups resolve to IPv4 or IPv6 addresses.
+ required: false
+ choices:
+ - 4
+ - 6
+ state:
+ description:
+      - The state of the DNS settings on the system. When C(present),
+        guarantees that the settings exist on the system. When C(absent),
+        removes the specified settings from the device configuration.
+ required: false
+ default: present
+ choices:
+ - absent
+ - present
+notes:
+ - Requires the f5-sdk Python package on the host. This is as easy as pip
+    install f5-sdk.
+extends_documentation_fragment: f5
+requirements:
+ - f5-sdk
+author:
+ - Tim Rupp (@caphrim007)
+'''
+
+EXAMPLES = '''
+- name: Set the DNS settings on the BIG-IP
+ bigip_device_dns:
+ name_servers:
+ - 208.67.222.222
+ - 208.67.220.220
+ search:
+ - localdomain
+ - lab.local
+ state: present
+ password: "secret"
+ server: "lb.mydomain.com"
+ user: "admin"
+ validate_certs: "no"
+ delegate_to: localhost
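+
+# A hypothetical variation on the task above (same assumed credentials):
+# enable DNS caching and point lookups at BIND forwarders in one task.
+- name: Enable DNS caching and set forwarders on the BIG-IP
+  bigip_device_dns:
+      cache: "enable"
+      forwarders:
+          - "192.0.2.10"
+          - "192.0.2.11"
+      password: "secret"
+      server: "lb.mydomain.com"
+      user: "admin"
+      validate_certs: "no"
+  delegate_to: localhost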
+'''
+
+RETURN = '''
+cache:
+ description: The new value of the DNS caching
+ returned: changed
+ type: string
+ sample: "enabled"
+name_servers:
+ description: List of name servers that were added or removed
+ returned: changed
+ type: list
+ sample: "['192.0.2.10', '172.17.12.10']"
+forwarders:
+ description: List of forwarders that were added or removed
+ returned: changed
+ type: list
+ sample: "['192.0.2.10', '172.17.12.10']"
+search:
+ description: List of search domains that were added or removed
+ returned: changed
+ type: list
+  sample: "['localdomain', 'lab.local']"
+ip_version:
+  description: IP version that DNS will use when specifying IP addresses
+ returned: changed
+ type: int
+ sample: 4
+'''
+
+try:
+ from f5.bigip.contexts import TransactionContextManager
+ from f5.bigip import ManagementRoot
+ HAS_F5SDK = True
+except ImportError:
+ HAS_F5SDK = False
+
+
+REQUIRED = ['name_servers', 'search', 'forwarders', 'ip_version', 'cache']
+CACHE = ['disable', 'enable']
+IP = [4, 6]
+
+
+class BigIpDeviceDns(object):
+ def __init__(self, *args, **kwargs):
+ if not HAS_F5SDK:
+ raise F5ModuleError("The python f5-sdk module is required")
+
+ # The params that change in the module
+ self.cparams = dict()
+
+ # Stores the params that are sent to the module
+ self.params = kwargs
+ self.api = ManagementRoot(kwargs['server'],
+ kwargs['user'],
+ kwargs['password'],
+ port=kwargs['server_port'])
+
+ def flush(self):
+ result = dict()
+ changed = False
+ state = self.params['state']
+
+ if self.dhcp_enabled():
+ raise F5ModuleError(
+ "DHCP on the mgmt interface must be disabled to make use of " +
+ "this module"
+ )
+
+ if state == 'absent':
+ changed = self.absent()
+ else:
+ changed = self.present()
+
+ result.update(**self.cparams)
+ result.update(dict(changed=changed))
+ return result
+
+ def dhcp_enabled(self):
+ r = self.api.tm.sys.dbs.db.load(name='dhclient.mgmt')
+ if r.value == 'enable':
+ return True
+ else:
+ return False
+
+ def read(self):
+ result = dict()
+
+ cache = self.api.tm.sys.dbs.db.load(name='dns.cache')
+ proxy = self.api.tm.sys.dbs.db.load(name='dns.proxy.__iter__')
+ dns = self.api.tm.sys.dns.load()
+
+ result['cache'] = str(cache.value)
+ result['forwarders'] = str(proxy.value).split(' ')
+
+ if hasattr(dns, 'nameServers'):
+ result['name_servers'] = dns.nameServers
+ if hasattr(dns, 'search'):
+ result['search'] = dns.search
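+        # BIG-IP expresses IPv6 DNS resolution as an 'options inet6'
+        # include directive rather than a dedicated attribute, so the
+        # ip_version fact is inferred from it here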
+ if hasattr(dns, 'include') and 'options inet6' in dns.include:
+ result['ip_version'] = 6
+ else:
+ result['ip_version'] = 4
+ return result
+
+ def present(self):
+ params = dict()
+ current = self.read()
+
+ # Temporary locations to hold the changed params
+ update = dict(
+ dns=None,
+ forwarders=None,
+ cache=None
+ )
+
+ nameservers = self.params['name_servers']
+ search_domains = self.params['search']
+ ip_version = self.params['ip_version']
+ forwarders = self.params['forwarders']
+ cache = self.params['cache']
+ check_mode = self.params['check_mode']
+
+ if nameservers:
+ if 'name_servers' in current:
+ if nameservers != current['name_servers']:
+ params['nameServers'] = nameservers
+ else:
+ params['nameServers'] = nameservers
+
+ if search_domains:
+ if 'search' in current:
+ if search_domains != current['search']:
+ params['search'] = search_domains
+ else:
+ params['search'] = search_domains
+
+ if ip_version:
+ if 'ip_version' in current:
+ if ip_version != int(current['ip_version']):
+ if ip_version == 6:
+ params['include'] = 'options inet6'
+ elif ip_version == 4:
+ params['include'] = ''
+ else:
+ if ip_version == 6:
+ params['include'] = 'options inet6'
+ elif ip_version == 4:
+ params['include'] = ''
+
+ if params:
+ self.cparams.update(camel_dict_to_snake_dict(params))
+
+ if 'include' in params:
+ del self.cparams['include']
+ if params['include'] == '':
+ self.cparams['ip_version'] = 4
+ else:
+ self.cparams['ip_version'] = 6
+
+ update['dns'] = params.copy()
+ params = dict()
+
+ if forwarders:
+ if 'forwarders' in current:
+ if forwarders != current['forwarders']:
+ params['forwarders'] = forwarders
+ else:
+ params['forwarders'] = forwarders
+
+ if params:
+ self.cparams.update(camel_dict_to_snake_dict(params))
+ update['forwarders'] = ' '.join(params['forwarders'])
+ params = dict()
+
+ if cache:
+ if 'cache' in current:
+ if cache != current['cache']:
+ params['cache'] = cache
+
+ if params:
+ self.cparams.update(camel_dict_to_snake_dict(params))
+ update['cache'] = params['cache']
+ params = dict()
+
+ if self.cparams:
+ changed = True
+ if check_mode:
+ return changed
+ else:
+ return False
+
+ tx = self.api.tm.transactions.transaction
+ with TransactionContextManager(tx) as api:
+ cache = api.tm.sys.dbs.db.load(name='dns.cache')
+ proxy = api.tm.sys.dbs.db.load(name='dns.proxy.__iter__')
+ dns = api.tm.sys.dns.load()
+
+ # Empty values can be supplied, but you cannot supply the
+ # None value, so we check for that specifically
+ if update['cache'] is not None:
+ cache.update(value=update['cache'])
+ if update['forwarders'] is not None:
+ proxy.update(value=update['forwarders'])
+ if update['dns'] is not None:
+ dns.update(**update['dns'])
+ return changed
+
+ def absent(self):
+ params = dict()
+ current = self.read()
+
+ # Temporary locations to hold the changed params
+ update = dict(
+ dns=None,
+ forwarders=None
+ )
+
+ nameservers = self.params['name_servers']
+ search_domains = self.params['search']
+ forwarders = self.params['forwarders']
+ check_mode = self.params['check_mode']
+
+ if forwarders and 'forwarders' in current:
+ set_current = set(current['forwarders'])
+ set_new = set(forwarders)
+
+ forwarders = set_current - set_new
+ if forwarders != set_current:
+ forwarders = list(forwarders)
+ params['forwarders'] = ' '.join(forwarders)
+
+ if params:
+ changed = True
+ self.cparams.update(camel_dict_to_snake_dict(params))
+ update['forwarders'] = params['forwarders']
+ params = dict()
+
+ if nameservers and 'name_servers' in current:
+ set_current = set(current['name_servers'])
+ set_new = set(nameservers)
+
+ nameservers = set_current - set_new
+ if nameservers != set_current:
+ params['nameServers'] = list(nameservers)
+
+ if search_domains and 'search' in current:
+ set_current = set(current['search'])
+ set_new = set(search_domains)
+
+ search_domains = set_current - set_new
+ if search_domains != set_current:
+ params['search'] = list(search_domains)
+
+ if params:
+ changed = True
+ self.cparams.update(camel_dict_to_snake_dict(params))
+ update['dns'] = params.copy()
+ params = dict()
+
+ if not self.cparams:
+ return False
+
+ if check_mode:
+ return changed
+
+ tx = self.api.tm.transactions.transaction
+ with TransactionContextManager(tx) as api:
+ proxy = api.tm.sys.dbs.db.load(name='dns.proxy.__iter__')
+ dns = api.tm.sys.dns.load()
+
+ if update['forwarders'] is not None:
+ proxy.update(value=update['forwarders'])
+ if update['dns'] is not None:
+ dns.update(**update['dns'])
+ return changed
+
+
+def main():
+ argument_spec = f5_argument_spec()
+
+ meta_args = dict(
+ cache=dict(required=False, choices=CACHE, default=None),
+ name_servers=dict(required=False, default=None, type='list'),
+ forwarders=dict(required=False, default=None, type='list'),
+ search=dict(required=False, default=None, type='list'),
+ ip_version=dict(required=False, default=None, choices=IP, type='int')
+ )
+ argument_spec.update(meta_args)
+ module = AnsibleModule(
+ argument_spec=argument_spec,
+ required_one_of=[REQUIRED],
+ supports_check_mode=True
+ )
+
+ try:
+ obj = BigIpDeviceDns(check_mode=module.check_mode, **module.params)
+ result = obj.flush()
+
+ module.exit_json(**result)
+ except F5ModuleError as e:
+ module.fail_json(msg=str(e))
+
+from ansible.module_utils.basic import *
+from ansible.module_utils.ec2 import camel_dict_to_snake_dict
+from ansible.module_utils.f5 import *
+
+if __name__ == '__main__':
+ main()
diff --git a/lib/ansible/modules/extras/network/f5/bigip_device_ntp.py b/lib/ansible/modules/extras/network/f5/bigip_device_ntp.py
new file mode 100644
index 0000000000..6dab16a3cb
--- /dev/null
+++ b/lib/ansible/modules/extras/network/f5/bigip_device_ntp.py
@@ -0,0 +1,257 @@
+#!/usr/bin/python
+# -*- coding: utf-8 -*-
+#
+# This file is part of Ansible
+#
+# Ansible is free software: you can redistribute it and/or modify
+# it under the terms of the GNU General Public License as published by
+# the Free Software Foundation, either version 3 of the License, or
+# (at your option) any later version.
+#
+# Ansible is distributed in the hope that it will be useful,
+# but WITHOUT ANY WARRANTY; without even the implied warranty of
+# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+# GNU General Public License for more details.
+#
+# You should have received a copy of the GNU General Public License
+# along with Ansible. If not, see <http://www.gnu.org/licenses/>.
+
+DOCUMENTATION = '''
+---
+module: bigip_device_ntp
+short_description: Manage NTP servers on a BIG-IP
+description:
+ - Manage NTP servers on a BIG-IP
+version_added: "2.2"
+options:
+ ntp_servers:
+ description:
+ - A list of NTP servers to set on the device. At least one of C(ntp_servers)
+ or C(timezone) is required.
+ required: false
+ default: []
+ state:
+ description:
+ - The state of the NTP servers on the system. When C(present), guarantees
+ that the NTP servers are set on the system. When C(absent), removes the
+ specified NTP servers from the device configuration.
+ required: false
+ default: present
+ choices:
+ - absent
+ - present
+ timezone:
+ description:
+ - The timezone to set for NTP lookups. At least one of C(ntp_servers) or
+ C(timezone) is required.
+ default: UTC
+ required: false
+notes:
+ - Requires the f5-sdk Python package on the host. This is as easy as pip
+ install f5-sdk.
+extends_documentation_fragment: f5
+requirements:
+ - f5-sdk
+author:
+ - Tim Rupp (@caphrim007)
+'''
+
+EXAMPLES = '''
+- name: Set NTP server
+ bigip_device_ntp:
+ ntp_servers:
+ - "192.0.2.23"
+ password: "secret"
+ server: "lb.mydomain.com"
+ user: "admin"
+ validate_certs: "no"
+ delegate_to: localhost
+
+- name: Set timezone
+ bigip_device_ntp:
+ password: "secret"
+ server: "lb.mydomain.com"
+ timezone: "America/Los_Angeles"
+ user: "admin"
+ validate_certs: "no"
+ delegate_to: localhost
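+
+# A hypothetical variation (same assumed credentials): state=absent removes
+# the listed servers from the device configuration.
+- name: Remove NTP server
+  bigip_device_ntp:
+      ntp_servers:
+          - "192.0.2.23"
+      state: "absent"
+      password: "secret"
+      server: "lb.mydomain.com"
+      user: "admin"
+      validate_certs: "no"
+  delegate_to: localhost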
+'''
+
+RETURN = '''
+ntp_servers:
+ description: The NTP servers that were set on the device
+ returned: changed
+ type: list
+ sample: ["192.0.2.23", "192.0.2.42"]
+timezone:
+ description: The timezone that was set on the device
+ returned: changed
+ type: string
+  sample: "America/Los_Angeles"
+'''
+
+try:
+ from f5.bigip import ManagementRoot
+ from icontrol.session import iControlUnexpectedHTTPError
+ HAS_F5SDK = True
+except ImportError:
+ HAS_F5SDK = False
+
+
+class BigIpDeviceNtp(object):
+ def __init__(self, *args, **kwargs):
+ if not HAS_F5SDK:
+ raise F5ModuleError("The python f5-sdk module is required")
+
+ # The params that change in the module
+ self.cparams = dict()
+
+ # Stores the params that are sent to the module
+ self.params = kwargs
+ self.api = ManagementRoot(kwargs['server'],
+ kwargs['user'],
+ kwargs['password'],
+ port=kwargs['server_port'])
+
+ def flush(self):
+ result = dict()
+ changed = False
+ state = self.params['state']
+
+ try:
+ if state == "present":
+ changed = self.present()
+ elif state == "absent":
+ changed = self.absent()
+ except iControlUnexpectedHTTPError as e:
+ raise F5ModuleError(str(e))
+
+ if 'servers' in self.cparams:
+ self.cparams['ntp_servers'] = self.cparams.pop('servers')
+
+ result.update(**self.cparams)
+ result.update(dict(changed=changed))
+ return result
+
+ def read(self):
+ """Read information and transform it
+
+ The values that are returned by BIG-IP in the f5-sdk can have encoding
+ attached to them as well as be completely missing in some cases.
+
+ Therefore, this method will transform the data from the BIG-IP into a
+ format that is more easily consumable by the rest of the class and the
+ parameters that are supported by the module.
+ """
+ p = dict()
+ r = self.api.tm.sys.ntp.load()
+
+ if hasattr(r, 'servers'):
+            # Deliberately using sets to suppress duplicates
+ p['servers'] = set([str(x) for x in r.servers])
+ if hasattr(r, 'timezone'):
+ p['timezone'] = str(r.timezone)
+ return p
+
+ def present(self):
+ changed = False
+ params = dict()
+ current = self.read()
+
+ check_mode = self.params['check_mode']
+ ntp_servers = self.params['ntp_servers']
+ timezone = self.params['timezone']
+
+ # NTP servers can be set independently
+ if ntp_servers is not None:
+ if 'servers' in current:
+ items = set(ntp_servers)
+ if items != current['servers']:
+ params['servers'] = list(ntp_servers)
+ else:
+ params['servers'] = ntp_servers
+
+ # Timezone can be set independently
+ if timezone is not None:
+ if 'timezone' in current and current['timezone'] != timezone:
+ params['timezone'] = timezone
+
+ if params:
+ changed = True
+ self.cparams = camel_dict_to_snake_dict(params)
+ if check_mode:
+ return changed
+ else:
+ return changed
+
+ r = self.api.tm.sys.ntp.load()
+ r.update(**params)
+ r.refresh()
+
+ return changed
+
+ def absent(self):
+ changed = False
+ params = dict()
+ current = self.read()
+
+ check_mode = self.params['check_mode']
+ ntp_servers = self.params['ntp_servers']
+
+ if not ntp_servers:
+ raise F5ModuleError(
+ "Absent can only be used when removing NTP servers"
+ )
+
+ if ntp_servers and 'servers' in current:
+ servers = current['servers']
+ new_servers = [x for x in servers if x not in ntp_servers]
+
+ if servers != new_servers:
+ params['servers'] = new_servers
+
+ if params:
+ changed = True
+ self.cparams = camel_dict_to_snake_dict(params)
+ if check_mode:
+ return changed
+ else:
+ return changed
+
+ r = self.api.tm.sys.ntp.load()
+ r.update(**params)
+ r.refresh()
+ return changed
+
+
+def main():
+ argument_spec = f5_argument_spec()
+
+ meta_args = dict(
+ ntp_servers=dict(required=False, type='list', default=None),
+ timezone=dict(default=None, required=False)
+ )
+ argument_spec.update(meta_args)
+
+ module = AnsibleModule(
+ argument_spec=argument_spec,
+ required_one_of=[
+ ['ntp_servers', 'timezone']
+ ],
+ supports_check_mode=True
+ )
+
+ try:
+ obj = BigIpDeviceNtp(check_mode=module.check_mode, **module.params)
+ result = obj.flush()
+
+ module.exit_json(**result)
+ except F5ModuleError as e:
+ module.fail_json(msg=str(e))
+
+from ansible.module_utils.basic import *
+from ansible.module_utils.ec2 import camel_dict_to_snake_dict
+from ansible.module_utils.f5 import *
+
+if __name__ == '__main__':
+ main()
diff --git a/lib/ansible/modules/extras/network/f5/bigip_device_sshd.py b/lib/ansible/modules/extras/network/f5/bigip_device_sshd.py
new file mode 100644
index 0000000000..e7a87a4e08
--- /dev/null
+++ b/lib/ansible/modules/extras/network/f5/bigip_device_sshd.py
@@ -0,0 +1,344 @@
+#!/usr/bin/python
+# -*- coding: utf-8 -*-
+#
+# This file is part of Ansible
+#
+# Ansible is free software: you can redistribute it and/or modify
+# it under the terms of the GNU General Public License as published by
+# the Free Software Foundation, either version 3 of the License, or
+# (at your option) any later version.
+#
+# Ansible is distributed in the hope that it will be useful,
+# but WITHOUT ANY WARRANTY; without even the implied warranty of
+# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+# GNU General Public License for more details.
+#
+# You should have received a copy of the GNU General Public License
+# along with Ansible. If not, see <http://www.gnu.org/licenses/>.
+
+DOCUMENTATION = '''
+---
+module: bigip_device_sshd
+short_description: Manage the SSHD settings of a BIG-IP
+description:
+ - Manage the SSHD settings of a BIG-IP
+version_added: "2.2"
+options:
+ allow:
+ description:
+ - Specifies, if you have enabled SSH access, the IP address or address
+ range for other systems that can use SSH to communicate with this
+ system.
+ choices:
+ - all
+ - IP address, such as 172.27.1.10
+ - IP range, such as 172.27.*.* or 172.27.0.0/255.255.0.0
+ banner:
+ description:
+ - Whether to enable the banner or not.
+ required: false
+ choices:
+ - enabled
+ - disabled
+ banner_text:
+ description:
+ - Specifies the text to include on the pre-login banner that displays
+ when a user attempts to login to the system using SSH.
+ required: false
+ inactivity_timeout:
+ description:
+ - Specifies the number of seconds before inactivity causes an SSH
+ session to log out.
+ required: false
+ log_level:
+ description:
+ - Specifies the minimum SSHD message level to include in the system log.
+ choices:
+ - debug
+ - debug1
+ - debug2
+ - debug3
+ - error
+ - fatal
+ - info
+ - quiet
+ - verbose
+ login:
+ description:
+      - Specifies, when C(enabled), that the system accepts SSH
+ communications.
+ choices:
+ - enabled
+ - disabled
+ required: false
+ port:
+ description:
+ - Port that you want the SSH daemon to run on.
+ required: false
+notes:
+  - Requires the f5-sdk Python package on the host. This is as easy as pip
+ install f5-sdk.
+ - Requires BIG-IP version 12.0.0 or greater
+extends_documentation_fragment: f5
+requirements:
+ - f5-sdk
+author:
+ - Tim Rupp (@caphrim007)
+'''
+
+EXAMPLES = '''
+- name: Set the banner for the SSHD service from a string
+ bigip_device_sshd:
+ banner: "enabled"
+ banner_text: "banner text goes here"
+ password: "secret"
+ server: "lb.mydomain.com"
+ user: "admin"
+ delegate_to: localhost
+
+- name: Set the banner for the SSHD service from a file
+ bigip_device_sshd:
+ banner: "enabled"
+ banner_text: "{{ lookup('file', '/path/to/file') }}"
+ password: "secret"
+ server: "lb.mydomain.com"
+ user: "admin"
+ delegate_to: localhost
+
+- name: Set the SSHD service to run on port 2222
+ bigip_device_sshd:
+ password: "secret"
+ port: 2222
+ server: "lb.mydomain.com"
+ user: "admin"
+ delegate_to: localhost
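+
+# A hypothetical variation (same assumed credentials): tighten the idle
+# timeout and raise the SSHD log verbosity in one task.
+- name: Set the SSHD inactivity timeout and log level
+  bigip_device_sshd:
+      inactivity_timeout: 300
+      log_level: "verbose"
+      password: "secret"
+      server: "lb.mydomain.com"
+      user: "admin"
+  delegate_to: localhost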
+'''
+
+RETURN = '''
+allow:
+ description: |
+ Specifies, if you have enabled SSH access, the IP address or address
+ range for other systems that can use SSH to communicate with this
+ system.
+ returned: changed
+ type: list
+ sample: "192.0.2.*"
+banner:
+ description: Whether the banner is enabled or not.
+ returned: changed
+ type: string
+  sample: "enabled"
+banner_text:
+ description: |
+ Specifies the text included on the pre-login banner that
+ displays when a user attempts to login to the system using SSH.
+ returned: changed and success
+ type: string
+ sample: "This is a corporate device. Connecting to it without..."
+inactivity_timeout:
+  description: |
+      The number of seconds before inactivity causes an SSH
+      session to log out.
+  returned: changed
+  type: int
+  sample: 10
+log_level:
+ description: The minimum SSHD message level to include in the system log.
+ returned: changed
+ type: string
+ sample: "debug"
+login:
+ description: Specifies that the system accepts SSH communications or not.
+  returned: changed
+ type: bool
+ sample: true
+port:
+ description: Port that you want the SSH daemon to run on.
+  returned: changed
+ type: int
+ sample: 22
+'''
+
+try:
+ from f5.bigip import ManagementRoot
+ from icontrol.session import iControlUnexpectedHTTPError
+ HAS_F5SDK = True
+except ImportError:
+ HAS_F5SDK = False
+
+CHOICES = ['enabled', 'disabled']
+LEVELS = ['debug', 'debug1', 'debug2', 'debug3', 'error', 'fatal', 'info',
+ 'quiet', 'verbose']
+
+
+class BigIpDeviceSshd(object):
+ def __init__(self, *args, **kwargs):
+ if not HAS_F5SDK:
+ raise F5ModuleError("The python f5-sdk module is required")
+
+ # The params that change in the module
+ self.cparams = dict()
+
+ # Stores the params that are sent to the module
+ self.params = kwargs
+ self.api = ManagementRoot(kwargs['server'],
+ kwargs['user'],
+ kwargs['password'],
+ port=kwargs['server_port'])
+
+ def update(self):
+ changed = False
+ current = self.read()
+ params = dict()
+
+ allow = self.params['allow']
+ banner = self.params['banner']
+ banner_text = self.params['banner_text']
+ timeout = self.params['inactivity_timeout']
+ log_level = self.params['log_level']
+ login = self.params['login']
+ port = self.params['port']
+ check_mode = self.params['check_mode']
+
+ if allow:
+ if 'allow' in current:
+ items = set(allow)
+ if items != current['allow']:
+ params['allow'] = list(items)
+ else:
+ params['allow'] = allow
+
+ if banner:
+ if 'banner' in current:
+ if banner != current['banner']:
+ params['banner'] = banner
+ else:
+ params['banner'] = banner
+
+ if banner_text:
+ if 'banner_text' in current:
+ if banner_text != current['banner_text']:
+ params['bannerText'] = banner_text
+ else:
+ params['bannerText'] = banner_text
+
+ if timeout:
+ if 'inactivity_timeout' in current:
+ if timeout != current['inactivity_timeout']:
+ params['inactivityTimeout'] = timeout
+ else:
+ params['inactivityTimeout'] = timeout
+
+ if log_level:
+ if 'log_level' in current:
+ if log_level != current['log_level']:
+ params['logLevel'] = log_level
+ else:
+ params['logLevel'] = log_level
+
+ if login:
+ if 'login' in current:
+ if login != current['login']:
+ params['login'] = login
+ else:
+ params['login'] = login
+
+ if port:
+ if 'port' in current:
+ if port != current['port']:
+ params['port'] = port
+ else:
+ params['port'] = port
+
+        if params:
+            changed = True
+            # record the changed params before a possible check mode return
+            # so they are included in the module result
+            self.cparams = camel_dict_to_snake_dict(params)
+            if check_mode:
+                return changed
+        else:
+            return changed
+
+ r = self.api.tm.sys.sshd.load()
+ r.update(**params)
+ r.refresh()
+
+ return changed
+
+ def read(self):
+ """Read information and transform it
+
+ The values that are returned by BIG-IP in the f5-sdk can have encoding
+ attached to them as well as be completely missing in some cases.
+
+ Therefore, this method will transform the data from the BIG-IP into a
+ format that is more easily consumable by the rest of the class and the
+ parameters that are supported by the module.
+ """
+ p = dict()
+ r = self.api.tm.sys.sshd.load()
+
+ if hasattr(r, 'allow'):
+            # Deliberately using sets to suppress duplicates
+ p['allow'] = set([str(x) for x in r.allow])
+ if hasattr(r, 'banner'):
+ p['banner'] = str(r.banner)
+ if hasattr(r, 'bannerText'):
+ p['banner_text'] = str(r.bannerText)
+ if hasattr(r, 'inactivityTimeout'):
+            p['inactivity_timeout'] = int(r.inactivityTimeout)
+ if hasattr(r, 'logLevel'):
+ p['log_level'] = str(r.logLevel)
+ if hasattr(r, 'login'):
+ p['login'] = str(r.login)
+ if hasattr(r, 'port'):
+ p['port'] = int(r.port)
+ return p
+
+ def flush(self):
+ result = dict()
+ changed = False
+
+ try:
+ changed = self.update()
+ except iControlUnexpectedHTTPError as e:
+ raise F5ModuleError(str(e))
+
+ result.update(**self.cparams)
+ result.update(dict(changed=changed))
+ return result
+
+
+def main():
+ argument_spec = f5_argument_spec()
+
+ meta_args = dict(
+ allow=dict(required=False, default=None, type='list'),
+ banner=dict(required=False, default=None, choices=CHOICES),
+ banner_text=dict(required=False, default=None),
+ inactivity_timeout=dict(required=False, default=None, type='int'),
+ log_level=dict(required=False, default=None, choices=LEVELS),
+ login=dict(required=False, default=None, choices=CHOICES),
+ port=dict(required=False, default=None, type='int'),
+ state=dict(default='present', choices=['present'])
+ )
+ argument_spec.update(meta_args)
+
+ module = AnsibleModule(
+ argument_spec=argument_spec,
+ supports_check_mode=True
+ )
+
+ try:
+ obj = BigIpDeviceSshd(check_mode=module.check_mode, **module.params)
+ result = obj.flush()
+
+ module.exit_json(**result)
+ except F5ModuleError as e:
+ module.fail_json(msg=str(e))
+
+from ansible.module_utils.basic import *
+from ansible.module_utils.ec2 import camel_dict_to_snake_dict
+from ansible.module_utils.f5 import *
+
+if __name__ == '__main__':
+ main()
diff --git a/lib/ansible/modules/extras/network/f5/bigip_facts.py b/lib/ansible/modules/extras/network/f5/bigip_facts.py
new file mode 100644
index 0000000000..dc6c6b7d1d
--- /dev/null
+++ b/lib/ansible/modules/extras/network/f5/bigip_facts.py
@@ -0,0 +1,1724 @@
+#!/usr/bin/python
+# -*- coding: utf-8 -*-
+#
+# (c) 2013, Matt Hite <mhite@hotmail.com>
+#
+# This file is part of Ansible
+#
+# Ansible is free software: you can redistribute it and/or modify
+# it under the terms of the GNU General Public License as published by
+# the Free Software Foundation, either version 3 of the License, or
+# (at your option) any later version.
+#
+# Ansible is distributed in the hope that it will be useful,
+# but WITHOUT ANY WARRANTY; without even the implied warranty of
+# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+# GNU General Public License for more details.
+#
+# You should have received a copy of the GNU General Public License
+# along with Ansible. If not, see <http://www.gnu.org/licenses/>.
+
+DOCUMENTATION = '''
+---
+module: bigip_facts
+short_description: Collect facts from F5 BIG-IP devices
+description:
+ - Collect facts from F5 BIG-IP devices via iControl SOAP API
+version_added: "1.6"
+author:
+ - Matt Hite (@mhite)
+ - Tim Rupp (@caphrim007)
+notes:
+ - Requires BIG-IP software version >= 11.4
+  - Requires the F5-developed Python module 'bigsuds' (see http://devcentral.f5.com)
+ - Best run as a local_action in your playbook
+ - Tested with manager and above account privilege level
+ - C(provision) facts were added in 2.2
+requirements:
+ - bigsuds
+options:
+ session:
+ description:
+ - BIG-IP session support; may be useful to avoid concurrency
+ issues in certain circumstances.
+ required: false
+ default: true
+ choices: []
+ aliases: []
+ include:
+ description:
+ - Fact category or list of categories to collect
+ required: true
+ default: null
+ choices:
+ - address_class
+ - certificate
+ - client_ssl_profile
+ - device
+ - device_group
+ - interface
+ - key
+ - node
+ - pool
+ - provision
+ - rule
+ - self_ip
+ - software
+ - system_info
+ - traffic_group
+ - trunk
+ - virtual_address
+ - virtual_server
+ - vlan
+ aliases: []
+ filter:
+ description:
+ - Shell-style glob matching string used to filter fact keys. Not
+ applicable for software, provision, and system_info fact categories.
+ required: false
+ default: null
+ choices: []
+ aliases: []
+extends_documentation_fragment: f5
+'''
+
+EXAMPLES = '''
+- name: Collect BIG-IP facts
+ bigip_facts:
+ server: "lb.mydomain.com"
+ user: "admin"
+ password: "secret"
+ include: "interface,vlan"
+ delegate_to: localhost
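+
+# A hypothetical variation: restrict the collected facts with a shell-style
+# glob on the fact keys (not applicable to the software, provision, and
+# system_info categories).
+- name: Collect pool facts whose keys match a glob
+  bigip_facts:
+      server: "lb.mydomain.com"
+      user: "admin"
+      password: "secret"
+      include: "pool"
+      filter: "*/pool1"
+  delegate_to: localhost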
+'''
+
+try:
+ from suds import MethodNotFound, WebFault
+except ImportError:
+ bigsuds_found = False
+else:
+ bigsuds_found = True
+
+import fnmatch
+import re
+import traceback
+
+
+class F5(object):
+ """F5 iControl class.
+
+ F5 BIG-IP iControl API class.
+
+ Attributes:
+ api: iControl API instance.
+ """
+
+ def __init__(self, host, user, password, session=False, validate_certs=True, port=443):
+ self.api = bigip_api(host, user, password, validate_certs, port)
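+        # pinning calls to a single session ID helps avoid the concurrency
+        # issues mentioned in the 'session' option documentation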
+ if session:
+ self.start_session()
+
+ def start_session(self):
+ self.api = self.api.with_session_id()
+
+ def get_api(self):
+ return self.api
+
+ def set_recursive_query_state(self, state):
+ self.api.System.Session.set_recursive_query_state(state)
+
+ def get_recursive_query_state(self):
+ return self.api.System.Session.get_recursive_query_state()
+
+ def enable_recursive_query_state(self):
+ self.set_recursive_query_state('STATE_ENABLED')
+
+ def disable_recursive_query_state(self):
+ self.set_recursive_query_state('STATE_DISABLED')
+
+ def set_active_folder(self, folder):
+ self.api.System.Session.set_active_folder(folder=folder)
+
+ def get_active_folder(self):
+ return self.api.System.Session.get_active_folder()
+
+
+class Interfaces(object):
+ """Interfaces class.
+
+ F5 BIG-IP interfaces class.
+
+ Attributes:
+ api: iControl API instance.
+ interfaces: A list of BIG-IP interface names.
+ """
+
+ def __init__(self, api, regex=None):
+ self.api = api
+ self.interfaces = api.Networking.Interfaces.get_list()
+ if regex:
+ re_filter = re.compile(regex)
+ self.interfaces = filter(re_filter.search, self.interfaces)
+
+ def get_list(self):
+ return self.interfaces
+
+ def get_active_media(self):
+ return self.api.Networking.Interfaces.get_active_media(self.interfaces)
+
+ def get_actual_flow_control(self):
+ return self.api.Networking.Interfaces.get_actual_flow_control(self.interfaces)
+
+ def get_bundle_state(self):
+ return self.api.Networking.Interfaces.get_bundle_state(self.interfaces)
+
+ def get_description(self):
+ return self.api.Networking.Interfaces.get_description(self.interfaces)
+
+ def get_dual_media_state(self):
+ return self.api.Networking.Interfaces.get_dual_media_state(self.interfaces)
+
+ def get_enabled_state(self):
+ return self.api.Networking.Interfaces.get_enabled_state(self.interfaces)
+
+ def get_if_index(self):
+ return self.api.Networking.Interfaces.get_if_index(self.interfaces)
+
+ def get_learning_mode(self):
+ return self.api.Networking.Interfaces.get_learning_mode(self.interfaces)
+
+ def get_lldp_admin_status(self):
+ return self.api.Networking.Interfaces.get_lldp_admin_status(self.interfaces)
+
+ def get_lldp_tlvmap(self):
+ return self.api.Networking.Interfaces.get_lldp_tlvmap(self.interfaces)
+
+ def get_mac_address(self):
+ return self.api.Networking.Interfaces.get_mac_address(self.interfaces)
+
+ def get_media(self):
+ return self.api.Networking.Interfaces.get_media(self.interfaces)
+
+ def get_media_option(self):
+ return self.api.Networking.Interfaces.get_media_option(self.interfaces)
+
+ def get_media_option_sfp(self):
+ return self.api.Networking.Interfaces.get_media_option_sfp(self.interfaces)
+
+ def get_media_sfp(self):
+ return self.api.Networking.Interfaces.get_media_sfp(self.interfaces)
+
+ def get_media_speed(self):
+ return self.api.Networking.Interfaces.get_media_speed(self.interfaces)
+
+ def get_media_status(self):
+ return self.api.Networking.Interfaces.get_media_status(self.interfaces)
+
+ def get_mtu(self):
+ return self.api.Networking.Interfaces.get_mtu(self.interfaces)
+
+ def get_phy_master_slave_mode(self):
+ return self.api.Networking.Interfaces.get_phy_master_slave_mode(self.interfaces)
+
+ def get_prefer_sfp_state(self):
+ return self.api.Networking.Interfaces.get_prefer_sfp_state(self.interfaces)
+
+ def get_flow_control(self):
+ return self.api.Networking.Interfaces.get_requested_flow_control(self.interfaces)
+
+ def get_sflow_poll_interval(self):
+ return self.api.Networking.Interfaces.get_sflow_poll_interval(self.interfaces)
+
+ def get_sflow_poll_interval_global(self):
+ return self.api.Networking.Interfaces.get_sflow_poll_interval_global(self.interfaces)
+
+ def get_sfp_media_state(self):
+ return self.api.Networking.Interfaces.get_sfp_media_state(self.interfaces)
+
+ def get_stp_active_edge_port_state(self):
+ return self.api.Networking.Interfaces.get_stp_active_edge_port_state(self.interfaces)
+
+ def get_stp_enabled_state(self):
+ return self.api.Networking.Interfaces.get_stp_enabled_state(self.interfaces)
+
+ def get_stp_link_type(self):
+ return self.api.Networking.Interfaces.get_stp_link_type(self.interfaces)
+
+ def get_stp_protocol_detection_reset_state(self):
+ return self.api.Networking.Interfaces.get_stp_protocol_detection_reset_state(self.interfaces)
+
+
+class SelfIPs(object):
+ """Self IPs class.
+
+ F5 BIG-IP Self IPs class.
+
+ Attributes:
+ api: iControl API instance.
+ self_ips: List of self IPs.
+ """
+
+ def __init__(self, api, regex=None):
+ self.api = api
+ self.self_ips = api.Networking.SelfIPV2.get_list()
+ if regex:
+ re_filter = re.compile(regex)
+ self.self_ips = filter(re_filter.search, self.self_ips)
+
+ def get_list(self):
+ return self.self_ips
+
+ def get_address(self):
+ return self.api.Networking.SelfIPV2.get_address(self.self_ips)
+
+ def get_allow_access_list(self):
+ return self.api.Networking.SelfIPV2.get_allow_access_list(self.self_ips)
+
+ def get_description(self):
+ return self.api.Networking.SelfIPV2.get_description(self.self_ips)
+
+ def get_enforced_firewall_policy(self):
+ return self.api.Networking.SelfIPV2.get_enforced_firewall_policy(self.self_ips)
+
+ def get_floating_state(self):
+ return self.api.Networking.SelfIPV2.get_floating_state(self.self_ips)
+
+ def get_fw_rule(self):
+ return self.api.Networking.SelfIPV2.get_fw_rule(self.self_ips)
+
+ def get_netmask(self):
+ return self.api.Networking.SelfIPV2.get_netmask(self.self_ips)
+
+ def get_staged_firewall_policy(self):
+ return self.api.Networking.SelfIPV2.get_staged_firewall_policy(self.self_ips)
+
+ def get_traffic_group(self):
+ return self.api.Networking.SelfIPV2.get_traffic_group(self.self_ips)
+
+ def get_vlan(self):
+ return self.api.Networking.SelfIPV2.get_vlan(self.self_ips)
+
+ def get_is_traffic_group_inherited(self):
+ return self.api.Networking.SelfIPV2.is_traffic_group_inherited(self.self_ips)
+
+
+class Trunks(object):
+ """Trunks class.
+
+ F5 BIG-IP trunks class.
+
+ Attributes:
+ api: iControl API instance.
+ trunks: List of trunks.
+ """
+
+ def __init__(self, api, regex=None):
+ self.api = api
+ self.trunks = api.Networking.Trunk.get_list()
+ if regex:
+ re_filter = re.compile(regex)
+ self.trunks = filter(re_filter.search, self.trunks)
+
+ def get_list(self):
+ return self.trunks
+
+ def get_active_lacp_state(self):
+ return self.api.Networking.Trunk.get_active_lacp_state(self.trunks)
+
+ def get_configured_member_count(self):
+ return self.api.Networking.Trunk.get_configured_member_count(self.trunks)
+
+ def get_description(self):
+ return self.api.Networking.Trunk.get_description(self.trunks)
+
+ def get_distribution_hash_option(self):
+ return self.api.Networking.Trunk.get_distribution_hash_option(self.trunks)
+
+ def get_interface(self):
+ return self.api.Networking.Trunk.get_interface(self.trunks)
+
+ def get_lacp_enabled_state(self):
+ return self.api.Networking.Trunk.get_lacp_enabled_state(self.trunks)
+
+ def get_lacp_timeout_option(self):
+ return self.api.Networking.Trunk.get_lacp_timeout_option(self.trunks)
+
+ def get_link_selection_policy(self):
+ return self.api.Networking.Trunk.get_link_selection_policy(self.trunks)
+
+ def get_media_speed(self):
+ return self.api.Networking.Trunk.get_media_speed(self.trunks)
+
+ def get_media_status(self):
+ return self.api.Networking.Trunk.get_media_status(self.trunks)
+
+ def get_operational_member_count(self):
+ return self.api.Networking.Trunk.get_operational_member_count(self.trunks)
+
+ def get_stp_enabled_state(self):
+ return self.api.Networking.Trunk.get_stp_enabled_state(self.trunks)
+
+ def get_stp_protocol_detection_reset_state(self):
+ return self.api.Networking.Trunk.get_stp_protocol_detection_reset_state(self.trunks)
+
+
+class Vlans(object):
+ """Vlans class.
+
+ F5 BIG-IP Vlans class.
+
+ Attributes:
+ api: iControl API instance.
+ vlans: List of VLANs.
+ """
+
+ def __init__(self, api, regex=None):
+ self.api = api
+ self.vlans = api.Networking.VLAN.get_list()
+ if regex:
+ re_filter = re.compile(regex)
+ self.vlans = filter(re_filter.search, self.vlans)
+
+ def get_list(self):
+ return self.vlans
+
+ def get_auto_lasthop(self):
+ return self.api.Networking.VLAN.get_auto_lasthop(self.vlans)
+
+ def get_cmp_hash_algorithm(self):
+ return self.api.Networking.VLAN.get_cmp_hash_algorithm(self.vlans)
+
+ def get_description(self):
+ return self.api.Networking.VLAN.get_description(self.vlans)
+
+ def get_dynamic_forwarding(self):
+ return self.api.Networking.VLAN.get_dynamic_forwarding(self.vlans)
+
+ def get_failsafe_action(self):
+ return self.api.Networking.VLAN.get_failsafe_action(self.vlans)
+
+ def get_failsafe_state(self):
+ return self.api.Networking.VLAN.get_failsafe_state(self.vlans)
+
+ def get_failsafe_timeout(self):
+ return self.api.Networking.VLAN.get_failsafe_timeout(self.vlans)
+
+ def get_if_index(self):
+ return self.api.Networking.VLAN.get_if_index(self.vlans)
+
+ def get_learning_mode(self):
+ return self.api.Networking.VLAN.get_learning_mode(self.vlans)
+
+ def get_mac_masquerade_address(self):
+ return self.api.Networking.VLAN.get_mac_masquerade_address(self.vlans)
+
+ def get_member(self):
+ return self.api.Networking.VLAN.get_member(self.vlans)
+
+ def get_mtu(self):
+ return self.api.Networking.VLAN.get_mtu(self.vlans)
+
+ def get_sflow_poll_interval(self):
+ return self.api.Networking.VLAN.get_sflow_poll_interval(self.vlans)
+
+ def get_sflow_poll_interval_global(self):
+ return self.api.Networking.VLAN.get_sflow_poll_interval_global(self.vlans)
+
+ def get_sflow_sampling_rate(self):
+ return self.api.Networking.VLAN.get_sflow_sampling_rate(self.vlans)
+
+ def get_sflow_sampling_rate_global(self):
+ return self.api.Networking.VLAN.get_sflow_sampling_rate_global(self.vlans)
+
+ def get_source_check_state(self):
+ return self.api.Networking.VLAN.get_source_check_state(self.vlans)
+
+ def get_true_mac_address(self):
+ return self.api.Networking.VLAN.get_true_mac_address(self.vlans)
+
+ def get_vlan_id(self):
+ return self.api.Networking.VLAN.get_vlan_id(self.vlans)
+
+
+class Software(object):
+ """Software class.
+
+ F5 BIG-IP software class.
+
+ Attributes:
+ api: iControl API instance.
+ """
+
+ def __init__(self, api):
+ self.api = api
+
+ def get_all_software_status(self):
+ return self.api.System.SoftwareManagement.get_all_software_status()
+
+
+class VirtualServers(object):
+ """Virtual servers class.
+
+ F5 BIG-IP virtual servers class.
+
+ Attributes:
+ api: iControl API instance.
+ virtual_servers: List of virtual servers.
+ """
+
+ def __init__(self, api, regex=None):
+ self.api = api
+ self.virtual_servers = api.LocalLB.VirtualServer.get_list()
+ if regex:
+ re_filter = re.compile(regex)
+ self.virtual_servers = filter(re_filter.search, self.virtual_servers)
+
+ def get_list(self):
+ return self.virtual_servers
+
+ def get_actual_hardware_acceleration(self):
+ return self.api.LocalLB.VirtualServer.get_actual_hardware_acceleration(self.virtual_servers)
+
+ def get_authentication_profile(self):
+ return self.api.LocalLB.VirtualServer.get_authentication_profile(self.virtual_servers)
+
+ def get_auto_lasthop(self):
+ return self.api.LocalLB.VirtualServer.get_auto_lasthop(self.virtual_servers)
+
+ def get_bw_controller_policy(self):
+ return self.api.LocalLB.VirtualServer.get_bw_controller_policy(self.virtual_servers)
+
+ def get_clone_pool(self):
+ return self.api.LocalLB.VirtualServer.get_clone_pool(self.virtual_servers)
+
+ def get_cmp_enable_mode(self):
+ return self.api.LocalLB.VirtualServer.get_cmp_enable_mode(self.virtual_servers)
+
+ def get_connection_limit(self):
+ return self.api.LocalLB.VirtualServer.get_connection_limit(self.virtual_servers)
+
+ def get_connection_mirror_state(self):
+ return self.api.LocalLB.VirtualServer.get_connection_mirror_state(self.virtual_servers)
+
+ def get_default_pool_name(self):
+ return self.api.LocalLB.VirtualServer.get_default_pool_name(self.virtual_servers)
+
+ def get_description(self):
+ return self.api.LocalLB.VirtualServer.get_description(self.virtual_servers)
+
+ def get_destination(self):
+ return self.api.LocalLB.VirtualServer.get_destination_v2(self.virtual_servers)
+
+ def get_enabled_state(self):
+ return self.api.LocalLB.VirtualServer.get_enabled_state(self.virtual_servers)
+
+ def get_enforced_firewall_policy(self):
+ return self.api.LocalLB.VirtualServer.get_enforced_firewall_policy(self.virtual_servers)
+
+ def get_fallback_persistence_profile(self):
+ return self.api.LocalLB.VirtualServer.get_fallback_persistence_profile(self.virtual_servers)
+
+ def get_fw_rule(self):
+ return self.api.LocalLB.VirtualServer.get_fw_rule(self.virtual_servers)
+
+ def get_gtm_score(self):
+ return self.api.LocalLB.VirtualServer.get_gtm_score(self.virtual_servers)
+
+ def get_last_hop_pool(self):
+ return self.api.LocalLB.VirtualServer.get_last_hop_pool(self.virtual_servers)
+
+ def get_nat64_state(self):
+ return self.api.LocalLB.VirtualServer.get_nat64_state(self.virtual_servers)
+
+ def get_object_status(self):
+ return self.api.LocalLB.VirtualServer.get_object_status(self.virtual_servers)
+
+ def get_persistence_profile(self):
+ return self.api.LocalLB.VirtualServer.get_persistence_profile(self.virtual_servers)
+
+ def get_profile(self):
+ return self.api.LocalLB.VirtualServer.get_profile(self.virtual_servers)
+
+ def get_protocol(self):
+ return self.api.LocalLB.VirtualServer.get_protocol(self.virtual_servers)
+
+ def get_rate_class(self):
+ return self.api.LocalLB.VirtualServer.get_rate_class(self.virtual_servers)
+
+ def get_rate_limit(self):
+ return self.api.LocalLB.VirtualServer.get_rate_limit(self.virtual_servers)
+
+ def get_rate_limit_destination_mask(self):
+ return self.api.LocalLB.VirtualServer.get_rate_limit_destination_mask(self.virtual_servers)
+
+ def get_rate_limit_mode(self):
+ return self.api.LocalLB.VirtualServer.get_rate_limit_mode(self.virtual_servers)
+
+ def get_rate_limit_source_mask(self):
+ return self.api.LocalLB.VirtualServer.get_rate_limit_source_mask(self.virtual_servers)
+
+ def get_related_rule(self):
+ return self.api.LocalLB.VirtualServer.get_related_rule(self.virtual_servers)
+
+ def get_rule(self):
+ return self.api.LocalLB.VirtualServer.get_rule(self.virtual_servers)
+
+ def get_security_log_profile(self):
+ return self.api.LocalLB.VirtualServer.get_security_log_profile(self.virtual_servers)
+
+ def get_snat_pool(self):
+ return self.api.LocalLB.VirtualServer.get_snat_pool(self.virtual_servers)
+
+ def get_snat_type(self):
+ return self.api.LocalLB.VirtualServer.get_snat_type(self.virtual_servers)
+
+ def get_source_address(self):
+ return self.api.LocalLB.VirtualServer.get_source_address(self.virtual_servers)
+
+ def get_source_address_translation_lsn_pool(self):
+ return self.api.LocalLB.VirtualServer.get_source_address_translation_lsn_pool(self.virtual_servers)
+
+ def get_source_address_translation_snat_pool(self):
+ return self.api.LocalLB.VirtualServer.get_source_address_translation_snat_pool(self.virtual_servers)
+
+ def get_source_address_translation_type(self):
+ return self.api.LocalLB.VirtualServer.get_source_address_translation_type(self.virtual_servers)
+
+ def get_source_port_behavior(self):
+ return self.api.LocalLB.VirtualServer.get_source_port_behavior(self.virtual_servers)
+
+ def get_staged_firewall_policy(self):
+ return self.api.LocalLB.VirtualServer.get_staged_firewall_policy(self.virtual_servers)
+
+ def get_translate_address_state(self):
+ return self.api.LocalLB.VirtualServer.get_translate_address_state(self.virtual_servers)
+
+ def get_translate_port_state(self):
+ return self.api.LocalLB.VirtualServer.get_translate_port_state(self.virtual_servers)
+
+ def get_type(self):
+ return self.api.LocalLB.VirtualServer.get_type(self.virtual_servers)
+
+ def get_vlan(self):
+ return self.api.LocalLB.VirtualServer.get_vlan(self.virtual_servers)
+
+ def get_wildmask(self):
+ return self.api.LocalLB.VirtualServer.get_wildmask(self.virtual_servers)
+
+
+class Pools(object):
+ """Pools class.
+
+ F5 BIG-IP pools class.
+
+ Attributes:
+ api: iControl API instance.
+ pool_names: List of pool names.
+ """
+
+ def __init__(self, api, regex=None):
+ self.api = api
+ self.pool_names = api.LocalLB.Pool.get_list()
+ if regex:
+ re_filter = re.compile(regex)
+ self.pool_names = filter(re_filter.search, self.pool_names)
+
+ def get_list(self):
+ return self.pool_names
+
+ def get_action_on_service_down(self):
+ return self.api.LocalLB.Pool.get_action_on_service_down(self.pool_names)
+
+ def get_active_member_count(self):
+ return self.api.LocalLB.Pool.get_active_member_count(self.pool_names)
+
+ def get_aggregate_dynamic_ratio(self):
+ return self.api.LocalLB.Pool.get_aggregate_dynamic_ratio(self.pool_names)
+
+ def get_allow_nat_state(self):
+ return self.api.LocalLB.Pool.get_allow_nat_state(self.pool_names)
+
+ def get_allow_snat_state(self):
+ return self.api.LocalLB.Pool.get_allow_snat_state(self.pool_names)
+
+ def get_client_ip_tos(self):
+ return self.api.LocalLB.Pool.get_client_ip_tos(self.pool_names)
+
+ def get_client_link_qos(self):
+ return self.api.LocalLB.Pool.get_client_link_qos(self.pool_names)
+
+ def get_description(self):
+ return self.api.LocalLB.Pool.get_description(self.pool_names)
+
+ def get_gateway_failsafe_device(self):
+ return self.api.LocalLB.Pool.get_gateway_failsafe_device(self.pool_names)
+
+ def get_ignore_persisted_weight_state(self):
+ return self.api.LocalLB.Pool.get_ignore_persisted_weight_state(self.pool_names)
+
+ def get_lb_method(self):
+ return self.api.LocalLB.Pool.get_lb_method(self.pool_names)
+
+ def get_member(self):
+ return self.api.LocalLB.Pool.get_member_v2(self.pool_names)
+
+ def get_minimum_active_member(self):
+ return self.api.LocalLB.Pool.get_minimum_active_member(self.pool_names)
+
+ def get_minimum_up_member(self):
+ return self.api.LocalLB.Pool.get_minimum_up_member(self.pool_names)
+
+ def get_minimum_up_member_action(self):
+ return self.api.LocalLB.Pool.get_minimum_up_member_action(self.pool_names)
+
+ def get_minimum_up_member_enabled_state(self):
+ return self.api.LocalLB.Pool.get_minimum_up_member_enabled_state(self.pool_names)
+
+ def get_monitor_association(self):
+ return self.api.LocalLB.Pool.get_monitor_association(self.pool_names)
+
+ def get_monitor_instance(self):
+ return self.api.LocalLB.Pool.get_monitor_instance(self.pool_names)
+
+ def get_object_status(self):
+ return self.api.LocalLB.Pool.get_object_status(self.pool_names)
+
+ def get_profile(self):
+ return self.api.LocalLB.Pool.get_profile(self.pool_names)
+
+ def get_queue_depth_limit(self):
+ return self.api.LocalLB.Pool.get_queue_depth_limit(self.pool_names)
+
+ def get_queue_on_connection_limit_state(self):
+ return self.api.LocalLB.Pool.get_queue_on_connection_limit_state(self.pool_names)
+
+ def get_queue_time_limit(self):
+ return self.api.LocalLB.Pool.get_queue_time_limit(self.pool_names)
+
+ def get_reselect_tries(self):
+ return self.api.LocalLB.Pool.get_reselect_tries(self.pool_names)
+
+ def get_server_ip_tos(self):
+ return self.api.LocalLB.Pool.get_server_ip_tos(self.pool_names)
+
+ def get_server_link_qos(self):
+ return self.api.LocalLB.Pool.get_server_link_qos(self.pool_names)
+
+ def get_simple_timeout(self):
+ return self.api.LocalLB.Pool.get_simple_timeout(self.pool_names)
+
+ def get_slow_ramp_time(self):
+ return self.api.LocalLB.Pool.get_slow_ramp_time(self.pool_names)
+
+
+class Devices(object):
+ """Devices class.
+
+ F5 BIG-IP devices class.
+
+ Attributes:
+ api: iControl API instance.
+ devices: List of devices.
+ """
+
+ def __init__(self, api, regex=None):
+ self.api = api
+ self.devices = api.Management.Device.get_list()
+ if regex:
+ re_filter = re.compile(regex)
+ self.devices = filter(re_filter.search, self.devices)
+
+ def get_list(self):
+ return self.devices
+
+ def get_active_modules(self):
+ return self.api.Management.Device.get_active_modules(self.devices)
+
+ def get_base_mac_address(self):
+ return self.api.Management.Device.get_base_mac_address(self.devices)
+
+ def get_blade_addresses(self):
+ return self.api.Management.Device.get_blade_addresses(self.devices)
+
+ def get_build(self):
+ return self.api.Management.Device.get_build(self.devices)
+
+ def get_chassis_id(self):
+ return self.api.Management.Device.get_chassis_id(self.devices)
+
+ def get_chassis_type(self):
+ return self.api.Management.Device.get_chassis_type(self.devices)
+
+ def get_comment(self):
+ return self.api.Management.Device.get_comment(self.devices)
+
+ def get_configsync_address(self):
+ return self.api.Management.Device.get_configsync_address(self.devices)
+
+ def get_contact(self):
+ return self.api.Management.Device.get_contact(self.devices)
+
+ def get_description(self):
+ return self.api.Management.Device.get_description(self.devices)
+
+ def get_edition(self):
+ return self.api.Management.Device.get_edition(self.devices)
+
+ def get_failover_state(self):
+ return self.api.Management.Device.get_failover_state(self.devices)
+
+ def get_local_device(self):
+ return self.api.Management.Device.get_local_device()
+
+ def get_hostname(self):
+ return self.api.Management.Device.get_hostname(self.devices)
+
+ def get_inactive_modules(self):
+ return self.api.Management.Device.get_inactive_modules(self.devices)
+
+ def get_location(self):
+ return self.api.Management.Device.get_location(self.devices)
+
+ def get_management_address(self):
+ return self.api.Management.Device.get_management_address(self.devices)
+
+ def get_marketing_name(self):
+ return self.api.Management.Device.get_marketing_name(self.devices)
+
+ def get_multicast_address(self):
+ return self.api.Management.Device.get_multicast_address(self.devices)
+
+ def get_optional_modules(self):
+ return self.api.Management.Device.get_optional_modules(self.devices)
+
+ def get_platform_id(self):
+ return self.api.Management.Device.get_platform_id(self.devices)
+
+ def get_primary_mirror_address(self):
+ return self.api.Management.Device.get_primary_mirror_address(self.devices)
+
+ def get_product(self):
+ return self.api.Management.Device.get_product(self.devices)
+
+ def get_secondary_mirror_address(self):
+ return self.api.Management.Device.get_secondary_mirror_address(self.devices)
+
+ def get_software_version(self):
+ return self.api.Management.Device.get_software_version(self.devices)
+
+ def get_timelimited_modules(self):
+ return self.api.Management.Device.get_timelimited_modules(self.devices)
+
+ def get_timezone(self):
+ return self.api.Management.Device.get_timezone(self.devices)
+
+ def get_unicast_addresses(self):
+ return self.api.Management.Device.get_unicast_addresses(self.devices)
+
+
+class DeviceGroups(object):
+ """Device groups class.
+
+ F5 BIG-IP device groups class.
+
+ Attributes:
+ api: iControl API instance.
+ device_groups: List of device groups.
+ """
+
+ def __init__(self, api, regex=None):
+ self.api = api
+ self.device_groups = api.Management.DeviceGroup.get_list()
+ if regex:
+ re_filter = re.compile(regex)
+ self.device_groups = filter(re_filter.search, self.device_groups)
+
+ def get_list(self):
+ return self.device_groups
+
+ def get_all_preferred_active(self):
+ return self.api.Management.DeviceGroup.get_all_preferred_active(self.device_groups)
+
+ def get_autosync_enabled_state(self):
+ return self.api.Management.DeviceGroup.get_autosync_enabled_state(self.device_groups)
+
+ def get_description(self):
+ return self.api.Management.DeviceGroup.get_description(self.device_groups)
+
+ def get_device(self):
+ return self.api.Management.DeviceGroup.get_device(self.device_groups)
+
+ def get_full_load_on_sync_state(self):
+ return self.api.Management.DeviceGroup.get_full_load_on_sync_state(self.device_groups)
+
+ def get_incremental_config_sync_size_maximum(self):
+ return self.api.Management.DeviceGroup.get_incremental_config_sync_size_maximum(self.device_groups)
+
+ def get_network_failover_enabled_state(self):
+ return self.api.Management.DeviceGroup.get_network_failover_enabled_state(self.device_groups)
+
+ def get_sync_status(self):
+ return self.api.Management.DeviceGroup.get_sync_status(self.device_groups)
+
+ def get_type(self):
+ return self.api.Management.DeviceGroup.get_type(self.device_groups)
+
+
+class TrafficGroups(object):
+ """Traffic groups class.
+
+ F5 BIG-IP traffic groups class.
+
+ Attributes:
+ api: iControl API instance.
+ traffic_groups: List of traffic groups.
+ """
+
+ def __init__(self, api, regex=None):
+ self.api = api
+ self.traffic_groups = api.Management.TrafficGroup.get_list()
+ if regex:
+ re_filter = re.compile(regex)
+ self.traffic_groups = filter(re_filter.search, self.traffic_groups)
+
+ def get_list(self):
+ return self.traffic_groups
+
+ def get_auto_failback_enabled_state(self):
+ return self.api.Management.TrafficGroup.get_auto_failback_enabled_state(self.traffic_groups)
+
+ def get_auto_failback_time(self):
+ return self.api.Management.TrafficGroup.get_auto_failback_time(self.traffic_groups)
+
+ def get_default_device(self):
+ return self.api.Management.TrafficGroup.get_default_device(self.traffic_groups)
+
+ def get_description(self):
+ return self.api.Management.TrafficGroup.get_description(self.traffic_groups)
+
+ def get_ha_load_factor(self):
+ return self.api.Management.TrafficGroup.get_ha_load_factor(self.traffic_groups)
+
+ def get_ha_order(self):
+ return self.api.Management.TrafficGroup.get_ha_order(self.traffic_groups)
+
+ def get_is_floating(self):
+ return self.api.Management.TrafficGroup.get_is_floating(self.traffic_groups)
+
+ def get_mac_masquerade_address(self):
+ return self.api.Management.TrafficGroup.get_mac_masquerade_address(self.traffic_groups)
+
+ def get_unit_id(self):
+ return self.api.Management.TrafficGroup.get_unit_id(self.traffic_groups)
+
+
+class Rules(object):
+ """Rules class.
+
+ F5 BIG-IP iRules class.
+
+ Attributes:
+ api: iControl API instance.
+ rules: List of iRules.
+ """
+
+ def __init__(self, api, regex=None):
+ self.api = api
+ self.rules = api.LocalLB.Rule.get_list()
+ if regex:
+ re_filter = re.compile(regex)
+            self.rules = list(filter(re_filter.search, self.rules))
+
+ def get_list(self):
+ return self.rules
+
+ def get_description(self):
+ return self.api.LocalLB.Rule.get_description(rule_names=self.rules)
+
+ def get_ignore_vertification(self):
+ return self.api.LocalLB.Rule.get_ignore_vertification(rule_names=self.rules)
+
+ def get_verification_status(self):
+ return self.api.LocalLB.Rule.get_verification_status_v2(rule_names=self.rules)
+
+ def get_definition(self):
+ return [x['rule_definition'] for x in self.api.LocalLB.Rule.query_rule(rule_names=self.rules)]
+
+
+class Nodes(object):
+ """Nodes class.
+
+ F5 BIG-IP nodes class.
+
+ Attributes:
+ api: iControl API instance.
+ nodes: List of nodes.
+ """
+
+ def __init__(self, api, regex=None):
+ self.api = api
+ self.nodes = api.LocalLB.NodeAddressV2.get_list()
+ if regex:
+ re_filter = re.compile(regex)
+            self.nodes = list(filter(re_filter.search, self.nodes))
+
+ def get_list(self):
+ return self.nodes
+
+ def get_address(self):
+ return self.api.LocalLB.NodeAddressV2.get_address(nodes=self.nodes)
+
+ def get_connection_limit(self):
+ return self.api.LocalLB.NodeAddressV2.get_connection_limit(nodes=self.nodes)
+
+ def get_description(self):
+ return self.api.LocalLB.NodeAddressV2.get_description(nodes=self.nodes)
+
+ def get_dynamic_ratio(self):
+ return self.api.LocalLB.NodeAddressV2.get_dynamic_ratio_v2(nodes=self.nodes)
+
+ def get_monitor_instance(self):
+ return self.api.LocalLB.NodeAddressV2.get_monitor_instance(nodes=self.nodes)
+
+ def get_monitor_rule(self):
+ return self.api.LocalLB.NodeAddressV2.get_monitor_rule(nodes=self.nodes)
+
+ def get_monitor_status(self):
+ return self.api.LocalLB.NodeAddressV2.get_monitor_status(nodes=self.nodes)
+
+ def get_object_status(self):
+ return self.api.LocalLB.NodeAddressV2.get_object_status(nodes=self.nodes)
+
+ def get_rate_limit(self):
+ return self.api.LocalLB.NodeAddressV2.get_rate_limit(nodes=self.nodes)
+
+ def get_ratio(self):
+ return self.api.LocalLB.NodeAddressV2.get_ratio(nodes=self.nodes)
+
+ def get_session_status(self):
+ return self.api.LocalLB.NodeAddressV2.get_session_status(nodes=self.nodes)
+
+
+class VirtualAddresses(object):
+ """Virtual addresses class.
+
+ F5 BIG-IP virtual addresses class.
+
+ Attributes:
+ api: iControl API instance.
+ virtual_addresses: List of virtual addresses.
+ """
+
+ def __init__(self, api, regex=None):
+ self.api = api
+ self.virtual_addresses = api.LocalLB.VirtualAddressV2.get_list()
+ if regex:
+ re_filter = re.compile(regex)
+            self.virtual_addresses = list(filter(re_filter.search, self.virtual_addresses))
+
+ def get_list(self):
+ return self.virtual_addresses
+
+ def get_address(self):
+ return self.api.LocalLB.VirtualAddressV2.get_address(self.virtual_addresses)
+
+ def get_arp_state(self):
+ return self.api.LocalLB.VirtualAddressV2.get_arp_state(self.virtual_addresses)
+
+ def get_auto_delete_state(self):
+ return self.api.LocalLB.VirtualAddressV2.get_auto_delete_state(self.virtual_addresses)
+
+ def get_connection_limit(self):
+ return self.api.LocalLB.VirtualAddressV2.get_connection_limit(self.virtual_addresses)
+
+ def get_description(self):
+ return self.api.LocalLB.VirtualAddressV2.get_description(self.virtual_addresses)
+
+ def get_enabled_state(self):
+ return self.api.LocalLB.VirtualAddressV2.get_enabled_state(self.virtual_addresses)
+
+ def get_icmp_echo_state(self):
+ return self.api.LocalLB.VirtualAddressV2.get_icmp_echo_state(self.virtual_addresses)
+
+ def get_is_floating_state(self):
+ return self.api.LocalLB.VirtualAddressV2.get_is_floating_state(self.virtual_addresses)
+
+ def get_netmask(self):
+ return self.api.LocalLB.VirtualAddressV2.get_netmask(self.virtual_addresses)
+
+ def get_object_status(self):
+ return self.api.LocalLB.VirtualAddressV2.get_object_status(self.virtual_addresses)
+
+ def get_route_advertisement_state(self):
+ return self.api.LocalLB.VirtualAddressV2.get_route_advertisement_state(self.virtual_addresses)
+
+ def get_traffic_group(self):
+ return self.api.LocalLB.VirtualAddressV2.get_traffic_group(self.virtual_addresses)
+
+
+class AddressClasses(object):
+ """Address group/class class.
+
+ F5 BIG-IP address group/class class.
+
+ Attributes:
+ api: iControl API instance.
+ address_classes: List of address classes.
+ """
+
+ def __init__(self, api, regex=None):
+ self.api = api
+ self.address_classes = api.LocalLB.Class.get_address_class_list()
+ if regex:
+ re_filter = re.compile(regex)
+            self.address_classes = list(filter(re_filter.search, self.address_classes))
+
+ def get_list(self):
+ return self.address_classes
+
+ def get_address_class(self):
+ key = self.api.LocalLB.Class.get_address_class(self.address_classes)
+ value = self.api.LocalLB.Class.get_address_class_member_data_value(key)
+        # The two calls return parallel structures: pair each class's member
+        # list with its corresponding data values.
+        result = [list(zip(k['members'], v)) for k, v in zip(key, value)]
+        return result
+
+ def get_description(self):
+ return self.api.LocalLB.Class.get_description(self.address_classes)
+
+
+class Certificates(object):
+ """Certificates class.
+
+ F5 BIG-IP certificates class.
+
+ Attributes:
+ api: iControl API instance.
+ certificates: List of certificate identifiers.
+ certificate_list: List of certificate information structures.
+ """
+
+ def __init__(self, api, regex=None, mode="MANAGEMENT_MODE_DEFAULT"):
+ self.api = api
+ self.certificate_list = api.Management.KeyCertificate.get_certificate_list(mode=mode)
+ self.certificates = [x['certificate']['cert_info']['id'] for x in self.certificate_list]
+ if regex:
+ re_filter = re.compile(regex)
+            self.certificates = list(filter(re_filter.search, self.certificates))
+ self.certificate_list = [x for x in self.certificate_list if x['certificate']['cert_info']['id'] in self.certificates]
+
+ def get_list(self):
+ return self.certificates
+
+ def get_certificate_list(self):
+ return self.certificate_list
+
+
+class Keys(object):
+ """Keys class.
+
+ F5 BIG-IP keys class.
+
+ Attributes:
+ api: iControl API instance.
+ keys: List of key identifiers.
+ key_list: List of key information structures.
+ """
+
+ def __init__(self, api, regex=None, mode="MANAGEMENT_MODE_DEFAULT"):
+ self.api = api
+ self.key_list = api.Management.KeyCertificate.get_key_list(mode=mode)
+ self.keys = [x['key_info']['id'] for x in self.key_list]
+ if regex:
+ re_filter = re.compile(regex)
+            self.keys = list(filter(re_filter.search, self.keys))
+ self.key_list = [x for x in self.key_list if x['key_info']['id'] in self.keys]
+
+ def get_list(self):
+ return self.keys
+
+ def get_key_list(self):
+ return self.key_list
+
+
+class ProfileClientSSL(object):
+ """Client SSL profiles class.
+
+ F5 BIG-IP client SSL profiles class.
+
+ Attributes:
+ api: iControl API instance.
+ profiles: List of client SSL profiles.
+ """
+
+ def __init__(self, api, regex=None):
+ self.api = api
+ self.profiles = api.LocalLB.ProfileClientSSL.get_list()
+ if regex:
+ re_filter = re.compile(regex)
+            self.profiles = list(filter(re_filter.search, self.profiles))
+
+ def get_list(self):
+ return self.profiles
+
+ def get_alert_timeout(self):
+ return self.api.LocalLB.ProfileClientSSL.get_alert_timeout(self.profiles)
+
+ def get_allow_nonssl_state(self):
+ return self.api.LocalLB.ProfileClientSSL.get_allow_nonssl_state(self.profiles)
+
+ def get_authenticate_depth(self):
+ return self.api.LocalLB.ProfileClientSSL.get_authenticate_depth(self.profiles)
+
+ def get_authenticate_once_state(self):
+ return self.api.LocalLB.ProfileClientSSL.get_authenticate_once_state(self.profiles)
+
+ def get_ca_file(self):
+ return self.api.LocalLB.ProfileClientSSL.get_ca_file_v2(self.profiles)
+
+ def get_cache_size(self):
+ return self.api.LocalLB.ProfileClientSSL.get_cache_size(self.profiles)
+
+ def get_cache_timeout(self):
+ return self.api.LocalLB.ProfileClientSSL.get_cache_timeout(self.profiles)
+
+ def get_certificate_file(self):
+ return self.api.LocalLB.ProfileClientSSL.get_certificate_file_v2(self.profiles)
+
+ def get_chain_file(self):
+ return self.api.LocalLB.ProfileClientSSL.get_chain_file_v2(self.profiles)
+
+ def get_cipher_list(self):
+ return self.api.LocalLB.ProfileClientSSL.get_cipher_list(self.profiles)
+
+ def get_client_certificate_ca_file(self):
+ return self.api.LocalLB.ProfileClientSSL.get_client_certificate_ca_file_v2(self.profiles)
+
+ def get_crl_file(self):
+ return self.api.LocalLB.ProfileClientSSL.get_crl_file_v2(self.profiles)
+
+ def get_default_profile(self):
+ return self.api.LocalLB.ProfileClientSSL.get_default_profile(self.profiles)
+
+ def get_description(self):
+ return self.api.LocalLB.ProfileClientSSL.get_description(self.profiles)
+
+ def get_forward_proxy_ca_certificate_file(self):
+ return self.api.LocalLB.ProfileClientSSL.get_forward_proxy_ca_certificate_file(self.profiles)
+
+ def get_forward_proxy_ca_key_file(self):
+ return self.api.LocalLB.ProfileClientSSL.get_forward_proxy_ca_key_file(self.profiles)
+
+ def get_forward_proxy_ca_passphrase(self):
+ return self.api.LocalLB.ProfileClientSSL.get_forward_proxy_ca_passphrase(self.profiles)
+
+ def get_forward_proxy_certificate_extension_include(self):
+ return self.api.LocalLB.ProfileClientSSL.get_forward_proxy_certificate_extension_include(self.profiles)
+
+ def get_forward_proxy_certificate_lifespan(self):
+ return self.api.LocalLB.ProfileClientSSL.get_forward_proxy_certificate_lifespan(self.profiles)
+
+ def get_forward_proxy_enabled_state(self):
+ return self.api.LocalLB.ProfileClientSSL.get_forward_proxy_enabled_state(self.profiles)
+
+ def get_forward_proxy_lookup_by_ipaddr_port_state(self):
+ return self.api.LocalLB.ProfileClientSSL.get_forward_proxy_lookup_by_ipaddr_port_state(self.profiles)
+
+ def get_handshake_timeout(self):
+ return self.api.LocalLB.ProfileClientSSL.get_handshake_timeout(self.profiles)
+
+ def get_key_file(self):
+ return self.api.LocalLB.ProfileClientSSL.get_key_file_v2(self.profiles)
+
+ def get_modssl_emulation_state(self):
+ return self.api.LocalLB.ProfileClientSSL.get_modssl_emulation_state(self.profiles)
+
+ def get_passphrase(self):
+ return self.api.LocalLB.ProfileClientSSL.get_passphrase(self.profiles)
+
+ def get_peer_certification_mode(self):
+ return self.api.LocalLB.ProfileClientSSL.get_peer_certification_mode(self.profiles)
+
+ def get_profile_mode(self):
+ return self.api.LocalLB.ProfileClientSSL.get_profile_mode(self.profiles)
+
+ def get_renegotiation_maximum_record_delay(self):
+ return self.api.LocalLB.ProfileClientSSL.get_renegotiation_maximum_record_delay(self.profiles)
+
+ def get_renegotiation_period(self):
+ return self.api.LocalLB.ProfileClientSSL.get_renegotiation_period(self.profiles)
+
+ def get_renegotiation_state(self):
+ return self.api.LocalLB.ProfileClientSSL.get_renegotiation_state(self.profiles)
+
+ def get_renegotiation_throughput(self):
+ return self.api.LocalLB.ProfileClientSSL.get_renegotiation_throughput(self.profiles)
+
+ def get_retain_certificate_state(self):
+ return self.api.LocalLB.ProfileClientSSL.get_retain_certificate_state(self.profiles)
+
+ def get_secure_renegotiation_mode(self):
+ return self.api.LocalLB.ProfileClientSSL.get_secure_renegotiation_mode(self.profiles)
+
+ def get_server_name(self):
+ return self.api.LocalLB.ProfileClientSSL.get_server_name(self.profiles)
+
+ def get_session_ticket_state(self):
+ return self.api.LocalLB.ProfileClientSSL.get_session_ticket_state(self.profiles)
+
+ def get_sni_default_state(self):
+ return self.api.LocalLB.ProfileClientSSL.get_sni_default_state(self.profiles)
+
+ def get_sni_require_state(self):
+ return self.api.LocalLB.ProfileClientSSL.get_sni_require_state(self.profiles)
+
+ def get_ssl_option(self):
+ return self.api.LocalLB.ProfileClientSSL.get_ssl_option(self.profiles)
+
+ def get_strict_resume_state(self):
+ return self.api.LocalLB.ProfileClientSSL.get_strict_resume_state(self.profiles)
+
+ def get_unclean_shutdown_state(self):
+ return self.api.LocalLB.ProfileClientSSL.get_unclean_shutdown_state(self.profiles)
+
+ def get_is_base_profile(self):
+ return self.api.LocalLB.ProfileClientSSL.is_base_profile(self.profiles)
+
+ def get_is_system_profile(self):
+ return self.api.LocalLB.ProfileClientSSL.is_system_profile(self.profiles)
+
+
+class SystemInfo(object):
+ """System information class.
+
+ F5 BIG-IP system information class.
+
+ Attributes:
+ api: iControl API instance.
+ """
+
+ def __init__(self, api):
+ self.api = api
+
+ def get_base_mac_address(self):
+ return self.api.System.SystemInfo.get_base_mac_address()
+
+ def get_blade_temperature(self):
+ return self.api.System.SystemInfo.get_blade_temperature()
+
+ def get_chassis_slot_information(self):
+ return self.api.System.SystemInfo.get_chassis_slot_information()
+
+ def get_globally_unique_identifier(self):
+ return self.api.System.SystemInfo.get_globally_unique_identifier()
+
+ def get_group_id(self):
+ return self.api.System.SystemInfo.get_group_id()
+
+ def get_hardware_information(self):
+ return self.api.System.SystemInfo.get_hardware_information()
+
+ def get_marketing_name(self):
+ return self.api.System.SystemInfo.get_marketing_name()
+
+ def get_product_information(self):
+ return self.api.System.SystemInfo.get_product_information()
+
+ def get_pva_version(self):
+ return self.api.System.SystemInfo.get_pva_version()
+
+ def get_system_id(self):
+ return self.api.System.SystemInfo.get_system_id()
+
+ def get_system_information(self):
+ return self.api.System.SystemInfo.get_system_information()
+
+ def get_time(self):
+ return self.api.System.SystemInfo.get_time()
+
+ def get_time_zone(self):
+ return self.api.System.SystemInfo.get_time_zone()
+
+ def get_uptime(self):
+ return self.api.System.SystemInfo.get_uptime()
+
+
+class ProvisionInfo(object):
+ """Provision information class.
+
+ F5 BIG-IP provision information class.
+
+ Attributes:
+ api: iControl API instance.
+ """
+
+ def __init__(self, api):
+ self.api = api
+
+    def get_list(self):
+        result = []
+        # Normalize module names, e.g. 'TMOS_MODULE_ASM' -> 'asm'.
+        for item in self.api.Management.Provision.get_list():
+            result.append(item.lower().replace('tmos_module_', ''))
+        return result
+
+    def get_provisioned_list(self):
+        result = []
+        for item in self.api.Management.Provision.get_provisioned_list():
+            result.append(item.lower().replace('tmos_module_', ''))
+        return result
+
+
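+# A rough sketch of what generate_dict returns (the object names and values
+# here are hypothetical, for illustration only):
+#
+#   nodes = Nodes(f5.get_api(), regex=None)
+#   generate_dict(nodes, ['address', 'ratio'])
+#   # => {'/Common/node1': {'address': '10.0.0.1', 'ratio': 1},
+#   #     '/Common/node2': {'address': '10.0.0.2', 'ratio': 2}}
+#
+# Each get_<field> getter returns one list ordered like get_list(); fields
+# whose getter raises MethodNotFound or WebFault are skipped, so only fields
+# supported by the target BIG-IP appear in the per-object dicts.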
+def generate_dict(api_obj, fields):
+ result_dict = {}
+ lists = []
+ supported_fields = []
+ if api_obj.get_list():
+ for field in fields:
+ try:
+ api_response = getattr(api_obj, "get_" + field)()
+ except (MethodNotFound, WebFault):
+ pass
+ else:
+ lists.append(api_response)
+ supported_fields.append(field)
+ for i, j in enumerate(api_obj.get_list()):
+ temp = {}
+ temp.update([(item[0], item[1][i]) for item in zip(supported_fields, lists)])
+ result_dict[j] = temp
+ return result_dict
+
+
+def generate_simple_dict(api_obj, fields):
+ result_dict = {}
+ for field in fields:
+ try:
+ api_response = getattr(api_obj, "get_" + field)()
+ except (MethodNotFound, WebFault):
+ pass
+ else:
+ result_dict[field] = api_response
+ return result_dict
+
+
+def generate_interface_dict(f5, regex):
+ interfaces = Interfaces(f5.get_api(), regex)
+ fields = ['active_media', 'actual_flow_control', 'bundle_state',
+ 'description', 'dual_media_state', 'enabled_state', 'if_index',
+ 'learning_mode', 'lldp_admin_status', 'lldp_tlvmap',
+ 'mac_address', 'media', 'media_option', 'media_option_sfp',
+ 'media_sfp', 'media_speed', 'media_status', 'mtu',
+ 'phy_master_slave_mode', 'prefer_sfp_state', 'flow_control',
+ 'sflow_poll_interval', 'sflow_poll_interval_global',
+ 'sfp_media_state', 'stp_active_edge_port_state',
+ 'stp_enabled_state', 'stp_link_type',
+ 'stp_protocol_detection_reset_state']
+ return generate_dict(interfaces, fields)
+
+
+def generate_self_ip_dict(f5, regex):
+ self_ips = SelfIPs(f5.get_api(), regex)
+ fields = ['address', 'allow_access_list', 'description',
+ 'enforced_firewall_policy', 'floating_state', 'fw_rule',
+ 'netmask', 'staged_firewall_policy', 'traffic_group',
+ 'vlan', 'is_traffic_group_inherited']
+ return generate_dict(self_ips, fields)
+
+
+def generate_trunk_dict(f5, regex):
+ trunks = Trunks(f5.get_api(), regex)
+ fields = ['active_lacp_state', 'configured_member_count', 'description',
+ 'distribution_hash_option', 'interface', 'lacp_enabled_state',
+ 'lacp_timeout_option', 'link_selection_policy', 'media_speed',
+ 'media_status', 'operational_member_count', 'stp_enabled_state',
+ 'stp_protocol_detection_reset_state']
+ return generate_dict(trunks, fields)
+
+
+def generate_vlan_dict(f5, regex):
+ vlans = Vlans(f5.get_api(), regex)
+ fields = ['auto_lasthop', 'cmp_hash_algorithm', 'description',
+ 'dynamic_forwarding', 'failsafe_action', 'failsafe_state',
+ 'failsafe_timeout', 'if_index', 'learning_mode',
+ 'mac_masquerade_address', 'member', 'mtu',
+ 'sflow_poll_interval', 'sflow_poll_interval_global',
+ 'sflow_sampling_rate', 'sflow_sampling_rate_global',
+ 'source_check_state', 'true_mac_address', 'vlan_id']
+ return generate_dict(vlans, fields)
+
+
+def generate_vs_dict(f5, regex):
+ virtual_servers = VirtualServers(f5.get_api(), regex)
+ fields = ['actual_hardware_acceleration', 'authentication_profile',
+ 'auto_lasthop', 'bw_controller_policy', 'clone_pool',
+ 'cmp_enable_mode', 'connection_limit', 'connection_mirror_state',
+ 'default_pool_name', 'description', 'destination',
+ 'enabled_state', 'enforced_firewall_policy',
+ 'fallback_persistence_profile', 'fw_rule', 'gtm_score',
+ 'last_hop_pool', 'nat64_state', 'object_status',
+ 'persistence_profile', 'profile', 'protocol',
+ 'rate_class', 'rate_limit', 'rate_limit_destination_mask',
+ 'rate_limit_mode', 'rate_limit_source_mask', 'related_rule',
+ 'rule', 'security_log_profile', 'snat_pool', 'snat_type',
+ 'source_address', 'source_address_translation_lsn_pool',
+ 'source_address_translation_snat_pool',
+ 'source_address_translation_type', 'source_port_behavior',
+ 'staged_firewall_policy', 'translate_address_state',
+ 'translate_port_state', 'type', 'vlan', 'wildmask']
+ return generate_dict(virtual_servers, fields)
+
+
+def generate_pool_dict(f5, regex):
+ pools = Pools(f5.get_api(), regex)
+ fields = ['action_on_service_down', 'active_member_count',
+ 'aggregate_dynamic_ratio', 'allow_nat_state',
+ 'allow_snat_state', 'client_ip_tos', 'client_link_qos',
+ 'description', 'gateway_failsafe_device',
+ 'ignore_persisted_weight_state', 'lb_method', 'member',
+ 'minimum_active_member', 'minimum_up_member',
+ 'minimum_up_member_action', 'minimum_up_member_enabled_state',
+ 'monitor_association', 'monitor_instance', 'object_status',
+ 'profile', 'queue_depth_limit',
+ 'queue_on_connection_limit_state', 'queue_time_limit',
+ 'reselect_tries', 'server_ip_tos', 'server_link_qos',
+ 'simple_timeout', 'slow_ramp_time']
+ return generate_dict(pools, fields)
+
+
+def generate_device_dict(f5, regex):
+ devices = Devices(f5.get_api(), regex)
+ fields = ['active_modules', 'base_mac_address', 'blade_addresses',
+ 'build', 'chassis_id', 'chassis_type', 'comment',
+ 'configsync_address', 'contact', 'description', 'edition',
+ 'failover_state', 'hostname', 'inactive_modules', 'location',
+ 'management_address', 'marketing_name', 'multicast_address',
+ 'optional_modules', 'platform_id', 'primary_mirror_address',
+ 'product', 'secondary_mirror_address', 'software_version',
+ 'timelimited_modules', 'timezone', 'unicast_addresses']
+ return generate_dict(devices, fields)
+
+
+def generate_device_group_dict(f5, regex):
+ device_groups = DeviceGroups(f5.get_api(), regex)
+ fields = ['all_preferred_active', 'autosync_enabled_state', 'description',
+ 'device', 'full_load_on_sync_state',
+ 'incremental_config_sync_size_maximum',
+ 'network_failover_enabled_state', 'sync_status', 'type']
+ return generate_dict(device_groups, fields)
+
+
+def generate_traffic_group_dict(f5, regex):
+ traffic_groups = TrafficGroups(f5.get_api(), regex)
+ fields = ['auto_failback_enabled_state', 'auto_failback_time',
+ 'default_device', 'description', 'ha_load_factor',
+ 'ha_order', 'is_floating', 'mac_masquerade_address',
+ 'unit_id']
+ return generate_dict(traffic_groups, fields)
+
+
+def generate_rule_dict(f5, regex):
+ rules = Rules(f5.get_api(), regex)
+ fields = ['definition', 'description', 'ignore_vertification',
+ 'verification_status']
+ return generate_dict(rules, fields)
+
+
+def generate_node_dict(f5, regex):
+ nodes = Nodes(f5.get_api(), regex)
+ fields = ['address', 'connection_limit', 'description', 'dynamic_ratio',
+ 'monitor_instance', 'monitor_rule', 'monitor_status',
+ 'object_status', 'rate_limit', 'ratio', 'session_status']
+ return generate_dict(nodes, fields)
+
+
+def generate_virtual_address_dict(f5, regex):
+ virtual_addresses = VirtualAddresses(f5.get_api(), regex)
+ fields = ['address', 'arp_state', 'auto_delete_state', 'connection_limit',
+ 'description', 'enabled_state', 'icmp_echo_state',
+ 'is_floating_state', 'netmask', 'object_status',
+ 'route_advertisement_state', 'traffic_group']
+ return generate_dict(virtual_addresses, fields)
+
+
+def generate_address_class_dict(f5, regex):
+ address_classes = AddressClasses(f5.get_api(), regex)
+ fields = ['address_class', 'description']
+ return generate_dict(address_classes, fields)
+
+
+def generate_certificate_dict(f5, regex):
+ certificates = Certificates(f5.get_api(), regex)
+ return dict(zip(certificates.get_list(), certificates.get_certificate_list()))
+
+
+def generate_key_dict(f5, regex):
+ keys = Keys(f5.get_api(), regex)
+ return dict(zip(keys.get_list(), keys.get_key_list()))
+
+
+def generate_client_ssl_profile_dict(f5, regex):
+ profiles = ProfileClientSSL(f5.get_api(), regex)
+ fields = ['alert_timeout', 'allow_nonssl_state', 'authenticate_depth',
+ 'authenticate_once_state', 'ca_file', 'cache_size',
+ 'cache_timeout', 'certificate_file', 'chain_file',
+ 'cipher_list', 'client_certificate_ca_file', 'crl_file',
+ 'default_profile', 'description',
+ 'forward_proxy_ca_certificate_file', 'forward_proxy_ca_key_file',
+ 'forward_proxy_ca_passphrase',
+ 'forward_proxy_certificate_extension_include',
+ 'forward_proxy_certificate_lifespan',
+ 'forward_proxy_enabled_state',
+ 'forward_proxy_lookup_by_ipaddr_port_state', 'handshake_timeout',
+ 'key_file', 'modssl_emulation_state', 'passphrase',
+ 'peer_certification_mode', 'profile_mode',
+ 'renegotiation_maximum_record_delay', 'renegotiation_period',
+ 'renegotiation_state', 'renegotiation_throughput',
+ 'retain_certificate_state', 'secure_renegotiation_mode',
+ 'server_name', 'session_ticket_state', 'sni_default_state',
+ 'sni_require_state', 'ssl_option', 'strict_resume_state',
+ 'unclean_shutdown_state', 'is_base_profile', 'is_system_profile']
+ return generate_dict(profiles, fields)
+
+
+def generate_system_info_dict(f5):
+ system_info = SystemInfo(f5.get_api())
+ fields = ['base_mac_address',
+ 'blade_temperature', 'chassis_slot_information',
+ 'globally_unique_identifier', 'group_id',
+ 'hardware_information',
+ 'marketing_name',
+ 'product_information', 'pva_version', 'system_id',
+ 'system_information', 'time',
+ 'time_zone', 'uptime']
+ return generate_simple_dict(system_info, fields)
+
+
+def generate_software_list(f5):
+ software = Software(f5.get_api())
+ software_list = software.get_all_software_status()
+ return software_list
+
+
+def generate_provision_dict(f5):
+ provisioned = ProvisionInfo(f5.get_api())
+ fields = ['list', 'provisioned_list']
+ return generate_simple_dict(provisioned, fields)
+
+
+def main():
+ argument_spec = f5_argument_spec()
+
+ meta_args = dict(
+ session=dict(type='bool', default=False),
+ include=dict(type='list', required=True),
+ filter=dict(type='str', required=False),
+ )
+ argument_spec.update(meta_args)
+
+ module = AnsibleModule(
+ argument_spec=argument_spec
+ )
+
+ if not bigsuds_found:
+ module.fail_json(msg="the python suds and bigsuds modules are required")
+
+ server = module.params['server']
+ server_port = module.params['server_port']
+ user = module.params['user']
+ password = module.params['password']
+ validate_certs = module.params['validate_certs']
+ session = module.params['session']
+ fact_filter = module.params['filter']
+
+ if validate_certs:
+ import ssl
+ if not hasattr(ssl, 'SSLContext'):
+ module.fail_json(msg='bigsuds does not support verifying certificates with python < 2.7.9. Either update python or set validate_certs=False on the task')
+
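+    # 'filter' takes a shell-style glob; fnmatch.translate converts it to a
+    # regular expression that each facts class applies with re.search.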
+ if fact_filter:
+ regex = fnmatch.translate(fact_filter)
+ else:
+ regex = None
+ include = [x.lower() for x in module.params['include']]
+ valid_includes = ('address_class', 'certificate', 'client_ssl_profile',
+ 'device', 'device_group', 'interface', 'key', 'node',
+ 'pool', 'provision', 'rule', 'self_ip', 'software',
+ 'system_info', 'traffic_group', 'trunk',
+ 'virtual_address', 'virtual_server', 'vlan')
+ include_test = map(lambda x: x in valid_includes, include)
+ if not all(include_test):
+ module.fail_json(msg="value of include must be one or more of: %s, got: %s" % (",".join(valid_includes), ",".join(include)))
+
+ try:
+ facts = {}
+
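+        # Each entry requested via 'include' becomes a top-level key in the
+        # returned ansible_facts, e.g. facts['vlan'] when include contains 'vlan'.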
+ if len(include) > 0:
+ f5 = F5(server, user, password, session, validate_certs, server_port)
+ saved_active_folder = f5.get_active_folder()
+ saved_recursive_query_state = f5.get_recursive_query_state()
+ if saved_active_folder != "/":
+ f5.set_active_folder("/")
+ if saved_recursive_query_state != "STATE_ENABLED":
+ f5.enable_recursive_query_state()
+
+ if 'interface' in include:
+ facts['interface'] = generate_interface_dict(f5, regex)
+ if 'self_ip' in include:
+ facts['self_ip'] = generate_self_ip_dict(f5, regex)
+ if 'trunk' in include:
+ facts['trunk'] = generate_trunk_dict(f5, regex)
+ if 'vlan' in include:
+ facts['vlan'] = generate_vlan_dict(f5, regex)
+ if 'virtual_server' in include:
+ facts['virtual_server'] = generate_vs_dict(f5, regex)
+ if 'pool' in include:
+ facts['pool'] = generate_pool_dict(f5, regex)
+ if 'provision' in include:
+ facts['provision'] = generate_provision_dict(f5)
+ if 'device' in include:
+ facts['device'] = generate_device_dict(f5, regex)
+ if 'device_group' in include:
+ facts['device_group'] = generate_device_group_dict(f5, regex)
+ if 'traffic_group' in include:
+ facts['traffic_group'] = generate_traffic_group_dict(f5, regex)
+ if 'rule' in include:
+ facts['rule'] = generate_rule_dict(f5, regex)
+ if 'node' in include:
+ facts['node'] = generate_node_dict(f5, regex)
+ if 'virtual_address' in include:
+ facts['virtual_address'] = generate_virtual_address_dict(f5, regex)
+ if 'address_class' in include:
+ facts['address_class'] = generate_address_class_dict(f5, regex)
+ if 'software' in include:
+ facts['software'] = generate_software_list(f5)
+ if 'certificate' in include:
+ facts['certificate'] = generate_certificate_dict(f5, regex)
+ if 'key' in include:
+ facts['key'] = generate_key_dict(f5, regex)
+ if 'client_ssl_profile' in include:
+ facts['client_ssl_profile'] = generate_client_ssl_profile_dict(f5, regex)
+ if 'system_info' in include:
+ facts['system_info'] = generate_system_info_dict(f5)
+
+            # restore saved state; f5 and the saved values only exist when at
+            # least one fact type was gathered above
+            if saved_active_folder and saved_active_folder != "/":
+                f5.set_active_folder(saved_active_folder)
+            if saved_recursive_query_state and \
+                    saved_recursive_query_state != "STATE_ENABLED":
+                f5.set_recursive_query_state(saved_recursive_query_state)
+
+ result = {'ansible_facts': facts}
+
+ except Exception as e:
+ module.fail_json(msg="received exception: %s\ntraceback: %s" % (e, traceback.format_exc()))
+
+ module.exit_json(**result)
+
+# include magic from lib/ansible/module_common.py
+from ansible.module_utils.basic import *
+from ansible.module_utils.f5 import *
+
+if __name__ == '__main__':
+ main()
diff --git a/lib/ansible/modules/extras/network/f5/bigip_gtm_datacenter.py b/lib/ansible/modules/extras/network/f5/bigip_gtm_datacenter.py
new file mode 100644
index 0000000000..90882b6f64
--- /dev/null
+++ b/lib/ansible/modules/extras/network/f5/bigip_gtm_datacenter.py
@@ -0,0 +1,366 @@
+#!/usr/bin/python
+# -*- coding: utf-8 -*-
+#
+# This file is part of Ansible
+#
+# Ansible is free software: you can redistribute it and/or modify
+# it under the terms of the GNU General Public License as published by
+# the Free Software Foundation, either version 3 of the License, or
+# (at your option) any later version.
+#
+# Ansible is distributed in the hope that it will be useful,
+# but WITHOUT ANY WARRANTY; without even the implied warranty of
+# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+# GNU General Public License for more details.
+#
+# You should have received a copy of the GNU General Public License
+# along with Ansible. If not, see <http://www.gnu.org/licenses/>.
+
+DOCUMENTATION = '''
+---
+module: bigip_gtm_datacenter
+short_description: Manage Datacenter configuration in BIG-IP
+description:
+ - Manage BIG-IP data center configuration. A data center defines the location
+ where the physical network components reside, such as the server and link
+ objects that share the same subnet on the network. This module is able to
+    manipulate the data center definitions in a BIG-IP.
+version_added: "2.2"
+options:
+ contact:
+ description:
+ - The name of the contact for the data center.
+ description:
+ description:
+ - The description of the data center.
+ enabled:
+ description:
+ - Whether the data center should be enabled. At least one of C(state) and
+        C(enabled) is required.
+ choices:
+ - yes
+ - no
+ location:
+ description:
+ - The location of the data center.
+ name:
+ description:
+ - The name of the data center.
+ required: true
+ state:
+ description:
+ - The state of the datacenter on the BIG-IP. When C(present), guarantees
+        that the data center exists. When C(absent), removes the data center
+        from the BIG-IP. C(enabled) will enable the data center and C(disabled)
+        will ensure the data center is disabled. At least one of C(state) and
+        C(enabled) is required.
+ choices:
+ - present
+ - absent
+notes:
+ - Requires the f5-sdk Python package on the host. This is as easy as
+ pip install f5-sdk.
+extends_documentation_fragment: f5
+requirements:
+ - f5-sdk
+author:
+ - Tim Rupp (@caphrim007)
+'''
+
+EXAMPLES = '''
+- name: Create data center "New York"
+ bigip_gtm_datacenter:
+ server: "big-ip"
+ name: "New York"
+ location: "222 West 23rd"
+ delegate_to: localhost
+'''
+
+RETURN = '''
+contact:
+ description: The contact that was set on the datacenter
+ returned: changed
+ type: string
+ sample: "admin@root.local"
+description:
+ description: The description that was set for the datacenter
+ returned: changed
+ type: string
+ sample: "Datacenter in NYC"
+enabled:
+ description: Whether the datacenter is enabled or not
+ returned: changed
+ type: bool
+ sample: true
+location:
+ description: The location that is set for the datacenter
+ returned: changed
+ type: string
+ sample: "222 West 23rd"
+name:
+ description: Name of the datacenter being manipulated
+ returned: changed
+ type: string
+ sample: "foo"
+'''
+
+try:
+ from f5.bigip import ManagementRoot
+ from icontrol.session import iControlUnexpectedHTTPError
+ HAS_F5SDK = True
+except ImportError:
+ HAS_F5SDK = False
+
+
+class BigIpGtmDatacenter(object):
+ def __init__(self, *args, **kwargs):
+ if not HAS_F5SDK:
+ raise F5ModuleError("The python f5-sdk module is required")
+
+ # The params that change in the module
+ self.cparams = dict()
+
+ # Stores the params that are sent to the module
+ self.params = kwargs
+ self.api = ManagementRoot(kwargs['server'],
+ kwargs['user'],
+ kwargs['password'],
+ port=kwargs['server_port'])
+
+ def create(self):
+ params = dict()
+
+ check_mode = self.params['check_mode']
+ contact = self.params['contact']
+ description = self.params['description']
+ location = self.params['location']
+ name = self.params['name']
+ partition = self.params['partition']
+ enabled = self.params['enabled']
+
+ # Specifically check for None because a person could supply empty
+ # values which would technically still be valid
+ if contact is not None:
+ params['contact'] = contact
+
+ if description is not None:
+ params['description'] = description
+
+ if location is not None:
+ params['location'] = location
+
+        if enabled is not None:
+            if enabled:
+                params['enabled'] = True
+            else:
+                params['disabled'] = True
+
+ params['name'] = name
+ params['partition'] = partition
+
+ self.cparams = camel_dict_to_snake_dict(params)
+ if check_mode:
+ return True
+
+ d = self.api.tm.gtm.datacenters.datacenter
+ d.create(**params)
+
+ if not self.exists():
+ raise F5ModuleError("Failed to create the datacenter")
+ return True
+
+ def read(self):
+ """Read information and transform it
+
+ The values that are returned by BIG-IP in the f5-sdk can have encoding
+ attached to them as well as be completely missing in some cases.
+
+ Therefore, this method will transform the data from the BIG-IP into a
+ format that is more easily consumable by the rest of the class and the
+ parameters that are supported by the module.
+ """
+ p = dict()
+ name = self.params['name']
+ partition = self.params['partition']
+ r = self.api.tm.gtm.datacenters.datacenter.load(
+ name=name,
+ partition=partition
+ )
+
+ if hasattr(r, 'servers'):
+            # Deliberately using sets to suppress duplicates
+ p['servers'] = set([str(x) for x in r.servers])
+ if hasattr(r, 'contact'):
+ p['contact'] = str(r.contact)
+ if hasattr(r, 'location'):
+ p['location'] = str(r.location)
+ if hasattr(r, 'description'):
+ p['description'] = str(r.description)
+ if r.enabled:
+ p['enabled'] = True
+ else:
+ p['enabled'] = False
+ p['name'] = name
+ return p
+
+ def update(self):
+ changed = False
+ params = dict()
+ current = self.read()
+
+ check_mode = self.params['check_mode']
+ contact = self.params['contact']
+ description = self.params['description']
+ location = self.params['location']
+ name = self.params['name']
+ partition = self.params['partition']
+ enabled = self.params['enabled']
+
+ if contact is not None:
+ if 'contact' in current:
+ if contact != current['contact']:
+ params['contact'] = contact
+ else:
+ params['contact'] = contact
+
+ if description is not None:
+ if 'description' in current:
+ if description != current['description']:
+ params['description'] = description
+ else:
+ params['description'] = description
+
+ if location is not None:
+ if 'location' in current:
+ if location != current['location']:
+ params['location'] = location
+ else:
+ params['location'] = location
+
+ if enabled is not None:
+ if current['enabled'] != enabled:
+ if enabled is True:
+ params['enabled'] = True
+ params['disabled'] = False
+ else:
+ params['disabled'] = True
+ params['enabled'] = False
+
+        if params:
+            changed = True
+            # record the pending changes so check mode can report them too
+            self.cparams = camel_dict_to_snake_dict(params)
+            if check_mode:
+                return changed
+        else:
+            return changed
+
+ r = self.api.tm.gtm.datacenters.datacenter.load(
+ name=name,
+ partition=partition
+ )
+ r.update(**params)
+ r.refresh()
+
+ return True
+
+ def delete(self):
+ params = dict()
+ check_mode = self.params['check_mode']
+
+ params['name'] = self.params['name']
+ params['partition'] = self.params['partition']
+
+ self.cparams = camel_dict_to_snake_dict(params)
+ if check_mode:
+ return True
+
+ dc = self.api.tm.gtm.datacenters.datacenter.load(**params)
+ dc.delete()
+
+ if self.exists():
+ raise F5ModuleError("Failed to delete the datacenter")
+ return True
+
+ def present(self):
+ changed = False
+
+ if self.exists():
+ changed = self.update()
+ else:
+ changed = self.create()
+
+ return changed
+
+ def absent(self):
+ changed = False
+
+ if self.exists():
+ changed = self.delete()
+
+ return changed
+
+ def exists(self):
+ name = self.params['name']
+ partition = self.params['partition']
+
+ return self.api.tm.gtm.datacenters.datacenter.exists(
+ name=name,
+ partition=partition
+ )
+
+ def flush(self):
+ result = dict()
+ state = self.params['state']
+ enabled = self.params['enabled']
+
+ if state is None and enabled is None:
+            # 'module' is not in scope here; raise instead so main() can
+            # report the failure via fail_json
+            raise F5ModuleError("Neither 'state' nor 'enabled' set")
+
+ try:
+ if state == "present":
+ changed = self.present()
+
+ # Ensure that this field is not returned to the user since it
+ # is not a valid parameter to the module.
+ if 'disabled' in self.cparams:
+ del self.cparams['disabled']
+ elif state == "absent":
+ changed = self.absent()
+ except iControlUnexpectedHTTPError as e:
+ raise F5ModuleError(str(e))
+
+ result.update(**self.cparams)
+ result.update(dict(changed=changed))
+ return result
+
+
+def main():
+ argument_spec = f5_argument_spec()
+
+ meta_args = dict(
+ contact=dict(required=False, default=None),
+ description=dict(required=False, default=None),
+        enabled=dict(required=False, type='bool', default=None),
+ location=dict(required=False, default=None),
+ name=dict(required=True)
+ )
+ argument_spec.update(meta_args)
+
+ module = AnsibleModule(
+ argument_spec=argument_spec,
+ supports_check_mode=True
+ )
+
+ try:
+ obj = BigIpGtmDatacenter(check_mode=module.check_mode, **module.params)
+ result = obj.flush()
+
+ module.exit_json(**result)
+ except F5ModuleError as e:
+ module.fail_json(msg=str(e))
+
+from ansible.module_utils.basic import *
+from ansible.module_utils.ec2 import camel_dict_to_snake_dict
+from ansible.module_utils.f5 import *
+
+if __name__ == '__main__':
+ main()
diff --git a/lib/ansible/modules/extras/network/f5/bigip_gtm_virtual_server.py b/lib/ansible/modules/extras/network/f5/bigip_gtm_virtual_server.py
new file mode 100644
index 0000000000..079709c1b3
--- /dev/null
+++ b/lib/ansible/modules/extras/network/f5/bigip_gtm_virtual_server.py
@@ -0,0 +1,235 @@
+#!/usr/bin/python
+# -*- coding: utf-8 -*-
+#
+# (c) 2015, Michael Perzel
+#
+# This file is part of Ansible
+#
+# Ansible is free software: you can redistribute it and/or modify
+# it under the terms of the GNU General Public License as published by
+# the Free Software Foundation, either version 3 of the License, or
+# (at your option) any later version.
+#
+# Ansible is distributed in the hope that it will be useful,
+# but WITHOUT ANY WARRANTY; without even the implied warranty of
+# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+# GNU General Public License for more details.
+#
+# You should have received a copy of the GNU General Public License
+# along with Ansible. If not, see <http://www.gnu.org/licenses/>.
+
+DOCUMENTATION = '''
+---
+module: bigip_gtm_virtual_server
+short_description: "Manages F5 BIG-IP GTM virtual servers"
+description:
+ - "Manages F5 BIG-IP GTM virtual servers"
+version_added: "2.2"
+author:
+ - Michael Perzel (@perzizzle)
+ - Tim Rupp (@caphrim007)
+notes:
+ - "Requires BIG-IP software version >= 11.4"
+ - "F5 developed module 'bigsuds' required (see http://devcentral.f5.com)"
+ - "Best run as a local_action in your playbook"
+ - "Tested with manager and above account privilege level"
+
+requirements:
+ - bigsuds
+options:
+ state:
+ description:
+ - Virtual server state
+ required: false
+ default: present
+    choices: ['present', 'absent', 'enabled', 'disabled']
+ virtual_server_name:
+ description:
+ - Virtual server name
+ required: True
+ virtual_server_server:
+ description:
+      - Name of the GTM server the virtual server belongs to
+ required: true
+ host:
+ description:
+ - Virtual server host
+ required: false
+ default: None
+ aliases: ['address']
+ port:
+ description:
+ - Virtual server port
+ required: false
+ default: None
+extends_documentation_fragment: f5
+'''
+
+EXAMPLES = '''
+ - name: Enable virtual server
+ local_action: >
+ bigip_gtm_virtual_server
+ server=192.0.2.1
+ user=admin
+ password=mysecret
+ virtual_server_name=myname
+ virtual_server_server=myserver
+ state=enabled
+'''
+
+RETURN = '''# '''
+
+try:
+ import bigsuds
+except ImportError:
+ bigsuds_found = False
+else:
+ bigsuds_found = True
+
+
+def server_exists(api, server):
+ # hack to determine if virtual server exists
+ result = False
+ try:
+ api.GlobalLB.Server.get_object_status([server])
+ result = True
+    except bigsuds.OperationFailed as e:
+ if "was not found" in str(e):
+ result = False
+ else:
+ # genuine exception
+ raise
+ return result
+
+
+def virtual_server_exists(api, name, server):
+ # hack to determine if virtual server exists
+ result = False
+ try:
+ virtual_server_id = {'name': name, 'server': server}
+ api.GlobalLB.VirtualServerV2.get_object_status([virtual_server_id])
+ result = True
+    except bigsuds.OperationFailed as e:
+ if "was not found" in str(e):
+ result = False
+ else:
+ # genuine exception
+ raise
+ return result
+
+
+def add_virtual_server(api, virtual_server_name, virtual_server_server, address, port):
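+    # VirtualServerV2.create takes parallel lists: one id dict and one
+    # address/port definition per virtual server being created.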
+ addresses = {'address': address, 'port': port}
+ virtual_server_id = {'name': virtual_server_name, 'server': virtual_server_server}
+ api.GlobalLB.VirtualServerV2.create([virtual_server_id], [addresses])
+
+
+def remove_virtual_server(api, virtual_server_name, virtual_server_server):
+ virtual_server_id = {'name': virtual_server_name, 'server': virtual_server_server}
+ api.GlobalLB.VirtualServerV2.delete_virtual_server([virtual_server_id])
+
+
+def get_virtual_server_state(api, name, server):
+ virtual_server_id = {'name': name, 'server': server}
+ state = api.GlobalLB.VirtualServerV2.get_enabled_state([virtual_server_id])
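+    # iControl returns e.g. 'STATE_ENABLED'; normalize it to 'enabled' or
+    # 'disabled' so it can be compared against the module's 'state' parameter.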
+ state = state[0].split('STATE_')[1].lower()
+ return state
+
+
+def set_virtual_server_state(api, name, server, state):
+ virtual_server_id = {'name': name, 'server': server}
+ state = "STATE_%s" % state.strip().upper()
+ api.GlobalLB.VirtualServerV2.set_enabled_state([virtual_server_id], [state])
+
+
+def main():
+ argument_spec = f5_argument_spec()
+
+ meta_args = dict(
+ state=dict(type='str', default='present', choices=['present', 'absent', 'enabled', 'disabled']),
+ host=dict(type='str', default=None, aliases=['address']),
+ port=dict(type='int', default=None),
+ virtual_server_name=dict(type='str', required=True),
+ virtual_server_server=dict(type='str', required=True)
+ )
+ argument_spec.update(meta_args)
+
+ module = AnsibleModule(
+ argument_spec=argument_spec,
+ supports_check_mode=True
+ )
+
+ if not bigsuds_found:
+ module.fail_json(msg="the python bigsuds module is required")
+
+ server = module.params['server']
+ server_port = module.params['server_port']
+ validate_certs = module.params['validate_certs']
+ user = module.params['user']
+ password = module.params['password']
+ virtual_server_name = module.params['virtual_server_name']
+ virtual_server_server = module.params['virtual_server_server']
+ state = module.params['state']
+ address = module.params['host']
+ port = module.params['port']
+
+ result = {'changed': False} # default
+
+ try:
+ api = bigip_api(server, user, password, validate_certs, port=server_port)
+
+ if state == 'absent':
+ if virtual_server_exists(api, virtual_server_name, virtual_server_server):
+ if not module.check_mode:
+ remove_virtual_server(api, virtual_server_name, virtual_server_server)
+ result = {'changed': True}
+ else:
+ # check-mode return value
+ result = {'changed': True}
+ elif state == 'present':
+ if virtual_server_name and virtual_server_server and address and port:
+ if not virtual_server_exists(api, virtual_server_name, virtual_server_server):
+ if not module.check_mode:
+ if server_exists(api, virtual_server_server):
+ add_virtual_server(api, virtual_server_name, virtual_server_server, address, port)
+ result = {'changed': True}
+ else:
+ module.fail_json(msg="server does not exist")
+ else:
+ # check-mode return value
+ result = {'changed': True}
+ else:
+ # virtual server exists -- potentially modify attributes --future feature
+ result = {'changed': False}
+ else:
+ module.fail_json(msg="Address and port are required to create virtual server")
+ elif state == 'enabled':
+ if not virtual_server_exists(api, virtual_server_name, virtual_server_server):
+ module.fail_json(msg="virtual server does not exist")
+ if state != get_virtual_server_state(api, virtual_server_name, virtual_server_server):
+ if not module.check_mode:
+ set_virtual_server_state(api, virtual_server_name, virtual_server_server, state)
+ result = {'changed': True}
+ else:
+ result = {'changed': True}
+ elif state == 'disabled':
+ if not virtual_server_exists(api, virtual_server_name, virtual_server_server):
+ module.fail_json(msg="virtual server does not exist")
+ if state != get_virtual_server_state(api, virtual_server_name, virtual_server_server):
+ if not module.check_mode:
+ set_virtual_server_state(api, virtual_server_name, virtual_server_server, state)
+ result = {'changed': True}
+ else:
+ result = {'changed': True}
+
+    except Exception as e:
+ module.fail_json(msg="received exception: %s" % e)
+
+ module.exit_json(**result)
+
+# import module snippets
+from ansible.module_utils.basic import *
+from ansible.module_utils.f5 import *
+
+if __name__ == '__main__':
+ main()
diff --git a/lib/ansible/modules/extras/network/f5/bigip_gtm_wide_ip.py b/lib/ansible/modules/extras/network/f5/bigip_gtm_wide_ip.py
new file mode 100644
index 0000000000..19292783bc
--- /dev/null
+++ b/lib/ansible/modules/extras/network/f5/bigip_gtm_wide_ip.py
@@ -0,0 +1,158 @@
+#!/usr/bin/python
+# -*- coding: utf-8 -*-
+#
+# (c) 2015, Michael Perzel
+#
+# This file is part of Ansible
+#
+# Ansible is free software: you can redistribute it and/or modify
+# it under the terms of the GNU General Public License as published by
+# the Free Software Foundation, either version 3 of the License, or
+# (at your option) any later version.
+#
+# Ansible is distributed in the hope that it will be useful,
+# but WITHOUT ANY WARRANTY; without even the implied warranty of
+# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+# GNU General Public License for more details.
+#
+# You should have received a copy of the GNU General Public License
+# along with Ansible. If not, see <http://www.gnu.org/licenses/>.
+
+DOCUMENTATION = '''
+---
+module: bigip_gtm_wide_ip
+short_description: "Manages F5 BIG-IP GTM wide ip"
+description:
+ - "Manages F5 BIG-IP GTM wide ip"
+version_added: "2.0"
+author:
+ - Michael Perzel (@perzizzle)
+ - Tim Rupp (@caphrim007)
+notes:
+ - "Requires BIG-IP software version >= 11.4"
+ - "F5 developed module 'bigsuds' required (see http://devcentral.f5.com)"
+ - "Best run as a local_action in your playbook"
+ - "Tested with manager and above account privilege level"
+
+requirements:
+ - bigsuds
+options:
+ lb_method:
+ description:
+ - LB method of wide ip
+ required: true
+ choices: ['return_to_dns', 'null', 'round_robin',
+ 'ratio', 'topology', 'static_persist', 'global_availability',
+ 'vs_capacity', 'least_conn', 'lowest_rtt', 'lowest_hops',
+ 'packet_rate', 'cpu', 'hit_ratio', 'qos', 'bps',
+ 'drop_packet', 'explicit_ip', 'connection_rate', 'vs_score']
+ wide_ip:
+ description:
+ - Wide IP name
+ required: true
+extends_documentation_fragment: f5
+'''
+
+EXAMPLES = '''
+ - name: Set lb method
+ local_action: >
+ bigip_gtm_wide_ip
+ server=192.0.2.1
+ user=admin
+ password=mysecret
+ lb_method=round_robin
+ wide_ip=my-wide-ip.example.com
+'''
+
+try:
+ import bigsuds
+except ImportError:
+ bigsuds_found = False
+else:
+ bigsuds_found = True
+
+def get_wide_ip_lb_method(api, wide_ip):
+ lb_method = api.GlobalLB.WideIP.get_lb_method(wide_ips=[wide_ip])[0]
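+    # e.g. 'LB_METHOD_ROUND_ROBIN' -> 'round_robin', matching the module's
+    # lb_method choices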
+ lb_method = lb_method.strip().replace('LB_METHOD_', '').lower()
+ return lb_method
+
+def get_wide_ip_pools(api, wide_ip):
+ try:
+ return api.GlobalLB.WideIP.get_wideip_pool([wide_ip])
+    except Exception:
+        # Modules communicate via JSON on stdout, so never print here.
+        return None
+
+def wide_ip_exists(api, wide_ip):
+ # hack to determine if wide_ip exists
+ result = False
+ try:
+ api.GlobalLB.WideIP.get_object_status(wide_ips=[wide_ip])
+ result = True
+    except bigsuds.OperationFailed as e:
+ if "was not found" in str(e):
+ result = False
+ else:
+ # genuine exception
+ raise
+ return result
+
+def set_wide_ip_lb_method(api, wide_ip, lb_method):
+ lb_method = "LB_METHOD_%s" % lb_method.strip().upper()
+ api.GlobalLB.WideIP.set_lb_method(wide_ips=[wide_ip], lb_methods=[lb_method])
+
+def main():
+ argument_spec = f5_argument_spec()
+
+ lb_method_choices = ['return_to_dns', 'null', 'round_robin',
+ 'ratio', 'topology', 'static_persist', 'global_availability',
+ 'vs_capacity', 'least_conn', 'lowest_rtt', 'lowest_hops',
+ 'packet_rate', 'cpu', 'hit_ratio', 'qos', 'bps',
+ 'drop_packet', 'explicit_ip', 'connection_rate', 'vs_score']
+ meta_args = dict(
+        lb_method=dict(type='str', required=True, choices=lb_method_choices),
+        wide_ip=dict(type='str', required=True)
+ )
+ argument_spec.update(meta_args)
+
+ module = AnsibleModule(
+ argument_spec=argument_spec,
+ supports_check_mode=True
+ )
+
+ if not bigsuds_found:
+ module.fail_json(msg="the python bigsuds module is required")
+
+ server = module.params['server']
+ server_port = module.params['server_port']
+ user = module.params['user']
+ password = module.params['password']
+ wide_ip = module.params['wide_ip']
+ lb_method = module.params['lb_method']
+ validate_certs = module.params['validate_certs']
+
+ result = {'changed': False} # default
+
+ try:
+ api = bigip_api(server, user, password, validate_certs, port=server_port)
+
+ if not wide_ip_exists(api, wide_ip):
+ module.fail_json(msg="wide ip %s does not exist" % wide_ip)
+
+ if lb_method is not None and lb_method != get_wide_ip_lb_method(api, wide_ip):
+ if not module.check_mode:
+ set_wide_ip_lb_method(api, wide_ip, lb_method)
+ result = {'changed': True}
+ else:
+ result = {'changed': True}
+
+    except Exception as e:
+ module.fail_json(msg="received exception: %s" % e)
+
+ module.exit_json(**result)
+
+# import module snippets
+from ansible.module_utils.basic import *
+from ansible.module_utils.f5 import *
+
+if __name__ == '__main__':
+ main()
diff --git a/lib/ansible/modules/extras/network/f5/bigip_irule.py b/lib/ansible/modules/extras/network/f5/bigip_irule.py
new file mode 100644
index 0000000000..5e99ec34fa
--- /dev/null
+++ b/lib/ansible/modules/extras/network/f5/bigip_irule.py
@@ -0,0 +1,385 @@
+#!/usr/bin/python
+# -*- coding: utf-8 -*-
+#
+# This file is part of Ansible
+#
+# Ansible is free software: you can redistribute it and/or modify
+# it under the terms of the GNU General Public License as published by
+# the Free Software Foundation, either version 3 of the License, or
+# (at your option) any later version.
+#
+# Ansible is distributed in the hope that it will be useful,
+# but WITHOUT ANY WARRANTY; without even the implied warranty of
+# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+# GNU General Public License for more details.
+#
+# You should have received a copy of the GNU General Public License
+# along with Ansible. If not, see <http://www.gnu.org/licenses/>.
+
+DOCUMENTATION = '''
+---
+module: bigip_irule
+short_description: Manage iRules across different modules on a BIG-IP.
+description:
+ - Manage iRules across different modules on a BIG-IP.
+version_added: "2.2"
+options:
+ content:
+ description:
+      - When used instead of C(src), sets the contents of an iRule directly to
+ the specified value. This is for simple values, but can be used with
+ lookup plugins for anything complex or with formatting. Either one
+ of C(src) or C(content) must be provided.
+ module:
+ description:
+ - The BIG-IP module to add the iRule to.
+ required: true
+ choices:
+ - ltm
+ - gtm
+ partition:
+ description:
+ - The partition to create the iRule on.
+ required: false
+ default: Common
+ name:
+ description:
+ - The name of the iRule.
+ required: true
+ src:
+ description:
+ - The iRule file to interpret and upload to the BIG-IP. Either one
+ of C(src) or C(content) must be provided.
+    required: false
+ state:
+ description:
+ - Whether the iRule should exist or not.
+ required: false
+ default: present
+ choices:
+ - present
+ - absent
+notes:
+ - Requires the f5-sdk Python package on the host. This is as easy as
+ pip install f5-sdk.
+extends_documentation_fragment: f5
+requirements:
+ - f5-sdk
+author:
+ - Tim Rupp (@caphrim007)
+'''
+
+EXAMPLES = '''
+- name: Add the iRule contained in templated irule.tcl to the LTM module
+ bigip_irule:
+ content: "{{ lookup('template', 'irule-template.tcl') }}"
+ module: "ltm"
+ name: "MyiRule"
+ password: "secret"
+ server: "lb.mydomain.com"
+ state: "present"
+ user: "admin"
+ delegate_to: localhost
+
+- name: Add the iRule contained in static file irule.tcl to the LTM module
+ bigip_irule:
+ module: "ltm"
+ name: "MyiRule"
+ password: "secret"
+ server: "lb.mydomain.com"
+ src: "irule-static.tcl"
+ state: "present"
+ user: "admin"
+ delegate_to: localhost
+'''
+
+RETURN = '''
+module:
+ description: The module that the iRule was added to
+ returned: changed and success
+ type: string
+ sample: "gtm"
+src:
+ description: The filename that included the iRule source
+ returned: changed and success, when provided
+ type: string
+ sample: "/opt/src/irules/example1.tcl"
+name:
+ description: The name of the iRule that was managed
+ returned: changed and success
+ type: string
+ sample: "my-irule"
+content:
+ description: The content of the iRule that was managed
+ returned: changed and success
+ type: string
+ sample: "when LB_FAILED { set wipHost [LB::server addr] }"
+partition:
+ description: The partition in which the iRule was managed
+ returned: changed and success
+ type: string
+ sample: "Common"
+'''
+
+try:
+ from f5.bigip import ManagementRoot
+ from icontrol.session import iControlUnexpectedHTTPError
+ HAS_F5SDK = True
+except ImportError:
+ HAS_F5SDK = False
+
+MODULES = ['gtm', 'ltm']
+
+
+class BigIpiRule(object):
+ def __init__(self, *args, **kwargs):
+ if not HAS_F5SDK:
+ raise F5ModuleError("The python f5-sdk module is required")
+
+ if kwargs['state'] != 'absent':
+ if not kwargs['content'] and not kwargs['src']:
+ raise F5ModuleError(
+ "Either 'content' or 'src' must be provided"
+ )
+
+ source = kwargs['src']
+ if source:
+ with open(source) as f:
+ kwargs['content'] = f.read()
+
+ # The params that change in the module
+ self.cparams = dict()
+
+ # Stores the params that are sent to the module
+ self.params = kwargs
+ self.api = ManagementRoot(kwargs['server'],
+ kwargs['user'],
+ kwargs['password'],
+ port=kwargs['server_port'])
+
+ def flush(self):
+ result = dict()
+ state = self.params['state']
+
+ try:
+ if state == "present":
+ changed = self.present()
+ elif state == "absent":
+ changed = self.absent()
+ except iControlUnexpectedHTTPError as e:
+ raise F5ModuleError(str(e))
+
+ result.update(**self.cparams)
+ result.update(dict(changed=changed))
+ return result
+
+ def read(self):
+ """Read information and transform it
+
+ The values that are returned by BIG-IP in the f5-sdk can have encoding
+ attached to them as well as be completely missing in some cases.
+
+ Therefore, this method will transform the data from the BIG-IP into a
+ format that is more easily consumable by the rest of the class and the
+ parameters that are supported by the module.
+ """
+ p = dict()
+ name = self.params['name']
+ partition = self.params['partition']
+ module = self.params['module']
+
+ if module == 'ltm':
+ r = self.api.tm.ltm.rules.rule.load(
+ name=name,
+ partition=partition
+ )
+ elif module == 'gtm':
+ r = self.api.tm.gtm.rules.rule.load(
+ name=name,
+ partition=partition
+ )
+
+ if hasattr(r, 'apiAnonymous'):
+ p['content'] = str(r.apiAnonymous)
+ p['name'] = name
+ return p
+
+ def delete(self):
+ params = dict()
+ check_mode = self.params['check_mode']
+ module = self.params['module']
+
+ params['name'] = self.params['name']
+ params['partition'] = self.params['partition']
+
+ self.cparams = camel_dict_to_snake_dict(params)
+ if check_mode:
+ return True
+
+ if module == 'ltm':
+ r = self.api.tm.ltm.rules.rule.load(**params)
+ r.delete()
+ elif module == 'gtm':
+ r = self.api.tm.gtm.rules.rule.load(**params)
+ r.delete()
+
+ if self.exists():
+ raise F5ModuleError("Failed to delete the iRule")
+ return True
+
+ def exists(self):
+ name = self.params['name']
+ partition = self.params['partition']
+ module = self.params['module']
+
+ if module == 'ltm':
+ return self.api.tm.ltm.rules.rule.exists(
+ name=name,
+ partition=partition
+ )
+ elif module == 'gtm':
+ return self.api.tm.gtm.rules.rule.exists(
+ name=name,
+ partition=partition
+ )
+
+ def present(self):
+ changed = False
+
+ if self.exists():
+ changed = self.update()
+ else:
+ changed = self.create()
+
+ return changed
+
+ def update(self):
+ params = dict()
+ current = self.read()
+ changed = False
+
+ check_mode = self.params['check_mode']
+ content = self.params['content']
+ name = self.params['name']
+ partition = self.params['partition']
+ module = self.params['module']
+
+ if content is not None:
+ if 'content' in current:
+ if content != current['content']:
+ params['apiAnonymous'] = content
+ else:
+ params['apiAnonymous'] = content
+
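+ # only mark (and later push) a change when the desired content differs from the device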
+ if params:
+ changed = True
+ params['name'] = name
+ params['partition'] = partition
+ self.cparams = camel_dict_to_snake_dict(params)
+ if 'api_anonymous' in self.cparams:
+ self.cparams['content'] = self.cparams.pop('api_anonymous')
+ if self.params['src']:
+ self.cparams['src'] = self.params['src']
+
+ if check_mode:
+ return changed
+ else:
+ return changed
+
+ if module == 'ltm':
+ d = self.api.tm.ltm.rules.rule.load(
+ name=name,
+ partition=partition
+ )
+ d.update(**params)
+ d.refresh()
+ elif module == 'gtm':
+ d = self.api.tm.gtm.rules.rule.load(
+ name=name,
+ partition=partition
+ )
+ d.update(**params)
+ d.refresh()
+
+ return True
+
+ def create(self):
+ params = dict()
+
+ check_mode = self.params['check_mode']
+ content = self.params['content']
+ name = self.params['name']
+ partition = self.params['partition']
+ module = self.params['module']
+
+ if content is not None:
+ params['apiAnonymous'] = content
+
+ params['name'] = name
+ params['partition'] = partition
+
+ self.cparams = camel_dict_to_snake_dict(params)
+ if 'api_anonymous' in self.cparams:
+ self.cparams['content'] = self.cparams.pop('api_anonymous')
+ if self.params['src']:
+ self.cparams['src'] = self.params['src']
+
+ if check_mode:
+ return True
+
+ if module == 'ltm':
+ d = self.api.tm.ltm.rules.rule
+ d.create(**params)
+ elif module == 'gtm':
+ d = self.api.tm.gtm.rules.rule
+ d.create(**params)
+
+ if not self.exists():
+ raise F5ModuleError("Failed to create the iRule")
+ return True
+
+ def absent(self):
+ changed = False
+
+ if self.exists():
+ changed = self.delete()
+
+ return changed
+
+
+def main():
+ argument_spec = f5_argument_spec()
+
+ meta_args = dict(
+ content=dict(required=False, default=None),
+ src=dict(required=False, default=None),
+ name=dict(required=True),
+ module=dict(required=True, choices=MODULES)
+ )
+ argument_spec.update(meta_args)
+
+ module = AnsibleModule(
+ argument_spec=argument_spec,
+ supports_check_mode=True,
+ mutually_exclusive=[
+ ['content', 'src']
+ ]
+ )
+
+ try:
+ obj = BigIpiRule(check_mode=module.check_mode, **module.params)
+ result = obj.flush()
+
+ module.exit_json(**result)
+ except F5ModuleError as e:
+ module.fail_json(msg=str(e))
+
+from ansible.module_utils.basic import *
+from ansible.module_utils.ec2 import camel_dict_to_snake_dict
+from ansible.module_utils.f5 import *
+
+if __name__ == '__main__':
+ main()
diff --git a/lib/ansible/modules/extras/network/f5/bigip_monitor_http.py b/lib/ansible/modules/extras/network/f5/bigip_monitor_http.py
new file mode 100644
index 0000000000..3c303c3ce5
--- /dev/null
+++ b/lib/ansible/modules/extras/network/f5/bigip_monitor_http.py
@@ -0,0 +1,443 @@
+#!/usr/bin/python
+# -*- coding: utf-8 -*-
+#
+# (c) 2013, serge van Ginderachter <serge@vanginderachter.be>
+# based on Matt Hite's bigip_pool module
+# (c) 2013, Matt Hite <mhite@hotmail.com>
+#
+# This file is part of Ansible
+#
+# Ansible is free software: you can redistribute it and/or modify
+# it under the terms of the GNU General Public License as published by
+# the Free Software Foundation, either version 3 of the License, or
+# (at your option) any later version.
+#
+# Ansible is distributed in the hope that it will be useful,
+# but WITHOUT ANY WARRANTY; without even the implied warranty of
+# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+# GNU General Public License for more details.
+#
+# You should have received a copy of the GNU General Public License
+# along with Ansible. If not, see <http://www.gnu.org/licenses/>.
+
+DOCUMENTATION = '''
+---
+module: bigip_monitor_http
+short_description: "Manages F5 BIG-IP LTM http monitors"
+description:
+ - Manages F5 BIG-IP LTM monitors via iControl SOAP API
+version_added: "1.4"
+author:
+ - Serge van Ginderachter (@srvg)
+ - Tim Rupp (@caphrim007)
+notes:
+ - "Requires BIG-IP software version >= 11"
+ - "F5 developed module 'bigsuds' required (see http://devcentral.f5.com)"
+ - "Best run as a local_action in your playbook"
+ - "Monitor API documentation: https://devcentral.f5.com/wiki/iControl.LocalLB__Monitor.ashx"
+requirements:
+ - bigsuds
+options:
+ state:
+ description:
+ - Monitor state
+ required: false
+ default: 'present'
+ choices:
+ - present
+ - absent
+ name:
+ description:
+ - Monitor name
+ required: true
+ default: null
+ aliases:
+ - monitor
+ partition:
+ description:
+ - Partition for the monitor
+ required: false
+ default: 'Common'
+ parent:
+ description:
+ - The parent template of this monitor template
+ required: false
+ default: 'http'
+ parent_partition:
+ description:
+ - Partition for the parent monitor
+ required: false
+ default: 'Common'
+ send:
+ description:
+ - The send string for the monitor call
+ required: false
+ default: none
+ receive:
+ description:
+ - The receive string for the monitor call
+ required: false
+ default: none
+ receive_disable:
+ description:
+ - The receive disable string for the monitor call
+ required: false
+ default: none
+ ip:
+ description:
+ - IP address part of the ipport definition. The default API setting
+ is "0.0.0.0".
+ required: false
+ default: none
+ port:
+ description:
+ - Port address part of the ip/port definition. The default API
+ setting is 0.
+ required: false
+ default: none
+ interval:
+ description:
+ - The interval specifying how frequently the monitor instance
+ of this template will run. By default, this interval is used for up and
+ down states. The default API setting is 5.
+ required: false
+ default: none
+ timeout:
+ description:
+ - The number of seconds in which the node or service must respond to
+ the monitor request. If the target responds within the set time
+ period, it is considered up; otherwise it is considered down. Any
+ value may be used, but a common guideline is three times the
+ interval plus one second (for the default interval of 5 this gives
+ 3 * 5 + 1 = 16). The default API setting is 16.
+ required: false
+ default: none
+ time_until_up:
+ description:
+ - Specifies the amount of time in seconds after the first successful
+ response before a node will be marked up. A value of 0 will cause a
+ node to be marked up immediately after a valid response is received
+ from the node. The default API setting is 0.
+ required: false
+ default: none
+extends_documentation_fragment: f5
+'''
+
+EXAMPLES = '''
+- name: BIGIP F5 | Create HTTP Monitor
+ bigip_monitor_http:
+ state: "present"
+ server: "lb.mydomain.com"
+ user: "admin"
+ password: "secret"
+ name: "my_http_monitor"
+ send: "http string to send"
+ receive: "http string to receive"
+ delegate_to: localhost
+
+- name: BIGIP F5 | Remove HTTP Monitor
+ bigip_monitor_http:
+ state: "absent"
+ server: "lb.mydomain.com"
+ user: "admin"
+ password: "secret"
+ name: "my_http_monitor"
+ delegate_to: localhost
+'''
+
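+# iControl identifiers for this monitor family: the template type constant and its default parent template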
+TEMPLATE_TYPE = 'TTYPE_HTTP'
+DEFAULT_PARENT_TYPE = 'http'
+
+
+def check_monitor_exists(module, api, monitor, parent):
+ # hack to determine if monitor exists
+ result = False
+ try:
+ ttype = api.LocalLB.Monitor.get_template_type(template_names=[monitor])[0]
+ parent2 = api.LocalLB.Monitor.get_parent_template(template_names=[monitor])[0]
+ if ttype == TEMPLATE_TYPE and parent == parent2:
+ result = True
+ else:
+ module.fail_json(msg='Monitor already exists, but has a different type (%s) or parent (%s)' % (ttype, parent2))
+ except bigsuds.OperationFailed as e:
+ if "was not found" in str(e):
+ result = False
+ else:
+ # genuine exception
+ raise
+ return result
+
+
+def create_monitor(api, monitor, template_attributes):
+ try:
+ api.LocalLB.Monitor.create_template(
+ templates=[{
+ 'template_name': monitor,
+ 'template_type': TEMPLATE_TYPE
+ }],
+ template_attributes=[template_attributes]
+ )
+ except bigsuds.OperationFailed as e:
+ if "already exists" in str(e):
+ return False
+ else:
+ # genuine exception
+ raise
+ return True
+
+
+def delete_monitor(api, monitor):
+ try:
+ api.LocalLB.Monitor.delete_template(template_names=[monitor])
+ except bigsuds.OperationFailed as e:
+ # maybe it was deleted since we checked
+ if "was not found" in str(e):
+ return False
+ else:
+ # genuine exception
+ raise
+ return True
+
+
+def check_string_property(api, monitor, str_property):
+ try:
+ template_prop = api.LocalLB.Monitor.get_template_string_property(
+ [monitor], [str_property['type']]
+ )[0]
+ return str_property == template_prop
+ except bigsuds.OperationFailed as e:
+ # happens in check mode if not created yet
+ if "was not found" in str(e):
+ return True
+ else:
+ # genuine exception
+ raise
+
+
+def set_string_property(api, monitor, str_property):
+ api.LocalLB.Monitor.set_template_string_property(
+ template_names=[monitor],
+ values=[str_property]
+ )
+
+
+def check_integer_property(api, monitor, int_property):
+ try:
+ template_prop = api.LocalLB.Monitor.get_template_integer_property(
+ [monitor], [int_property['type']]
+ )[0]
+ return int_property == template_prop
+ except bigsuds.OperationFailed as e:
+ # happens in check mode if not created yet
+ if "was not found" in str(e):
+ return True
+ else:
+ # genuine exception
+ raise
+
+
+def set_integer_property(api, monitor, int_property):
+ api.LocalLB.Monitor.set_template_integer_property(
+ template_names=[monitor],
+ values=[int_property]
+ )
+
+
+def update_monitor_properties(api, module, monitor, template_string_properties, template_integer_properties):
+ changed = False
+ for str_property in template_string_properties:
+ if str_property['value'] is not None and not check_string_property(api, monitor, str_property):
+ if not module.check_mode:
+ set_string_property(api, monitor, str_property)
+ changed = True
+ for int_property in template_integer_properties:
+ if int_property['value'] is not None and not check_integer_property(api, monitor, int_property):
+ if not module.check_mode:
+ set_integer_property(api, monitor, int_property)
+ changed = True
+
+ return changed
+
+
+def get_ipport(api, monitor):
+ return api.LocalLB.Monitor.get_template_destination(template_names=[monitor])[0]
+
+
+def set_ipport(api, monitor, ipport):
+ try:
+ api.LocalLB.Monitor.set_template_destination(
+ template_names=[monitor], destinations=[ipport]
+ )
+ return True, ""
+ except bigsuds.OperationFailed as e:
+ if "Cannot modify the address type of monitor" in str(e):
+ return False, "Cannot modify the address type of monitor if already assigned to a pool."
+ else:
+ # genuine exception
+ raise
+
+
+def main():
+ argument_spec = f5_argument_spec()
+
+ meta_args = dict(
+ name=dict(required=True),
+ parent=dict(default=DEFAULT_PARENT_TYPE),
+ parent_partition=dict(default='Common'),
+ send=dict(required=False),
+ receive=dict(required=False),
+ receive_disable=dict(required=False),
+ ip=dict(required=False),
+ port=dict(required=False, type='int'),
+ interval=dict(required=False, type='int'),
+ timeout=dict(required=False, type='int'),
+ time_until_up=dict(required=False, type='int', default=0)
+ )
+ argument_spec.update(meta_args)
+
+ module = AnsibleModule(
+ argument_spec=argument_spec,
+ supports_check_mode=True
+ )
+
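+ # match the certificate-validation guard used by the other bigsuds-based F5 modules
+ if module.params['validate_certs']:
+ import ssl
+ if not hasattr(ssl, 'SSLContext'):
+ module.fail_json(msg='bigsuds does not support verifying certificates with python < 2.7.9. Either update python or set validate_certs=False on the task')
+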
+ server = module.params['server']
+ server_port = module.params['server_port']
+ user = module.params['user']
+ password = module.params['password']
+ state = module.params['state']
+ partition = module.params['partition']
+ validate_certs = module.params['validate_certs']
+
+ parent_partition = module.params['parent_partition']
+ name = module.params['name']
+ parent = fq_name(parent_partition, module.params['parent'])
+ monitor = fq_name(partition, name)
+ send = module.params['send']
+ receive = module.params['receive']
+ receive_disable = module.params['receive_disable']
+ ip = module.params['ip']
+ port = module.params['port']
+ interval = module.params['interval']
+ timeout = module.params['timeout']
+ time_until_up = module.params['time_until_up']
+
+ # end monitor specific stuff
+
+ api = bigip_api(server, user, password, validate_certs, port=server_port)
+ monitor_exists = check_monitor_exists(module, api, monitor, parent)
+
+ # ipport is a special setting
+ if monitor_exists:
+ cur_ipport = get_ipport(api, monitor)
+ if ip is None:
+ ip = cur_ipport['ipport']['address']
+ if port is None:
+ port = cur_ipport['ipport']['port']
+ else:
+ if interval is None:
+ interval = 5
+ if timeout is None:
+ timeout = 16
+ if ip is None:
+ ip = '0.0.0.0'
+ if port is None:
+ port = 0
+ if send is None:
+ send = ''
+ if receive is None:
+ receive = ''
+ if receive_disable is None:
+ receive_disable = ''
+
+ # define and set address type
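+ # 0.0.0.0:0 -> wildcard address and port; 0.0.0.0:N -> wildcard address with
+ # an explicit port; A.B.C.D:N -> explicit address and port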
+ if ip == '0.0.0.0' and port == 0:
+ address_type = 'ATYPE_STAR_ADDRESS_STAR_PORT'
+ elif ip == '0.0.0.0' and port != 0:
+ address_type = 'ATYPE_STAR_ADDRESS_EXPLICIT_PORT'
+ elif ip != '0.0.0.0' and port != 0:
+ address_type = 'ATYPE_EXPLICIT_ADDRESS_EXPLICIT_PORT'
+ else:
+ address_type = 'ATYPE_UNSET'
+
+ ipport = {'address_type': address_type,
+ 'ipport': {'address': ip,
+ 'port': port}}
+
+ template_attributes = {'parent_template': parent,
+ 'interval': interval,
+ 'timeout': timeout,
+ 'dest_ipport': ipport,
+ 'is_read_only': False,
+ 'is_directly_usable': True}
+
+ # monitor specific stuff
+ template_string_properties = [{'type': 'STYPE_SEND',
+ 'value': send},
+ {'type': 'STYPE_RECEIVE',
+ 'value': receive},
+ {'type': 'STYPE_RECEIVE_DRAIN',
+ 'value': receive_disable}]
+
+ template_integer_properties = [
+ {
+ 'type': 'ITYPE_INTERVAL',
+ 'value': interval
+ },
+ {
+ 'type': 'ITYPE_TIMEOUT',
+ 'value': timeout
+ },
+ {
+ 'type': 'ITYPE_TIME_UNTIL_UP',
+ 'value': time_until_up
+ }
+ ]
+
+ # main logic, monitor generic
+
+ try:
+ result = {'changed': False} # default
+
+ if state == 'absent':
+ if monitor_exists:
+ if not module.check_mode:
+ # possible race condition if same task
+ # on other node deleted it first
+ result['changed'] |= delete_monitor(api, monitor)
+ else:
+ result['changed'] |= True
+ else:
+ # check for monitor itself
+ if not monitor_exists:
+ if not module.check_mode:
+ # again, check changed status here b/c race conditions
+ # if other task already created it
+ result['changed'] |= create_monitor(api, monitor, template_attributes)
+ else:
+ result['changed'] |= True
+
+ # check for monitor parameters
+ # whether it already existed, or was just created, now update
+ # the update functions need to check for check mode but
+ # cannot update settings if it doesn't exist which happens in check mode
+ result['changed'] |= update_monitor_properties(api, module, monitor,
+ template_string_properties,
+ template_integer_properties)
+
+ # we just have to update the ipport if monitor already exists and it's different
+ if monitor_exists and cur_ipport != ipport:
+ set_ipport(api, monitor, ipport)
+ result['changed'] |= True
+ # else: monitor doesn't exist (check mode) or ipport is already ok
+ except Exception as e:
+ module.fail_json(msg="received exception: %s" % e)
+
+ module.exit_json(**result)
+
+# import module snippets
+from ansible.module_utils.basic import *
+from ansible.module_utils.f5 import *
+
+if __name__ == '__main__':
+ main()
diff --git a/lib/ansible/modules/extras/network/f5/bigip_monitor_tcp.py b/lib/ansible/modules/extras/network/f5/bigip_monitor_tcp.py
new file mode 100644
index 0000000000..45756b1ba2
--- /dev/null
+++ b/lib/ansible/modules/extras/network/f5/bigip_monitor_tcp.py
@@ -0,0 +1,485 @@
+#!/usr/bin/python
+# -*- coding: utf-8 -*-
+
+# (c) 2013, serge van Ginderachter <serge@vanginderachter.be>
+#
+# This file is part of Ansible
+#
+# Ansible is free software: you can redistribute it and/or modify
+# it under the terms of the GNU General Public License as published by
+# the Free Software Foundation, either version 3 of the License, or
+# (at your option) any later version.
+#
+# Ansible is distributed in the hope that it will be useful,
+# but WITHOUT ANY WARRANTY; without even the implied warranty of
+# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+# GNU General Public License for more details.
+#
+# You should have received a copy of the GNU General Public License
+# along with Ansible. If not, see <http://www.gnu.org/licenses/>.
+
+DOCUMENTATION = '''
+---
+module: bigip_monitor_tcp
+short_description: "Manages F5 BIG-IP LTM tcp monitors"
+description:
+ - "Manages F5 BIG-IP LTM tcp monitors via iControl SOAP API"
+version_added: "1.4"
+author:
+ - Serge van Ginderachter (@srvg)
+ - Tim Rupp (@caphrim007)
+notes:
+ - "Requires BIG-IP software version >= 11"
+ - "F5 developed module 'bigsuds' required (see http://devcentral.f5.com)"
+ - "Best run as a local_action in your playbook"
+ - "Monitor API documentation: https://devcentral.f5.com/wiki/iControl.LocalLB__Monitor.ashx"
+requirements:
+ - bigsuds
+options:
+ state:
+ description:
+ - Monitor state
+ required: false
+ default: 'present'
+ choices:
+ - present
+ - absent
+ name:
+ description:
+ - Monitor name
+ required: true
+ default: null
+ aliases:
+ - monitor
+ partition:
+ description:
+ - Partition for the monitor
+ required: false
+ default: 'Common'
+ type:
+ description:
+ - The template type of this monitor template
+ required: false
+ default: 'tcp'
+ choices:
+ - tcp
+ - tcp_echo
+ - tcp_half_open
+ parent:
+ description:
+ - The parent template of this monitor template
+ required: false
+ default: 'tcp'
+ choices:
+ - tcp
+ - tcp_echo
+ - tcp_half_open
+ parent_partition:
+ description:
+ - Partition for the parent monitor
+ required: false
+ default: 'Common'
+ send:
+ description:
+ - The send string for the monitor call. Only used for monitors of
+ type tcp.
+ required: false
+ default: none
+ receive:
+ description:
+ - The receive string for the monitor call. Only used for monitors of
+ type tcp.
+ required: false
+ default: none
+ ip:
+ description:
+ - IP address part of the ipport definition. The default API setting
+ is "0.0.0.0".
+ required: false
+ default: none
+ port:
+ description:
+ - Port address part of the ipport definition. The default API
+ setting is 0.
+ required: false
+ default: none
+ interval:
+ description:
+ - The interval specifying how frequently the monitor instance
+ of this template will run. By default, this interval is used for up and
+ down states. The default API setting is 5.
+ required: false
+ default: none
+ timeout:
+ description:
+ - The number of seconds in which the node or service must respond to
+ the monitor request. If the target responds within the set time
+ period, it is considered up; otherwise it is considered down. Any
+ value may be used, but a common guideline is three times the
+ interval plus one second (for the default interval of 5 this gives
+ 3 * 5 + 1 = 16). The default API setting is 16.
+ required: false
+ default: none
+ time_until_up:
+ description:
+ - Specifies the amount of time in seconds after the first successful
+ response before a node will be marked up. A value of 0 will cause a
+ node to be marked up immediately after a valid response is received
+ from the node. The default API setting is 0.
+ required: false
+ default: none
+extends_documentation_fragment: f5
+'''
+
+EXAMPLES = '''
+- name: Create TCP Monitor
+ bigip_monitor_tcp:
+ state: "present"
+ server: "lb.mydomain.com"
+ user: "admin"
+ password: "secret"
+ name: "my_tcp_monitor"
+ type: "tcp"
+ send: "tcp string to send"
+ receive: "tcp string to receive"
+ delegate_to: localhost
+
+- name: Create TCP half open Monitor
+ bigip_monitor_tcp:
+ state: "present"
+ server: "lb.mydomain.com"
+ user: "admin"
+ password: "secret"
+ name: "my_tcp_half_open_monitor"
+ type: "tcp_half_open"
+ delegate_to: localhost
+
+- name: Remove TCP Monitor
+ bigip_monitor_tcp:
+ state: "absent"
+ server: "lb.mydomain.com"
+ user: "admin"
+ password: "secret"
+ name: "my_tcp_monitor"
+'''
+
+TEMPLATE_TYPE = DEFAULT_TEMPLATE_TYPE = 'TTYPE_TCP'
+TEMPLATE_TYPE_CHOICES = ['tcp', 'tcp_echo', 'tcp_half_open']
+DEFAULT_PARENT = DEFAULT_TEMPLATE_TYPE_CHOICE = DEFAULT_TEMPLATE_TYPE.replace('TTYPE_', '').lower()
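+# TEMPLATE_TYPE above is only the module-level default; main() overrides it
+# once the requested tcp variant is known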
+
+
+def check_monitor_exists(module, api, monitor, parent):
+ # hack to determine if monitor exists
+ result = False
+ try:
+ ttype = api.LocalLB.Monitor.get_template_type(template_names=[monitor])[0]
+ parent2 = api.LocalLB.Monitor.get_parent_template(template_names=[monitor])[0]
+ if ttype == TEMPLATE_TYPE and parent == parent2:
+ result = True
+ else:
+ module.fail_json(msg='Monitor already exists, but has a different type (%s) or parent (%s)' % (ttype, parent2))
+ except bigsuds.OperationFailed as e:
+ if "was not found" in str(e):
+ result = False
+ else:
+ # genuine exception
+ raise
+ return result
+
+
+def create_monitor(api, monitor, template_attributes):
+ try:
+ api.LocalLB.Monitor.create_template(
+ templates=[{
+ 'template_name': monitor,
+ 'template_type': TEMPLATE_TYPE
+ }],
+ template_attributes=[template_attributes]
+ )
+ except bigsuds.OperationFailed as e:
+ if "already exists" in str(e):
+ return False
+ else:
+ # genuine exception
+ raise
+ return True
+
+
+def delete_monitor(api, monitor):
+ try:
+ api.LocalLB.Monitor.delete_template(template_names=[monitor])
+ except bigsuds.OperationFailed as e:
+ # maybe it was deleted since we checked
+ if "was not found" in str(e):
+ return False
+ else:
+ # genuine exception
+ raise
+ return True
+
+
+def check_string_property(api, monitor, str_property):
+ try:
+ template_prop = api.LocalLB.Monitor.get_template_string_property(
+ [monitor], [str_property['type']]
+ )[0]
+ return str_property == template_prop
+ except bigsuds.OperationFailed as e:
+ # happens in check mode if not created yet
+ if "was not found" in str(e):
+ return True
+ else:
+ # genuine exception
+ raise
+
+
+def set_string_property(api, monitor, str_property):
+ api.LocalLB.Monitor.set_template_string_property(
+ template_names=[monitor],
+ values=[str_property]
+ )
+
+
+def check_integer_property(api, monitor, int_property):
+ try:
+ return int_property == api.LocalLB.Monitor.get_template_integer_property(
+ [monitor], [int_property['type']]
+ )[0]
+ except bigsuds.OperationFailed as e:
+ # happens in check mode if not created yet
+ if "was not found" in str(e):
+ return True
+ else:
+ # genuine exception
+ raise
+
+
+def set_integer_property(api, monitor, int_property):
+ api.LocalLB.Monitor.set_template_integer_property(
+ template_names=[monitor],
+ values=[int_property]
+ )
+
+
+def update_monitor_properties(api, module, monitor, template_string_properties, template_integer_properties):
+ changed = False
+ for str_property in template_string_properties:
+ if str_property['value'] is not None and not check_string_property(api, monitor, str_property):
+ if not module.check_mode:
+ set_string_property(api, monitor, str_property)
+ changed = True
+
+ for int_property in template_integer_properties:
+ if int_property['value'] is not None and not check_integer_property(api, monitor, int_property):
+ if not module.check_mode:
+ set_integer_property(api, monitor, int_property)
+ changed = True
+
+ return changed
+
+
+def get_ipport(api, monitor):
+ return api.LocalLB.Monitor.get_template_destination(template_names=[monitor])[0]
+
+
+def set_ipport(api, monitor, ipport):
+ try:
+ api.LocalLB.Monitor.set_template_destination(
+ template_names=[monitor], destinations=[ipport]
+ )
+ return True, ""
+
+ except bigsuds.OperationFailed as e:
+ if "Cannot modify the address type of monitor" in str(e):
+ return False, "Cannot modify the address type of monitor if already assigned to a pool."
+ else:
+ # genuine exception
+ raise
+
+
+def main():
+ argument_spec = f5_argument_spec()
+
+ meta_args = dict(
+ name=dict(required=True),
+ type=dict(default=DEFAULT_TEMPLATE_TYPE_CHOICE, choices=TEMPLATE_TYPE_CHOICES),
+ parent=dict(default=DEFAULT_PARENT),
+ parent_partition=dict(default='Common'),
+ send=dict(required=False),
+ receive=dict(required=False),
+ ip=dict(required=False),
+ port=dict(required=False, type='int'),
+ interval=dict(required=False, type='int'),
+ timeout=dict(required=False, type='int'),
+ time_until_up=dict(required=False, type='int', default=0)
+ )
+ argument_spec.update(meta_args)
+
+ module = AnsibleModule(
+ argument_spec=argument_spec,
+ supports_check_mode=True
+ )
+
+ if module.params['validate_certs']:
+ import ssl
+ if not hasattr(ssl, 'SSLContext'):
+ module.fail_json(msg='bigsuds does not support verifying certificates with python < 2.7.9. Either update python or set validate_certs=False on the task')
+
+ server = module.params['server']
+ server_port = module.params['server_port']
+ user = module.params['user']
+ password = module.params['password']
+ state = module.params['state']
+ partition = module.params['partition']
+ validate_certs = module.params['validate_certs']
+
+ parent_partition = module.params['parent_partition']
+ name = module.params['name']
+ type = 'TTYPE_' + module.params['type'].upper()
+ parent = fq_name(parent_partition, module.params['parent'])
+ monitor = fq_name(partition, name)
+ send = module.params['send']
+ receive = module.params['receive']
+ ip = module.params['ip']
+ port = module.params['port']
+ interval = module.params['interval']
+ timeout = module.params['timeout']
+ time_until_up = module.params['time_until_up']
+
+ # tcp monitor has multiple types, so overrule
+ global TEMPLATE_TYPE
+ TEMPLATE_TYPE = type
+
+ # end monitor specific stuff
+
+ api = bigip_api(server, user, password, validate_certs, port=server_port)
+ monitor_exists = check_monitor_exists(module, api, monitor, parent)
+
+ # ipport is a special setting
+ if monitor_exists:
+ # make sure to not update current settings if not asked
+ cur_ipport = get_ipport(api, monitor)
+ if ip is None:
+ ip = cur_ipport['ipport']['address']
+ if port is None:
+ port = cur_ipport['ipport']['port']
+ else:
+ # use API defaults if not defined to create it
+ if interval is None:
+ interval = 5
+ if timeout is None:
+ timeout = 16
+ if ip is None:
+ ip = '0.0.0.0'
+ if port is None:
+ port = 0
+ if send is None:
+ send = ''
+ if receive is None:
+ receive = ''
+
+ # define and set address type
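+ # (same wildcard mapping as in bigip_monitor_http: 0.0.0.0 and 0 act as wildcards)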
+ if ip == '0.0.0.0' and port == 0:
+ address_type = 'ATYPE_STAR_ADDRESS_STAR_PORT'
+ elif ip == '0.0.0.0' and port != 0:
+ address_type = 'ATYPE_STAR_ADDRESS_EXPLICIT_PORT'
+ elif ip != '0.0.0.0' and port != 0:
+ address_type = 'ATYPE_EXPLICIT_ADDRESS_EXPLICIT_PORT'
+ else:
+ address_type = 'ATYPE_UNSET'
+
+ ipport = {
+ 'address_type': address_type,
+ 'ipport': {
+ 'address': ip,
+ 'port': port
+ }
+ }
+
+ template_attributes = {
+ 'parent_template': parent,
+ 'interval': interval,
+ 'timeout': timeout,
+ 'dest_ipport': ipport,
+ 'is_read_only': False,
+ 'is_directly_usable': True
+ }
+
+ # monitor specific stuff
+ if type == 'TTYPE_TCP':
+ template_string_properties = [
+ {
+ 'type': 'STYPE_SEND',
+ 'value': send
+ },
+ {
+ 'type': 'STYPE_RECEIVE',
+ 'value': receive
+ }
+ ]
+ else:
+ template_string_properties = []
+
+ template_integer_properties = [
+ {
+ 'type': 'ITYPE_INTERVAL',
+ 'value': interval
+ },
+ {
+ 'type': 'ITYPE_TIMEOUT',
+ 'value': timeout
+ },
+ {
+ 'type': 'ITYPE_TIME_UNTIL_UP',
+ 'value': time_until_up
+ }
+ ]
+
+ # main logic, monitor generic
+
+ try:
+ result = {'changed': False} # default
+
+ if state == 'absent':
+ if monitor_exists:
+ if not module.check_mode:
+ # possible race condition if same task
+ # on other node deleted it first
+ result['changed'] |= delete_monitor(api, monitor)
+ else:
+ result['changed'] |= True
+ else:
+ # check for monitor itself
+ if not monitor_exists:
+ if not module.check_mode:
+ # again, check changed status here b/c race conditions
+ # if other task already created it
+ result['changed'] |= create_monitor(api, monitor, template_attributes)
+ else:
+ result['changed'] |= True
+
+ # check for monitor parameters
+ # whether it already existed, or was just created, now update
+ # the update functions need to check for check mode but
+ # cannot update settings if it doesn't exist which happens in check mode
+ result['changed'] |= update_monitor_properties(api, module, monitor,
+ template_string_properties,
+ template_integer_properties)
+
+ # we just have to update the ipport if monitor already exists and it's different
+ if monitor_exists and cur_ipport != ipport:
+ set_ipport(api, monitor, ipport)
+ result['changed'] |= True
+ # else: monitor doesn't exist (check mode) or ipport is already ok
+ except Exception as e:
+ module.fail_json(msg="received exception: %s" % e)
+
+ module.exit_json(**result)
+
+# import module snippets
+from ansible.module_utils.basic import *
+from ansible.module_utils.f5 import *
+
+if __name__ == '__main__':
+ main()
diff --git a/lib/ansible/modules/extras/network/f5/bigip_node.py b/lib/ansible/modules/extras/network/f5/bigip_node.py
new file mode 100644
index 0000000000..53a0b1973f
--- /dev/null
+++ b/lib/ansible/modules/extras/network/f5/bigip_node.py
@@ -0,0 +1,463 @@
+#!/usr/bin/python
+# -*- coding: utf-8 -*-
+#
+# (c) 2013, Matt Hite <mhite@hotmail.com>
+#
+# This file is part of Ansible
+#
+# Ansible is free software: you can redistribute it and/or modify
+# it under the terms of the GNU General Public License as published by
+# the Free Software Foundation, either version 3 of the License, or
+# (at your option) any later version.
+#
+# Ansible is distributed in the hope that it will be useful,
+# but WITHOUT ANY WARRANTY; without even the implied warranty of
+# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+# GNU General Public License for more details.
+#
+# You should have received a copy of the GNU General Public License
+# along with Ansible. If not, see <http://www.gnu.org/licenses/>.
+
+DOCUMENTATION = '''
+---
+module: bigip_node
+short_description: "Manages F5 BIG-IP LTM nodes"
+description:
+ - "Manages F5 BIG-IP LTM nodes via iControl SOAP API"
+version_added: "1.4"
+author:
+ - Matt Hite (@mhite)
+ - Tim Rupp (@caphrim007)
+notes:
+ - "Requires BIG-IP software version >= 11"
+ - "F5 developed module 'bigsuds' required (see http://devcentral.f5.com)"
+ - "Best run as a local_action in your playbook"
+requirements:
+ - bigsuds
+options:
+ state:
+ description:
+ - Node state
+ required: false
+ default: present
+ choices: ['present', 'absent']
+ aliases: []
+ session_state:
+ description:
+ - Set new session availability status for node
+ version_added: "1.9"
+ required: false
+ default: null
+ choices: ['enabled', 'disabled']
+ aliases: []
+ monitor_state:
+ description:
+ - Set monitor availability status for node
+ version_added: "1.9"
+ required: false
+ default: null
+ choices: ['enabled', 'disabled']
+ aliases: []
+ partition:
+ description:
+ - Partition
+ required: false
+ default: 'Common'
+ choices: []
+ aliases: []
+ name:
+ description:
+ - "Node name"
+ required: true
+ default: null
+ choices: []
+ monitor_type:
+ description:
+ - Monitor rule type when monitors > 1
+ version_added: "2.2"
+ required: False
+ default: null
+ choices: ['and_list', 'm_of_n']
+ aliases: []
+ quorum:
+ description:
+ - Monitor quorum value when monitor_type is m_of_n
+ version_added: "2.2"
+ required: False
+ default: null
+ choices: []
+ aliases: []
+ monitors:
+ description:
+ - Monitor template name list. Always use the full path to the monitor.
+ version_added: "2.2"
+ required: False
+ default: null
+ choices: []
+ aliases: []
+ host:
+ description:
+ - "Node IP. Required when state=present and node does not exist. Error when state=absent."
+ required: true
+ default: null
+ choices: []
+ aliases: ['address', 'ip']
+ description:
+ description:
+ - "Node description."
+ required: false
+ default: null
+ choices: []
+extends_documentation_fragment: f5
+'''
+
+EXAMPLES = '''
+- name: Add node
+ bigip_node:
+ server: "lb.mydomain.com"
+ user: "admin"
+ password: "secret"
+ state: "present"
+ partition: "Common"
+ host: "10.20.30.40"
+ name: "10.20.30.40"
+
+# Note that the BIG-IP automatically names the node using the
+# IP address specified in the previous play's host parameter.
+# Future plays referencing this node no longer use the host
+# parameter but instead use the name parameter.
+# Alternatively, you could have specified a name with the
+# name parameter when state=present.
+
+- name: Add node with a single 'ping' monitor
+ bigip_node:
+ server: "lb.mydomain.com"
+ user: "admin"
+ password: "secret"
+ state: "present"
+ partition: "Common"
+ host: "10.20.30.40"
+ name: "mytestserver"
+ monitors:
+ - /Common/icmp
+ delegate_to: localhost
+
+- name: Modify node description
+ bigip_node:
+ server: "lb.mydomain.com"
+ user: "admin"
+ password: "secret"
+ state: "present"
+ partition: "Common"
+ name: "10.20.30.40"
+ description: "Our best server yet"
+ delegate_to: localhost
+
+- name: Delete node
+ bigip_node:
+ server: "lb.mydomain.com"
+ user: "admin"
+ password: "secret"
+ state: "absent"
+ partition: "Common"
+ name: "10.20.30.40"
+
+# The BIG-IP GUI doesn't map directly to the API calls for "Node ->
+# General Properties -> State". The following states map to API monitor
+# and session states.
+#
+# Enabled (all traffic allowed):
+# monitor_state=enabled, session_state=enabled
+# Disabled (only persistent or active connections allowed):
+# monitor_state=enabled, session_state=disabled
+# Forced offline (only active connections allowed):
+# monitor_state=disabled, session_state=disabled
+#
+# See https://devcentral.f5.com/questions/icontrol-equivalent-call-for-b-node-down
+
+- name: Force node offline
+ bigip_node:
+ server: "lb.mydomain.com"
+ user: "admin"
+ password: "mysecret"
+ state: "present"
+ session_state: "disabled"
+ monitor_state: "disabled"
+ partition: "Common"
+ name: "10.20.30.40"
+'''
+
+
+def node_exists(api, address):
+ # hack to determine if node exists
+ result = False
+ try:
+ api.LocalLB.NodeAddressV2.get_object_status(nodes=[address])
+ result = True
+ except bigsuds.OperationFailed as e:
+ if "was not found" in str(e):
+ result = False
+ else:
+ # genuine exception
+ raise
+ return result
+
+
+def create_node_address(api, address, name):
+ try:
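+ # limits=[0] sets the new node's connection limit to 0, i.e. unlimited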
+ api.LocalLB.NodeAddressV2.create(
+ nodes=[name],
+ addresses=[address],
+ limits=[0]
+ )
+ result = True
+ desc = ""
+ except bigsuds.OperationFailed as e:
+ if "already exists" in str(e):
+ result = False
+ desc = "referenced name or IP already in use"
+ else:
+ # genuine exception
+ raise
+ return (result, desc)
+
+
+def get_node_address(api, name):
+ return api.LocalLB.NodeAddressV2.get_address(nodes=[name])[0]
+
+
+def delete_node_address(api, address):
+ try:
+ api.LocalLB.NodeAddressV2.delete_node_address(nodes=[address])
+ result = True
+ desc = ""
+ except bigsuds.OperationFailed as e:
+ if "is referenced by a member of pool" in str(e):
+ result = False
+ desc = "node referenced by pool"
+ else:
+ # genuine exception
+ raise
+ return (result, desc)
+
+
+def set_node_description(api, name, description):
+ api.LocalLB.NodeAddressV2.set_description(nodes=[name],
+ descriptions=[description])
+
+
+def get_node_description(api, name):
+ return api.LocalLB.NodeAddressV2.get_description(nodes=[name])[0]
+
+
+def set_node_session_enabled_state(api, name, session_state):
+ session_state = "STATE_%s" % session_state.strip().upper()
+ api.LocalLB.NodeAddressV2.set_session_enabled_state(nodes=[name],
+ states=[session_state])
+
+
+def get_node_session_status(api, name):
+ result = api.LocalLB.NodeAddressV2.get_session_status(nodes=[name])[0]
+ result = result.split("SESSION_STATUS_")[-1].lower()
+ return result
+
+
+def set_node_monitor_state(api, name, monitor_state):
+ monitor_state = "STATE_%s" % monitor_state.strip().upper()
+ api.LocalLB.NodeAddressV2.set_monitor_state(nodes=[name],
+ states=[monitor_state])
+
+
+def get_node_monitor_status(api, name):
+ result = api.LocalLB.NodeAddressV2.get_monitor_status(nodes=[name])[0]
+ result = result.split("MONITOR_STATUS_")[-1].lower()
+ return result
+
+
+def get_monitors(api, name):
+ result = api.LocalLB.NodeAddressV2.get_monitor_rule(nodes=[name])[0]
+ monitor_type = result['type'].split("MONITOR_RULE_TYPE_")[-1].lower()
+ quorum = result['quorum']
+ monitor_templates = result['monitor_templates']
+ return (monitor_type, quorum, monitor_templates)
+
+
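+# a monitor rule bundles the rule type (single / and_list / m_of_n), the
+# m_of_n quorum, and the list of monitor templates it evaluates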
+def set_monitors(api, name, monitor_type, quorum, monitor_templates):
+ monitor_type = "MONITOR_RULE_TYPE_%s" % monitor_type.strip().upper()
+ monitor_rule = {'type': monitor_type, 'quorum': quorum, 'monitor_templates': monitor_templates}
+ api.LocalLB.NodeAddressV2.set_monitor_rule(nodes=[name],
+ monitor_rules=[monitor_rule])
+
+
+def main():
+ monitor_type_choices = ['and_list', 'm_of_n']
+
+ argument_spec = f5_argument_spec()
+
+ meta_args = dict(
+ session_state=dict(type='str', choices=['enabled', 'disabled']),
+ monitor_state=dict(type='str', choices=['enabled', 'disabled']),
+ name=dict(type='str', required=True),
+ host=dict(type='str', aliases=['address', 'ip']),
+ description=dict(type='str'),
+ monitor_type=dict(type='str', choices=monitor_type_choices),
+ quorum=dict(type='int'),
+ monitors=dict(type='list')
+ )
+ argument_spec.update(meta_args)
+
+ module = AnsibleModule(
+ argument_spec=argument_spec,
+ supports_check_mode=True
+ )
+
+ if module.params['validate_certs']:
+ import ssl
+ if not hasattr(ssl, 'SSLContext'):
+ module.fail_json(msg='bigsuds does not support verifying certificates with python < 2.7.9. Either update python or set validate_certs=False on the task')
+
+ server = module.params['server']
+ server_port = module.params['server_port']
+ user = module.params['user']
+ password = module.params['password']
+ state = module.params['state']
+ partition = module.params['partition']
+ validate_certs = module.params['validate_certs']
+
+ session_state = module.params['session_state']
+ monitor_state = module.params['monitor_state']
+ host = module.params['host']
+ name = module.params['name']
+ address = fq_name(partition, name)
+ description = module.params['description']
+ monitor_type = module.params['monitor_type']
+ if monitor_type:
+ monitor_type = monitor_type.lower()
+ quorum = module.params['quorum']
+ monitors = module.params['monitors']
+ if monitors:
+ monitors = [fq_name(partition, monitor) for monitor in monitors]
+
+ # sanity check user supplied values
+ if state == 'absent' and host is not None:
+ module.fail_json(msg="host parameter invalid when state=absent")
+
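+ # e.g. monitors=['/Common/icmp', '/Common/tcp'] with monitor_type='m_of_n'
+ # and quorum=1 marks the node up when at least one monitor succeeds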
+ if monitors:
+ if len(monitors) == 1:
+ # set default required values for single monitor
+ quorum = 0
+ monitor_type = 'single'
+ elif len(monitors) > 1:
+ if not monitor_type:
+ module.fail_json(msg="monitor_type required for monitors > 1")
+ if monitor_type == 'm_of_n' and not quorum:
+ module.fail_json(msg="quorum value required for monitor_type m_of_n")
+ if monitor_type != 'm_of_n':
+ quorum = 0
+ elif monitor_type:
+ # no monitors specified but monitor_type exists
+ module.fail_json(msg="monitor_type require monitors parameter")
+ elif quorum is not None:
+ # no monitors specified but quorum exists
+ module.fail_json(msg="quorum requires monitors parameter")
+
+ try:
+ api = bigip_api(server, user, password, validate_certs, port=server_port)
+ result = {'changed': False} # default
+
+ if state == 'absent':
+ if node_exists(api, address):
+ if not module.check_mode:
+ deleted, desc = delete_node_address(api, address)
+ if not deleted:
+ module.fail_json(msg="unable to delete: %s" % desc)
+ else:
+ result = {'changed': True}
+ else:
+ # check-mode return value
+ result = {'changed': True}
+
+ elif state == 'present':
+ if not node_exists(api, address):
+ if host is None:
+ module.fail_json(msg="host parameter required when "
+ "state=present and node does not exist")
+ if not module.check_mode:
+ created, desc = create_node_address(api, address=host, name=address)
+ if not created:
+ module.fail_json(msg="unable to create: %s" % desc)
+ else:
+ result = {'changed': True}
+ if session_state is not None:
+ set_node_session_enabled_state(api, address,
+ session_state)
+ result = {'changed': True}
+ if monitor_state is not None:
+ set_node_monitor_state(api, address, monitor_state)
+ result = {'changed': True}
+ if description is not None:
+ set_node_description(api, address, description)
+ result = {'changed': True}
+ if monitors:
+ set_monitors(api, address, monitor_type, quorum, monitors)
+ else:
+ # check-mode return value
+ result = {'changed': True}
+ else:
+ # node exists -- potentially modify attributes
+ if host is not None:
+ if get_node_address(api, address) != host:
+ module.fail_json(msg="Changing the node address is "
+ "not supported by the API; "
+ "delete and recreate the node.")
+ if session_state is not None:
+ session_status = get_node_session_status(api, address)
+ if session_state == 'enabled' and \
+ session_status == 'forced_disabled':
+ if not module.check_mode:
+ set_node_session_enabled_state(api, address,
+ session_state)
+ result = {'changed': True}
+ elif session_state == 'disabled' and \
+ session_status != 'forced_disabled':
+ if not module.check_mode:
+ set_node_session_enabled_state(api, address,
+ session_state)
+ result = {'changed': True}
+ if monitor_state is not None:
+ monitor_status = get_node_monitor_status(api, address)
+ if monitor_state == 'enabled' and \
+ monitor_status == 'forced_down':
+ if not module.check_mode:
+ set_node_monitor_state(api, address,
+ monitor_state)
+ result = {'changed': True}
+ elif monitor_state == 'disabled' and \
+ monitor_status != 'forced_down':
+ if not module.check_mode:
+ set_node_monitor_state(api, address,
+ monitor_state)
+ result = {'changed': True}
+ if description is not None:
+ if get_node_description(api, address) != description:
+ if not module.check_mode:
+ set_node_description(api, address, description)
+ result = {'changed': True}
+ if monitors:
+ t_monitor_type, t_quorum, t_monitor_templates = get_monitors(api, address)
+ if (t_monitor_type != monitor_type) or (t_quorum != quorum) or (set(t_monitor_templates) != set(monitors)):
+ if not module.check_mode:
+ set_monitors(api, address, monitor_type, quorum, monitors)
+ result = {'changed': True}
+ except Exception as e:
+ module.fail_json(msg="received exception: %s" % e)
+
+ module.exit_json(**result)
+
+from ansible.module_utils.basic import *
+from ansible.module_utils.f5 import *
+
+if __name__ == '__main__':
+ main()
diff --git a/lib/ansible/modules/extras/network/f5/bigip_pool.py b/lib/ansible/modules/extras/network/f5/bigip_pool.py
new file mode 100644
index 0000000000..69ee1c0750
--- /dev/null
+++ b/lib/ansible/modules/extras/network/f5/bigip_pool.py
@@ -0,0 +1,561 @@
+#!/usr/bin/python
+# -*- coding: utf-8 -*-
+
+# (c) 2013, Matt Hite <mhite@hotmail.com>
+#
+# This file is part of Ansible
+#
+# Ansible is free software: you can redistribute it and/or modify
+# it under the terms of the GNU General Public License as published by
+# the Free Software Foundation, either version 3 of the License, or
+# (at your option) any later version.
+#
+# Ansible is distributed in the hope that it will be useful,
+# but WITHOUT ANY WARRANTY; without even the implied warranty of
+# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+# GNU General Public License for more details.
+#
+# You should have received a copy of the GNU General Public License
+# along with Ansible. If not, see <http://www.gnu.org/licenses/>.
+
+DOCUMENTATION = '''
+---
+module: bigip_pool
+short_description: "Manages F5 BIG-IP LTM pools"
+description:
+ - Manages F5 BIG-IP LTM pools via iControl SOAP API
+version_added: 1.2
+author:
+ - Matt Hite (@mhite)
+ - Tim Rupp (@caphrim007)
+notes:
+ - Requires BIG-IP software version >= 11
+ - F5 developed module 'bigsuds' required (see http://devcentral.f5.com)
+ - Best run as a local_action in your playbook
+requirements:
+ - bigsuds
+options:
+ state:
+ description:
+ - Pool/pool member state
+ required: false
+ default: present
+ choices:
+ - present
+ - absent
+ aliases: []
+ name:
+ description:
+ - Pool name
+ required: true
+ default: null
+ choices: []
+ aliases:
+ - pool
+ partition:
+ description:
+ - Partition of pool/pool member
+ required: false
+ default: 'Common'
+ choices: []
+ aliases: []
+ lb_method:
+ description:
+ - Load balancing method
+ version_added: "1.3"
+ required: False
+ default: 'round_robin'
+ choices:
+ - round_robin
+ - ratio_member
+ - least_connection_member
+ - observed_member
+ - predictive_member
+ - ratio_node_address
+ - least_connection_node_address
+ - fastest_node_address
+ - observed_node_address
+ - predictive_node_address
+ - dynamic_ratio
+ - fastest_app_response
+ - least_sessions
+ - dynamic_ratio_member
+ - l3_addr
+ - weighted_least_connection_member
+ - weighted_least_connection_node_address
+ - ratio_session
+ - ratio_least_connection_member
+ - ratio_least_connection_node_address
+ aliases: []
+ monitor_type:
+ description:
+ - Monitor rule type when monitors > 1
+ version_added: "1.3"
+ required: False
+ default: null
+ choices: ['and_list', 'm_of_n']
+ aliases: []
+ quorum:
+ description:
+ - Monitor quorum value when monitor_type is m_of_n
+ version_added: "1.3"
+ required: False
+ default: null
+ choices: []
+ aliases: []
+ monitors:
+ description:
+ - Monitor template name list. Always use the full path to the monitor.
+ version_added: "1.3"
+ required: False
+ default: null
+ choices: []
+ aliases: []
+ slow_ramp_time:
+ description:
+ - Sets the ramp-up time (in seconds) to gradually ramp up the load on
+ newly added or freshly detected up pool members
+ version_added: "1.3"
+ required: False
+ default: null
+ choices: []
+ aliases: []
+ reselect_tries:
+ description:
+ - Sets the number of times the system tries to contact a pool member
+ after a passive failure
+ version_added: "2.2"
+ required: False
+ default: null
+ choices: []
+ aliases: []
+ service_down_action:
+ description:
+ - Sets the action to take when node goes down in pool
+ version_added: "1.3"
+ required: False
+ default: null
+ choices:
+ - none
+ - reset
+ - drop
+ - reselect
+ aliases: []
+ host:
+ description:
+ - "Pool member IP"
+ required: False
+ default: null
+ choices: []
+ aliases:
+ - address
+ port:
+ description:
+ - Pool member port
+ required: False
+ default: null
+ choices: []
+ aliases: []
+extends_documentation_fragment: f5
+'''
+
+EXAMPLES = '''
+- name: Create pool
+ bigip_pool:
+ server: "lb.mydomain.com"
+ user: "admin"
+ password: "secret"
+ state: "present"
+ name: "my-pool"
+ partition: "Common"
+ lb_method: "least_connection_member"
+ slow_ramp_time: 120
+ delegate_to: localhost
+
+- name: Modify load balancer method
+ bigip_pool:
+ server: "lb.mydomain.com"
+ user: "admin"
+ password: "secret"
+ state: "present"
+ name: "my-pool"
+ partition: "Common"
+ lb_method: "round_robin"
+
+- name: Add pool member
+ bigip_pool:
+ server: "lb.mydomain.com"
+ user: "admin"
+ password: "secret"
+ state: "present"
+ name: "my-pool"
+ partition: "Common"
+ host: "{{ ansible_default_ipv4["address"] }}"
+ port: 80
+
+- name: Remove pool member from pool
+ bigip_pool:
+ server: "lb.mydomain.com"
+ user: "admin"
+ password: "secret"
+ state: "absent"
+ name: "my-pool"
+ partition: "Common"
+ host: "{{ ansible_default_ipv4["address"] }}"
+ port: 80
+
+- name: Delete pool
+ bigip_pool:
+ server: "lb.mydomain.com"
+ user: "admin"
+ password: "secret"
+ state: "absent"
+ name: "my-pool"
+ partition: "Common"
+'''
+
+RETURN = '''
+'''
+
+
+def pool_exists(api, pool):
+ # hack to determine if pool exists
+ result = False
+ try:
+ api.LocalLB.Pool.get_object_status(pool_names=[pool])
+ result = True
+ except bigsuds.OperationFailed as e:
+ if "was not found" in str(e):
+ result = False
+ else:
+ # genuine exception
+ raise
+ return result
+
+
+def create_pool(api, pool, lb_method):
+ # create requires lb_method but we don't want to default
+ # to a value on subsequent runs
+ if not lb_method:
+ lb_method = 'round_robin'
+ lb_method = "LB_METHOD_%s" % lb_method.strip().upper()
+ api.LocalLB.Pool.create_v2(pool_names=[pool], lb_methods=[lb_method],
+ members=[[]])
+
+
+def remove_pool(api, pool):
+ api.LocalLB.Pool.delete_pool(pool_names=[pool])
+
+
+def get_lb_method(api, pool):
+ lb_method = api.LocalLB.Pool.get_lb_method(pool_names=[pool])[0]
+ lb_method = lb_method.strip().replace('LB_METHOD_', '').lower()
+ return lb_method
+
+
+def set_lb_method(api, pool, lb_method):
+ lb_method = "LB_METHOD_%s" % lb_method.strip().upper()
+ api.LocalLB.Pool.set_lb_method(pool_names=[pool], lb_methods=[lb_method])
+
+
+def get_monitors(api, pool):
+ result = api.LocalLB.Pool.get_monitor_association(pool_names=[pool])[0]['monitor_rule']
+ monitor_type = result['type'].split("MONITOR_RULE_TYPE_")[-1].lower()
+ quorum = result['quorum']
+ monitor_templates = result['monitor_templates']
+ return (monitor_type, quorum, monitor_templates)
+
+
+def set_monitors(api, pool, monitor_type, quorum, monitor_templates):
+ monitor_type = "MONITOR_RULE_TYPE_%s" % monitor_type.strip().upper()
+ monitor_rule = {'type': monitor_type, 'quorum': quorum, 'monitor_templates': monitor_templates}
+ monitor_association = {'pool_name': pool, 'monitor_rule': monitor_rule}
+ api.LocalLB.Pool.set_monitor_association(monitor_associations=[monitor_association])
+
+
+def get_slow_ramp_time(api, pool):
+ result = api.LocalLB.Pool.get_slow_ramp_time(pool_names=[pool])[0]
+ return result
+
+
+def set_slow_ramp_time(api, pool, seconds):
+ api.LocalLB.Pool.set_slow_ramp_time(pool_names=[pool], values=[seconds])
+
+
+def get_reselect_tries(api, pool):
+ result = api.LocalLB.Pool.get_reselect_tries(pool_names=[pool])[0]
+ return result
+
+
+def set_reselect_tries(api, pool, tries):
+ api.LocalLB.Pool.set_reselect_tries(pool_names=[pool], values=[tries])
+
+
+def get_action_on_service_down(api, pool):
+ result = api.LocalLB.Pool.get_action_on_service_down(pool_names=[pool])[0]
+ result = result.split("SERVICE_DOWN_ACTION_")[-1].lower()
+ return result
+
+
+def set_action_on_service_down(api, pool, action):
+ action = "SERVICE_DOWN_ACTION_%s" % action.strip().upper()
+ api.LocalLB.Pool.set_action_on_service_down(pool_names=[pool], actions=[action])
+
+
+def member_exists(api, pool, address, port):
+ # hack to determine if member exists
+ result = False
+ try:
+ members = [{'address': address, 'port': port}]
+ api.LocalLB.Pool.get_member_object_status(pool_names=[pool],
+ members=[members])
+ result = True
+ except bigsuds.OperationFailed as e:
+ if "was not found" in str(e):
+ result = False
+ else:
+ # genuine exception
+ raise
+ return result
+
+
+def delete_node_address(api, address):
+ result = False
+ try:
+ api.LocalLB.NodeAddressV2.delete_node_address(nodes=[address])
+ result = True
+ except bigsuds.OperationFailed as e:
+ if "is referenced by a member of pool" in str(e):
+ result = False
+ else:
+ # genuine exception
+ raise
+ return result
+
+
+def remove_pool_member(api, pool, address, port):
+ members = [{'address': address, 'port': port}]
+ api.LocalLB.Pool.remove_member_v2(pool_names=[pool], members=[members])
+
+
+def add_pool_member(api, pool, address, port):
+ members = [{'address': address, 'port': port}]
+ api.LocalLB.Pool.add_member_v2(pool_names=[pool], members=[members])
+
+
+def main():
+ lb_method_choices = ['round_robin', 'ratio_member',
+ 'least_connection_member', 'observed_member',
+ 'predictive_member', 'ratio_node_address',
+ 'least_connection_node_address',
+ 'fastest_node_address', 'observed_node_address',
+ 'predictive_node_address', 'dynamic_ratio',
+ 'fastest_app_response', 'least_sessions',
+ 'dynamic_ratio_member', 'l3_addr',
+ 'weighted_least_connection_member',
+ 'weighted_least_connection_node_address',
+ 'ratio_session', 'ratio_least_connection_member',
+ 'ratio_least_connection_node_address']
+
+ monitor_type_choices = ['and_list', 'm_of_n']
+
+ service_down_choices = ['none', 'reset', 'drop', 'reselect']
+
+ argument_spec = f5_argument_spec()
+
+ meta_args = dict(
+ name=dict(type='str', required=True, aliases=['pool']),
+ lb_method=dict(type='str', choices=lb_method_choices),
+ monitor_type=dict(type='str', choices=monitor_type_choices),
+ quorum=dict(type='int'),
+ monitors=dict(type='list'),
+ slow_ramp_time=dict(type='int'),
+ reselect_tries=dict(type='int'),
+ service_down_action=dict(type='str', choices=service_down_choices),
+ host=dict(type='str', aliases=['address']),
+ port=dict(type='int')
+ )
+ argument_spec.update(meta_args)
+
+ module = AnsibleModule(
+ argument_spec=argument_spec,
+ supports_check_mode=True
+ )
+
+ if not bigsuds_found:
+ module.fail_json(msg="the python bigsuds module is required")
+
+ if module.params['validate_certs']:
+ import ssl
+ if not hasattr(ssl, 'SSLContext'):
+ module.fail_json(msg='bigsuds does not support verifying certificates with python < 2.7.9. Either update python or set validate_certs=False on the task')
+
+ server = module.params['server']
+ server_port = module.params['server_port']
+ user = module.params['user']
+ password = module.params['password']
+ state = module.params['state']
+ partition = module.params['partition']
+ validate_certs = module.params['validate_certs']
+
+ name = module.params['name']
+ pool = fq_name(partition, name)
+ lb_method = module.params['lb_method']
+ if lb_method:
+ lb_method = lb_method.lower()
+ monitor_type = module.params['monitor_type']
+ if monitor_type:
+ monitor_type = monitor_type.lower()
+ quorum = module.params['quorum']
+ monitors = module.params['monitors']
+ if monitors:
+ monitors = [fq_name(partition, monitor) for monitor in monitors]
+ slow_ramp_time = module.params['slow_ramp_time']
+ reselect_tries = module.params['reselect_tries']
+ service_down_action = module.params['service_down_action']
+ if service_down_action:
+ service_down_action = service_down_action.lower()
+ host = module.params['host']
+ address = fq_name(partition, host)
+ port = module.params['port']
+
+ # sanity check user supplied values
+
+ if (host and port is None) or (port is not None and not host):
+ module.fail_json(msg="both host and port must be supplied")
+
+ if port is not None and (0 > port or port > 65535):
+ module.fail_json(msg="valid ports must be in range 0 - 65535")
+
+ if monitors:
+ if len(monitors) == 1:
+ # set default required values for single monitor
+ quorum = 0
+ monitor_type = 'single'
+ elif len(monitors) > 1:
+ if not monitor_type:
+ module.fail_json(msg="monitor_type required for monitors > 1")
+ if monitor_type == 'm_of_n' and not quorum:
+ module.fail_json(msg="quorum value required for monitor_type m_of_n")
+ if monitor_type != 'm_of_n':
+ quorum = 0
+ elif monitor_type:
+ # no monitors specified but monitor_type exists
+ module.fail_json(msg="monitor_type require monitors parameter")
+ elif quorum is not None:
+ # no monitors specified but quorum exists
+ module.fail_json(msg="quorum requires monitors parameter")
+
+ try:
+ api = bigip_api(server, user, password, validate_certs, port=server_port)
+ result = {'changed': False} # default
+
+ if state == 'absent':
+ if host and port is not None and pool:
+ # member removal takes precedence
+ if pool_exists(api, pool) and member_exists(api, pool, address, port):
+ if not module.check_mode:
+ remove_pool_member(api, pool, address, port)
+ deleted = delete_node_address(api, address)
+ result = {'changed': True, 'deleted': deleted}
+ else:
+ result = {'changed': True}
+ elif pool_exists(api, pool):
+ # no host/port supplied, must be pool removal
+ if not module.check_mode:
+ # hack to handle concurrent runs of module
+ # pool might be gone before we actually remove it
+ try:
+ remove_pool(api, pool)
+ result = {'changed': True}
+ except bigsuds.OperationFailed as e:
+ if "was not found" in str(e):
+ result = {'changed': False}
+ else:
+ # genuine exception
+ raise
+ else:
+ # check-mode return value
+ result = {'changed': True}
+
+ elif state == 'present':
+ update = False
+ if not pool_exists(api, pool):
+ # pool does not exist -- need to create it
+ if not module.check_mode:
+ # a bit of a hack to handle concurrent runs of this module.
+ # even though we've checked the pool doesn't exist,
+ # it may exist by the time we run create_pool().
+ # this catches the exception and does something smart
+ # about it!
+ try:
+ create_pool(api, pool, lb_method)
+ result = {'changed': True}
+ except bigsuds.OperationFailed as e:
+ if "already exists" in str(e):
+ update = True
+ else:
+ # genuine exception
+ raise
+ else:
+ if monitors:
+ set_monitors(api, pool, monitor_type, quorum, monitors)
+ if slow_ramp_time:
+ set_slow_ramp_time(api, pool, slow_ramp_time)
+ if reselect_tries:
+ set_reselect_tries(api, pool, reselect_tries)
+ if service_down_action:
+ set_action_on_service_down(api, pool, service_down_action)
+ if host and port:
+ add_pool_member(api, pool, address, port)
+ else:
+ # check-mode return value
+ result = {'changed': True}
+ else:
+ # pool exists -- potentially modify attributes
+ update = True
+
+ if update:
+ if lb_method and lb_method != get_lb_method(api, pool):
+ if not module.check_mode:
+ set_lb_method(api, pool, lb_method)
+ result = {'changed': True}
+ if monitors:
+ t_monitor_type, t_quorum, t_monitor_templates = get_monitors(api, pool)
+ if (t_monitor_type != monitor_type) or (t_quorum != quorum) or (set(t_monitor_templates) != set(monitors)):
+ if not module.check_mode:
+ set_monitors(api, pool, monitor_type, quorum, monitors)
+ result = {'changed': True}
+ if slow_ramp_time and slow_ramp_time != get_slow_ramp_time(api, pool):
+ if not module.check_mode:
+ set_slow_ramp_time(api, pool, slow_ramp_time)
+ result = {'changed': True}
+ if reselect_tries and reselect_tries != get_reselect_tries(api, pool):
+ if not module.check_mode:
+ set_reselect_tries(api, pool, reselect_tries)
+ result = {'changed': True}
+ if service_down_action and service_down_action != get_action_on_service_down(api, pool):
+ if not module.check_mode:
+ set_action_on_service_down(api, pool, service_down_action)
+ result = {'changed': True}
+ # port 0 is falsy, so test "port is not None" rather than "port" to
+ # cover wildcard (port 0) members as well
+ if (host and port is not None) and not member_exists(api, pool, address, port):
+ if not module.check_mode:
+ add_pool_member(api, pool, address, port)
+ result = {'changed': True}
+
+ except Exception as e:
+ module.fail_json(msg="received exception: %s" % e)
+
+ module.exit_json(**result)
+
+from ansible.module_utils.basic import *
+from ansible.module_utils.f5 import *
+
+if __name__ == '__main__':
+ main()
diff --git a/lib/ansible/modules/extras/network/f5/bigip_pool_member.py b/lib/ansible/modules/extras/network/f5/bigip_pool_member.py
new file mode 100644
index 0000000000..f93ac271ec
--- /dev/null
+++ b/lib/ansible/modules/extras/network/f5/bigip_pool_member.py
@@ -0,0 +1,505 @@
+#!/usr/bin/python
+# -*- coding: utf-8 -*-
+#
+# (c) 2013, Matt Hite <mhite@hotmail.com>
+#
+# This file is part of Ansible
+#
+# Ansible is free software: you can redistribute it and/or modify
+# it under the terms of the GNU General Public License as published by
+# the Free Software Foundation, either version 3 of the License, or
+# (at your option) any later version.
+#
+# Ansible is distributed in the hope that it will be useful,
+# but WITHOUT ANY WARRANTY; without even the implied warranty of
+# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+# GNU General Public License for more details.
+#
+# You should have received a copy of the GNU General Public License
+# along with Ansible. If not, see <http://www.gnu.org/licenses/>.
+
+DOCUMENTATION = '''
+---
+module: bigip_pool_member
+short_description: Manages F5 BIG-IP LTM pool members
+description:
+ - Manages F5 BIG-IP LTM pool members via iControl SOAP API
+version_added: 1.4
+author:
+ - Matt Hite (@mhite)
+ - Tim Rupp (@caphrim007)
+notes:
+ - Requires BIG-IP software version >= 11
+ - F5 developed module 'bigsuds' required (see http://devcentral.f5.com)
+ - Best run as a local_action in your playbook
+ - Supersedes bigip_pool for managing pool members
+requirements:
+ - bigsuds
+options:
+ state:
+ description:
+ - Pool member state
+ required: true
+ default: present
+ choices:
+ - present
+ - absent
+ session_state:
+ description:
+ - Set new session availability status for pool member
+ version_added: 2.0
+ required: false
+ default: null
+ choices:
+ - enabled
+ - disabled
+ monitor_state:
+ description:
+ - Set monitor availability status for pool member
+ version_added: 2.0
+ required: false
+ default: null
+ choices:
+ - enabled
+ - disabled
+ pool:
+ description:
+ - Pool name. This pool must exist.
+ required: true
+ partition:
+ description:
+ - Partition
+ required: false
+ default: 'Common'
+ host:
+ description:
+ - Pool member IP
+ required: true
+ aliases:
+ - address
+ - name
+ port:
+ description:
+ - Pool member port
+ required: true
+ connection_limit:
+ description:
+ - Pool member connection limit. Setting this to 0 disables the limit.
+ required: false
+ default: null
+ description:
+ description:
+ - Pool member description
+ required: false
+ default: null
+ rate_limit:
+ description:
+ - Pool member rate limit (connections-per-second). Setting this to 0
+ disables the limit.
+ required: false
+ default: null
+ ratio:
+ description:
+ - Pool member ratio weight. Valid values range from 1 through 100.
+ New pool members -- unless overridden with this value -- default
+ to 1.
+ required: false
+ default: null
+ preserve_node:
+ description:
+ - When state is absent and the pool member is no longer referenced
+ in other pools, the default behavior removes the unused node
+ object. Setting this to 'yes' disables this behavior.
+ required: false
+ default: 'no'
+ choices:
+ - yes
+ - no
+ version_added: 2.1
+extends_documentation_fragment: f5
+'''
+
+EXAMPLES = '''
+- name: Add pool member
+ bigip_pool_member:
+ server: "lb.mydomain.com"
+ user: "admin"
+ password: "secret"
+ state: "present"
+ pool: "my-pool"
+ partition: "Common"
+ host: "{{ ansible_default_ipv4["address"] }}"
+ port: 80
+ description: "web server"
+ connection_limit: 100
+ rate_limit: 50
+ ratio: 2
+ delegate_to: localhost
+
+- name: Modify pool member ratio and description
+ bigip_pool_member:
+ server: "lb.mydomain.com"
+ user: "admin"
+ password: "secret"
+ state: "present"
+ pool: "my-pool"
+ partition: "Common"
+ host: "{{ ansible_default_ipv4["address"] }}"
+ port: 80
+ ratio: 1
+ description: "nginx server"
+ delegate_to: localhost
+
+- name: Remove pool member from pool
+ bigip_pool_member:
+ server: "lb.mydomain.com"
+ user: "admin"
+ password: "secret"
+ state: "absent"
+ pool: "my-pool"
+ partition: "Common"
+ host: "{{ ansible_default_ipv4["address"] }}"
+ port: 80
+ delegate_to: localhost
+
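+# A sketch, not part of the original examples: remove the member but keep
+# the unused node object by setting preserve_node.
+- name: Remove pool member and preserve the node
+ bigip_pool_member:
+ server: "lb.mydomain.com"
+ user: "admin"
+ password: "secret"
+ state: "absent"
+ pool: "my-pool"
+ partition: "Common"
+ host: "{{ ansible_default_ipv4['address'] }}"
+ port: 80
+ preserve_node: yes
+ delegate_to: localhost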
+
+# The BIG-IP GUI doesn't map directly to the API calls for "Pool ->
+# Members -> State". The following states map to API monitor
+# and session states.
+#
+# Enabled (all traffic allowed):
+# monitor_state=enabled, session_state=enabled
+# Disabled (only persistent or active connections allowed):
+# monitor_state=enabled, session_state=disabled
+# Forced offline (only active connections allowed):
+# monitor_state=disabled, session_state=disabled
+#
+# See https://devcentral.f5.com/questions/icontrol-equivalent-call-for-b-node-down
+
+- name: Force pool member offline
+ bigip_pool_member:
+ server: "lb.mydomain.com"
+ user: "admin"
+ password: "secret"
+ state: "present"
+ session_state: "disabled"
+ monitor_state: "disabled"
+ pool: "my-pool"
+ partition: "Common"
+ host: "{{ ansible_default_ipv4["address"] }}"
+ port: 80
+ delegate_to: localhost
+'''
+
+
+def pool_exists(api, pool):
+ # hack to determine if pool exists
+ result = False
+ try:
+ api.LocalLB.Pool.get_object_status(pool_names=[pool])
+ result = True
+ except bigsuds.OperationFailed as e:
+ if "was not found" in str(e):
+ result = False
+ else:
+ # genuine exception
+ raise
+ return result
+
+
+def member_exists(api, pool, address, port):
+ # hack to determine if member exists
+ result = False
+ try:
+ members = [{'address': address, 'port': port}]
+ api.LocalLB.Pool.get_member_object_status(pool_names=[pool],
+ members=[members])
+ result = True
+ except bigsuds.OperationFailed as e:
+ if "was not found" in str(e):
+ result = False
+ else:
+ # genuine exception
+ raise
+ return result
+
+
+def delete_node_address(api, address):
+ result = False
+ try:
+ api.LocalLB.NodeAddressV2.delete_node_address(nodes=[address])
+ result = True
+ except bigsuds.OperationFailed as e:
+ if "is referenced by a member of pool" in str(e):
+ result = False
+ else:
+ # genuine exception
+ raise
+ return result
+
+
+def remove_pool_member(api, pool, address, port):
+ members = [{'address': address, 'port': port}]
+ api.LocalLB.Pool.remove_member_v2(
+ pool_names=[pool],
+ members=[members]
+ )
+
+
+def add_pool_member(api, pool, address, port):
+ members = [{'address': address, 'port': port}]
+ api.LocalLB.Pool.add_member_v2(
+ pool_names=[pool],
+ members=[members]
+ )
+
+
+def get_connection_limit(api, pool, address, port):
+ members = [{'address': address, 'port': port}]
+ result = api.LocalLB.Pool.get_member_connection_limit(
+ pool_names=[pool],
+ members=[members]
+ )[0][0]
+ return result
+
+
+def set_connection_limit(api, pool, address, port, limit):
+ members = [{'address': address, 'port': port}]
+ api.LocalLB.Pool.set_member_connection_limit(
+ pool_names=[pool],
+ members=[members],
+ limits=[[limit]]
+ )
+
+
+def get_description(api, pool, address, port):
+ members = [{'address': address, 'port': port}]
+ result = api.LocalLB.Pool.get_member_description(
+ pool_names=[pool],
+ members=[members]
+ )[0][0]
+ return result
+
+
+def set_description(api, pool, address, port, description):
+ members = [{'address': address, 'port': port}]
+ api.LocalLB.Pool.set_member_description(
+ pool_names=[pool],
+ members=[members],
+ descriptions=[[description]]
+ )
+
+
+def get_rate_limit(api, pool, address, port):
+ members = [{'address': address, 'port': port}]
+ result = api.LocalLB.Pool.get_member_rate_limit(
+ pool_names=[pool],
+ members=[members]
+ )[0][0]
+ return result
+
+
+def set_rate_limit(api, pool, address, port, limit):
+ members = [{'address': address, 'port': port}]
+ api.LocalLB.Pool.set_member_rate_limit(
+ pool_names=[pool],
+ members=[members],
+ limits=[[limit]]
+ )
+
+
+def get_ratio(api, pool, address, port):
+ members = [{'address': address, 'port': port}]
+ result = api.LocalLB.Pool.get_member_ratio(
+ pool_names=[pool],
+ members=[members]
+ )[0][0]
+ return result
+
+
+def set_ratio(api, pool, address, port, ratio):
+ members = [{'address': address, 'port': port}]
+ api.LocalLB.Pool.set_member_ratio(
+ pool_names=[pool],
+ members=[members],
+ ratios=[[ratio]]
+ )
+
+
+def set_member_session_enabled_state(api, pool, address, port, session_state):
+ members = [{'address': address, 'port': port}]
+ session_state = ["STATE_%s" % session_state.strip().upper()]
+ api.LocalLB.Pool.set_member_session_enabled_state(
+ pool_names=[pool],
+ members=[members],
+ session_states=[session_state]
+ )
+
+
+def get_member_session_status(api, pool, address, port):
+ members = [{'address': address, 'port': port}]
+ result = api.LocalLB.Pool.get_member_session_status(
+ pool_names=[pool],
+ members=[members]
+ )[0][0]
+ result = result.split("SESSION_STATUS_")[-1].lower()
+ return result
+
+
+def set_member_monitor_state(api, pool, address, port, monitor_state):
+ members = [{'address': address, 'port': port}]
+ monitor_state = ["STATE_%s" % monitor_state.strip().upper()]
+ api.LocalLB.Pool.set_member_monitor_state(
+ pool_names=[pool],
+ members=[members],
+ monitor_states=[monitor_state]
+ )
+
+
+def get_member_monitor_status(api, pool, address, port):
+ members = [{'address': address, 'port': port}]
+ result = api.LocalLB.Pool.get_member_monitor_status(
+ pool_names=[pool],
+ members=[members]
+ )[0][0]
+ result = result.split("MONITOR_STATUS_")[-1].lower()
+ return result
+
+
+def main():
+ argument_spec = f5_argument_spec()
+
+ meta_args = dict(
+ session_state=dict(type='str', choices=['enabled', 'disabled']),
+ monitor_state=dict(type='str', choices=['enabled', 'disabled']),
+ pool=dict(type='str', required=True),
+ host=dict(type='str', required=True, aliases=['address', 'name']),
+ port=dict(type='int', required=True),
+ connection_limit=dict(type='int'),
+ description=dict(type='str'),
+ rate_limit=dict(type='int'),
+ ratio=dict(type='int'),
+ preserve_node=dict(type='bool', default=False)
+ )
+ argument_spec.update(meta_args)
+
+ module = AnsibleModule(
+ argument_spec=argument_spec,
+ supports_check_mode=True
+ )
+
+ if module.params['validate_certs']:
+ import ssl
+ if not hasattr(ssl, 'SSLContext'):
+ module.fail_json(msg='bigsuds does not support verifying certificates with python < 2.7.9. Either update python or set validate_certs=False on the task')
+
+ server = module.params['server']
+ server_port = module.params['server_port']
+ user = module.params['user']
+ password = module.params['password']
+ state = module.params['state']
+ partition = module.params['partition']
+ validate_certs = module.params['validate_certs']
+
+ session_state = module.params['session_state']
+ monitor_state = module.params['monitor_state']
+ pool = fq_name(partition, module.params['pool'])
+ connection_limit = module.params['connection_limit']
+ description = module.params['description']
+ rate_limit = module.params['rate_limit']
+ ratio = module.params['ratio']
+ host = module.params['host']
+ address = fq_name(partition, host)
+ port = module.params['port']
+ preserve_node = module.params['preserve_node']
+
+ if (host and port is None) or (port is not None and not host):
+ module.fail_json(msg="both host and port must be supplied")
+
+ if 0 > port or port > 65535:
+ module.fail_json(msg="valid ports must be in range 0 - 65535")
+
+ try:
+ api = bigip_api(server, user, password, validate_certs, port=server_port)
+ if not pool_exists(api, pool):
+ module.fail_json(msg="pool %s does not exist" % pool)
+ result = {'changed': False} # default
+
+ if state == 'absent':
+ if member_exists(api, pool, address, port):
+ if not module.check_mode:
+ remove_pool_member(api, pool, address, port)
+ if preserve_node:
+ result = {'changed': True}
+ else:
+ deleted = delete_node_address(api, address)
+ result = {'changed': True, 'deleted': deleted}
+ else:
+ result = {'changed': True}
+
+ elif state == 'present':
+ if not member_exists(api, pool, address, port):
+ if not module.check_mode:
+ add_pool_member(api, pool, address, port)
+ if connection_limit is not None:
+ set_connection_limit(api, pool, address, port, connection_limit)
+ if description is not None:
+ set_description(api, pool, address, port, description)
+ if rate_limit is not None:
+ set_rate_limit(api, pool, address, port, rate_limit)
+ if ratio is not None:
+ set_ratio(api, pool, address, port, ratio)
+ if session_state is not None:
+ set_member_session_enabled_state(api, pool, address, port, session_state)
+ if monitor_state is not None:
+ set_member_monitor_state(api, pool, address, port, monitor_state)
+ result = {'changed': True}
+ else:
+ # pool member exists -- potentially modify attributes
+ if connection_limit is not None and connection_limit != get_connection_limit(api, pool, address, port):
+ if not module.check_mode:
+ set_connection_limit(api, pool, address, port, connection_limit)
+ result = {'changed': True}
+ if description is not None and description != get_description(api, pool, address, port):
+ if not module.check_mode:
+ set_description(api, pool, address, port, description)
+ result = {'changed': True}
+ if rate_limit is not None and rate_limit != get_rate_limit(api, pool, address, port):
+ if not module.check_mode:
+ set_rate_limit(api, pool, address, port, rate_limit)
+ result = {'changed': True}
+ if ratio is not None and ratio != get_ratio(api, pool, address, port):
+ if not module.check_mode:
+ set_ratio(api, pool, address, port, ratio)
+ result = {'changed': True}
+ if session_state is not None:
+ session_status = get_member_session_status(api, pool, address, port)
+ if session_state == 'enabled' and session_status == 'forced_disabled':
+ if not module.check_mode:
+ set_member_session_enabled_state(api, pool, address, port, session_state)
+ result = {'changed': True}
+ elif session_state == 'disabled' and session_status != 'forced_disabled':
+ if not module.check_mode:
+ set_member_session_enabled_state(api, pool, address, port, session_state)
+ result = {'changed': True}
+ if monitor_state is not None:
+ monitor_status = get_member_monitor_status(api, pool, address, port)
+ if monitor_state == 'enabled' and monitor_status == 'forced_down':
+ if not module.check_mode:
+ set_member_monitor_state(api, pool, address, port, monitor_state)
+ result = {'changed': True}
+ elif monitor_state == 'disabled' and monitor_status != 'forced_down':
+ if not module.check_mode:
+ set_member_monitor_state(api, pool, address, port, monitor_state)
+ result = {'changed': True}
+
+ except Exception as e:
+ module.fail_json(msg="received exception: %s" % e)
+
+ module.exit_json(**result)
+
+from ansible.module_utils.basic import *
+from ansible.module_utils.f5 import *
+
+if __name__ == '__main__':
+ main()
diff --git a/lib/ansible/modules/extras/network/f5/bigip_routedomain.py b/lib/ansible/modules/extras/network/f5/bigip_routedomain.py
new file mode 100644
index 0000000000..552b20231c
--- /dev/null
+++ b/lib/ansible/modules/extras/network/f5/bigip_routedomain.py
@@ -0,0 +1,523 @@
+#!/usr/bin/python
+# -*- coding: utf-8 -*-
+#
+# This file is part of Ansible
+#
+# Ansible is free software: you can redistribute it and/or modify
+# it under the terms of the GNU General Public License as published by
+# the Free Software Foundation, either version 3 of the License, or
+# (at your option) any later version.
+#
+# Ansible is distributed in the hope that it will be useful,
+# but WITHOUT ANY WARRANTY; without even the implied warranty of
+# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+# GNU General Public License for more details.
+#
+# You should have received a copy of the GNU General Public License
+# along with Ansible. If not, see <http://www.gnu.org/licenses/>.
+
+DOCUMENTATION = '''
+---
+module: bigip_routedomain
+short_description: Manage route domains on a BIG-IP
+description:
+ - Manage route domains on a BIG-IP
+version_added: "2.2"
+options:
+ bwc_policy:
+ description:
+ - The bandwidth controller for the route domain.
+ connection_limit:
+ description:
+ - The maximum number of concurrent connections allowed for the
+ route domain. Setting this to C(0) turns off connection limits.
+ description:
+ description:
+ - Specifies descriptive text that identifies the route domain.
+ flow_eviction_policy:
+ description:
+ - The eviction policy to use with this route domain. Apply an eviction
+ policy to provide customized responses to flow overflows and slow
+ flows on the route domain.
+ id:
+ description:
+ - The unique identifying integer representing the route domain.
+ required: true
+ parent:
+ description: |
+ Specifies the route domain the system searches when it cannot
+ find a route in the configured domain.
+ routing_protocol:
+ description:
+ - Dynamic routing protocols for the system to use in the route domain.
+ choices:
+ - BFD
+ - BGP
+ - IS-IS
+ - OSPFv2
+ - OSPFv3
+ - PIM
+ - RIP
+ - RIPng
+ service_policy:
+ description:
+ - Service policy to associate with the route domain.
+ state:
+ description:
+ - Whether the route domain should exist or not.
+ required: false
+ default: present
+ choices:
+ - present
+ - absent
+ strict:
+ description:
+ - Specifies whether the system enforces cross-routing restrictions
+ or not.
+ choices:
+ - enabled
+ - disabled
+ vlans:
+ description:
+ - VLANs for the system to use in the route domain
+notes:
+ - Requires the f5-sdk Python package on the host. This is as easy as
+ pip install f5-sdk.
+extends_documentation_fragment: f5
+requirements:
+ - f5-sdk
+author:
+ - Tim Rupp (@caphrim007)
+'''
+
+EXAMPLES = '''
+- name: Create a route domain
+ bigip_routedomain:
+ id: "1234"
+ password: "secret"
+ server: "lb.mydomain.com"
+ state: "present"
+ user: "admin"
+ delegate_to: localhost
+
+- name: Set VLANs on the route domain
+ bigip_routedomain:
+ id: "1234"
+ password: "secret"
+ server: "lb.mydomain.com"
+ state: "present"
+ user: "admin"
+ vlans:
+ - net1
+ - foo
+ delegate_to: localhost
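+
+# A sketch, not part of the original examples: set a description and
+# dynamic routing protocols on the route domain.
+- name: Configure route domain properties
+ bigip_routedomain:
+ id: "1234"
+ password: "secret"
+ server: "lb.mydomain.com"
+ state: "present"
+ user: "admin"
+ description: "main route domain"
+ routing_protocol:
+ - BGP
+ - OSPFv2
+ delegate_to: localhost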
+'''
+
+RETURN = '''
+id:
+ description: The ID of the route domain that was changed
+ returned: changed
+ type: int
+ sample: 2
+description:
+ description: The description of the route domain
+ returned: changed
+ type: string
+ sample: "route domain foo"
+strict:
+ description: The new strict isolation setting
+ returned: changed
+ type: string
+ sample: "enabled"
+parent:
+ description: The new parent route domain
+ returned: changed
+ type: int
+ sample: 0
+vlans:
+ description: List of new VLANs the route domain is applied to
+ returned: changed
+ type: list
+ sample: ['/Common/http-tunnel', '/Common/socks-tunnel']
+routing_protocol:
+ description: List of routing protocols applied to the route domain
+ returned: changed
+ type: list
+ sample: ['bfd', 'bgp']
+bwc_policy:
+ description: The new bandwidth controller
+ returned: changed
+ type: string
+ sample: /Common/foo
+connection_limit:
+ description: The new connection limit for the route domain
+ returned: changed
+ type: int
+ sample: 100
+flow_eviction_policy:
+ description: The new eviction policy to use with this route domain
+ returned: changed
+ type: string
+ sample: /Common/default-eviction-policy
+service_policy:
+ description: The new service policy to use with this route domain
+ returned: changed
+ type: string
+ sample: /Common/my-service-policy
+'''
+
+try:
+ from f5.bigip import ManagementRoot
+ from icontrol.session import iControlUnexpectedHTTPError
+ HAS_F5SDK = True
+except ImportError:
+ HAS_F5SDK = False
+
+PROTOCOLS = [
+ 'BFD', 'BGP', 'IS-IS', 'OSPFv2', 'OSPFv3', 'PIM', 'RIP', 'RIPng'
+]
+
+STRICTS = ['enabled', 'disabled']
+
+
+class BigIpRouteDomain(object):
+ def __init__(self, *args, **kwargs):
+ if not HAS_F5SDK:
+ raise F5ModuleError("The python f5-sdk module is required")
+
+ # The params that change in the module
+ self.cparams = dict()
+
+ kwargs['name'] = str(kwargs['id'])
+
+ # Stores the params that are sent to the module
+ self.params = kwargs
+ self.api = ManagementRoot(kwargs['server'],
+ kwargs['user'],
+ kwargs['password'],
+ port=kwargs['server_port'])
+
+ def absent(self):
+ if not self.exists():
+ return False
+
+ if self.params['check_mode']:
+ return True
+
+ rd = self.api.tm.net.route_domains.route_domain.load(
+ name=self.params['name']
+ )
+ rd.delete()
+
+ if self.exists():
+ raise F5ModuleError("Failed to delete the route domain")
+ else:
+ return True
+
+ def present(self):
+ if self.exists():
+ return self.update()
+ else:
+ if self.params['check_mode']:
+ return True
+ return self.create()
+
+ def read(self):
+ """Read information and transform it
+
+ The values that are returned by BIG-IP in the f5-sdk can have encoding
+ attached to them as well as be completely missing in some cases.
+
+ Therefore, this method will transform the data from the BIG-IP into a
+ format that is more easily consumable by the rest of the class and the
+ parameters that are supported by the module.
+ """
+ p = dict()
+ r = self.api.tm.net.route_domains.route_domain.load(
+ name=self.params['name']
+ )
+
+ p['id'] = int(r.id)
+ p['name'] = str(r.name)
+
+ if hasattr(r, 'connectionLimit'):
+ p['connection_limit'] = int(r.connectionLimit)
+ if hasattr(r, 'description'):
+ p['description'] = str(r.description)
+ if hasattr(r, 'strict'):
+ p['strict'] = str(r.strict)
+ if hasattr(r, 'parent'):
+ p['parent'] = r.parent
+ if hasattr(r, 'vlans'):
+ p['vlans'] = list(set([str(x) for x in r.vlans]))
+ if hasattr(r, 'routingProtocol'):
+ p['routing_protocol'] = list(set([str(x) for x in r.routingProtocol]))
+ if hasattr(r, 'flowEvictionPolicy'):
+ p['flow_eviction_policy'] = str(r.flowEvictionPolicy)
+ if hasattr(r, 'bwcPolicy'):
+ p['bwc_policy'] = str(r.bwcPolicy)
+ if hasattr(r, 'servicePolicy'):
+ p['service_policy'] = str(r.servicePolicy)
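+ # A typical return value, with illustrative values:
+ # {'id': 1234, 'name': '1234', 'strict': 'enabled',
+ # 'vlans': ['/Common/net1'], 'connection_limit': 0}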
+ return p
+
+ def domains(self):
+ result = []
+
+ domains = self.api.tm.net.route_domains.get_collection()
+ for domain in domains:
+ # Just checking for the addition of the partition here for
+ # different versions of BIG-IP
+ if '/' + self.params['partition'] + '/' in domain.name:
+ result.append(domain.name)
+ else:
+ full_name = '/%s/%s' % (self.params['partition'], domain.name)
+ result.append(full_name)
+ return result
+
+ def create(self):
+ params = dict()
+ params['id'] = self.params['id']
+ params['name'] = self.params['name']
+
+ partition = self.params['partition']
+ description = self.params['description']
+ strict = self.params['strict']
+ parent = self.params['parent']
+ bwc_policy = self.params['bwc_policy']
+ vlans = self.params['vlans']
+ routing_protocol = self.params['routing_protocol']
+ connection_limit = self.params['connection_limit']
+ flow_eviction_policy = self.params['flow_eviction_policy']
+ service_policy = self.params['service_policy']
+
+ if description is not None:
+ params['description'] = description
+
+ if strict is not None:
+ params['strict'] = strict
+
+ if parent is not None:
+ parent = '/%s/%s' % (partition, parent)
+ if parent in self.domains():
+ params['parent'] = parent
+ else:
+ raise F5ModuleError(
+ "The parent route domain was not found"
+ )
+
+ if bwc_policy is not None:
+ policy = '/%s/%s' % (partition, bwc_policy)
+ params['bwcPolicy'] = policy
+
+ if vlans is not None:
+ params['vlans'] = []
+ for vlan in vlans:
+ vname = '/%s/%s' % (partition, vlan)
+ params['vlans'].append(vname)
+
+ if routing_protocol is not None:
+ params['routingProtocol'] = []
+ for protocol in routing_protocol:
+ if protocol in PROTOCOLS:
+ params['routingProtocol'].append(protocol)
+ else:
+ raise F5ModuleError(
+ "routing_protocol must be one of: %s" % (PROTOCOLS)
+ )
+
+ if connection_limit is not None:
+ params['connectionLimit'] = connection_limit
+
+ if flow_eviction_policy is not None:
+ policy = '/%s/%s' % (partition, flow_eviction_policy)
+ params['flowEvictionPolicy'] = policy
+
+ if service_policy is not None:
+ policy = '/%s/%s' % (partition, service_policy)
+ params['servicePolicy'] = policy
+
+ self.api.tm.net.route_domains.route_domain.create(**params)
+ exists = self.api.tm.net.route_domains.route_domain.exists(
+ name=self.params['name']
+ )
+
+ if exists:
+ return True
+ else:
+ raise F5ModuleError(
+ "An error occurred while creating the route domain"
+ )
+
+ def update(self):
+ changed = False
+ params = dict()
+ current = self.read()
+
+ check_mode = self.params['check_mode']
+ partition = self.params['partition']
+ description = self.params['description']
+ strict = self.params['strict']
+ parent = self.params['parent']
+ bwc_policy = self.params['bwc_policy']
+ vlans = self.params['vlans']
+ routing_protocol = self.params['routing_protocol']
+ connection_limit = self.params['connection_limit']
+ flow_eviction_policy = self.params['flow_eviction_policy']
+ service_policy = self.params['service_policy']
+
+ if description is not None:
+ if 'description' in current:
+ if description != current['description']:
+ params['description'] = description
+ else:
+ params['description'] = description
+
+ if strict is not None:
+ if strict != current['strict']:
+ params['strict'] = strict
+
+ if parent is not None:
+ parent = '/%s/%s' % (partition, parent)
+ if 'parent' in current:
+ if parent != current['parent']:
+ params['parent'] = parent
+ else:
+ params['parent'] = parent
+
+ if bwc_policy is not None:
+ policy = '/%s/%s' % (partition, bwc_policy)
+ if 'bwc_policy' in current:
+ if policy != current['bwc_policy']:
+ params['bwcPolicy'] = policy
+ else:
+ params['bwcPolicy'] = policy
+
+ if vlans is not None:
+ tmp = set()
+ for vlan in vlans:
+ vname = '/%s/%s' % (partition, vlan)
+ tmp.add(vname)
+ tmp = list(tmp)
+ if 'vlans' in current:
+ if tmp != current['vlans']:
+ params['vlans'] = tmp
+ else:
+ params['vlans'] = tmp
+
+ if routing_protocol is not None:
+ tmp = set()
+ for protocol in routing_protocol:
+ if protocol in PROTOCOLS:
+ tmp.add(protocol)
+ else:
+ raise F5ModuleError(
+ "routing_protocol must be one of: %s" % (PROTOCOLS)
+ )
+ tmp = list(tmp)
+ if 'routing_protocol' in current:
+ if tmp != current['routing_protocol']:
+ params['routingProtocol'] = tmp
+ else:
+ params['routingProtocol'] = tmp
+
+ if connection_limit is not None:
+ if connection_limit != current['connection_limit']:
+ params['connectionLimit'] = connection_limit
+
+ if flow_eviction_policy is not None:
+ policy = '/%s/%s' % (partition, flow_eviction_policy)
+ if 'flow_eviction_policy' in current:
+ if policy != current['flow_eviction_policy']:
+ params['flowEvictionPolicy'] = policy
+ else:
+ params['flowEvictionPolicy'] = policy
+
+ if service_policy is not None:
+ policy = '/%s/%s' % (partition, service_policy)
+ if 'service_policy' in current:
+ if policy != current['service_policy']:
+ params['servicePolicy'] = policy
+ else:
+ params['servicePolicy'] = policy
+
+ if params:
+ changed = True
+ self.cparams = camel_dict_to_snake_dict(params)
+ if check_mode:
+ return changed
+ else:
+ return changed
+
+ try:
+ rd = self.api.tm.net.route_domains.route_domain.load(
+ name=self.params['name']
+ )
+ rd.update(**params)
+ rd.refresh()
+ except iControlUnexpectedHTTPError as e:
+ raise F5ModuleError(e)
+
+ return True
+
+ def exists(self):
+ return self.api.tm.net.route_domains.route_domain.exists(
+ name=self.params['name']
+ )
+
+ def flush(self):
+ result = dict()
+ state = self.params['state']
+
+ # present() and absent() already honor check mode internally, so no
+ # separate check-mode comparison is needed here.
+ if state == "present":
+ changed = self.present()
+ if not self.params['check_mode']:
+ current = self.read()
+ result.update(current)
+ elif state == "absent":
+ changed = self.absent()
+
+ result.update(dict(changed=changed))
+ return result
+
+
+def main():
+ argument_spec = f5_argument_spec()
+
+ meta_args = dict(
+ id=dict(required=True, type='int'),
+ description=dict(required=False, default=None),
+ strict=dict(required=False, default=None, choices=STRICTS),
+ parent=dict(required=False, type='int', default=None),
+ vlans=dict(required=False, default=None, type='list'),
+ routing_protocol=dict(required=False, default=None, type='list'),
+ bwc_policy=dict(required=False, type='str', default=None),
+ connection_limit=dict(required=False, type='int', default=None),
+ flow_eviction_policy=dict(required=False, type='str', default=None),
+ service_policy=dict(required=False, type='str', default=None)
+ )
+ argument_spec.update(meta_args)
+
+ module = AnsibleModule(
+ argument_spec=argument_spec,
+ supports_check_mode=True
+ )
+
+ try:
+ obj = BigIpRouteDomain(check_mode=module.check_mode, **module.params)
+ result = obj.flush()
+
+ module.exit_json(**result)
+ except F5ModuleError as e:
+ module.fail_json(msg=str(e))
+
+from ansible.module_utils.basic import *
+from ansible.module_utils.ec2 import camel_dict_to_snake_dict
+from ansible.module_utils.f5 import *
+
+if __name__ == '__main__':
+ main()
diff --git a/lib/ansible/modules/extras/network/f5/bigip_selfip.py b/lib/ansible/modules/extras/network/f5/bigip_selfip.py
new file mode 100644
index 0000000000..6cbf7badb5
--- /dev/null
+++ b/lib/ansible/modules/extras/network/f5/bigip_selfip.py
@@ -0,0 +1,659 @@
+#!/usr/bin/python
+# -*- coding: utf-8 -*-
+#
+# This file is part of Ansible
+#
+# Ansible is free software: you can redistribute it and/or modify
+# it under the terms of the GNU General Public License as published by
+# the Free Software Foundation, either version 3 of the License, or
+# (at your option) any later version.
+#
+# Ansible is distributed in the hope that it will be useful,
+# but WITHOUT ANY WARRANTY; without even the implied warranty of
+# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+# GNU General Public License for more details.
+#
+# You should have received a copy of the GNU General Public License
+# along with Ansible. If not, see <http://www.gnu.org/licenses/>.
+
+DOCUMENTATION = '''
+---
+module: bigip_selfip
+short_description: Manage Self-IPs on a BIG-IP system
+description:
+ - Manage Self-IPs on a BIG-IP system
+version_added: "2.2"
+options:
+ address:
+ description:
+ - The IP addresses for the new self IP. This value is ignored upon update
+ as addresses themselves cannot be changed after they are created.
+ allow_service:
+ description:
+ - Configure port lockdown for the Self IP. By default, the Self IP has a
+ "default deny" policy. This can be changed to allow TCP and UDP ports
+ as well as specific protocols. This list should contain C(protocol):C(port)
+ values.
+ name:
+ description:
+ - The self IP to create.
+ required: true
+ default: Value of C(address)
+ netmask:
+ description:
+ - The netmasks for the self IP.
+ required: true
+ state:
+ description:
+ - The state of the variable on the system. When C(present), guarantees
+ that the Self-IP exists with the provided attributes. When C(absent),
+ removes the Self-IP from the system.
+ required: false
+ default: present
+ choices:
+ - absent
+ - present
+ traffic_group:
+ description:
+ - The traffic group for the self IP addresses in an active-active,
+ redundant load balancer configuration.
+ required: false
+ vlan:
+ description:
+ - The VLAN that the new self IPs will be on.
+ required: true
+notes:
+ - Requires the f5-sdk Python package on the host. This is as easy as pip
+ install f5-sdk.
+ - Requires the netaddr Python package on the host.
+extends_documentation_fragment: f5
+requirements:
+ - netaddr
+ - f5-sdk
+author:
+ - Tim Rupp (@caphrim007)
+'''
+
+EXAMPLES = '''
+- name: Create Self IP
+ bigip_selfip:
+ address: "10.10.10.10"
+ name: "self1"
+ netmask: "255.255.255.0"
+ password: "secret"
+ server: "lb.mydomain.com"
+ user: "admin"
+ validate_certs: "no"
+ vlan: "vlan1"
+ delegate_to: localhost
+
+- name: Delete Self IP
+ bigip_selfip:
+ name: "self1"
+ password: "secret"
+ server: "lb.mydomain.com"
+ state: "absent"
+ user: "admin"
+ validate_certs: "no"
+ delegate_to: localhost
+
+- name: Allow management web UI to be accessed on this Self IP
+ bigip_selfip:
+ name: "self1"
+ password: "secret"
+ server: "lb.mydomain.com"
+ state: "absent"
+ user: "admin"
+ validate_certs: "no"
+ allow_service:
+ - "tcp:443"
+ delegate_to: localhost
+
+- name: Allow HTTPS and SSH access to this Self IP
+ bigip_selfip:
+ name: "self1"
+ password: "secret"
+ server: "lb.mydomain.com"
+ state: "absent"
+ user: "admin"
+ validate_certs: "no"
+ allow_service:
+ - "tcp:443"
+ - "tpc:22"
+ delegate_to: localhost
+
+- name: Allow all services access to this Self IP
+ bigip_selfip:
+ name: "self1"
+ password: "secret"
+ server: "lb.mydomain.com"
+ state: "absent"
+ user: "admin"
+ validate_certs: "no"
+ allow_service:
+ - all
+ delegate_to: localhost
+
+- name: Allow only GRE and IGMP protocols access to this Self IP
+ bigip_selfip:
+ name: "self1"
+ password: "secret"
+ server: "lb.mydomain.com"
+ state: "absent"
+ user: "admin"
+ validate_certs: "no"
+ allow_service:
+ - gre:0
+ - igmp:0
+ delegate_to: localhost
+
+- name: Allow all TCP, but no other protocols access to this Self IP
+ bigip_selfip:
+ name: "self1"
+ password: "secret"
+ server: "lb.mydomain.com"
+ state: "absent"
+ user: "admin"
+ validate_certs: "no"
+ allow_service:
+ - tcp:0
+ delegate_to: localhost
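+
+# A sketch, not part of the original examples: place the Self IP in a
+# non-default traffic group (the group must already exist on the device).
+- name: Create floating Self IP
+ bigip_selfip:
+ address: "10.10.10.11"
+ name: "self-float"
+ netmask: "255.255.255.0"
+ password: "secret"
+ server: "lb.mydomain.com"
+ traffic_group: "traffic-group-1"
+ user: "admin"
+ validate_certs: "no"
+ vlan: "vlan1"
+ delegate_to: localhost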
+'''
+
+RETURN = '''
+allow_service:
+ description: Services that are allowed via this Self IP
+ returned: changed
+ type: list
+ sample: ['igmp:0','tcp:22','udp:53']
+address:
+ description: The address for the Self IP
+ returned: created
+ type: string
+ sample: "192.0.2.10"
+name:
+ description: The name of the Self IP
+ returned:
+ - created
+ - changed
+ - deleted
+ type: string
+ sample: "self1"
+netmask:
+ description: The netmask of the Self IP
+ returned:
+ - changed
+ - created
+ type: string
+ sample: "255.255.255.0"
+traffic_group:
+ description: The traffic group that the Self IP is a member of
+ returned:
+ - changed
+ - created
+ type: string
+ sample: "traffic-group-local-only"
+vlan:
+ description: The VLAN set on the Self IP
+ returned:
+ - changed
+ - created
+ type: string
+ sample: "vlan1"
+'''
+
+try:
+ from f5.bigip import ManagementRoot
+ from icontrol.session import iControlUnexpectedHTTPError
+ HAS_F5SDK = True
+except ImportError:
+ HAS_F5SDK = False
+
+try:
+ from netaddr import IPNetwork, AddrFormatError
+ HAS_NETADDR = True
+except ImportError:
+ HAS_NETADDR = False
+
+FLOAT = ['enabled', 'disabled']
+DEFAULT_TG = 'traffic-group-local-only'
+ALLOWED_PROTOCOLS = ['eigrp', 'egp', 'gre', 'icmp', 'igmp', 'igp', 'ipip',
+ 'l2tp', 'ospf', 'pim', 'tcp', 'udp']
+
+
+class BigIpSelfIp(object):
+ def __init__(self, *args, **kwargs):
+ if not HAS_F5SDK:
+ raise F5ModuleError("The python f5-sdk module is required")
+
+ # The params that change in the module
+ self.cparams = dict()
+
+ # Stores the params that are sent to the module
+ self.params = kwargs
+ self.api = ManagementRoot(kwargs['server'],
+ kwargs['user'],
+ kwargs['password'],
+ port=kwargs['server_port'])
+
+ def present(self):
+ changed = False
+
+ if self.exists():
+ changed = self.update()
+ else:
+ changed = self.create()
+
+ return changed
+
+ def absent(self):
+ changed = False
+
+ if self.exists():
+ changed = self.delete()
+
+ return changed
+
+ def read(self):
+ """Read information and transform it
+
+ The values that are returned by BIG-IP in the f5-sdk can have encoding
+ attached to them as well as be completely missing in some cases.
+
+ Therefore, this method will transform the data from the BIG-IP into a
+ format that is more easily consumable by the rest of the class and the
+ parameters that are supported by the module.
+
+ :return: List of values currently stored in BIG-IP, formatted for use
+ in this class.
+ """
+ p = dict()
+ name = self.params['name']
+ partition = self.params['partition']
+ r = self.api.tm.net.selfips.selfip.load(
+ name=name,
+ partition=partition
+ )
+
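+ # The device stores the address and mask together in CIDR form; e.g.
+ # (illustrative) '10.10.10.10/24' yields address='10.10.10.10' and
+ # netmask='255.255.255.0'.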
+ if hasattr(r, 'address'):
+ ipnet = IPNetwork(r.address)
+ p['address'] = str(ipnet.ip)
+ p['netmask'] = str(ipnet.netmask)
+ if hasattr(r, 'trafficGroup'):
+ p['traffic_group'] = str(r.trafficGroup)
+ if hasattr(r, 'vlan'):
+ p['vlan'] = str(r.vlan)
+ if hasattr(r, 'allowService'):
+ if r.allowService == 'all':
+ p['allow_service'] = set(['all'])
+ else:
+ p['allow_service'] = set([str(x) for x in r.allowService])
+ else:
+ p['allow_service'] = set(['none'])
+ p['name'] = name
+ return p
+
+ def verify_services(self):
+ """Verifies that a supplied service string has correct format
+
+ The string format for port lockdown is PROTOCOL:PORT. This method
+ will verify that the provided input matches the allowed protocols
+ and the port ranges before submitting to BIG-IP.
+
+ The only allowed exceptions to this rule are the following values
+
+ * all
+ * default
+ * none
+
+ These are special cases that are handled differently in the API.
+ "all" is set as a string, "default" is set as a one item list, and
+ "none" removes the key entirely from the REST API.
+
+ :raises F5ModuleError:
+ """
+ result = []
+ for svc in self.params['allow_service']:
+ if svc in ['all', 'none', 'default']:
+ result = [svc]
+ break
+
+ tmp = svc.split(':')
+ if tmp[0] not in ALLOWED_PROTOCOLS:
+ raise F5ModuleError(
+ "The provided protocol '%s' is invalid" % (tmp[0])
+ )
+ try:
+ port = int(tmp[1])
+ except Exception:
+ raise F5ModuleError(
+ "The provided port '%s' is not a number" % (tmp[1])
+ )
+
+ if port < 0 or port > 65535:
+ raise F5ModuleError(
+ "The provided port '%s' must be between 0 and 65535"
+ % (port)
+ )
+ else:
+ result.append(svc)
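+ # Illustrative: ['tcp:443', 'udp:53'] verifies to
+ # set(['tcp:443', 'udp:53']), while ['all', 'tcp:443'] collapses
+ # to set(['all']).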
+ return set(result)
+
+ def fmt_services(self, services):
+ """Returns services formatted for consumption by f5-sdk update
+
+ The BIG-IP endpoint for services takes different values depending on
+ what you want the "allowed services" to be. It can be any of the
+ following
+
+ - a list containing "protocol:port" values
+ - the string "all"
+ - a null value, or None
+
+ This is a convenience function to massage the values the user has
+ supplied so that they are formatted in such a way that BIG-IP will
+ accept them and apply the specified policy.
+
+ :param services: The services to format. This is always a Python set
+ :return:
+ """
+ result = list(services)
+ if result[0] == 'all':
+ return 'all'
+ elif result[0] == 'none':
+ return None
+ else:
+ return list(services)
+
+ def traffic_groups(self):
+ result = []
+
+ groups = self.api.tm.cm.traffic_groups.get_collection()
+ for group in groups:
+ # Just checking for the addition of the partition here for
+ # different versions of BIG-IP
+ if '/' + self.params['partition'] + '/' in group.name:
+ result.append(group.name)
+ else:
+ full_name = '/%s/%s' % (self.params['partition'], group.name)
+ result.append(str(full_name))
+ return result
+
+ def update(self):
+ changed = False
+ svcs = []
+ params = dict()
+ current = self.read()
+
+ check_mode = self.params['check_mode']
+ address = self.params['address']
+ allow_service = self.params['allow_service']
+ name = self.params['name']
+ netmask = self.params['netmask']
+ partition = self.params['partition']
+ traffic_group = self.params['traffic_group']
+ vlan = self.params['vlan']
+
+ if address is not None and address != current['address']:
+ raise F5ModuleError(
+ 'Self IP addresses cannot be updated'
+ )
+
+ if netmask is not None:
+ # I ignore the address value here even if they provide it because
+ # you are not allowed to change it.
+ try:
+ address = IPNetwork(current['address'])
+
+ new_addr = "%s/%s" % (address.ip, netmask)
+ nipnet = IPNetwork(new_addr)
+
+ cur_addr = "%s/%s" % (current['address'], current['netmask'])
+ cipnet = IPNetwork(cur_addr)
+
+ if nipnet != cipnet:
+ address = "%s/%s" % (nipnet.ip, nipnet.prefixlen)
+ params['address'] = address
+ except AddrFormatError:
+ raise F5ModuleError(
+ 'The provided address/netmask value was invalid'
+ )
+
+ if traffic_group is not None:
+ traffic_group = "/%s/%s" % (partition, traffic_group)
+ if traffic_group not in self.traffic_groups():
+ raise F5ModuleError(
+ 'The specified traffic group was not found'
+ )
+
+ if 'traffic_group' in current:
+ if traffic_group != current['traffic_group']:
+ params['trafficGroup'] = traffic_group
+ else:
+ params['trafficGroup'] = traffic_group
+
+ if vlan is not None:
+ vlans = self.get_vlans()
+ vlan = "/%s/%s" % (partition, vlan)
+
+ if 'vlan' in current:
+ if vlan != current['vlan']:
+ params['vlan'] = vlan
+ else:
+ params['vlan'] = vlan
+
+ if vlan not in vlans:
+ raise F5ModuleError(
+ 'The specified VLAN was not found'
+ )
+
+ if allow_service is not None:
+ svcs = self.verify_services()
+ if 'allow_service' in current:
+ if svcs != current['allow_service']:
+ params['allowService'] = self.fmt_services(svcs)
+ else:
+ params['allowService'] = self.fmt_services(svcs)
+
+ if params:
+ changed = True
+ params['name'] = name
+ params['partition'] = partition
+ if check_mode:
+ return changed
+ self.cparams = camel_dict_to_snake_dict(params)
+ if svcs:
+ self.cparams['allow_service'] = list(svcs)
+ else:
+ return changed
+
+ r = self.api.tm.net.selfips.selfip.load(
+ name=name,
+ partition=partition
+ )
+ r.update(**params)
+ r.refresh()
+
+ return True
+
+ def get_vlans(self):
+ """Returns formatted list of VLANs
+
+ The VLAN values stored in BIG-IP are done so using their fully
+ qualified name which includes the partition. Therefore, "correct"
+ values according to BIG-IP look like this
+
+ /Common/vlan1
+
+ This is in contrast to the formats that most users think of VLANs
+ as being stored as
+
+ vlan1
+
+ To provide a consistent user experience without tripping up
+ BIG-IP, we need to massage the values that are provided by the
+ user so that they include the partition.
+
+ :return: List of VLANs formatted with the preceding partition
+ """
+ partition = self.params['partition']
+ vlans = self.api.tm.net.vlans.get_collection()
+ return [str("/" + partition + "/" + x.name) for x in vlans]
+
+ def create(self):
+ params = dict()
+
+ svcs = []
+ check_mode = self.params['check_mode']
+ address = self.params['address']
+ allow_service = self.params['allow_service']
+ name = self.params['name']
+ netmask = self.params['netmask']
+ partition = self.params['partition']
+ traffic_group = self.params['traffic_group']
+ vlan = self.params['vlan']
+
+ if address is None or netmask is None:
+ raise F5ModuleError(
+ 'An address and a netmask must be specified'
+ )
+
+ if vlan is None:
+ raise F5ModuleError(
+ 'A VLAN name must be specified'
+ )
+ else:
+ vlan = "/%s/%s" % (partition, vlan)
+
+ try:
+ ipin = "%s/%s" % (address, netmask)
+ ipnet = IPNetwork(ipin)
+ params['address'] = "%s/%s" % (ipnet.ip, ipnet.prefixlen)
+ except AddrFormatError:
+ raise F5ModuleError(
+ 'The provided address/netmask value was invalid'
+ )
+
+ if traffic_group is None:
+ params['trafficGroup'] = "/%s/%s" % (partition, DEFAULT_TG)
+ else:
+ traffic_group = "/%s/%s" % (partition, traffic_group)
+ if traffic_group in self.traffic_groups():
+ params['trafficGroup'] = traffic_group
+ else:
+ raise F5ModuleError(
+ 'The specified traffic group was not found'
+ )
+
+ vlans = self.get_vlans()
+ if vlan in vlans:
+ params['vlan'] = vlan
+ else:
+ raise F5ModuleError(
+ 'The specified VLAN was not found'
+ )
+
+ if allow_service is not None:
+ svcs = self.verify_services()
+ params['allowService'] = self.fmt_services(svcs)
+
+ params['name'] = name
+ params['partition'] = partition
+
+ self.cparams = camel_dict_to_snake_dict(params)
+ if svcs:
+ self.cparams['allow_service'] = list(svcs)
+
+ if check_mode:
+ return True
+
+ d = self.api.tm.net.selfips.selfip
+ d.create(**params)
+
+ if self.exists():
+ return True
+ else:
+ raise F5ModuleError("Failed to create the self IP")
+
+ def delete(self):
+ params = dict()
+ check_mode = self.params['check_mode']
+
+ params['name'] = self.params['name']
+ params['partition'] = self.params['partition']
+
+ self.cparams = camel_dict_to_snake_dict(params)
+ if check_mode:
+ return True
+
+ dc = self.api.tm.net.selfips.selfip.load(**params)
+ dc.delete()
+
+ if self.exists():
+ raise F5ModuleError("Failed to delete the self IP")
+ return True
+
+ def exists(self):
+ name = self.params['name']
+ partition = self.params['partition']
+ return self.api.tm.net.selfips.selfip.exists(
+ name=name,
+ partition=partition
+ )
+
+ def flush(self):
+ result = dict()
+ state = self.params['state']
+
+ try:
+ if state == "present":
+ changed = self.present()
+ elif state == "absent":
+ changed = self.absent()
+ except iControlUnexpectedHTTPError as e:
+ raise F5ModuleError(str(e))
+
+ result.update(**self.cparams)
+ result.update(dict(changed=changed))
+ return result
+
+
+def main():
+ argument_spec = f5_argument_spec()
+
+ meta_args = dict(
+ address=dict(required=False, default=None),
+ allow_service=dict(type='list', default=None),
+ name=dict(required=True),
+ netmask=dict(required=False, default=None),
+ traffic_group=dict(required=False, default=None),
+ vlan=dict(required=False, default=None)
+ )
+ argument_spec.update(meta_args)
+
+ module = AnsibleModule(
+ argument_spec=argument_spec,
+ supports_check_mode=True
+ )
+
+ try:
+ if not HAS_NETADDR:
+ raise F5ModuleError(
+ "The netaddr python module is required."
+ )
+
+ obj = BigIpSelfIp(check_mode=module.check_mode, **module.params)
+ result = obj.flush()
+
+ module.exit_json(**result)
+ except F5ModuleError as e:
+ module.fail_json(msg=str(e))
+
+from ansible.module_utils.basic import *
+from ansible.module_utils.ec2 import camel_dict_to_snake_dict
+from ansible.module_utils.f5 import *
+
+if __name__ == '__main__':
+ main()
diff --git a/lib/ansible/modules/extras/network/f5/bigip_ssl_certificate.py b/lib/ansible/modules/extras/network/f5/bigip_ssl_certificate.py
new file mode 100644
index 0000000000..076caba9f5
--- /dev/null
+++ b/lib/ansible/modules/extras/network/f5/bigip_ssl_certificate.py
@@ -0,0 +1,516 @@
+#!/usr/bin/python
+#
+# (c) 2016, Kevin Coming (@waffie1)
+#
+# This file is part of Ansible
+#
+# Ansible is free software: you can redistribute it and/or modify
+# it under the terms of the GNU General Public License as published by
+# the Free Software Foundation, either version 3 of the License, or
+# (at your option) any later version.
+#
+# Ansible is distributed in the hope that it will be useful,
+# but WITHOUT ANY WARRANTY; without even the implied warranty of
+# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+# GNU General Public License for more details.
+#
+# You should have received a copy of the GNU General Public License
+# along with Ansible. If not, see <http://www.gnu.org/licenses/>.
+
+DOCUMENTATION = '''
+module: bigip_ssl_certificate
+short_description: Import/Delete certificates from BIG-IP
+description:
+ - This module will import/delete SSL certificates on BIG-IP LTM.
+ Certificates can be imported from certificate and key files on the local
+ disk, in PEM format.
+version_added: 2.2
+options:
+ cert_content:
+ description:
+ - When used instead of 'cert_src', sets the contents of a certificate directly
+ to the specified value. This is used with lookup plugins or for anything
+ with formatting or templating. Either one of C(key_src),
+ C(key_content), C(cert_src) or C(cert_content) must be provided when
+ C(state) is C(present).
+ required: false
+ key_content:
+ description:
+ - When used instead of 'key_src', sets the contents of a certificate key
+ directly to the specified value. This is used with lookup plugins or for
+ anything with formatting or templating. Either one of C(key_src),
+ C(key_content), C(cert_src) or C(cert_content) must be provided when
+ C(state) is C(present).
+ required: false
+ state:
+ description:
+ - Certificate and key state. This determines if the provided certificate
+ and key is to be made C(present) on the device or C(absent).
+ required: true
+ default: present
+ choices:
+ - present
+ - absent
+ partition:
+ description:
+ - BIG-IP partition to use when adding/deleting certificate.
+ required: false
+ default: Common
+ name:
+ description:
+ - SSL Certificate Name. This is the cert/key pair name used
+ when importing a certificate/key into the F5. It also
+ determines the filenames of the objects on the LTM
+ (:Partition:name.cer_11111_1 and :Partition:name.key_11111_1).
+ required: true
+ cert_src:
+ description:
+ - This is the local filename of the certificate. Either one of C(key_src),
+ C(key_content), C(cert_src) or C(cert_content) must be provided when
+ C(state) is C(present).
+ required: false
+ key_src:
+ description:
+ - This is the local filename of the private key. Either one of C(key_src),
+ C(key_content), C(cert_src) or C(cert_content) must be provided when
+ C(state) is C(present).
+ required: false
+ passphrase:
+ description:
+ - Passphrase on certificate private key
+ required: false
+notes:
+ - Requires the f5-sdk Python package on the host. This is as easy as pip
+ install f5-sdk.
+ - Requires the netaddr Python package on the host.
+ - If you use this module, you will not be able to remove the certificates
+ and keys that it manages via the web UI. You can only remove them via
+ tmsh or these modules.
+extends_documentation_fragment: f5
+requirements:
+ - f5-sdk >= 1.3.1
+ - BigIP >= v12
+author:
+ - Kevin Coming (@waffie1)
+ - Tim Rupp (@caphrim007)
+'''
+
+EXAMPLES = '''
+- name: Import PEM Certificate from local disk
+ bigip_ssl_certificate:
+ name: "certificate-name"
+ server: "lb.mydomain.com"
+ user: "admin"
+ password: "secret"
+ state: "present"
+ cert_src: "/path/to/cert.crt"
+ key_src: "/path/to/key.key"
+ delegate_to: localhost
+
+- name: Use a file lookup to import PEM Certificate
+ bigip_ssl_certificate:
+ name: "certificate-name"
+ server: "lb.mydomain.com"
+ user: "admin"
+ password: "secret"
+ state: "present"
+ cert_content: "{{ lookup('file', '/path/to/cert.crt') }}"
+ key_content: "{{ lookup('file', '/path/to/key.key') }}"
+ delegate_to: localhost
+
+- name: "Delete Certificate"
+ bigip_ssl_certificate:
+ name: "certificate-name"
+ server: "lb.mydomain.com"
+ user: "admin"
+ password: "secret"
+ state: "absent"
+ delegate_to: localhost
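+
+# A sketch, not part of the original examples: import a certificate whose
+# private key is protected with a passphrase.
+- name: Import PEM Certificate with an encrypted key
+ bigip_ssl_certificate:
+ name: "certificate-name"
+ server: "lb.mydomain.com"
+ user: "admin"
+ password: "secret"
+ state: "present"
+ cert_src: "/path/to/cert.crt"
+ key_src: "/path/to/key.key"
+ passphrase: "{{ key_passphrase }}"
+ delegate_to: localhost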
+'''
+
+RETURN = '''
+cert_name:
+ description: >
+ The name of the SSL certificate. The C(cert_name) and
+ C(key_name) will be equal to each other.
+ returned:
+ - created
+ - changed
+ - deleted
+ type: string
+ sample: "cert1"
+key_name:
+ description: >
+ The name of the SSL certificate key. The C(key_name) and
+ C(cert_name) will be equal to each other.
+ returned:
+ - created
+ - changed
+ - deleted
+ type: string
+ sample: "key1"
+partition:
+ description: Partition in which the cert/key was created
+ returned:
+ - changed
+ - created
+ - deleted
+ type: string
+ sample: "Common"
+key_checksum:
+ description: SHA1 checksum of the key that was provided
+ returned:
+ - changed
+ - created
+ type: string
+ sample: "cf23df2207d99a74fbe169e3eba035e633b65d94"
+cert_checksum:
+ description: SHA1 checksum of the cert that was provided
+ returned:
+ - changed
+ - created
+ type: string
+ sample: "f7ff9e8b7bb2e09b70935a5d785e0cc5d9d0abf0"
+'''
+
+
+try:
+ from f5.bigip.contexts import TransactionContextManager
+ from f5.bigip import ManagementRoot
+ from icontrol.session import iControlUnexpectedHTTPError
+ HAS_F5SDK = True
+except ImportError:
+ HAS_F5SDK = False
+
+
+import hashlib
+import StringIO
+
+
+class BigIpSslCertificate(object):
+ def __init__(self, *args, **kwargs):
+ if not HAS_F5SDK:
+ raise F5ModuleError("The python f5-sdk module is required")
+
+ required_args = ['key_content', 'key_src', 'cert_content', 'cert_src']
+
+ ksource = kwargs['key_src']
+ if ksource:
+ with open(ksource) as f:
+ kwargs['key_content'] = f.read()
+
+ csource = kwargs['cert_src']
+ if csource:
+ with open(csource) as f:
+ kwargs['cert_content'] = f.read()
+
+ if kwargs['state'] == 'present':
+ if not any(kwargs[k] is not None for k in required_args):
+ raise F5ModuleError(
+ "Either 'key_content', 'key_src', 'cert_content' or "
+ "'cert_src' must be provided"
+ )
+
+ # This is the remote BIG-IP path from where it will look for certs
+ # to install.
+ self.dlpath = '/var/config/rest/downloads'
+
+ # The params that change in the module
+ self.cparams = dict()
+
+ # Stores the params that are sent to the module
+ self.params = kwargs
+ self.api = ManagementRoot(kwargs['server'],
+ kwargs['user'],
+ kwargs['password'],
+ port=kwargs['server_port'])
+
+ def exists(self):
+ cert = self.cert_exists()
+ key = self.key_exists()
+
+ if cert and key:
+ return True
+ else:
+ return False
+
+ def get_hash(self, content):
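+ # Feed the content to SHA1 in 1 KB chunks via a StringIO wrapper;
+ # e.g. (illustrative) get_hash('abc') == hashlib.sha1('abc').hexdigest().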
+ k = hashlib.sha1()
+ s = StringIO.StringIO(content)
+ while True:
+ data = s.read(1024)
+ if not data:
+ break
+ k.update(data)
+ return k.hexdigest()
+
+ def present(self):
+ current = self.read()
+ changed = False
+ do_key = False
+ do_cert = False
+ chash = None
+ khash = None
+
+ check_mode = self.params['check_mode']
+ name = self.params['name']
+ partition = self.params['partition']
+ cert_content = self.params['cert_content']
+ key_content = self.params['key_content']
+ passphrase = self.params['passphrase']
+
+ # Technically you don't need to provide us with anything in the form
+ # of content for your cert, but that's kind of illogical, so we just
+ # return saying you didn't "do" anything if you left the cert and keys
+ # empty.
+ if not cert_content and not key_content:
+ return False
+
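+ # Change detection is checksum-based: hash the supplied content and
+ # only create or update the corresponding object when the device does
+ # not already report a matching checksum.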
+ if key_content is not None:
+ if 'key_checksum' in current:
+ khash = self.get_hash(key_content)
+ if khash not in current['key_checksum']:
+ do_key = "update"
+ else:
+ do_key = "create"
+
+ if cert_content is not None:
+ if 'cert_checksum' in current:
+ chash = self.get_hash(cert_content)
+ if chash not in current['cert_checksum']:
+ do_cert = "update"
+ else:
+ do_cert = "create"
+
+ if do_cert or do_key:
+ changed = True
+ params = dict()
+ params['cert_name'] = name
+ params['key_name'] = name
+ params['partition'] = partition
+ if khash:
+ params['key_checksum'] = khash
+ if chash:
+ params['cert_checksum'] = chash
+ self.cparams = params
+
+ if check_mode:
+ return changed
+
+ if not do_cert and not do_key:
+ return False
+
+ tx = self.api.tm.transactions.transaction
+ with TransactionContextManager(tx) as api:
+ if do_cert:
+ # Upload the content of a certificate as a StringIO object
+ cstring = StringIO.StringIO(cert_content)
+ filename = "%s.crt" % (name)
+ filepath = os.path.join(self.dlpath, filename)
+ api.shared.file_transfer.uploads.upload_stringio(
+ cstring,
+ filename
+ )
+
+ if do_cert == "update":
+ # Install the certificate
+ params = {
+ 'name': name,
+ 'partition': partition
+ }
+ cert = api.tm.sys.file.ssl_certs.ssl_cert.load(**params)
+
+ # This works because, while the source path is the same,
+ # calling update causes the file to be re-read
+ cert.update()
+ changed = True
+ elif do_cert == "create":
+ # Install the certificate
+ params = {
+ 'sourcePath': "file://" + filepath,
+ 'name': name,
+ 'partition': partition
+ }
+ api.tm.sys.file.ssl_certs.ssl_cert.create(**params)
+ changed = True
+
+ if do_key:
+ # Upload the content of a certificate key as a StringIO object
+ kstring = StringIO.StringIO(key_content)
+ filename = "%s.key" % (name)
+ filepath = os.path.join(self.dlpath, filename)
+ api.shared.file_transfer.uploads.upload_stringio(
+ kstring,
+ filename
+ )
+
+ if do_key == "update":
+ # Install the key
+ params = {
+ 'name': name,
+ 'partition': partition
+ }
+ key = api.tm.sys.file.ssl_keys.ssl_key.load(**params)
+
+ params = dict()
+
+ if passphrase:
+ params['passphrase'] = passphrase
+ else:
+ params['passphrase'] = None
+
+ key.update(**params)
+ changed = True
+ elif do_key == "create":
+ # Install the key
+ params = {
+ 'sourcePath': "file://" + filepath,
+ 'name': name,
+ 'partition': partition
+ }
+ if passphrase:
+                        params['passphrase'] = passphrase
+ else:
+ params['passphrase'] = None
+
+ api.tm.sys.file.ssl_keys.ssl_key.create(**params)
+ changed = True
+ return changed
+
+ def key_exists(self):
+ return self.api.tm.sys.file.ssl_keys.ssl_key.exists(
+ name=self.params['name'],
+ partition=self.params['partition']
+ )
+
+ def cert_exists(self):
+ return self.api.tm.sys.file.ssl_certs.ssl_cert.exists(
+ name=self.params['name'],
+ partition=self.params['partition']
+ )
+
+ def read(self):
+ p = dict()
+ name = self.params['name']
+ partition = self.params['partition']
+
+ if self.key_exists():
+ key = self.api.tm.sys.file.ssl_keys.ssl_key.load(
+ name=name,
+ partition=partition
+ )
+ if hasattr(key, 'checksum'):
+ p['key_checksum'] = str(key.checksum)
+
+ if self.cert_exists():
+ cert = self.api.tm.sys.file.ssl_certs.ssl_cert.load(
+ name=name,
+ partition=partition
+ )
+ if hasattr(cert, 'checksum'):
+ p['cert_checksum'] = str(cert.checksum)
+
+ p['name'] = name
+ return p
+
+ def flush(self):
+ result = dict()
+ state = self.params['state']
+
+ try:
+ if state == "present":
+ changed = self.present()
+ elif state == "absent":
+ changed = self.absent()
+ except iControlUnexpectedHTTPError as e:
+ raise F5ModuleError(str(e))
+
+ result.update(**self.cparams)
+ result.update(dict(changed=changed))
+ return result
+
+ def absent(self):
+ changed = False
+
+ if self.exists():
+ changed = self.delete()
+
+ return changed
+
+ def delete(self):
+ changed = False
+
+ check_mode = self.params['check_mode']
+
+ delete_cert = self.cert_exists()
+ delete_key = self.key_exists()
+
+ if not delete_cert and not delete_key:
+ return changed
+
+        if check_mode:
+            params = dict()
+            params['cert_name'] = self.params['name']
+            params['key_name'] = self.params['name']
+            params['partition'] = self.params['partition']
+            self.cparams = params
+            return True
+
+ tx = self.api.tm.transactions.transaction
+ with TransactionContextManager(tx) as api:
+ if delete_cert:
+ # Delete the certificate
+ c = api.tm.sys.file.ssl_certs.ssl_cert.load(
+ name=self.params['name'],
+ partition=self.params['partition']
+ )
+ c.delete()
+ changed = True
+
+ if delete_key:
+ # Delete the certificate key
+            k = api.tm.sys.file.ssl_keys.ssl_key.load(
+ name=self.params['name'],
+ partition=self.params['partition']
+ )
+ k.delete()
+ changed = True
+ return changed
+
+
+def main():
+ argument_spec = f5_argument_spec()
+
+ meta_args = dict(
+ name=dict(type='str', required=True),
+ cert_content=dict(type='str', default=None),
+ cert_src=dict(type='path', default=None),
+ key_content=dict(type='str', default=None),
+ key_src=dict(type='path', default=None),
+ passphrase=dict(type='str', default=None, no_log=True)
+ )
+
+ argument_spec.update(meta_args)
+
+ module = AnsibleModule(
+ argument_spec=argument_spec,
+ supports_check_mode=True,
+ mutually_exclusive=[
+ ['key_content', 'key_src'],
+ ['cert_content', 'cert_src']
+ ]
+ )
+
+ try:
+ obj = BigIpSslCertificate(check_mode=module.check_mode,
+ **module.params)
+ result = obj.flush()
+ module.exit_json(**result)
+ except F5ModuleError as e:
+ module.fail_json(msg=str(e))
+
+from ansible.module_utils.basic import *
+from ansible.module_utils.f5 import *
+
+if __name__ == '__main__':
+ main()
diff --git a/lib/ansible/modules/extras/network/f5/bigip_sys_db.py b/lib/ansible/modules/extras/network/f5/bigip_sys_db.py
new file mode 100644
index 0000000000..54f5dd74fc
--- /dev/null
+++ b/lib/ansible/modules/extras/network/f5/bigip_sys_db.py
@@ -0,0 +1,221 @@
+#!/usr/bin/python
+# -*- coding: utf-8 -*-
+#
+# This file is part of Ansible
+#
+# Ansible is free software: you can redistribute it and/or modify
+# it under the terms of the GNU General Public License as published by
+# the Free Software Foundation, either version 3 of the License, or
+# (at your option) any later version.
+#
+# Ansible is distributed in the hope that it will be useful,
+# but WITHOUT ANY WARRANTY; without even the implied warranty of
+# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+# GNU General Public License for more details.
+#
+# You should have received a copy of the GNU General Public License
+# along with Ansible. If not, see <http://www.gnu.org/licenses/>.
+
+DOCUMENTATION = '''
+---
+module: bigip_sys_db
+short_description: Manage BIG-IP system database variables
+description:
+ - Manage BIG-IP system database variables
+version_added: "2.2"
+options:
+ key:
+ description:
+ - The database variable to manipulate.
+ required: true
+ state:
+ description:
+ - The state of the variable on the system. When C(present), guarantees
+        that an existing variable is set to C(value). When C(reset), sets the
+        variable back to its default value. Either C(value) or a state of
+        C(reset) is required.
+ required: false
+ default: present
+ choices:
+ - present
+ - reset
+ value:
+ description:
+      - The value to set the key to. Either C(value) or a state of C(reset)
+        is required.
+ required: false
+notes:
+ - Requires the f5-sdk Python package on the host. This is as easy as pip
+ install f5-sdk.
+ - Requires BIG-IP version 12.0.0 or greater
+extends_documentation_fragment: f5
+requirements:
+ - f5-sdk
+author:
+ - Tim Rupp (@caphrim007)
+'''
+
+EXAMPLES = '''
+- name: Set the boot.quiet DB variable on the BIG-IP
+ bigip_sys_db:
+ user: "admin"
+ password: "secret"
+ server: "lb.mydomain.com"
+ key: "boot.quiet"
+ value: "disable"
+ delegate_to: localhost
+
+- name: Disable the initial setup screen
+ bigip_sys_db:
+ user: "admin"
+ password: "secret"
+ server: "lb.mydomain.com"
+ key: "setup.run"
+ value: "false"
+ delegate_to: localhost
+
+- name: Reset the initial setup screen
+ bigip_sys_db:
+ user: "admin"
+ password: "secret"
+ server: "lb.mydomain.com"
+ key: "setup.run"
+ state: "reset"
+ delegate_to: localhost
+'''
+
+RETURN = '''
+name:
+ description: The key in the system database that was specified
+ returned: changed and success
+ type: string
+ sample: "setup.run"
+default_value:
+ description: The default value of the key
+ returned: changed and success
+ type: string
+ sample: "true"
+value:
+ description: The value that you set the key to
+ returned: changed and success
+ type: string
+ sample: "false"
+'''
+
+try:
+ from f5.bigip import ManagementRoot
+ HAS_F5SDK = True
+except ImportError:
+ HAS_F5SDK = False
+
+
+class BigIpSysDb(object):
+ def __init__(self, *args, **kwargs):
+ if not HAS_F5SDK:
+ raise F5ModuleError("The python f5-sdk module is required")
+
+ self.params = kwargs
+ self.api = ManagementRoot(kwargs['server'],
+ kwargs['user'],
+ kwargs['password'],
+ port=kwargs['server_port'])
+
+ def flush(self):
+ result = dict()
+ state = self.params['state']
+ value = self.params['value']
+
+        if state != 'reset' and not value:
+ raise F5ModuleError(
+ "When setting a key, a value must be supplied"
+ )
+
+ current = self.read()
+
+        if self.params['check_mode']:
+            if state == 'reset':
+                changed = current.value != current.defaultValue
+            else:
+                changed = current.value != value
+ else:
+ if state == "present":
+ changed = self.present()
+ elif state == "reset":
+ changed = self.reset()
+ current = self.read()
+ result.update(
+ name=current.name,
+ default_value=current.defaultValue,
+ value=current.value
+ )
+
+ result.update(dict(changed=changed))
+ return result
+
+ def read(self):
+ dbs = self.api.tm.sys.dbs.db.load(
+ name=self.params['key']
+ )
+ return dbs
+
+ def present(self):
+ current = self.read()
+
+ if current.value == self.params['value']:
+ return False
+
+ current.update(value=self.params['value'])
+ current.refresh()
+
+ if current.value != self.params['value']:
+ raise F5ModuleError(
+ "Failed to set the DB variable"
+ )
+ return True
+
+ def reset(self):
+ current = self.read()
+
+ default = current.defaultValue
+ if current.value == default:
+ return False
+
+ current.update(value=default)
+ current.refresh()
+
+ if current.value != current.defaultValue:
+ raise F5ModuleError(
+ "Failed to reset the DB variable"
+ )
+
+ return True
+
+
+def main():
+ argument_spec = f5_argument_spec()
+
+ meta_args = dict(
+ key=dict(required=True),
+ state=dict(default='present', choices=['present', 'reset']),
+ value=dict(required=False, default=None)
+ )
+ argument_spec.update(meta_args)
+
+ module = AnsibleModule(
+ argument_spec=argument_spec,
+ supports_check_mode=True
+ )
+
+ try:
+ obj = BigIpSysDb(check_mode=module.check_mode, **module.params)
+ result = obj.flush()
+
+ module.exit_json(**result)
+ except F5ModuleError as e:
+ module.fail_json(msg=str(e))
+
+from ansible.module_utils.basic import *
+from ansible.module_utils.f5 import *
+
+if __name__ == '__main__':
+ main()
diff --git a/lib/ansible/modules/extras/network/f5/bigip_virtual_server.py b/lib/ansible/modules/extras/network/f5/bigip_virtual_server.py
new file mode 100644
index 0000000000..89d25103f6
--- /dev/null
+++ b/lib/ansible/modules/extras/network/f5/bigip_virtual_server.py
@@ -0,0 +1,614 @@
+#!/usr/bin/python
+# -*- coding: utf-8 -*-
+
+# (c) 2015, Etienne Carriere <etienne.carriere@gmail.com>
+#
+# This file is part of Ansible
+#
+# Ansible is free software: you can redistribute it and/or modify
+# it under the terms of the GNU General Public License as published by
+# the Free Software Foundation, either version 3 of the License, or
+# (at your option) any later version.
+#
+# Ansible is distributed in the hope that it will be useful,
+# but WITHOUT ANY WARRANTY; without even the implied warranty of
+# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+# GNU General Public License for more details.
+#
+# You should have received a copy of the GNU General Public License
+# along with Ansible. If not, see <http://www.gnu.org/licenses/>.
+
+DOCUMENTATION = '''
+---
+module: bigip_virtual_server
+short_description: "Manages F5 BIG-IP LTM virtual servers"
+description:
+ - "Manages F5 BIG-IP LTM virtual servers via iControl SOAP API"
+version_added: "2.1"
+author:
+ - Etienne Carriere (@Etienne-Carriere)
+ - Tim Rupp (@caphrim007)
+notes:
+ - "Requires BIG-IP software version >= 11"
+ - "F5 developed module 'bigsuds' required (see http://devcentral.f5.com)"
+ - "Best run as a local_action in your playbook"
+requirements:
+ - bigsuds
+options:
+ state:
+ description:
+      - Virtual server state.
+      - C(absent), delete the VS if present.
+      - C(present) (and its synonym C(enabled)), create the VS if needed and
+        set its state to enabled.
+      - C(disabled), create the VS if needed and set its state to disabled.
+ required: false
+ default: present
+ choices:
+ - present
+ - absent
+ - enabled
+ - disabled
+ aliases: []
+ partition:
+ description:
+ - Partition
+ required: false
+ default: 'Common'
+ name:
+ description:
+ - Virtual server name
+ required: true
+ aliases:
+ - vs
+ destination:
+ description:
+      - Destination IP of the virtual server (only host is currently supported).
+        Required when C(state) is C(present) and the VS does not exist.
+    required: false
+ aliases:
+ - address
+ - ip
+ port:
+ description:
+      - Port of the virtual server. Required when C(state) is C(present) and
+        the VS does not exist.
+ required: false
+ default: None
+ all_profiles:
+ description:
+      - List of all profiles (HTTP, ClientSSL, ServerSSL, etc.) that must be
+        used by the virtual server.
+ required: false
+ default: None
+ all_rules:
+ version_added: "2.2"
+ description:
+ - List of rules to be applied in priority order
+ required: false
+ default: None
+ all_enabled_vlans:
+ version_added: "2.2"
+ description:
+ - List of vlans to be enabled
+ required: false
+ default: None
+ pool:
+ description:
+ - Default pool for the virtual server
+ required: false
+ default: None
+ snat:
+ description:
+ - Source network address policy
+ required: false
+ default: None
+ default_persistence_profile:
+ description:
+ - Default Profile which manages the session persistence
+ required: false
+ default: None
+ description:
+ description:
+ - Virtual server description
+ required: false
+ default: None
+extends_documentation_fragment: f5
+'''
+
+EXAMPLES = '''
+- name: Add virtual server
+ bigip_virtual_server:
+ server: lb.mydomain.net
+ user: admin
+ password: secret
+ state: present
+ partition: MyPartition
+ name: myvirtualserver
+ destination: "{{ ansible_default_ipv4['address'] }}"
+ port: 443
+ pool: "{{ mypool }}"
+ snat: Automap
+ description: Test Virtual Server
+ all_profiles:
+ - http
+ - clientssl
+ all_enabled_vlans:
+ - /Common/vlan2
+ delegate_to: localhost
+
+- name: Modify Port of the Virtual Server
+ bigip_virtual_server:
+ server: lb.mydomain.net
+ user: admin
+ password: secret
+ state: present
+ partition: MyPartition
+ name: myvirtualserver
+ port: 8080
+ delegate_to: localhost
+
+- name: Delete virtual server
+ bigip_virtual_server:
+ server: lb.mydomain.net
+ user: admin
+ password: secret
+ state: absent
+ partition: MyPartition
+ name: myvirtualserver
+ delegate_to: localhost
+'''
+
+RETURN = '''
+---
+deleted:
+ description: Name of a virtual server that was deleted
+ returned: changed
+ type: string
+ sample: "my-virtual-server"
+'''
+
+
+# map of state values
+STATES = {
+ 'enabled': 'STATE_ENABLED',
+ 'disabled': 'STATE_DISABLED'
+}
+
+STATUSES = {
+ 'enabled': 'SESSION_STATUS_ENABLED',
+ 'disabled': 'SESSION_STATUS_DISABLED',
+ 'offline': 'SESSION_STATUS_FORCED_DISABLED'
+}
+
+
+def vs_exists(api, vs):
+    # hack to determine if the virtual server exists
+ result = False
+ try:
+ api.LocalLB.VirtualServer.get_object_status(virtual_servers=[vs])
+ result = True
+ except bigsuds.OperationFailed as e:
+ if "was not found" in str(e):
+ result = False
+ else:
+ # genuine exception
+ raise
+ return result
+
+
+def vs_create(api, name, destination, port, pool):
+ _profiles = [[{'profile_context': 'PROFILE_CONTEXT_TYPE_ALL', 'profile_name': 'tcp'}]]
+ created = False
+    # a bit of a hack to handle concurrent runs of this module.
+    # even though we've checked the vs doesn't exist, it may exist by
+    # the time create() runs, so the "already exists" error is swallowed
+    # below and treated as success.
+ try:
+ api.LocalLB.VirtualServer.create(
+ definitions=[{'name': [name], 'address': [destination], 'port': port, 'protocol': 'PROTOCOL_TCP'}],
+ wildmasks=['255.255.255.255'],
+ resources=[{'type': 'RESOURCE_TYPE_POOL', 'default_pool_name': pool}],
+ profiles=_profiles)
+        created = True
+    except bigsuds.OperationFailed as e:
+        if "already exists" not in str(e):
+            raise Exception('Error on creating Virtual Server : %s' % e)
+    return created
+
+
+def vs_remove(api, name):
+ api.LocalLB.VirtualServer.delete_virtual_server(
+ virtual_servers=[name]
+ )
+
+
+def get_rules(api, name):
+ return api.LocalLB.VirtualServer.get_rule(
+ virtual_servers=[name]
+ )[0]
+
+
+def set_rules(api, name, rules_list):
+ updated = False
+ if rules_list is None:
+ return False
+ rules_list = list(enumerate(rules_list))
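+    # Rules are compared as (priority, rule_name) pairs so that a change in
+    # either a rule or its position registers as an update; e.g.
+    # ['ruleA', 'ruleB'] becomes [(0, 'ruleA'), (1, 'ruleB')].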
+ try:
+        # materialize as a list; it is both iterated and used in membership tests
+        current_rules = [(x['priority'], x['rule_name']) for x in get_rules(api, name)]
+ to_add_rules = []
+ for i, x in rules_list:
+ if (i, x) not in current_rules:
+ to_add_rules.append({'priority': i, 'rule_name': x})
+ to_del_rules = []
+ for i, x in current_rules:
+ if (i, x) not in rules_list:
+ to_del_rules.append({'priority': i, 'rule_name': x})
+ if len(to_del_rules) > 0:
+ api.LocalLB.VirtualServer.remove_rule(
+ virtual_servers=[name],
+ rules=[to_del_rules]
+ )
+ updated = True
+ if len(to_add_rules) > 0:
+ api.LocalLB.VirtualServer.add_rule(
+ virtual_servers=[name],
+ rules=[to_add_rules]
+ )
+ updated = True
+ return updated
+ except bigsuds.OperationFailed as e:
+ raise Exception('Error on setting rules : %s' % e)
+
+
+def get_profiles(api, name):
+ return api.LocalLB.VirtualServer.get_profile(
+ virtual_servers=[name]
+ )[0]
+
+
+def set_profiles(api, name, profiles_list):
+ updated = False
+ try:
+ if profiles_list is None:
+ return False
+        current_profiles = [x['profile_name'] for x in get_profiles(api, name)]
+ to_add_profiles = []
+ for x in profiles_list:
+ if x not in current_profiles:
+ to_add_profiles.append({'profile_context': 'PROFILE_CONTEXT_TYPE_ALL', 'profile_name': x})
+ to_del_profiles = []
+ for x in current_profiles:
+ if (x not in profiles_list) and (x != "/Common/tcp"):
+ to_del_profiles.append({'profile_context': 'PROFILE_CONTEXT_TYPE_ALL', 'profile_name': x})
+ if len(to_del_profiles) > 0:
+ api.LocalLB.VirtualServer.remove_profile(
+ virtual_servers=[name],
+ profiles=[to_del_profiles]
+ )
+ updated = True
+ if len(to_add_profiles) > 0:
+ api.LocalLB.VirtualServer.add_profile(
+ virtual_servers=[name],
+ profiles=[to_add_profiles]
+ )
+ updated = True
+ return updated
+    except bigsuds.OperationFailed as e:
+        raise Exception('Error on setting profiles : %s' % e)
+
+
+def set_enabled_vlans(api, name, vlans_enabled_list):
+ updated = False
+ try:
+ if vlans_enabled_list is None:
+ return False
+
+        to_add_vlans = list(vlans_enabled_list)
+
+        api.LocalLB.VirtualServer.set_vlan(
+            virtual_servers=[name],
+            vlans=[{'state': 'STATE_ENABLED', 'vlans': [to_add_vlans]}]
+ )
+ updated = True
+ return updated
+ except bigsuds.OperationFailed as e:
+        raise Exception('Error on setting enabled vlans : %s' % e)
+
+
+def set_snat(api, name, snat):
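+    # 'snat' is matched against the literal strings "None" (disable source
+    # address translation) and "Automap" (enable automap); any other value
+    # leaves the current setting untouched.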
+ updated = False
+ try:
+ current_state = get_snat_type(api, name)
+ if snat is None:
+ return updated
+ elif snat == 'None' and current_state != 'SRC_TRANS_NONE':
+ api.LocalLB.VirtualServer.set_source_address_translation_none(
+ virtual_servers=[name]
+ )
+ updated = True
+ elif snat == 'Automap' and current_state != 'SRC_TRANS_AUTOMAP':
+ api.LocalLB.VirtualServer.set_source_address_translation_automap(
+ virtual_servers=[name]
+ )
+ updated = True
+ return updated
+ except bigsuds.OperationFailed as e:
+ raise Exception('Error on setting snat : %s' % e)
+
+
+def get_snat_type(api, name):
+ return api.LocalLB.VirtualServer.get_source_address_translation_type(
+ virtual_servers=[name]
+ )[0]
+
+
+def get_pool(api, name):
+ return api.LocalLB.VirtualServer.get_default_pool_name(
+ virtual_servers=[name]
+ )[0]
+
+
+def set_pool(api, name, pool):
+ updated = False
+ try:
+ current_pool = get_pool(api, name)
+ if pool is not None and (pool != current_pool):
+ api.LocalLB.VirtualServer.set_default_pool_name(
+ virtual_servers=[name],
+ default_pools=[pool]
+ )
+ updated = True
+ return updated
+ except bigsuds.OperationFailed as e:
+ raise Exception('Error on setting pool : %s' % e)
+
+
+def get_destination(api, name):
+ return api.LocalLB.VirtualServer.get_destination_v2(
+ virtual_servers=[name]
+ )[0]
+
+
+def set_destination(api, name, destination):
+ updated = False
+ try:
+ current_destination = get_destination(api, name)
+ if destination is not None and destination != current_destination['address']:
+ api.LocalLB.VirtualServer.set_destination_v2(
+ virtual_servers=[name],
+ destinations=[{'address': destination, 'port': current_destination['port']}]
+ )
+ updated = True
+ return updated
+ except bigsuds.OperationFailed as e:
+ raise Exception('Error on setting destination : %s' % e)
+
+
+def set_port(api, name, port):
+ updated = False
+ try:
+ current_destination = get_destination(api, name)
+ if port is not None and port != current_destination['port']:
+ api.LocalLB.VirtualServer.set_destination_v2(
+ virtual_servers=[name],
+ destinations=[{'address': current_destination['address'], 'port': port}]
+ )
+ updated = True
+ return updated
+ except bigsuds.OperationFailed as e:
+ raise Exception('Error on setting port : %s' % e)
+
+
+def get_state(api, name):
+ return api.LocalLB.VirtualServer.get_enabled_state(
+ virtual_servers=[name]
+ )[0]
+
+
+def set_state(api, name, state):
+ updated = False
+ try:
+ current_state = get_state(api, name)
+ # We consider that being present is equivalent to enabled
+ if state == 'present':
+ state = 'enabled'
+ if STATES[state] != current_state:
+ api.LocalLB.VirtualServer.set_enabled_state(
+ virtual_servers=[name],
+ states=[STATES[state]]
+ )
+ updated = True
+ return updated
+ except bigsuds.OperationFailed as e:
+ raise Exception('Error on setting state : %s' % e)
+
+
+def get_description(api, name):
+ return api.LocalLB.VirtualServer.get_description(
+ virtual_servers=[name]
+ )[0]
+
+
+def set_description(api, name, description):
+ updated = False
+ try:
+ current_description = get_description(api, name)
+ if description is not None and current_description != description:
+ api.LocalLB.VirtualServer.set_description(
+ virtual_servers=[name],
+ descriptions=[description]
+ )
+ updated = True
+ return updated
+ except bigsuds.OperationFailed as e:
+ raise Exception('Error on setting description : %s ' % e)
+
+
+def get_persistence_profiles(api, name):
+ return api.LocalLB.VirtualServer.get_persistence_profile(
+ virtual_servers=[name]
+ )[0]
+
+
+def set_default_persistence_profiles(api, name, persistence_profile):
+ updated = False
+ if persistence_profile is None:
+ return updated
+ try:
+ current_persistence_profiles = get_persistence_profiles(api, name)
+ default = None
+ for profile in current_persistence_profiles:
+ if profile['default_profile']:
+ default = profile['profile_name']
+ break
+ if default is not None and default != persistence_profile:
+ api.LocalLB.VirtualServer.remove_persistence_profile(
+ virtual_servers=[name],
+ profiles=[[{'profile_name': default, 'default_profile': True}]]
+ )
+ if default != persistence_profile:
+ api.LocalLB.VirtualServer.add_persistence_profile(
+ virtual_servers=[name],
+ profiles=[[{'profile_name': persistence_profile, 'default_profile': True}]]
+ )
+ updated = True
+ return updated
+ except bigsuds.OperationFailed as e:
+ raise Exception('Error on setting default persistence profile : %s' % e)
+
+
+def main():
+ argument_spec = f5_argument_spec()
+ argument_spec.update(dict(
+ state=dict(type='str', default='present',
+ choices=['present', 'absent', 'disabled', 'enabled']),
+ name=dict(type='str', required=True, aliases=['vs']),
+ destination=dict(type='str', aliases=['address', 'ip']),
+ port=dict(type='int'),
+ all_profiles=dict(type='list'),
+ all_rules=dict(type='list'),
+ all_enabled_vlans=dict(type='list'),
+ pool=dict(type='str'),
+ description=dict(type='str'),
+ snat=dict(type='str'),
+ default_persistence_profile=dict(type='str')
+ ))
+
+ module = AnsibleModule(
+ argument_spec=argument_spec,
+ supports_check_mode=True
+ )
+
+ if not bigsuds_found:
+ module.fail_json(msg="the python bigsuds module is required")
+
+ if module.params['validate_certs']:
+ import ssl
+ if not hasattr(ssl, 'SSLContext'):
+ module.fail_json(msg='bigsuds does not support verifying certificates with python < 2.7.9. Either update python or set validate_certs=False on the task')
+
+ server = module.params['server']
+ server_port = module.params['server_port']
+ user = module.params['user']
+ password = module.params['password']
+ state = module.params['state']
+ partition = module.params['partition']
+ validate_certs = module.params['validate_certs']
+
+ name = fq_name(partition, module.params['name'])
+ destination = module.params['destination']
+ port = module.params['port']
+ all_profiles = fq_list_names(partition, module.params['all_profiles'])
+ all_rules = fq_list_names(partition, module.params['all_rules'])
+ all_enabled_vlans = fq_list_names(partition, module.params['all_enabled_vlans'])
+ pool = fq_name(partition, module.params['pool'])
+ description = module.params['description']
+ snat = module.params['snat']
+ default_persistence_profile = fq_name(partition, module.params['default_persistence_profile'])
+
+    if port is not None and not 1 <= port <= 65535:
+ module.fail_json(msg="valid ports must be in range 1 - 65535")
+
+ try:
+ api = bigip_api(server, user, password, validate_certs, port=server_port)
+ result = {'changed': False} # default
+
+ if state == 'absent':
+ if not module.check_mode:
+ if vs_exists(api, name):
+ # hack to handle concurrent runs of module
+ # pool might be gone before we actually remove
+ try:
+ vs_remove(api, name)
+ result = {'changed': True, 'deleted': name}
+ except bigsuds.OperationFailed as e:
+ if "was not found" in str(e):
+ result['changed'] = False
+ else:
+ raise
+ else:
+ # check-mode return value
+ result = {'changed': True}
+
+ else:
+ update = False
+ if not vs_exists(api, name):
+ if (not destination) or (not port):
+ module.fail_json(msg="both destination and port must be supplied to create a VS")
+ if not module.check_mode:
+                # a bit of a hack to handle concurrent runs of this module:
+                # even though we've checked that the virtual server doesn't
+                # exist, it may exist by the time vs_create() runs;
+                # vs_create() swallows the "already exists" error so that
+                # case is treated as success.
+ try:
+ vs_create(api, name, destination, port, pool)
+ set_profiles(api, name, all_profiles)
+ set_enabled_vlans(api, name, all_enabled_vlans)
+ set_rules(api, name, all_rules)
+ set_snat(api, name, snat)
+ set_description(api, name, description)
+ set_default_persistence_profiles(api, name, default_persistence_profile)
+ set_state(api, name, state)
+ result = {'changed': True}
+ except bigsuds.OperationFailed as e:
+ raise Exception('Error on creating Virtual Server : %s' % e)
+ else:
+ # check-mode return value
+ result = {'changed': True}
+ else:
+ update = True
+ if update:
+ # VS exists
+ if not module.check_mode:
+ # Have a transaction for all the changes
+ try:
+ api.System.Session.start_transaction()
+ result['changed'] |= set_destination(api, name, fq_name(partition, destination))
+ result['changed'] |= set_port(api, name, port)
+ result['changed'] |= set_pool(api, name, pool)
+ result['changed'] |= set_description(api, name, description)
+ result['changed'] |= set_snat(api, name, snat)
+ result['changed'] |= set_profiles(api, name, all_profiles)
+ result['changed'] |= set_enabled_vlans(api, name, all_enabled_vlans)
+ result['changed'] |= set_rules(api, name, all_rules)
+ result['changed'] |= set_default_persistence_profiles(api, name, default_persistence_profile)
+ result['changed'] |= set_state(api, name, state)
+ api.System.Session.submit_transaction()
+ except Exception as e:
+ raise Exception("Error on updating Virtual Server : %s" % e)
+ else:
+ # check-mode return value
+ result = {'changed': True}
+
+ except Exception as e:
+ module.fail_json(msg="received exception: %s" % e)
+
+ module.exit_json(**result)
+# import module snippets
+from ansible.module_utils.basic import *
+from ansible.module_utils.f5 import *
+
+if __name__ == '__main__':
+ main()
diff --git a/lib/ansible/modules/extras/network/f5/bigip_vlan.py b/lib/ansible/modules/extras/network/f5/bigip_vlan.py
new file mode 100644
index 0000000000..4e13d2508c
--- /dev/null
+++ b/lib/ansible/modules/extras/network/f5/bigip_vlan.py
@@ -0,0 +1,445 @@
+#!/usr/bin/python
+# -*- coding: utf-8 -*-
+#
+# This file is part of Ansible
+#
+# Ansible is free software: you can redistribute it and/or modify
+# it under the terms of the GNU General Public License as published by
+# the Free Software Foundation, either version 3 of the License, or
+# (at your option) any later version.
+#
+# Ansible is distributed in the hope that it will be useful,
+# but WITHOUT ANY WARRANTY; without even the implied warranty of
+# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+# GNU General Public License for more details.
+#
+# You should have received a copy of the GNU General Public License
+# along with Ansible. If not, see <http://www.gnu.org/licenses/>.
+
+DOCUMENTATION = '''
+---
+module: bigip_vlan
+short_description: Manage VLANs on a BIG-IP system
+description:
+ - Manage VLANs on a BIG-IP system
+version_added: "2.2"
+options:
+ description:
+ description:
+ - The description to give to the VLAN.
+ tagged_interfaces:
+ description:
+ - Specifies a list of tagged interfaces and trunks that you want to
+ configure for the VLAN. Use tagged interfaces or trunks when
+ you want to assign a single interface or trunk to multiple VLANs.
+ required: false
+ aliases:
+ - tagged_interface
+ untagged_interfaces:
+ description:
+ - Specifies a list of untagged interfaces and trunks that you want to
+ configure for the VLAN.
+ required: false
+ aliases:
+ - untagged_interface
+ name:
+ description:
+      - The VLAN to manage.
+ required: true
+ state:
+ description:
+ - The state of the VLAN on the system. When C(present), guarantees
+ that the VLAN exists with the provided attributes. When C(absent),
+ removes the VLAN from the system.
+ required: false
+ default: present
+ choices:
+ - absent
+ - present
+ tag:
+ description:
+ - Tag number for the VLAN. The tag number can be any integer between 1
+ and 4094. The system automatically assigns a tag number if you do not
+ specify a value.
+notes:
+ - Requires the f5-sdk Python package on the host. This is as easy as pip
+ install f5-sdk.
+ - Requires BIG-IP versions >= 12.0.0
+extends_documentation_fragment: f5
+requirements:
+ - f5-sdk
+author:
+ - Tim Rupp (@caphrim007)
+'''
+
+EXAMPLES = '''
+- name: Create VLAN
+ bigip_vlan:
+ name: "net1"
+ password: "secret"
+ server: "lb.mydomain.com"
+ user: "admin"
+ validate_certs: "no"
+ delegate_to: localhost
+
+- name: Set VLAN tag
+ bigip_vlan:
+ name: "net1"
+ password: "secret"
+ server: "lb.mydomain.com"
+ tag: "2345"
+ user: "admin"
+ validate_certs: "no"
+ delegate_to: localhost
+
+- name: Add VLAN 2345 as tagged to interface 1.1
+ bigip_vlan:
+ tagged_interface: 1.1
+ name: "net1"
+ password: "secret"
+ server: "lb.mydomain.com"
+ tag: "2345"
+ user: "admin"
+ validate_certs: "no"
+ delegate_to: localhost
+
+- name: Add VLAN 1234 as tagged to interfaces 1.1 and 1.2
+ bigip_vlan:
+ tagged_interfaces:
+ - 1.1
+ - 1.2
+ name: "net1"
+ password: "secret"
+ server: "lb.mydomain.com"
+ tag: "1234"
+ user: "admin"
+ validate_certs: "no"
+ delegate_to: localhost
+'''
+
+RETURN = '''
+description:
+ description: The description set on the VLAN
+ returned: changed
+ type: string
+ sample: foo VLAN
+interfaces:
+ description: Interfaces that the VLAN is assigned to
+ returned: changed
+ type: list
+ sample: ['1.1','1.2']
+name:
+ description: The name of the VLAN
+ returned: changed
+ type: string
+ sample: net1
+partition:
+ description: The partition that the VLAN was created on
+ returned: changed
+ type: string
+ sample: Common
+tag:
+ description: The ID of the VLAN
+ returned: changed
+ type: int
+ sample: 2345
+'''
+
+try:
+ from f5.bigip import ManagementRoot
+ from icontrol.session import iControlUnexpectedHTTPError
+ HAS_F5SDK = True
+except ImportError:
+ HAS_F5SDK = False
+
+
+class BigIpVlan(object):
+ def __init__(self, *args, **kwargs):
+ if not HAS_F5SDK:
+ raise F5ModuleError("The python f5-sdk module is required")
+
+ # The params that change in the module
+ self.cparams = dict()
+
+ # Stores the params that are sent to the module
+ self.params = kwargs
+ self.api = ManagementRoot(kwargs['server'],
+ kwargs['user'],
+ kwargs['password'],
+ port=kwargs['server_port'])
+
+ def present(self):
+ if self.exists():
+ return self.update()
+ else:
+ return self.create()
+
+ def absent(self):
+ changed = False
+
+ if self.exists():
+ changed = self.delete()
+
+ return changed
+
+ def read(self):
+ """Read information and transform it
+
+ The values that are returned by BIG-IP in the f5-sdk can have encoding
+ attached to them as well as be completely missing in some cases.
+
+ Therefore, this method will transform the data from the BIG-IP into a
+ format that is more easily consumable by the rest of the class and the
+ parameters that are supported by the module.
+ """
+ p = dict()
+ name = self.params['name']
+ partition = self.params['partition']
+ r = self.api.tm.net.vlans.vlan.load(
+ name=name,
+ partition=partition
+ )
+ ifcs = r.interfaces_s.get_collection()
+ if hasattr(r, 'tag'):
+ p['tag'] = int(r.tag)
+ if hasattr(r, 'description'):
+ p['description'] = str(r.description)
+        if ifcs:
+ untagged = []
+ tagged = []
+ for x in ifcs:
+ if hasattr(x, 'tagged'):
+ tagged.append(str(x.name))
+ elif hasattr(x, 'untagged'):
+ untagged.append(str(x.name))
+ if untagged:
+ p['untagged_interfaces'] = list(set(untagged))
+ if tagged:
+ p['tagged_interfaces'] = list(set(tagged))
+ p['name'] = name
+ return p
+
+ def create(self):
+ params = dict()
+
+ check_mode = self.params['check_mode']
+ description = self.params['description']
+ name = self.params['name']
+ untagged_interfaces = self.params['untagged_interfaces']
+ tagged_interfaces = self.params['tagged_interfaces']
+ partition = self.params['partition']
+ tag = self.params['tag']
+
+ if tag is not None:
+ params['tag'] = tag
+
+ if untagged_interfaces is not None or tagged_interfaces is not None:
+ tmp = []
+ ifcs = self.api.tm.net.interfaces.get_collection()
+ ifcs = [str(x.name) for x in ifcs]
+
+            if not ifcs:
+ raise F5ModuleError(
+ 'No interfaces were found'
+ )
+
+ pinterfaces = []
+ if untagged_interfaces:
+ interfaces = untagged_interfaces
+ elif tagged_interfaces:
+ interfaces = tagged_interfaces
+
+            for ifc in interfaces:
+                ifc = str(ifc)
+                if ifc in ifcs:
+                    pinterfaces.append(ifc)
+                else:
+                    raise F5ModuleError(
+                        'The specified interface "%s" was not found' % (ifc)
+                    )
+
+ if tagged_interfaces:
+ tmp = [dict(name=x, tagged=True) for x in pinterfaces]
+ elif untagged_interfaces:
+ tmp = [dict(name=x, untagged=True) for x in pinterfaces]
+
+ if tmp:
+ params['interfaces'] = tmp
+
+ if description is not None:
+ params['description'] = self.params['description']
+
+ params['name'] = name
+ params['partition'] = partition
+
+ self.cparams = camel_dict_to_snake_dict(params)
+ if check_mode:
+ return True
+
+ d = self.api.tm.net.vlans.vlan
+ d.create(**params)
+
+ if self.exists():
+ return True
+ else:
+ raise F5ModuleError("Failed to create the VLAN")
+
+ def update(self):
+ changed = False
+ params = dict()
+ current = self.read()
+
+ check_mode = self.params['check_mode']
+ description = self.params['description']
+ name = self.params['name']
+ tag = self.params['tag']
+ partition = self.params['partition']
+ tagged_interfaces = self.params['tagged_interfaces']
+ untagged_interfaces = self.params['untagged_interfaces']
+
+ if untagged_interfaces is not None or tagged_interfaces is not None:
+ ifcs = self.api.tm.net.interfaces.get_collection()
+ ifcs = [str(x.name) for x in ifcs]
+
+            if not ifcs:
+ raise F5ModuleError(
+ 'No interfaces were found'
+ )
+
+ pinterfaces = []
+ if untagged_interfaces:
+ interfaces = untagged_interfaces
+ elif tagged_interfaces:
+ interfaces = tagged_interfaces
+
+ for ifc in interfaces:
+ ifc = str(ifc)
+ if ifc in ifcs:
+ pinterfaces.append(ifc)
+ else:
+ raise F5ModuleError(
+ 'The specified interface "%s" was not found' % (ifc)
+ )
+
+ if tagged_interfaces:
+ tmp = [dict(name=x, tagged=True) for x in pinterfaces]
+ if 'tagged_interfaces' in current:
+                    # compare as sets; interface order is not significant
+                    if set(pinterfaces) != set(current['tagged_interfaces']):
+ params['interfaces'] = tmp
+ else:
+ params['interfaces'] = tmp
+ elif untagged_interfaces:
+ tmp = [dict(name=x, untagged=True) for x in pinterfaces]
+ if 'untagged_interfaces' in current:
+                    if set(pinterfaces) != set(current['untagged_interfaces']):
+ params['interfaces'] = tmp
+ else:
+ params['interfaces'] = tmp
+
+ if description is not None:
+ if 'description' in current:
+ if description != current['description']:
+ params['description'] = description
+ else:
+ params['description'] = description
+
+ if tag is not None:
+ if 'tag' in current:
+ if tag != current['tag']:
+ params['tag'] = tag
+ else:
+ params['tag'] = tag
+
+        if params:
+            changed = True
+            params['name'] = name
+            params['partition'] = partition
+            self.cparams = camel_dict_to_snake_dict(params)
+            if check_mode:
+                return changed
+        else:
+            return changed
+
+ r = self.api.tm.net.vlans.vlan.load(
+ name=name,
+ partition=partition
+ )
+ r.update(**params)
+ r.refresh()
+
+ return True
+
+ def delete(self):
+ params = dict()
+ check_mode = self.params['check_mode']
+
+ params['name'] = self.params['name']
+ params['partition'] = self.params['partition']
+
+ self.cparams = camel_dict_to_snake_dict(params)
+ if check_mode:
+ return True
+
+ dc = self.api.tm.net.vlans.vlan.load(**params)
+ dc.delete()
+
+ if self.exists():
+ raise F5ModuleError("Failed to delete the VLAN")
+ return True
+
+ def exists(self):
+ name = self.params['name']
+ partition = self.params['partition']
+ return self.api.tm.net.vlans.vlan.exists(
+ name=name,
+ partition=partition
+ )
+
+ def flush(self):
+ result = dict()
+ state = self.params['state']
+
+ try:
+ if state == "present":
+ changed = self.present()
+ elif state == "absent":
+ changed = self.absent()
+ except iControlUnexpectedHTTPError as e:
+ raise F5ModuleError(str(e))
+
+ result.update(**self.cparams)
+ result.update(dict(changed=changed))
+ return result
+
+
+def main():
+ argument_spec = f5_argument_spec()
+
+ meta_args = dict(
+ description=dict(required=False, default=None),
+ tagged_interfaces=dict(required=False, default=None, type='list', aliases=['tagged_interface']),
+ untagged_interfaces=dict(required=False, default=None, type='list', aliases=['untagged_interface']),
+ name=dict(required=True),
+ tag=dict(required=False, default=None, type='int')
+ )
+ argument_spec.update(meta_args)
+
+ module = AnsibleModule(
+ argument_spec=argument_spec,
+ supports_check_mode=True,
+ mutually_exclusive=[
+ ['tagged_interfaces', 'untagged_interfaces']
+ ]
+ )
+
+ try:
+ obj = BigIpVlan(check_mode=module.check_mode, **module.params)
+ result = obj.flush()
+
+ module.exit_json(**result)
+ except F5ModuleError as e:
+ module.fail_json(msg=str(e))
+
+from ansible.module_utils.basic import *
+from ansible.module_utils.ec2 import camel_dict_to_snake_dict
+from ansible.module_utils.f5 import *
+
+if __name__ == '__main__':
+ main()
diff --git a/lib/ansible/modules/extras/network/haproxy.py b/lib/ansible/modules/extras/network/haproxy.py
new file mode 100644
index 0000000000..2fc11987d5
--- /dev/null
+++ b/lib/ansible/modules/extras/network/haproxy.py
@@ -0,0 +1,351 @@
+#!/usr/bin/python
+# -*- coding: utf-8 -*-
+
+# (c) 2014, Ravi Bhure <ravibhure@gmail.com>
+#
+# This file is part of Ansible
+#
+# Ansible is free software: you can redistribute it and/or modify
+# it under the terms of the GNU General Public License as published by
+# the Free Software Foundation, either version 3 of the License, or
+# (at your option) any later version.
+#
+# Ansible is distributed in the hope that it will be useful,
+# but WITHOUT ANY WARRANTY; without even the implied warranty of
+# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+# GNU General Public License for more details.
+#
+# You should have received a copy of the GNU General Public License
+# along with Ansible. If not, see <http://www.gnu.org/licenses/>.
+
+DOCUMENTATION = '''
+---
+module: haproxy
+version_added: "1.9"
+short_description: Enable, disable, and set weights for HAProxy backend servers using socket commands.
+description:
+ - Enable, disable, and set weights for HAProxy backend servers using socket
+ commands.
+notes:
+ - Enable and disable commands are restricted and can only be issued on
+ sockets configured for level 'admin'. For example, you can add the line
+ 'stats socket /var/run/haproxy.sock level admin' to the general section of
+ haproxy.cfg. See http://haproxy.1wt.eu/download/1.5/doc/configuration.txt.
+options:
+ backend:
+ description:
+ - Name of the HAProxy backend pool.
+ required: false
+ default: auto-detected
+ host:
+ description:
+ - Name of the backend host to change.
+ required: true
+ default: null
+ shutdown_sessions:
+ description:
+ - When disabling a server, immediately terminate all the sessions attached
+ to the specified server. This can be used to terminate long-running
+ sessions after a server is put into maintenance mode.
+ required: false
+ default: false
+ socket:
+ description:
+ - Path to the HAProxy socket file.
+ required: false
+ default: /var/run/haproxy.sock
+ state:
+ description:
+ - Desired state of the provided backend host.
+ required: true
+ default: null
+ choices: [ "enabled", "disabled" ]
+ fail_on_not_found:
+ description:
+ - Fail whenever trying to enable/disable a backend host that does not exist
+ required: false
+ default: false
+ version_added: "2.2"
+ wait:
+ description:
+ - Wait until the server reports a status of 'UP' when `state=enabled`, or
+ status of 'MAINT' when `state=disabled`.
+ required: false
+ default: false
+ version_added: "2.0"
+ wait_interval:
+ description:
+ - Number of seconds to wait between retries.
+ required: false
+ default: 5
+ version_added: "2.0"
+ wait_retries:
+ description:
+ - Number of times to check for status after changing the state.
+ required: false
+ default: 25
+ version_added: "2.0"
+ weight:
+ description:
+ - The value passed in argument. If the value ends with the `%` sign, then
+ the new weight will be relative to the initially configured weight.
+ Relative weights are only permitted between 0 and 100% and absolute
+ weights are permitted between 0 and 256.
+ required: false
+ default: null
+'''
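+
+# The socket commands used by this module require an admin-level stats
+# socket. A minimal haproxy.cfg sketch (the path is an assumption; it must
+# match the module's 'socket' option):
+#
+#   global
+#       stats socket /var/run/haproxy.sock level admin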
+
+EXAMPLES = '''
+# disable server in 'www' backend pool
+- haproxy: state=disabled host={{ inventory_hostname }} backend=www
+
+# disable server without backend pool name (applies to all available backend pools)
+- haproxy: state=disabled host={{ inventory_hostname }}
+
+# disable server, provide socket file
+- haproxy: state=disabled host={{ inventory_hostname }} socket=/var/run/haproxy.sock backend=www
+
+# disable server, provide socket file, wait until status reports in maintenance
+- haproxy: state=disabled host={{ inventory_hostname }} socket=/var/run/haproxy.sock backend=www wait=yes
+
+# disable backend server in 'www' backend pool and drop open sessions to it
+- haproxy: state=disabled host={{ inventory_hostname }} backend=www socket=/var/run/haproxy.sock shutdown_sessions=true
+
+# disable server without backend pool name (applies to all available backend pools) but fail when the backend host is not found
+- haproxy: state=disabled host={{ inventory_hostname }} fail_on_not_found=yes
+
+# enable server in 'www' backend pool
+- haproxy: state=enabled host={{ inventory_hostname }} backend=www
+
+# enable server in 'www' backend pool and wait until healthy
+- haproxy: state=enabled host={{ inventory_hostname }} backend=www wait=yes
+
+# enable server in 'www' backend pool and wait until healthy. Retry 10 times with intervals of 5 seconds to retrieve the health
+- haproxy: state=enabled host={{ inventory_hostname }} backend=www wait=yes wait_retries=10 wait_interval=5
+
+# enable server in 'www' backend pool and change the server's weight
+- haproxy: state=enabled host={{ inventory_hostname }} socket=/var/run/haproxy.sock weight=10 backend=www
+
+author: "Ravi Bhure (@ravibhure)"
+'''
+
+import socket
+import csv
+import time
+from string import Template
+
+
+DEFAULT_SOCKET_LOCATION = "/var/run/haproxy.sock"
+RECV_SIZE = 1024
+ACTION_CHOICES = ['enabled', 'disabled']
+WAIT_RETRIES = 25
+WAIT_INTERVAL = 5
+
+######################################################################
+class TimeoutException(Exception):
+ pass
+
+class HAProxy(object):
+ """
+    Used for communicating with HAProxy through its local UNIX socket interface.
+    Performs common HAProxy tasks such as enabling and disabling servers.
+
+ The complete set of external commands Haproxy handles is documented
+ on their website:
+
+ http://haproxy.1wt.eu/download/1.5/doc/configuration.txt#Unix Socket commands
+ """
+
+ def __init__(self, module):
+ self.module = module
+
+ self.state = self.module.params['state']
+ self.host = self.module.params['host']
+ self.backend = self.module.params['backend']
+ self.weight = self.module.params['weight']
+ self.socket = self.module.params['socket']
+ self.shutdown_sessions = self.module.params['shutdown_sessions']
+ self.fail_on_not_found = self.module.params['fail_on_not_found']
+ self.wait = self.module.params['wait']
+ self.wait_retries = self.module.params['wait_retries']
+ self.wait_interval = self.module.params['wait_interval']
+ self.command_results = {}
+
+    def execute(self, cmd, timeout=200, capture_output=True):
+        """
+        Executes a HAProxy command by sending a message to HAProxy's local
+        UNIX socket and reading the response until the peer closes the
+        connection. 'timeout' (in milliseconds) is accepted for backwards
+        compatibility but is not currently enforced on the socket.
+        """
+        self.client = socket.socket(socket.AF_UNIX, socket.SOCK_STREAM)
+        self.client.connect(self.socket)
+        self.client.sendall('%s\n' % cmd)
+        result = ''
+        buf = self.client.recv(RECV_SIZE)
+ while buf:
+ result += buf
+ buf = self.client.recv(RECV_SIZE)
+ if capture_output:
+ self.capture_command_output(cmd, result.strip())
+ self.client.close()
+ return result
+
+
+ def capture_command_output(self, cmd, output):
+ """
+ Capture the output for a command
+ """
+        if 'command' not in self.command_results:
+            self.command_results['command'] = []
+        self.command_results['command'].append(cmd)
+        if 'output' not in self.command_results:
+            self.command_results['output'] = []
+        self.command_results['output'].append(output)
+
+
+ def discover_all_backends(self):
+ """
+ Discover all entries with svname = 'BACKEND' and return a list of their corresponding
+ pxnames
+ """
+        data = self.execute('show stat', 200, False).lstrip('# ')
+        r = csv.DictReader(data.splitlines())
+        return [d['pxname'] for d in r if d['svname'] == 'BACKEND']
+
+
+    def execute_for_backends(self, cmd, pxname, svname, wait_for_status=None):
+ """
+ Run some command on the specified backends. If no backends are provided they will
+ be discovered automatically (all backends)
+ """
+ # Discover backends if none are given
+ if pxname is None:
+ backends = self.discover_all_backends()
+ else:
+ backends = [pxname]
+
+ # Run the command for each requested backend
+ for backend in backends:
+ # Fail when backends were not found
+ state = self.get_state_for(backend, svname)
+ if (self.fail_on_not_found or self.wait) and state is None:
+ self.module.fail_json(msg="The specified backend '%s/%s' was not found!" % (backend, svname))
+
+            self.execute(Template(cmd).substitute(pxname=backend, svname=svname))
+ if self.wait:
+ self.wait_until_status(backend, svname, wait_for_status)
+
+
+ def get_state_for(self, pxname, svname):
+ """
+ Find the state of specific services. When pxname is not set, get all backends for a specific host.
+ Returns a list of dictionaries containing the status and weight for those services.
+ """
+        data = self.execute('show stat', 200, False).lstrip('# ')
+        r = csv.DictReader(data.splitlines())
+        # build a real list so an empty result is falsy and returns None
+        state = [{'status': d['status'], 'weight': d['weight']}
+                 for d in r
+                 if (pxname is None or d['pxname'] == pxname) and d['svname'] == svname]
+        return state or None
+
+
+ def wait_until_status(self, pxname, svname, status):
+ """
+        Wait for a service to reach the specified status. Try wait_retries times
+        with wait_interval seconds of sleep in between. If the service has not reached
+ the expected status in that time, the module will fail. If the service was
+ not found, the module will fail.
+ """
+        for _ in range(self.wait_retries):
+ state = self.get_state_for(pxname, svname)
+
+ # We can assume there will only be 1 element in state because both svname and pxname are always set when we get here
+ if state[0]['status'] == status:
+ return True
+ else:
+ time.sleep(self.wait_interval)
+
+ self.module.fail_json(msg="server %s/%s not status '%s' after %d retries. Aborting." % (pxname, svname, status, self.wait_retries))
+
+
+    def enabled(self, host, backend, weight):
+        """
+        Enabled action: mark the server UP so health checks are re-enabled.
+        Always reads the current weight, and sets a new weight on the
+        backend server when one is provided.
+        """
+ cmd = "get weight $pxname/$svname; enable server $pxname/$svname"
+ if weight:
+ cmd += "; set weight $pxname/$svname %s" % weight
+ self.execute_for_backends(cmd, backend, host, 'UP')
+
+
+    def disabled(self, host, backend, shutdown_sessions):
+        """
+        Disabled action: mark the server DOWN for maintenance. In this mode,
+        no more checks will be performed on the server until it leaves
+        maintenance. Optionally shuts down active sessions while disabling.
+        """
+ cmd = "get weight $pxname/$svname; disable server $pxname/$svname"
+ if shutdown_sessions:
+ cmd += "; shutdown sessions server $pxname/$svname"
+ self.execute_for_backends(cmd, backend, host, 'MAINT')
+
+
+ def act(self):
+ """
+ Figure out what you want to do from ansible, and then do it.
+ """
+ # Get the state before the run
+ state_before = self.get_state_for(self.backend, self.host)
+ self.command_results['state_before'] = state_before
+
+        # toggle enable/disable server
+ if self.state == 'enabled':
+ self.enabled(self.host, self.backend, self.weight)
+ elif self.state == 'disabled':
+ self.disabled(self.host, self.backend, self.shutdown_sessions)
+ else:
+ self.module.fail_json(msg="unknown state specified: '%s'" % self.state)
+
+ # Get the state after the run
+ state_after = self.get_state_for(self.backend, self.host)
+ self.command_results['state_after'] = state_after
+
+ # Report change status
+ if state_before != state_after:
+ self.command_results['changed'] = True
+ self.module.exit_json(**self.command_results)
+ else:
+ self.command_results['changed'] = False
+ self.module.exit_json(**self.command_results)
+
+
+def main():
+
+ # load ansible module object
+ module = AnsibleModule(
+        argument_spec=dict(
+            state=dict(required=True, choices=ACTION_CHOICES),
+            host=dict(required=True),
+            backend=dict(required=False, default=None),
+            weight=dict(required=False, default=None),
+            socket=dict(required=False, default=DEFAULT_SOCKET_LOCATION),
+ shutdown_sessions=dict(required=False, default=False, type='bool'),
+ fail_on_not_found=dict(required=False, default=False, type='bool'),
+ wait=dict(required=False, default=False, type='bool'),
+ wait_retries=dict(required=False, default=WAIT_RETRIES, type='int'),
+ wait_interval=dict(required=False, default=WAIT_INTERVAL, type='int'),
+ ),
+ )
+
+    # the imported 'socket' module is always truthy; check the module param
+    if not module.params['socket']:
+        module.fail_json(msg="unable to locate haproxy socket")
+
+ ansible_haproxy = HAProxy(module)
+ ansible_haproxy.act()
+
+# import module snippets
+from ansible.module_utils.basic import *
+
+if __name__ == '__main__':
+    main()
+
diff --git a/lib/ansible/modules/extras/network/illumos/__init__.py b/lib/ansible/modules/extras/network/illumos/__init__.py
new file mode 100644
index 0000000000..e69de29bb2
--- /dev/null
+++ b/lib/ansible/modules/extras/network/illumos/__init__.py
diff --git a/lib/ansible/modules/extras/network/illumos/dladm_etherstub.py b/lib/ansible/modules/extras/network/illumos/dladm_etherstub.py
new file mode 100644
index 0000000000..72b2e6759f
--- /dev/null
+++ b/lib/ansible/modules/extras/network/illumos/dladm_etherstub.py
@@ -0,0 +1,171 @@
+#!/usr/bin/python
+# -*- coding: utf-8 -*-
+
+# (c) 2015, Adam Števko <adam.stevko@gmail.com>
+#
+# This file is part of Ansible
+#
+# Ansible is free software: you can redistribute it and/or modify
+# it under the terms of the GNU General Public License as published by
+# the Free Software Foundation, either version 3 of the License, or
+# (at your option) any later version.
+#
+# Ansible is distributed in the hope that it will be useful,
+# but WITHOUT ANY WARRANTY; without even the implied warranty of
+# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+# GNU General Public License for more details.
+#
+# You should have received a copy of the GNU General Public License
+# along with Ansible. If not, see <http://www.gnu.org/licenses/>.
+#
+
+DOCUMENTATION = '''
+---
+module: dladm_etherstub
+short_description: Manage etherstubs on Solaris/illumos systems.
+description:
+ - Create or delete etherstubs on Solaris/illumos systems.
+version_added: "2.2"
+author: Adam Števko (@xen0l)
+options:
+ name:
+ description:
+ - Etherstub name.
+ required: true
+ temporary:
+ description:
+ - Specifies that the etherstub is temporary. Temporary etherstubs
+ do not persist across reboots.
+ required: false
+ default: false
+ choices: [ "true", "false" ]
+ state:
+ description:
+ - Create or delete Solaris/illumos etherstub.
+ required: false
+ default: "present"
+ choices: [ "present", "absent" ]
+'''
+
+EXAMPLES = '''
+# Create 'stub0' etherstub
+dladm_etherstub: name=stub0 state=present
+
+# Remove 'stub0 etherstub
+dladm_etherstub: name=stub0 state=absent
+'''
+
+RETURN = '''
+name:
+ description: etherstub name
+ returned: always
+ type: string
+ sample: "switch0"
+state:
+ description: state of the target
+ returned: always
+ type: string
+ sample: "present"
+temporary:
+ description: etherstub's persistence
+ returned: always
+ type: boolean
+ sample: "True"
+'''
+
+
+class Etherstub(object):
+
+ def __init__(self, module):
+ self.module = module
+
+ self.name = module.params['name']
+ self.temporary = module.params['temporary']
+ self.state = module.params['state']
+
+ def etherstub_exists(self):
+ cmd = [self.module.get_bin_path('dladm', True)]
+
+ cmd.append('show-etherstub')
+ cmd.append(self.name)
+
+ (rc, _, _) = self.module.run_command(cmd)
+
+ if rc == 0:
+ return True
+ else:
+ return False
+
+ def create_etherstub(self):
+ cmd = [self.module.get_bin_path('dladm', True)]
+
+ cmd.append('create-etherstub')
+
+ if self.temporary:
+ cmd.append('-t')
+ cmd.append(self.name)
+
+ return self.module.run_command(cmd)
+
+ def delete_etherstub(self):
+ cmd = [self.module.get_bin_path('dladm', True)]
+
+ cmd.append('delete-etherstub')
+
+ if self.temporary:
+ cmd.append('-t')
+ cmd.append(self.name)
+
+ return self.module.run_command(cmd)
+
+
+def main():
+ module = AnsibleModule(
+ argument_spec=dict(
+ name=dict(required=True),
+ temporary=dict(default=False, type='bool'),
+ state=dict(default='present', choices=['absent', 'present']),
+ ),
+ supports_check_mode=True
+ )
+
+ etherstub = Etherstub(module)
+
+ rc = None
+ out = ''
+ err = ''
+ result = {}
+ result['name'] = etherstub.name
+ result['state'] = etherstub.state
+ result['temporary'] = etherstub.temporary
+
+ if etherstub.state == 'absent':
+ if etherstub.etherstub_exists():
+ if module.check_mode:
+ module.exit_json(changed=True)
+ (rc, out, err) = etherstub.delete_etherstub()
+ if rc != 0:
+ module.fail_json(name=etherstub.name, msg=err, rc=rc)
+ elif etherstub.state == 'present':
+ if not etherstub.etherstub_exists():
+ if module.check_mode:
+ module.exit_json(changed=True)
+ (rc, out, err) = etherstub.create_etherstub()
+
+ if rc is not None and rc != 0:
+ module.fail_json(name=etherstub.name, msg=err, rc=rc)
+
+ if rc is None:
+ result['changed'] = False
+ else:
+ result['changed'] = True
+
+ if out:
+ result['stdout'] = out
+ if err:
+ result['stderr'] = err
+
+ module.exit_json(**result)
+
+from ansible.module_utils.basic import *
+
+if __name__ == '__main__':
+    main()
diff --git a/lib/ansible/modules/extras/network/illumos/dladm_vnic.py b/lib/ansible/modules/extras/network/illumos/dladm_vnic.py
new file mode 100644
index 0000000000..e47b98b97a
--- /dev/null
+++ b/lib/ansible/modules/extras/network/illumos/dladm_vnic.py
@@ -0,0 +1,258 @@
+#!/usr/bin/python
+# -*- coding: utf-8 -*-
+
+# (c) 2015, Adam Števko <adam.stevko@gmail.com>
+#
+# This file is part of Ansible
+#
+# Ansible is free software: you can redistribute it and/or modify
+# it under the terms of the GNU General Public License as published by
+# the Free Software Foundation, either version 3 of the License, or
+# (at your option) any later version.
+#
+# Ansible is distributed in the hope that it will be useful,
+# but WITHOUT ANY WARRANTY; without even the implied warranty of
+# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+# GNU General Public License for more details.
+#
+# You should have received a copy of the GNU General Public License
+# along with Ansible. If not, see <http://www.gnu.org/licenses/>.
+#
+
+DOCUMENTATION = '''
+---
+module: dladm_vnic
+short_description: Manage VNICs on Solaris/illumos systems.
+description:
+ - Create or delete VNICs on Solaris/illumos systems.
+version_added: "2.2"
+author: Adam Števko (@xen0l)
+options:
+ name:
+ description:
+ - VNIC name.
+ required: true
+ link:
+ description:
+ - VNIC underlying link name.
+ required: true
+ temporary:
+ description:
+ - Specifies that the VNIC is temporary. Temporary VNICs
+ do not persist across reboots.
+ required: false
+ default: false
+ choices: [ "true", "false" ]
+ mac:
+ description:
+            - Sets the VNIC's MAC address. Must be a valid unicast MAC address.
+ required: false
+        default: null
+ aliases: [ "macaddr" ]
+ vlan:
+ description:
+ - Enable VLAN tagging for this VNIC. The VLAN tag will have id
+ I(vlan).
+ required: false
+        default: null
+ aliases: [ "vlan_id" ]
+ state:
+ description:
+ - Create or delete Solaris/illumos VNIC.
+ required: false
+ default: "present"
+ choices: [ "present", "absent" ]
+'''
+
+EXAMPLES = '''
+# Create 'vnic0' VNIC over 'bnx0' link
+dladm_vnic: name=vnic0 link=bnx0 state=present
+
+# Create VNIC with specified MAC and VLAN tag over 'aggr0'
+dladm_vnic: name=vnic1 link=aggr0 mac=00:00:5E:00:53:23 vlan=4
+
+# Remove 'vnic0' VNIC
+dladm_vnic: name=vnic0 link=bnx0 state=absent
+'''
+
+RETURN = '''
+name:
+ description: VNIC name
+ returned: always
+ type: string
+ sample: "vnic0"
+link:
+ description: VNIC underlying link name
+ returned: always
+ type: string
+ sample: "igb0"
+state:
+ description: state of the target
+ returned: always
+ type: string
+ sample: "present"
+temporary:
+ description: VNIC's persistence
+ returned: always
+ type: boolean
+ sample: "True"
+mac:
+ description: MAC address to use for VNIC
+ returned: if mac is specified
+ type: string
+ sample: "00:00:5E:00:53:42"
+vlan:
+ description: VLAN to use for VNIC
+ returned: success
+ type: int
+ sample: 42
+'''
+
+import re
+
+
+class VNIC(object):
+
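+    # A unicast MAC address has the least significant bit of its first
+    # octet clear, so the second hex digit must be even (0, 2, 4, 6, 8,
+    # a, c or e).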
+    UNICAST_MAC_REGEX = r'^[a-f0-9][02468ace]:([a-f0-9]{2}:){4}[a-f0-9]{2}$'
+
+ def __init__(self, module):
+ self.module = module
+
+ self.name = module.params['name']
+ self.link = module.params['link']
+ self.mac = module.params['mac']
+ self.vlan = module.params['vlan']
+ self.temporary = module.params['temporary']
+ self.state = module.params['state']
+
+ def vnic_exists(self):
+ cmd = [self.module.get_bin_path('dladm', True)]
+
+ cmd.append('show-vnic')
+ cmd.append(self.name)
+
+ (rc, _, _) = self.module.run_command(cmd)
+
+ if rc == 0:
+ return True
+ else:
+ return False
+
+ def create_vnic(self):
+ cmd = [self.module.get_bin_path('dladm', True)]
+
+ cmd.append('create-vnic')
+
+ if self.temporary:
+ cmd.append('-t')
+
+ if self.mac:
+ cmd.append('-m')
+ cmd.append(self.mac)
+
+ if self.vlan:
+ cmd.append('-v')
+            cmd.append(str(self.vlan))
+
+ cmd.append('-l')
+ cmd.append(self.link)
+ cmd.append(self.name)
+
+ return self.module.run_command(cmd)
+
+ def delete_vnic(self):
+ cmd = [self.module.get_bin_path('dladm', True)]
+
+ cmd.append('delete-vnic')
+
+ if self.temporary:
+ cmd.append('-t')
+ cmd.append(self.name)
+
+ return self.module.run_command(cmd)
+
+    def is_valid_unicast_mac(self):
+
+        mac_re = re.match(self.UNICAST_MAC_REGEX, self.mac, re.IGNORECASE)
+
+        return mac_re is not None
+
+ def is_valid_vlan_id(self):
+
+        return 1 <= self.vlan <= 4094
+
+
+def main():
+ module = AnsibleModule(
+ argument_spec=dict(
+ name=dict(required=True),
+ link=dict(required=True),
+ mac=dict(default=None, aliases=['macaddr']),
+            vlan=dict(default=None, type='int', aliases=['vlan_id']),
+ temporary=dict(default=False, type='bool'),
+ state=dict(default='present', choices=['absent', 'present']),
+ ),
+ supports_check_mode=True
+ )
+
+ vnic = VNIC(module)
+
+ rc = None
+ out = ''
+ err = ''
+ result = {}
+ result['name'] = vnic.name
+ result['link'] = vnic.link
+ result['state'] = vnic.state
+ result['temporary'] = vnic.temporary
+
+ if vnic.mac is not None:
+        if not vnic.is_valid_unicast_mac():
+ module.fail_json(msg='Invalid unicast MAC address',
+ mac=vnic.mac,
+ name=vnic.name,
+ state=vnic.state,
+ link=vnic.link,
+ vlan=vnic.vlan)
+ result['mac'] = vnic.mac
+
+ if vnic.vlan is not None:
+        if not vnic.is_valid_vlan_id():
+ module.fail_json(msg='Invalid VLAN tag',
+ mac=vnic.mac,
+ name=vnic.name,
+ state=vnic.state,
+ link=vnic.link,
+ vlan=vnic.vlan)
+ result['vlan'] = vnic.vlan
+
+ if vnic.state == 'absent':
+ if vnic.vnic_exists():
+ if module.check_mode:
+ module.exit_json(changed=True)
+ (rc, out, err) = vnic.delete_vnic()
+ if rc != 0:
+ module.fail_json(name=vnic.name, msg=err, rc=rc)
+ elif vnic.state == 'present':
+ if not vnic.vnic_exists():
+ if module.check_mode:
+ module.exit_json(changed=True)
+ (rc, out, err) = vnic.create_vnic()
+
+ if rc is not None and rc != 0:
+ module.fail_json(name=vnic.name, msg=err, rc=rc)
+
+ if rc is None:
+ result['changed'] = False
+ else:
+ result['changed'] = True
+
+ if out:
+ result['stdout'] = out
+ if err:
+ result['stderr'] = err
+
+ module.exit_json(**result)
+
+from ansible.module_utils.basic import *
+main()
diff --git a/lib/ansible/modules/extras/network/illumos/flowadm.py b/lib/ansible/modules/extras/network/illumos/flowadm.py
new file mode 100644
index 0000000000..73cc91af44
--- /dev/null
+++ b/lib/ansible/modules/extras/network/illumos/flowadm.py
@@ -0,0 +1,503 @@
+#!/usr/bin/python
+# -*- coding: utf-8 -*-
+
+# (c) 2016, Adam Števko <adam.stevko@gmail.com>
+#
+# This file is part of Ansible
+#
+# Ansible is free software: you can redistribute it and/or modify
+# it under the terms of the GNU General Public License as published by
+# the Free Software Foundation, either version 3 of the License, or
+# (at your option) any later version.
+#
+# Ansible is distributed in the hope that it will be useful,
+# but WITHOUT ANY WARRANTY; without even the implied warranty of
+# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+# GNU General Public License for more details.
+#
+# You should have received a copy of the GNU General Public License
+# along with Ansible. If not, see <http://www.gnu.org/licenses/>.
+#
+
+DOCUMENTATION = '''
+---
+module: flowadm
+short_description: Manage bandwidth resource control and priority for protocols, services and zones.
+description:
+ - Create/modify/remove networking bandwidth and associated resources for a type of traffic on a particular link.
+version_added: "2.2"
+author: Adam Števko (@xen0l)
+options:
+ name:
+ description: >
+ - A flow is defined as a set of attributes based on Layer 3 and Layer 4
+ headers, which can be used to identify a protocol, service, or a zone.
+ required: true
+ aliases: [ 'flow' ]
+ link:
+ description:
+            - Specifies a link to configure the flow on.
+ required: false
+ local_ip:
+ description:
+ - Identifies a network flow by the local IP address.
+ required: false
+    remote_ip:
+ description:
+ - Identifies a network flow by the remote IP address.
+ required: false
+ transport:
+ description: >
+ - Specifies a Layer 4 protocol to be used. It is typically used in combination with I(local_port) to
+ identify the service that needs special attention.
+ required: false
+ local_port:
+ description:
+ - Identifies a service specified by the local port.
+ required: false
+ dsfield:
+ description: >
+            - Identifies the 8-bit differentiated services field (as defined in
+            RFC 2474). An optional mask can be appended after a colon to state
+            the bits of interest when comparing with the dsfield value. Both
+            values must be in hexadecimal.
+ required: false
+ maxbw:
+ description: >
+ - Sets the full duplex bandwidth for the flow. The bandwidth is
+            specified as an integer with one of the scale suffixes (K, M, or G
+ for Kbps, Mbps, and Gbps). If no units are specified, the input
+ value will be read as Mbps.
+ required: false
+ priority:
+ description:
+ - Sets the relative priority for the flow.
+ required: false
+ default: 'medium'
+ choices: [ 'low', 'medium', 'high' ]
+ temporary:
+ description:
+ - Specifies that the configured flow is temporary. Temporary
+ flows do not persist across reboots.
+ required: false
+ default: false
+ choices: [ "true", "false" ]
+ state:
+ description:
+            - Create, delete, or reset the flow.
+ required: false
+ default: present
+ choices: [ 'absent', 'present', 'resetted' ]
+'''
+
+EXAMPLES = '''
+# Limit SSH traffic to 100M via vnic0 interface
+flowadm: link=vnic0 flow=ssh_out transport=tcp local_port=22 maxbw=100M state=present
+
+# Reset flow properties
+flowadm: name=dns state=resetted
+
+# Configure policy for EF PHB (DSCP value of 101110 from RFC 2598) with a bandwidth of 500 Mbps and a high priority.
+flowadm: link=bge0 dsfield=0x2e:0xfc maxbw=500M priority=high flow=efphb-flow state=present
+'''
+
+RETURN = '''
+name:
+ description: flow name
+ returned: always
+ type: string
+ sample: "http_drop"
+link:
+ description: flow's link
+ returned: if link is defined
+ type: string
+ sample: "vnic0"
+state:
+ description: state of the target
+ returned: always
+ type: string
+ sample: "present"
+temporary:
+ description: flow's persistence
+ returned: always
+ type: boolean
+ sample: "True"
+priority:
+ description: flow's priority
+ returned: if priority is defined
+ type: string
+ sample: "low"
+transport:
+ description: flow's transport
+ returned: if transport is defined
+ type: string
+ sample: "tcp"
+maxbw:
+ description: flow's maximum bandwidth
+ returned: if maxbw is defined
+ type: string
+ sample: "100M"
+local_ip:
+ description: flow's local IP address
+ returned: if local_ip is defined
+ type: string
+ sample: "10.0.0.42"
+local_port:
+ description: flow's local port
+ returned: if local_port is defined
+ type: int
+ sample: 1337
+remote_ip:
+ description: flow's remote IP address
+ returned: if remote_ip is defined
+ type: string
+ sample: "10.0.0.42"
+dsfield:
+ description: flow's differentiated services value
+ returned: if dsfield is defined
+ type: string
+ sample: "0x2e:0xfc"
+'''
+
+
+import socket
+
+SUPPORTED_TRANSPORTS = ['tcp', 'udp', 'sctp', 'icmp', 'icmpv6']
+SUPPORTED_PRIORITIES = ['low', 'medium', 'high']
+
+SUPPORTED_ATTRIBUTES = ['local_ip', 'remote_ip', 'transport', 'local_port', 'dsfield']
+SUPPORTED_PROPERTIES = ['maxbw', 'priority']
+
+
+class Flow(object):
+
+ def __init__(self, module):
+ self.module = module
+
+ self.name = module.params['name']
+ self.link = module.params['link']
+ self.local_ip = module.params['local_ip']
+ self.remote_ip = module.params['remote_ip']
+ self.transport = module.params['transport']
+ self.local_port = module.params['local_port']
+ self.dsfield = module.params['dsfield']
+ self.maxbw = module.params['maxbw']
+ self.priority = module.params['priority']
+ self.temporary = module.params['temporary']
+ self.state = module.params['state']
+
+ self._needs_updating = {
+ 'maxbw': False,
+ 'priority': False,
+ }
+
+ @classmethod
+ def is_valid_port(cls, port):
+ return 1 <= int(port) <= 65535
+
+ @classmethod
+ def is_valid_address(cls, ip):
+
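+        # Accepts a bare address or an address with a prefix length,
+        # e.g. '10.0.0.42', '10.0.0.0/24' or 'fe80::1/64'.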
+        if ip.count('/') == 1:
+            ip_address, netmask = ip.split('/')
+            netmask = int(netmask)
+        else:
+            ip_address = ip
+            netmask = None
+
+        if len(ip_address.split('.')) == 4:
+            try:
+                socket.inet_pton(socket.AF_INET, ip_address)
+            except socket.error:
+                return False
+
+            if netmask is not None and not 0 <= netmask <= 32:
+                return False
+        else:
+            try:
+                socket.inet_pton(socket.AF_INET6, ip_address)
+            except socket.error:
+                return False
+
+            if netmask is not None and not 0 <= netmask <= 128:
+                return False
+
+ return True
+
+ @classmethod
+ def is_hex(cls, number):
+ try:
+ int(number, 16)
+ except ValueError:
+ return False
+
+ return True
+
+ @classmethod
+ def is_valid_dsfield(cls, dsfield):
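+        # dsfield is either a bare hex value or a 'value:mask' pair,
+        # e.g. '0x2e' or '0x2e:0xfc'; both parts must be within 0x01-0xff.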
+
+ dsmask = None
+
+        if dsfield.count(':') == 1:
+            dsval, dsmask = dsfield.split(':')
+        else:
+            dsval = dsfield
+
+        if dsmask is not None and not 0x01 <= int(dsmask, 16) <= 0xff:
+            return False
+        if not 0x01 <= int(dsval, 16) <= 0xff:
+            return False
+
+ return True
+
+ def flow_exists(self):
+ cmd = [self.module.get_bin_path('flowadm')]
+
+ cmd.append('show-flow')
+ cmd.append(self.name)
+
+ (rc, _, _) = self.module.run_command(cmd)
+
+ if rc == 0:
+ return True
+ else:
+ return False
+
+ def delete_flow(self):
+ cmd = [self.module.get_bin_path('flowadm')]
+
+ cmd.append('remove-flow')
+ if self.temporary:
+ cmd.append('-t')
+ cmd.append(self.name)
+
+ return self.module.run_command(cmd)
+
+ def create_flow(self):
+ cmd = [self.module.get_bin_path('flowadm')]
+
+ cmd.append('add-flow')
+ cmd.append('-l')
+ cmd.append(self.link)
+
+ if self.local_ip:
+ cmd.append('-a')
+ cmd.append('local_ip=' + self.local_ip)
+
+ if self.remote_ip:
+ cmd.append('-a')
+ cmd.append('remote_ip=' + self.remote_ip)
+
+ if self.transport:
+ cmd.append('-a')
+ cmd.append('transport=' + self.transport)
+
+ if self.local_port:
+ cmd.append('-a')
+ cmd.append('local_port=' + self.local_port)
+
+ if self.dsfield:
+ cmd.append('-a')
+ cmd.append('dsfield=' + self.dsfield)
+
+ if self.maxbw:
+ cmd.append('-p')
+ cmd.append('maxbw=' + self.maxbw)
+
+ if self.priority:
+ cmd.append('-p')
+ cmd.append('priority=' + self.priority)
+
+ if self.temporary:
+ cmd.append('-t')
+ cmd.append(self.name)
+
+ return self.module.run_command(cmd)
+
+ def _query_flow_props(self):
+ cmd = [self.module.get_bin_path('flowadm')]
+
+ cmd.append('show-flowprop')
+ cmd.append('-c')
+ cmd.append('-o')
+        cmd.append('property,value')
+ cmd.append(self.name)
+
+ return self.module.run_command(cmd)
+
+    def flow_needs_updating(self):
+ (rc, out, err) = self._query_flow_props()
+
+        needs_updating = False
+
+        if rc == 0:
+            properties = (line.split(':') for line in out.rstrip().split('\n'))
+            for prop, value in properties:
+                if prop == 'maxbw' and self.maxbw != value:
+                    self._needs_updating.update({prop: True})
+                    needs_updating = True
+
+                elif prop == 'priority' and self.priority != value:
+                    self._needs_updating.update({prop: True})
+                    needs_updating = True
+
+            return needs_updating
+ else:
+ self.module.fail_json(msg='Error while checking flow properties: %s' % err,
+ stderr=err,
+ rc=rc)
+
+ def update_flow(self):
+ cmd = [self.module.get_bin_path('flowadm')]
+
+ cmd.append('set-flowprop')
+
+ if self.maxbw and self._needs_updating['maxbw']:
+ cmd.append('-p')
+ cmd.append('maxbw=' + self.maxbw)
+
+ if self.priority and self._needs_updating['priority']:
+ cmd.append('-p')
+ cmd.append('priority=' + self.priority)
+
+ if self.temporary:
+ cmd.append('-t')
+ cmd.append(self.name)
+
+ return self.module.run_command(cmd)
+
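+    def reset_flow(self):
+        # Used by state=resetted in main(); a minimal sketch assuming
+        # flowadm's reset-flowprop subcommand restores flow properties
+        # to their default values.
+        cmd = [self.module.get_bin_path('flowadm')]
+
+        cmd.append('reset-flowprop')
+
+        if self.temporary:
+            cmd.append('-t')
+        cmd.append(self.name)
+
+        return self.module.run_command(cmd)
+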
+
+def main():
+ module = AnsibleModule(
+ argument_spec=dict(
+ name=dict(required=True, aliases=['flow']),
+ link=dict(required=False),
+ local_ip=dict(required=False),
+ remote_ip=dict(required=False),
+ transport=dict(required=False, choices=SUPPORTED_TRANSPORTS),
+ local_port=dict(required=False),
+ dsfield=dict(required=False),
+ maxbw=dict(required=False),
+ priority=dict(required=False,
+ default='medium',
+ choices=SUPPORTED_PRIORITIES),
+ temporary=dict(default=False, type='bool'),
+ state=dict(required=False,
+ default='present',
+ choices=['absent', 'present', 'resetted']),
+ ),
+ mutually_exclusive=[
+ ('local_ip', 'remote_ip'),
+ ('local_ip', 'transport'),
+ ('local_ip', 'local_port'),
+ ('local_ip', 'dsfield'),
+ ('remote_ip', 'transport'),
+ ('remote_ip', 'local_port'),
+ ('remote_ip', 'dsfield'),
+ ('transport', 'dsfield'),
+ ('local_port', 'dsfield'),
+ ],
+ supports_check_mode=True
+ )
+
+ flow = Flow(module)
+
+ rc = None
+ out = ''
+ err = ''
+ result = {}
+ result['name'] = flow.name
+ result['state'] = flow.state
+ result['temporary'] = flow.temporary
+
+ if flow.link:
+ result['link'] = flow.link
+
+ if flow.maxbw:
+ result['maxbw'] = flow.maxbw
+
+ if flow.priority:
+ result['priority'] = flow.priority
+
+    if flow.local_ip:
+        if flow.is_valid_address(flow.local_ip):
+            result['local_ip'] = flow.local_ip
+        else:
+            module.fail_json(msg='Invalid IP address: %s' % flow.local_ip,
+                             rc=1)
+
+    if flow.remote_ip:
+        if flow.is_valid_address(flow.remote_ip):
+            result['remote_ip'] = flow.remote_ip
+        else:
+            module.fail_json(msg='Invalid IP address: %s' % flow.remote_ip,
+                             rc=1)
+
+ if flow.transport:
+ result['transport'] = flow.transport
+
+ if flow.local_port:
+ if flow.is_valid_port(flow.local_port):
+ result['local_port'] = flow.local_port
+ else:
+ module.fail_json(msg='Invalid port: %s' % flow.local_port,
+ rc=1)
+
+ if flow.dsfield:
+ if flow.is_valid_dsfield(flow.dsfield):
+ result['dsfield'] = flow.dsfield
+ else:
+ module.fail_json(msg='Invalid dsfield: %s' % flow.dsfield,
+ rc=1)
+
+ if flow.state == 'absent':
+ if flow.flow_exists():
+ if module.check_mode:
+ module.exit_json(changed=True)
+
+ (rc, out, err) = flow.delete_flow()
+ if rc != 0:
+ module.fail_json(msg='Error while deleting flow: "%s"' % err,
+ name=flow.name,
+ stderr=err,
+ rc=rc)
+
+ elif flow.state == 'present':
+ if not flow.flow_exists():
+ if module.check_mode:
+ module.exit_json(changed=True)
+
+ (rc, out, err) = flow.create_flow()
+ if rc != 0:
+ module.fail_json(msg='Error while creating flow: "%s"' % err,
+ name=flow.name,
+ stderr=err,
+ rc=rc)
+ else:
+            if flow.flow_needs_updating():
+ (rc, out, err) = flow.update_flow()
+ if rc != 0:
+ module.fail_json(msg='Error while updating flow: "%s"' % err,
+ name=flow.name,
+ stderr=err,
+ rc=rc)
+
+ elif flow.state == 'resetted':
+ if flow.flow_exists():
+ if module.check_mode:
+ module.exit_json(changed=True)
+
+ (rc, out, err) = flow.reset_flow()
+ if rc != 0:
+ module.fail_json(msg='Error while resetting flow: "%s"' % err,
+ name=flow.name,
+ stderr=err,
+ rc=rc)
+
+ if rc is None:
+ result['changed'] = False
+ else:
+ result['changed'] = True
+
+ if out:
+ result['stdout'] = out
+ if err:
+ result['stderr'] = err
+
+ module.exit_json(**result)
+
+
+from ansible.module_utils.basic import *
+main()
diff --git a/lib/ansible/modules/extras/network/illumos/ipadm_if.py b/lib/ansible/modules/extras/network/illumos/ipadm_if.py
new file mode 100644
index 0000000000..c7419848fc
--- /dev/null
+++ b/lib/ansible/modules/extras/network/illumos/ipadm_if.py
@@ -0,0 +1,222 @@
+#!/usr/bin/python
+# -*- coding: utf-8 -*-
+
+# (c) 2015, Adam Števko <adam.stevko@gmail.com>
+#
+# This file is part of Ansible
+#
+# Ansible is free software: you can redistribute it and/or modify
+# it under the terms of the GNU General Public License as published by
+# the Free Software Foundation, either version 3 of the License, or
+# (at your option) any later version.
+#
+# Ansible is distributed in the hope that it will be useful,
+# but WITHOUT ANY WARRANTY; without even the implied warranty of
+# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+# GNU General Public License for more details.
+#
+# You should have received a copy of the GNU General Public License
+# along with Ansible. If not, see <http://www.gnu.org/licenses/>.
+#
+
+DOCUMENTATION = '''
+---
+module: ipadm_if
+short_description: Manage IP interfaces on Solaris/illumos systems.
+description:
+ - Create, delete, enable or disable IP interfaces on Solaris/illumos
+ systems.
+version_added: "2.2"
+author: Adam Števko (@xen0l)
+options:
+ name:
+ description:
+ - IP interface name.
+ required: true
+ temporary:
+ description:
+ - Specifies that the IP interface is temporary. Temporary IP
+ interfaces do not persist across reboots.
+ required: false
+ default: false
+ choices: [ "true", "false" ]
+ state:
+ description:
+ - Create or delete Solaris/illumos IP interfaces.
+ required: false
+ default: "present"
+ choices: [ "present", "absent", "enabled", "disabled" ]
+'''
+
+EXAMPLES = '''
+# Create vnic0 interface
+ipadm_if: name=vnic0 state=enabled
+
+# Disable vnic0 interface
+ipadm_if: name=vnic0 state=disabled
+'''
+
+RETURN = '''
+name:
+ description: IP interface name
+ returned: always
+ type: string
+ sample: "vnic0"
+state:
+ description: state of the target
+ returned: always
+ type: string
+ sample: "present"
+temporary:
+    description: persistence of an IP interface
+ returned: always
+ type: boolean
+ sample: "True"
+'''
+
+
+class IPInterface(object):
+
+ def __init__(self, module):
+ self.module = module
+
+ self.name = module.params['name']
+ self.temporary = module.params['temporary']
+ self.state = module.params['state']
+
+ def interface_exists(self):
+ cmd = [self.module.get_bin_path('ipadm', True)]
+
+ cmd.append('show-if')
+ cmd.append(self.name)
+
+ (rc, _, _) = self.module.run_command(cmd)
+ if rc == 0:
+ return True
+ else:
+ return False
+
+ def interface_is_disabled(self):
+ cmd = [self.module.get_bin_path('ipadm', True)]
+
+ cmd.append('show-if')
+ cmd.append('-o')
+ cmd.append('state')
+ cmd.append(self.name)
+
+ (rc, out, err) = self.module.run_command(cmd)
+ if rc != 0:
+ self.module.fail_json(name=self.name, rc=rc, msg=err)
+
+ return 'disabled' in out
+
+ def create_interface(self):
+ cmd = [self.module.get_bin_path('ipadm', True)]
+
+ cmd.append('create-if')
+
+ if self.temporary:
+ cmd.append('-t')
+
+ cmd.append(self.name)
+
+ return self.module.run_command(cmd)
+
+ def delete_interface(self):
+ cmd = [self.module.get_bin_path('ipadm', True)]
+
+ cmd.append('delete-if')
+
+ if self.temporary:
+ cmd.append('-t')
+
+ cmd.append(self.name)
+
+ return self.module.run_command(cmd)
+
+ def enable_interface(self):
+ cmd = [self.module.get_bin_path('ipadm', True)]
+
+ cmd.append('enable-if')
+ cmd.append('-t')
+ cmd.append(self.name)
+
+ return self.module.run_command(cmd)
+
+ def disable_interface(self):
+ cmd = [self.module.get_bin_path('ipadm', True)]
+
+ cmd.append('disable-if')
+ cmd.append('-t')
+ cmd.append(self.name)
+
+ return self.module.run_command(cmd)
+
+
+def main():
+ module = AnsibleModule(
+ argument_spec=dict(
+ name=dict(required=True),
+ temporary=dict(default=False, type='bool'),
+ state=dict(default='present', choices=['absent',
+ 'present',
+ 'enabled',
+ 'disabled']),
+ ),
+ supports_check_mode=True
+ )
+
+ interface = IPInterface(module)
+
+ rc = None
+ out = ''
+ err = ''
+ result = {}
+ result['name'] = interface.name
+ result['state'] = interface.state
+ result['temporary'] = interface.temporary
+
+ if interface.state == 'absent':
+ if interface.interface_exists():
+ if module.check_mode:
+ module.exit_json(changed=True)
+ (rc, out, err) = interface.delete_interface()
+ if rc != 0:
+ module.fail_json(name=interface.name, msg=err, rc=rc)
+ elif interface.state == 'present':
+ if not interface.interface_exists():
+ if module.check_mode:
+ module.exit_json(changed=True)
+ (rc, out, err) = interface.create_interface()
+
+ if rc is not None and rc != 0:
+ module.fail_json(name=interface.name, msg=err, rc=rc)
+
+    elif interface.state == 'enabled':
+        if interface.interface_is_disabled():
+            if module.check_mode:
+                module.exit_json(changed=True)
+            (rc, out, err) = interface.enable_interface()
+
+        if rc is not None and rc != 0:
+            module.fail_json(name=interface.name, msg=err, rc=rc)
+
+    elif interface.state == 'disabled':
+        if not interface.interface_is_disabled():
+            if module.check_mode:
+                module.exit_json(changed=True)
+            (rc, out, err) = interface.disable_interface()
+
+        if rc is not None and rc != 0:
+            module.fail_json(name=interface.name, msg=err, rc=rc)
+
+ if rc is None:
+ result['changed'] = False
+ else:
+ result['changed'] = True
+
+ if out:
+ result['stdout'] = out
+ if err:
+ result['stderr'] = err
+
+ module.exit_json(**result)
+
+from ansible.module_utils.basic import *
+main()
diff --git a/lib/ansible/modules/extras/network/illumos/ipadm_prop.py b/lib/ansible/modules/extras/network/illumos/ipadm_prop.py
new file mode 100644
index 0000000000..5399189ad3
--- /dev/null
+++ b/lib/ansible/modules/extras/network/illumos/ipadm_prop.py
@@ -0,0 +1,264 @@
+#!/usr/bin/python
+# -*- coding: utf-8 -*-
+
+# (c) 2015, Adam Števko <adam.stevko@gmail.com>
+#
+# This file is part of Ansible
+#
+# Ansible is free software: you can redistribute it and/or modify
+# it under the terms of the GNU General Public License as published by
+# the Free Software Foundation, either version 3 of the License, or
+# (at your option) any later version.
+#
+# Ansible is distributed in the hope that it will be useful,
+# but WITHOUT ANY WARRANTY; without even the implied warranty of
+# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+# GNU General Public License for more details.
+#
+# You should have received a copy of the GNU General Public License
+# along with Ansible. If not, see <http://www.gnu.org/licenses/>.
+#
+
+DOCUMENTATION = '''
+---
+module: ipadm_prop
+short_description: Manage protocol properties on Solaris/illumos systems.
+description:
+ - Modify protocol properties on Solaris/illumos systems.
+version_added: "2.2"
+author: Adam Števko (@xen0l)
+options:
+ protocol:
+ description:
+            - Specifies the protocol for which we want to manage properties.
+ required: true
+ property:
+ description:
+ - Specifies the name of property we want to manage.
+ required: true
+ value:
+ description:
+ - Specifies the value we want to set for the property.
+ required: false
+ temporary:
+ description:
+ - Specifies that the property value is temporary. Temporary
+ property values do not persist across reboots.
+ required: false
+ default: false
+ choices: [ "true", "false" ]
+ state:
+ description:
+ - Set or reset the property value.
+ required: false
+ default: present
+ choices: [ "present", "absent", "reset" ]
+'''
+
+EXAMPLES = '''
+# Set TCP receive buffer size
+ipadm_prop: protocol=tcp property=recv_buf value=65536
+
+# Reset UDP send buffer size to the default value
+ipadm_prop: protocol=udp property=send_buf state=reset
+'''
+
+RETURN = '''
+protocol:
+ description: property's protocol
+ returned: always
+ type: string
+ sample: "TCP"
+property:
+ description: name of the property
+ returned: always
+ type: string
+ sample: "recv_maxbuf"
+state:
+ description: state of the target
+ returned: always
+ type: string
+ sample: "present"
+temporary:
+ description: property's persistence
+ returned: always
+ type: boolean
+ sample: "True"
+value:
+ description: value of the property
+ returned: always
+ type: int/string (depends on property)
+ sample: 1024/never
+'''
+
+SUPPORTED_PROTOCOLS = ['ipv4', 'ipv6', 'icmp', 'tcp', 'udp', 'sctp']
+
+
+class Prop(object):
+
+ def __init__(self, module):
+ self.module = module
+
+ self.protocol = module.params['protocol']
+ self.property = module.params['property']
+ self.value = module.params['value']
+ self.temporary = module.params['temporary']
+ self.state = module.params['state']
+
+ def property_exists(self):
+ cmd = [self.module.get_bin_path('ipadm')]
+
+ cmd.append('show-prop')
+ cmd.append('-p')
+ cmd.append(self.property)
+ cmd.append(self.protocol)
+
+ (rc, _, _) = self.module.run_command(cmd)
+
+ if rc == 0:
+ return True
+ else:
+ self.module.fail_json(msg='Unknown property "%s" for protocol %s' %
+ (self.property, self.protocol),
+ protocol=self.protocol,
+ property=self.property)
+
+ def property_is_modified(self):
+ cmd = [self.module.get_bin_path('ipadm')]
+
+ cmd.append('show-prop')
+ cmd.append('-c')
+ cmd.append('-o')
+ cmd.append('current,default')
+ cmd.append('-p')
+ cmd.append(self.property)
+ cmd.append(self.protocol)
+
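+        # With -c and -o current,default, ipadm prints a parseable
+        # colon-delimited pair such as '65536:65536'.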
+ (rc, out, _) = self.module.run_command(cmd)
+
+ out = out.rstrip()
+ (value, default) = out.split(':')
+
+        if rc == 0 and value != default:
+ return True
+ else:
+ return False
+
+ def property_is_set(self):
+ cmd = [self.module.get_bin_path('ipadm')]
+
+ cmd.append('show-prop')
+ cmd.append('-c')
+ cmd.append('-o')
+ cmd.append('current')
+ cmd.append('-p')
+ cmd.append(self.property)
+ cmd.append(self.protocol)
+
+ (rc, out, _) = self.module.run_command(cmd)
+
+ out = out.rstrip()
+
+ if rc == 0 and self.value == out:
+ return True
+ else:
+ return False
+
+ def set_property(self):
+ cmd = [self.module.get_bin_path('ipadm')]
+
+ cmd.append('set-prop')
+
+ if self.temporary:
+ cmd.append('-t')
+
+ cmd.append('-p')
+ cmd.append(self.property + "=" + self.value)
+ cmd.append(self.protocol)
+
+ return self.module.run_command(cmd)
+
+ def reset_property(self):
+ cmd = [self.module.get_bin_path('ipadm')]
+
+ cmd.append('reset-prop')
+
+ if self.temporary:
+ cmd.append('-t')
+
+ cmd.append('-p')
+ cmd.append(self.property)
+ cmd.append(self.protocol)
+
+ return self.module.run_command(cmd)
+
+
+def main():
+ module = AnsibleModule(
+ argument_spec=dict(
+ protocol=dict(required=True, choices=SUPPORTED_PROTOCOLS),
+ property=dict(required=True),
+ value=dict(required=False),
+ temporary=dict(default=False, type='bool'),
+ state=dict(
+ default='present', choices=['absent', 'present', 'reset']),
+ ),
+ supports_check_mode=True
+ )
+
+ prop = Prop(module)
+
+ rc = None
+ out = ''
+ err = ''
+ result = {}
+ result['protocol'] = prop.protocol
+ result['property'] = prop.property
+ result['state'] = prop.state
+ result['temporary'] = prop.temporary
+ if prop.value:
+ result['value'] = prop.value
+
+ if prop.state == 'absent' or prop.state == 'reset':
+ if prop.property_exists():
+            if prop.property_is_modified():
+ if module.check_mode:
+ module.exit_json(changed=True)
+ (rc, out, err) = prop.reset_property()
+ if rc != 0:
+ module.fail_json(protocol=prop.protocol,
+ property=prop.property,
+ msg=err,
+ rc=rc)
+
+ elif prop.state == 'present':
+ if prop.value is None:
+ module.fail_json(msg='Value is mandatory with state "present"')
+
+ if prop.property_exists():
+ if not prop.property_is_set():
+ if module.check_mode:
+ module.exit_json(changed=True)
+
+ (rc, out, err) = prop.set_property()
+ if rc != 0:
+ module.fail_json(protocol=prop.protocol,
+ property=prop.property,
+ msg=err,
+ rc=rc)
+
+ if rc is None:
+ result['changed'] = False
+ else:
+ result['changed'] = True
+
+ if out:
+ result['stdout'] = out
+ if err:
+ result['stderr'] = err
+
+ module.exit_json(**result)
+
+
+from ansible.module_utils.basic import *
+main()
diff --git a/lib/ansible/modules/extras/network/ipify_facts.py b/lib/ansible/modules/extras/network/ipify_facts.py
new file mode 100644
index 0000000000..95bf549be9
--- /dev/null
+++ b/lib/ansible/modules/extras/network/ipify_facts.py
@@ -0,0 +1,100 @@
+#!/usr/bin/python
+# -*- coding: utf-8 -*-
+#
+# (c) 2015, René Moser <mail@renemoser.net>
+#
+# This file is part of Ansible
+#
+# Ansible is free software: you can redistribute it and/or modify
+# it under the terms of the GNU General Public License as published by
+# the Free Software Foundation, either version 3 of the License, or
+# (at your option) any later version.
+#
+# Ansible is distributed in the hope that it will be useful,
+# but WITHOUT ANY WARRANTY; without even the implied warranty of
+# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+# GNU General Public License for more details.
+#
+# You should have received a copy of the GNU General Public License
+# along with Ansible. If not, see <http://www.gnu.org/licenses/>.
+
+DOCUMENTATION = '''
+---
+module: ipify_facts
+short_description: Retrieve the public IP of your internet gateway.
+description:
+  - Useful if you are behind NAT and need to know the public IP of your internet gateway.
+version_added: '2.0'
+author: "René Moser (@resmo)"
+options:
+ api_url:
+ description:
+ - URL of the ipify.org API service.
+      - C(?format=json) will be appended by default.
+ required: false
+ default: 'https://api.ipify.org'
+notes:
+ - "Visit https://www.ipify.org to get more information."
+'''
+
+EXAMPLES = '''
+# Gather IP facts from ipify.org
+- name: get my public IP
+ ipify_facts:
+
+# Gather IP facts from your own ipify service endpoint
+- name: get my public IP
+ ipify_facts: api_url=http://api.example.com/ipify
+'''
+
+RETURN = '''
+---
+ipify_public_ip:
+ description: Public IP of the internet gateway.
+ returned: success
+ type: string
+ sample: 1.2.3.4
+'''
+
+try:
+ import json
+except ImportError:
+ try:
+ import simplejson as json
+ except ImportError:
+ # Let snippet from module_utils/basic.py return a proper error in this case
+ pass
+
+
+class IpifyFacts(object):
+
+ def __init__(self):
+ self.api_url = module.params.get('api_url')
+
+ def run(self):
+ result = {
+ 'ipify_public_ip': None
+ }
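+        # With ?format=json appended, the ipify API responds with a JSON
+        # body of the form {"ip": "1.2.3.4"}.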
+        (response, info) = fetch_url(module, self.api_url + "?format=json", force=True)
+ if response:
+ data = json.loads(response.read())
+ result['ipify_public_ip'] = data.get('ip')
+ return result
+
+def main():
+ global module
+ module = AnsibleModule(
+        argument_spec=dict(
+            api_url=dict(default='https://api.ipify.org'),
+ ),
+ supports_check_mode=True,
+ )
+
+ ipify_facts = IpifyFacts().run()
+ ipify_facts_result = dict(changed=False, ansible_facts=ipify_facts)
+ module.exit_json(**ipify_facts_result)
+
+from ansible.module_utils.basic import *
+from ansible.module_utils.urls import *
+if __name__ == '__main__':
+ main()
diff --git a/lib/ansible/modules/extras/network/lldp.py b/lib/ansible/modules/extras/network/lldp.py
new file mode 100644
index 0000000000..fd1b1092d5
--- /dev/null
+++ b/lib/ansible/modules/extras/network/lldp.py
@@ -0,0 +1,86 @@
+#!/usr/bin/python -tt
+# This file is part of Ansible
+#
+# Ansible is free software: you can redistribute it and/or modify
+# it under the terms of the GNU General Public License as published by
+# the Free Software Foundation, either version 3 of the License, or
+# (at your option) any later version.
+#
+# Ansible is distributed in the hope that it will be useful,
+# but WITHOUT ANY WARRANTY; without even the implied warranty of
+# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+# GNU General Public License for more details.
+#
+# You should have received a copy of the GNU General Public License
+# along with Ansible. If not, see <http://www.gnu.org/licenses/>.
+
+import subprocess
+
+DOCUMENTATION = '''
+---
+module: lldp
+requirements: [ lldpctl ]
+version_added: 1.6
+short_description: get details reported by lldp
+description:
+ - Reads data out of lldpctl
+options: {}
+author: "Andy Hill (@andyhky)"
+notes:
+ - Requires lldpd running and lldp enabled on switches
+'''
+
+EXAMPLES = '''
+# Retrieve switch/port information
+ - name: Gather information from lldp
+ lldp:
+
+ - name: Print each switch/port
+   debug: msg="{{ lldp[item]['chassis']['name'] }} / {{ lldp[item]['port']['ifalias'] }}"
+ with_items: lldp.keys()
+
+# TASK: [Print each switch/port] ***********************************************************
+# ok: [10.13.0.22] => (item=eth2) => {"item": "eth2", "msg": "switch1.example.com / Gi0/24"}
+# ok: [10.13.0.22] => (item=eth1) => {"item": "eth1", "msg": "switch2.example.com / Gi0/3"}
+# ok: [10.13.0.22] => (item=eth0) => {"item": "eth0", "msg": "switch3.example.com / Gi0/3"}
+
+'''
+
+def gather_lldp():
+ cmd = ['lldpctl', '-f', 'keyvalue']
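+    # 'keyvalue' output is one dotted path per line, e.g.
+    #   lldp.eth0.chassis.name=switch1.example.com
+    #   lldp.eth0.port.ifalias=Gi0/24
+    # Continuation lines (multi-line values) do not start with 'lldp' and
+    # are appended to the previous key's value.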
+ proc = subprocess.Popen(cmd, stdout=subprocess.PIPE)
+ (output, err) = proc.communicate()
+ if output:
+ output_dict = {}
+ lldp_entries = output.split("\n")
+
+ for entry in lldp_entries:
+ if entry.startswith('lldp'):
+ path, value = entry.strip().split("=", 1)
+ path = path.split(".")
+ path_components, final = path[:-1], path[-1]
+ else:
+ value = current_dict[final] + '\n' + entry
+
+ current_dict = output_dict
+ for path_component in path_components:
+ current_dict[path_component] = current_dict.get(path_component, {})
+ current_dict = current_dict[path_component]
+ current_dict[final] = value
+ return output_dict
+
+
+def main():
+ module = AnsibleModule({})
+
+ lldp_output = gather_lldp()
+ try:
+ data = {'lldp': lldp_output['lldp']}
+ module.exit_json(ansible_facts=data)
+ except TypeError:
+ module.fail_json(msg="lldpctl command failed. is lldpd running?")
+
+# import module snippets
+from ansible.module_utils.basic import *
+main()
+
diff --git a/lib/ansible/modules/extras/network/netconf/__init__.py b/lib/ansible/modules/extras/network/netconf/__init__.py
new file mode 100644
index 0000000000..e69de29bb2
--- /dev/null
+++ b/lib/ansible/modules/extras/network/netconf/__init__.py
diff --git a/lib/ansible/modules/extras/network/netconf/netconf_config.py b/lib/ansible/modules/extras/network/netconf/netconf_config.py
new file mode 100755
index 0000000000..43baa63a5d
--- /dev/null
+++ b/lib/ansible/modules/extras/network/netconf/netconf_config.py
@@ -0,0 +1,221 @@
+#!/usr/bin/python
+
+# (c) 2016, Leandro Lisboa Penz <lpenz at lpenz.org>
+#
+# This file is part of Ansible
+#
+# Ansible is free software: you can redistribute it and/or modify
+# it under the terms of the GNU General Public License as published by
+# the Free Software Foundation, either version 3 of the License, or
+# (at your option) any later version.
+#
+# Ansible is distributed in the hope that it will be useful,
+# but WITHOUT ANY WARRANTY; without even the implied warranty of
+# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+# GNU General Public License for more details.
+#
+# You should have received a copy of the GNU General Public License
+# along with Ansible. If not, see <http://www.gnu.org/licenses/>.
+
+DOCUMENTATION = '''
+---
+module: netconf_config
+author: "Leandro Lisboa Penz (@lpenz)"
+short_description: netconf device configuration
+description:
+ - Netconf is a network management protocol developed and standardized by
+ the IETF. It is documented in RFC 6241.
+
+ - This module allows the user to send a configuration XML file to a netconf
+ device, and detects if there was a configuration change.
+notes:
+  - This module supports devices with and without the candidate and
+    confirmed-commit capabilities. It always uses the safer feature.
+version_added: "2.2"
+options:
+ host:
+ description:
+ - the hostname or ip address of the netconf device
+ required: true
+ port:
+ description:
+ - the netconf port
+ default: 830
+ required: false
+ hostkey_verify:
+ description:
+ - if true, the ssh host key of the device must match a ssh key present on the host
+ - if false, the ssh host key of the device is not checked
+ default: true
+ required: false
+ username:
+ description:
+ - the username to authenticate with
+ required: true
+ password:
+ description:
+ - password of the user to authenticate with
+ required: true
+ xml:
+ description:
+ - the XML content to send to the device
+ required: true
+
+
+requirements:
+ - "python >= 2.6"
+ - "ncclient"
+'''
+
+EXAMPLES = '''
+- name: set ntp server in the device
+ netconf_config:
+ host: 10.0.0.1
+ username: admin
+ password: admin
+ xml: |
+ <config xmlns:xc="urn:ietf:params:xml:ns:netconf:base:1.0">
+ <system xmlns="urn:ietf:params:xml:ns:yang:ietf-system">
+ <ntp>
+ <enabled>true</enabled>
+ <server>
+ <name>ntp1</name>
+ <udp><address>127.0.0.1</address></udp>
+ </server>
+ </ntp>
+ </system>
+ </config>
+
+- name: wipe ntp configuration
+ netconf_config:
+ host: 10.0.0.1
+ username: admin
+ password: admin
+ xml: |
+ <config xmlns:xc="urn:ietf:params:xml:ns:netconf:base:1.0">
+ <system xmlns="urn:ietf:params:xml:ns:yang:ietf-system">
+ <ntp>
+ <enabled>false</enabled>
+ <server operation="remove">
+ <name>ntp1</name>
+ </server>
+ </ntp>
+ </system>
+ </config>
+
+'''
+
+RETURN = '''
+server_capabilities:
+ description: list of capabilities of the server
+ returned: success
+ type: list of strings
+ sample: ['urn:ietf:params:netconf:base:1.1','urn:ietf:params:netconf:capability:confirmed-commit:1.0','urn:ietf:params:netconf:capability:candidate:1.0']
+
+'''
+
+import xml.dom.minidom
+try:
+ import ncclient.manager
+ HAS_NCCLIENT = True
+except ImportError:
+ HAS_NCCLIENT = False
+
+
+import logging
+
+
+def netconf_edit_config(m, xml, commit, retkwargs):
+ if ":candidate" in m.server_capabilities:
+ datastore = 'candidate'
+ else:
+ datastore = 'running'
+ m.lock(target=datastore)
+ try:
+ m.discard_changes()
+ config_before = m.get_config(source=datastore)
+ m.edit_config(target=datastore, config=xml)
+ config_after = m.get_config(source=datastore)
+ changed = config_before.data_xml != config_after.data_xml
+ if changed and commit:
+ if ":confirmed-commit" in m.server_capabilities:
+ m.commit(confirmed=True)
+ m.commit()
+ else:
+ m.commit()
+ return changed
+ finally:
+ m.unlock(target=datastore)
+
+
+# ------------------------------------------------------------------- #
+# Main
+
+
+def main():
+
+ module = AnsibleModule(
+ argument_spec=dict(
+ host=dict(type='str', required=True),
+ port=dict(type='int', default=830),
+ hostkey_verify=dict(type='bool', default=True),
+ username=dict(type='str', required=True, no_log=True),
+ password=dict(type='str', required=True, no_log=True),
+ xml=dict(type='str', required=True),
+ )
+ )
+
+ if not HAS_NCCLIENT:
+ module.fail_json(msg='could not import the python library '
+ 'ncclient required by this module')
+
+ try:
+ xml.dom.minidom.parseString(module.params['xml'])
+ except:
+ e = get_exception()
+ module.fail_json(
+ msg='error parsing XML: ' +
+ str(e)
+ )
+ return
+
+ nckwargs = dict(
+ host=module.params['host'],
+ port=module.params['port'],
+ hostkey_verify=module.params['hostkey_verify'],
+ username=module.params['username'],
+ password=module.params['password'],
+ )
+ retkwargs = dict()
+
+ try:
+ m = ncclient.manager.connect(**nckwargs)
+ except ncclient.transport.errors.AuthenticationError:
+ module.fail_json(
+ msg='authentication failed while connecting to device'
+ )
+ except:
+ e = get_exception()
+ module.fail_json(
+ msg='error connecting to the device: ' +
+ str(e)
+ )
+ return
+ retkwargs['server_capabilities'] = list(m.server_capabilities)
+ try:
+ changed = netconf_edit_config(
+ m=m,
+ xml=module.params['xml'],
+ commit=True,
+ retkwargs=retkwargs,
+ )
+ finally:
+ m.close_session()
+ module.exit_json(changed=changed, **retkwargs)
+
+
+# import module snippets
+from ansible.module_utils.basic import *
+
+if __name__ == '__main__':
+ main()
diff --git a/lib/ansible/modules/extras/network/nmcli.py b/lib/ansible/modules/extras/network/nmcli.py
new file mode 100644
index 0000000000..5e729af786
--- /dev/null
+++ b/lib/ansible/modules/extras/network/nmcli.py
@@ -0,0 +1,1086 @@
+#!/usr/bin/python
+# -*- coding: utf-8 -*-
+
+# (c) 2015, Chris Long <alcamie@gmail.com> <chlong@redhat.com>
+#
+# This file is a module for Ansible that interacts with Network Manager
+#
+# Ansible is free software: you can redistribute it and/or modify
+# it under the terms of the GNU General Public License as published by
+# the Free Software Foundation, either version 3 of the License, or
+# (at your option) any later version.
+#
+# Ansible is distributed in the hope that it will be useful,
+# but WITHOUT ANY WARRANTY; without even the implied warranty of
+# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+# GNU General Public License for more details.
+#
+# You should have received a copy of the GNU General Public License
+# along with Ansible. If not, see <http://www.gnu.org/licenses/>.
+
+
+DOCUMENTATION='''
+---
+module: nmcli
+author: "Chris Long (@alcamie101)"
+short_description: Manage Networking
+requirements: [ nmcli, dbus ]
+version_added: "2.0"
+description:
+    - Manage network devices. Create, modify and manage ethernet, team, bond, vlan and other connections.
+options:
+ state:
+ required: True
+ choices: [ present, absent ]
+ description:
+ - Whether the device should exist or not, taking action if the state is different from what is stated.
+ autoconnect:
+ required: False
+ default: "yes"
+ choices: [ "yes", "no" ]
+ description:
+ - Whether the connection should start on boot.
+ - Whether the connection profile can be automatically activated
+ conn_name:
+ required: True
+ description:
+            - 'The name used to call the connection. When not provided, a default name is generated: <type>[-<ifname>][-<num>]'
+ ifname:
+ required: False
+ default: conn_name
+ description:
+            - The interface name, i.e. the interface to bind the connection to.
+            - The connection will only be applicable to this interface name.
+ - A special value of "*" can be used for interface-independent connections.
+ - The ifname argument is mandatory for all connection types except bond, team, bridge and vlan.
+ type:
+ required: False
+ choices: [ ethernet, team, team-slave, bond, bond-slave, bridge, vlan ]
+ description:
+ - This is the type of device or network connection that you wish to create.
+ mode:
+ required: False
+ choices: [ "balance-rr", "active-backup", "balance-xor", "broadcast", "802.3ad", "balance-tlb", "balance-alb" ]
+        default: balance-rr
+ description:
+            - The mode to use for a bond, team or bridge connection.
+ master:
+ required: False
+ default: None
+ description:
+            - The master (ifname, connection UUID or conn_name) of the bridge, team or bond master connection profile.
+ ip4:
+ required: False
+ default: None
+ description:
+ - 'The IPv4 address to this interface using this format ie: "192.0.2.24/24"'
+ gw4:
+ required: False
+ description:
+ - 'The IPv4 gateway for this interface using this format ie: "192.0.2.1"'
+ dns4:
+ required: False
+ default: None
+ description:
+            - 'A list of up to 3 dns servers, ipv4 format e.g. To add two IPv4 DNS server addresses: ["192.0.2.53", "198.51.100.53"]'
+ ip6:
+ required: False
+ default: None
+ description:
+ - 'The IPv6 address to this interface using this format ie: "abbe::cafe"'
+ gw6:
+ required: False
+ default: None
+ description:
+ - 'The IPv6 gateway for this interface using this format ie: "2001:db8::1"'
+ dns6:
+ required: False
+ description:
+            - 'A list of up to 3 dns servers, ipv6 format e.g. To add two IPv6 DNS server addresses: ["2001:4860:4860::8888", "2001:4860:4860::8844"]'
+ mtu:
+ required: False
+ default: 1500
+ description:
+ - The connection MTU, e.g. 9000. This can't be applied when creating the interface and is done once the interface has been created.
+ - Can be used when modifying Team, VLAN, Ethernet (Future plans to implement wifi, pppoe, infiniband)
+ primary:
+ required: False
+ default: None
+ description:
+            - This is only used with bond and is the primary interface name (for "active-backup" mode); this is usually the 'ifname'.
+ miimon:
+ required: False
+ default: 100
+ description:
+ - This is only used with bond - miimon
+ downdelay:
+ required: False
+ default: None
+ description:
+ - This is only used with bond - downdelay
+ updelay:
+ required: False
+ default: None
+ description:
+ - This is only used with bond - updelay
+ arp_interval:
+ required: False
+ default: None
+ description:
+ - This is only used with bond - ARP interval
+ arp_ip_target:
+ required: False
+ default: None
+ description:
+ - This is only used with bond - ARP IP target
+ stp:
+ required: False
+ default: None
+ description:
+ - This is only used with bridge and controls whether Spanning Tree Protocol (STP) is enabled for this bridge
+ priority:
+ required: False
+ default: 128
+ description:
+ - This is only used with 'bridge' - sets STP priority
+ forwarddelay:
+ required: False
+ default: 15
+ description:
+ - This is only used with bridge - [forward-delay <2-30>] STP forwarding delay, in seconds
+ hellotime:
+ required: False
+ default: 2
+ description:
+ - This is only used with bridge - [hello-time <1-10>] STP hello time, in seconds
+ maxage:
+ required: False
+ default: 20
+ description:
+ - This is only used with bridge - [max-age <6-42>] STP maximum message age, in seconds
+ ageingtime:
+ required: False
+ default: 300
+ description:
+ - This is only used with bridge - [ageing-time <0-1000000>] the Ethernet MAC address aging time, in seconds
+ mac:
+ required: False
+ default: None
+ description:
+ - 'This is only used with bridge - MAC address of the bridge (note: this requires a recent kernel feature, originally introduced in 3.15 upstream kernel)'
+ slavepriority:
+ required: False
+ default: 32
+ description:
+ - This is only used with 'bridge-slave' - [<0-63>] - STP priority of this slave
+ path_cost:
+ required: False
+ default: 100
+ description:
+ - This is only used with 'bridge-slave' - [<1-65535>] - STP port cost for destinations via this slave
+ hairpin:
+ required: False
+ default: yes
+ description:
+ - This is only used with 'bridge-slave' - 'hairpin mode' for the slave, which allows frames to be sent back out through the slave the frame was received on.
+ vlanid:
+ required: False
+ default: None
+ description:
+ - This is only used with VLAN - VLAN ID in range <0-4095>
+ vlandev:
+ required: False
+ default: None
+ description:
+ - This is only used with VLAN - parent device this VLAN is on, can use ifname
+ flags:
+ required: False
+ default: None
+ description:
+ - This is only used with VLAN - flags
+ ingress:
+ required: False
+ default: None
+ description:
+ - This is only used with VLAN - VLAN ingress priority mapping
+ egress:
+ required: False
+ default: None
+ description:
+ - This is only used with VLAN - VLAN egress priority mapping
+
+'''
+
+EXAMPLES='''
+The following are working examples that I have run in the field. I follow this structure:
+```
+|_/inventory/cloud-hosts
+| /group_vars/openstack-stage.yml
+| /host_vars/controller-01.openstack.host.com
+| /host_vars/controller-02.openstack.host.com
+|_/playbook/library/nmcli.py
+| /playbook-add.yml
+| /playbook-del.yml
+```
+
+## inventory examples
+### groups_vars
+```yml
+---
+#devops_os_define_network
+storage_gw: "192.0.2.254"
+external_gw: "198.51.100.254"
+tenant_gw: "203.0.113.254"
+
+#Team vars
+nmcli_team:
+ - {conn_name: 'tenant', ip4: "{{tenant_ip}}", gw4: "{{tenant_gw}}"}
+ - {conn_name: 'external', ip4: "{{external_ip}}", gw4: "{{external_gw}}"}
+ - {conn_name: 'storage', ip4: "{{storage_ip}}", gw4: "{{storage_gw}}"}
+nmcli_team_slave:
+ - {conn_name: 'em1', ifname: 'em1', master: 'tenant'}
+ - {conn_name: 'em2', ifname: 'em2', master: 'tenant'}
+ - {conn_name: 'p2p1', ifname: 'p2p1', master: 'storage'}
+ - {conn_name: 'p2p2', ifname: 'p2p2', master: 'external'}
+
+#bond vars
+nmcli_bond:
+ - {conn_name: 'tenant', ip4: "{{tenant_ip}}", gw4: '', mode: 'balance-rr'}
+ - {conn_name: 'external', ip4: "{{external_ip}}", gw4: '', mode: 'balance-rr'}
+ - {conn_name: 'storage', ip4: "{{storage_ip}}", gw4: "{{storage_gw}}", mode: 'balance-rr'}
+nmcli_bond_slave:
+ - {conn_name: 'em1', ifname: 'em1', master: 'tenant'}
+ - {conn_name: 'em2', ifname: 'em2', master: 'tenant'}
+ - {conn_name: 'p2p1', ifname: 'p2p1', master: 'storage'}
+ - {conn_name: 'p2p2', ifname: 'p2p2', master: 'external'}
+
+#ethernet vars
+nmcli_ethernet:
+ - {conn_name: 'em1', ifname: 'em1', ip4: "{{tenant_ip}}", gw4: "{{tenant_gw}}"}
+ - {conn_name: 'em2', ifname: 'em2', ip4: "{{tenant_ip1}}", gw4: "{{tenant_gw}}"}
+ - {conn_name: 'p2p1', ifname: 'p2p1', ip4: "{{storage_ip}}", gw4: "{{storage_gw}}"}
+ - {conn_name: 'p2p2', ifname: 'p2p2', ip4: "{{external_ip}}", gw4: "{{external_gw}}"}
+```
+
+### host_vars
+```yml
+---
+storage_ip: "192.0.2.91/23"
+external_ip: "198.51.100.23/21"
+tenant_ip: "203.0.113.77/23"
+```
+
+
+
+## playbook-add.yml example
+
+```yml
+---
+- hosts: openstack-stage
+ remote_user: root
+ tasks:
+
+- name: install needed network manager libs
+ yum: name={{ item }} state=installed
+ with_items:
+ - NetworkManager-glib
+ - libnm-qt-devel.x86_64
+ - nm-connection-editor.x86_64
+ - libsemanage-python
+ - policycoreutils-python
+
+##### Working with all cloud nodes - Teaming
+ - name: try nmcli add team - conn_name only & ip4 gw4
+ nmcli: type=team conn_name={{item.conn_name}} ip4={{item.ip4}} gw4={{item.gw4}} state=present
+ with_items:
+ - "{{nmcli_team}}"
+
+ - name: try nmcli add teams-slave
+ nmcli: type=team-slave conn_name={{item.conn_name}} ifname={{item.ifname}} master={{item.master}} state=present
+ with_items:
+ - "{{nmcli_team_slave}}"
+
+###### Working with all cloud nodes - Bonding
+# - name: try nmcli add bond - conn_name only & ip4 gw4 mode
+# nmcli: type=bond conn_name={{item.conn_name}} ip4={{item.ip4}} gw4={{item.gw4}} mode={{item.mode}} state=present
+# with_items:
+# - "{{nmcli_bond}}"
+#
+# - name: try nmcli add bond-slave
+# nmcli: type=bond-slave conn_name={{item.conn_name}} ifname={{item.ifname}} master={{item.master}} state=present
+# with_items:
+# - "{{nmcli_bond_slave}}"
+
+##### Working with all cloud nodes - Ethernet
+# - name: nmcli add Ethernet - conn_name only & ip4 gw4
+# nmcli: type=ethernet conn_name={{item.conn_name}} ip4={{item.ip4}} gw4={{item.gw4}} state=present
+# with_items:
+# - "{{nmcli_ethernet}}"
+```
+
+## playbook-del.yml example
+
+```yml
+---
+- hosts: openstack-stage
+ remote_user: root
+ tasks:
+
+ - name: try nmcli del team - multiple
+ nmcli: conn_name={{item.conn_name}} state=absent
+ with_items:
+ - { conn_name: 'em1'}
+ - { conn_name: 'em2'}
+ - { conn_name: 'p1p1'}
+ - { conn_name: 'p1p2'}
+ - { conn_name: 'p2p1'}
+ - { conn_name: 'p2p2'}
+ - { conn_name: 'tenant'}
+ - { conn_name: 'storage'}
+ - { conn_name: 'external'}
+ - { conn_name: 'team-em1'}
+ - { conn_name: 'team-em2'}
+ - { conn_name: 'team-p1p1'}
+ - { conn_name: 'team-p1p2'}
+ - { conn_name: 'team-p2p1'}
+ - { conn_name: 'team-p2p2'}
+```
+# To add an Ethernet connection with static IP configuration, issue a command as follows
+- nmcli: conn_name=my-eth1 ifname=eth1 type=ethernet ip4=192.0.2.100/24 gw4=192.0.2.1 state=present
+
+# To add a Team connection with static IP configuration, issue a command as follows
+- nmcli: conn_name=my-team1 ifname=my-team1 type=team ip4=192.0.2.100/24 gw4=192.0.2.1 state=present autoconnect=yes
+
+# Optionally, at the same time specify IPv6 addresses for the device as follows:
+- nmcli: conn_name=my-eth1 ifname=eth1 type=ethernet ip4=192.0.2.100/24 gw4=192.0.2.1 ip6=2001:db8::cafe gw6=2001:db8::1 state=present
+
+# To add two IPv4 DNS server addresses:
+- nmcli: conn_name=my-eth1 dns4=["192.0.2.53", "198.51.100.53"] state=present
+
+# To make a profile usable for all compatible Ethernet interfaces, issue a command as follows
+- nmcli: type=ethernet conn_name=my-eth1 ifname="*" state=present
+
+# To change the property of a setting e.g. MTU, issue a command as follows:
+- nmcli: conn_name=my-eth1 mtu=9000 type=ethernet state=present
+
+    Exit statuses:
+ - nmcli exits with status 0 if it succeeds, a value greater than 0 is
+ returned if an error occurs.
+ - 0 Success - indicates the operation succeeded
+ - 1 Unknown or unspecified error
+ - 2 Invalid user input, wrong nmcli invocation
+ - 3 Timeout expired (see --wait option)
+ - 4 Connection activation failed
+ - 5 Connection deactivation failed
+ - 6 Disconnecting device failed
+ - 7 Connection deletion failed
+ - 8 NetworkManager is not running
+ - 9 nmcli and NetworkManager versions mismatch
+ - 10 Connection, device, or access point does not exist.
+'''
+# import ansible.module_utils.basic
+import os
+import sys
+HAVE_DBUS=False
+try:
+ import dbus
+ HAVE_DBUS=True
+except ImportError:
+ pass
+
+HAVE_NM_CLIENT=False
+try:
+ from gi.repository import NetworkManager, NMClient
+ HAVE_NM_CLIENT=True
+except ImportError:
+ pass
+
+class Nmcli(object):
+ """
+ This is the generic nmcli manipulation class that is subclassed based on platform.
+ A subclass may wish to override the following action methods:-
+ - create_connection()
+ - delete_connection()
+ - modify_connection()
+ - show_connection()
+ - up_connection()
+ - down_connection()
+ All subclasses MUST define platform and distribution (which may be None).
+ """
+
+ platform='Generic'
+ distribution=None
+ bus=dbus.SystemBus()
+ # The following is going to be used in dbus code
+ DEVTYPES={1: "Ethernet",
+ 2: "Wi-Fi",
+ 5: "Bluetooth",
+ 6: "OLPC",
+ 7: "WiMAX",
+ 8: "Modem",
+ 9: "InfiniBand",
+ 10: "Bond",
+ 11: "VLAN",
+ 12: "ADSL",
+ 13: "Bridge",
+ 14: "Generic",
+ 15: "Team"
+ }
+ STATES={0: "Unknown",
+ 10: "Unmanaged",
+ 20: "Unavailable",
+ 30: "Disconnected",
+ 40: "Prepare",
+ 50: "Config",
+ 60: "Need Auth",
+ 70: "IP Config",
+ 80: "IP Check",
+ 90: "Secondaries",
+ 100: "Activated",
+ 110: "Deactivating",
+ 120: "Failed"
+ }
+
+
+ def __init__(self, module):
+ self.module=module
+ self.state=module.params['state']
+ self.autoconnect=module.params['autoconnect']
+ self.conn_name=module.params['conn_name']
+ self.master=module.params['master']
+ self.ifname=module.params['ifname']
+ self.type=module.params['type']
+ self.ip4=module.params['ip4']
+ self.gw4=module.params['gw4']
+ self.dns4=module.params['dns4']
+ self.ip6=module.params['ip6']
+ self.gw6=module.params['gw6']
+ self.dns6=module.params['dns6']
+ self.mtu=module.params['mtu']
+ self.stp=module.params['stp']
+ self.priority=module.params['priority']
+ self.mode=module.params['mode']
+ self.miimon=module.params['miimon']
+ self.downdelay=module.params['downdelay']
+ self.updelay=module.params['updelay']
+ self.arp_interval=module.params['arp_interval']
+ self.arp_ip_target=module.params['arp_ip_target']
+ self.slavepriority=module.params['slavepriority']
+ self.forwarddelay=module.params['forwarddelay']
+ self.hellotime=module.params['hellotime']
+ self.maxage=module.params['maxage']
+ self.ageingtime=module.params['ageingtime']
+ self.mac=module.params['mac']
+ self.vlanid=module.params['vlanid']
+ self.vlandev=module.params['vlandev']
+ self.flags=module.params['flags']
+ self.ingress=module.params['ingress']
+ self.egress=module.params['egress']
+
+ def execute_command(self, cmd, use_unsafe_shell=False, data=None):
+ return self.module.run_command(cmd, use_unsafe_shell=use_unsafe_shell, data=data)
+
+ def merge_secrets(self, proxy, config, setting_name):
+ try:
+ # returns a dict of dicts mapping name::setting, where setting is a dict
+ # mapping key::value. Each member of the 'setting' dict is a secret
+ secrets=proxy.GetSecrets(setting_name)
+
+ # Copy the secrets into our connection config
+ for setting in secrets:
+ for key in secrets[setting]:
+ config[setting_name][key]=secrets[setting][key]
+ except Exception, e:
+ pass
+
+ def dict_to_string(self, d):
+ # Try to trivially translate a dictionary's elements into nice string
+ # formatting.
+ dstr=""
+ for key in d:
+ val=d[key]
+ str_val=""
+ add_string=True
+ if type(val)==type(dbus.Array([])):
+ for elt in val:
+ if type(elt)==type(dbus.Byte(1)):
+ str_val+="%s " % int(elt)
+ elif type(elt)==type(dbus.String("")):
+ str_val+="%s" % elt
+ elif type(val)==type(dbus.Dictionary({})):
+ dstr+=self.dict_to_string(val)
+ add_string=False
+ else:
+ str_val=val
+ if add_string:
+ dstr+="%s: %s\n" % ( key, str_val)
+ return dstr
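+        # Illustrative only: a plain settings dict such as
+        # {'id': 'eth0', 'type': '802-3-ethernet'} would render as
+        # "id: eth0\ntype: 802-3-ethernet\n" (dict iteration order applies).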
+
+ def connection_to_string(self, config):
+ # dump a connection configuration to use in list_connection_info
+ setting_list=[]
+ for setting_name in config:
+ setting_list.append(self.dict_to_string(config[setting_name]))
+ return setting_list
+
+ def bool_to_string(self, boolean):
+ if boolean:
+ return "yes"
+ else:
+ return "no"
+
+ def list_connection_info(self):
+ # Ask the settings service for the list of connections it provides
+ bus=dbus.SystemBus()
+
+ service_name="org.freedesktop.NetworkManager"
+ proxy=bus.get_object(service_name, "/org/freedesktop/NetworkManager/Settings")
+ settings=dbus.Interface(proxy, "org.freedesktop.NetworkManager.Settings")
+ connection_paths=settings.ListConnections()
+ connection_list=[]
+ # List each connection's name, UUID, and type
+ for path in connection_paths:
+ con_proxy=bus.get_object(service_name, path)
+ settings_connection=dbus.Interface(con_proxy, "org.freedesktop.NetworkManager.Settings.Connection")
+ config=settings_connection.GetSettings()
+
+ # Now get secrets too; we grab the secrets for each type of connection
+ # (since there isn't a "get all secrets" call because most of the time
+ # you only need 'wifi' secrets or '802.1x' secrets, not everything) and
+ # merge that into the configuration data - To use at a later stage
+ self.merge_secrets(settings_connection, config, '802-11-wireless')
+ self.merge_secrets(settings_connection, config, '802-11-wireless-security')
+ self.merge_secrets(settings_connection, config, '802-1x')
+ self.merge_secrets(settings_connection, config, 'gsm')
+ self.merge_secrets(settings_connection, config, 'cdma')
+ self.merge_secrets(settings_connection, config, 'ppp')
+
+ # Get the details of the 'connection' setting
+ s_con=config['connection']
+ connection_list.append(s_con['id'])
+ connection_list.append(s_con['uuid'])
+ connection_list.append(s_con['type'])
+ connection_list.append(self.connection_to_string(config))
+ return connection_list
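+        # Note: the returned list is flat; each connection contributes its id,
+        # uuid and type followed by the rendered settings, e.g. (illustrative)
+        # ['eth0', '<uuid>', '802-3-ethernet', [...], 'team0', '<uuid>', 'team', [...]].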
+
+    def connection_exists(self):
+        # use the connection name to determine whether a connection of that name already exists
+        connections=self.list_connection_info()
+
+        for con_item in connections:
+            if self.conn_name==con_item:
+                return True
+        return False
+
+ def down_connection(self):
+ cmd=[self.module.get_bin_path('nmcli', True)]
+ cmd.append('con')
+ cmd.append('down')
+ cmd.append(self.conn_name)
+ return self.execute_command(cmd)
+
+ def up_connection(self):
+ cmd=[self.module.get_bin_path('nmcli', True)]
+ cmd.append('con')
+ cmd.append('up')
+ cmd.append(self.conn_name)
+ return self.execute_command(cmd)
+
+ def create_connection_team(self):
+ cmd=[self.module.get_bin_path('nmcli', True)]
+ # format for creating team interface
+ cmd.append('con')
+ cmd.append('add')
+ cmd.append('type')
+ cmd.append('team')
+ cmd.append('con-name')
+ if self.conn_name is not None:
+ cmd.append(self.conn_name)
+ elif self.ifname is not None:
+ cmd.append(self.ifname)
+ cmd.append('ifname')
+ if self.ifname is not None:
+ cmd.append(self.ifname)
+ elif self.conn_name is not None:
+ cmd.append(self.conn_name)
+ if self.ip4 is not None:
+ cmd.append('ip4')
+ cmd.append(self.ip4)
+ if self.gw4 is not None:
+ cmd.append('gw4')
+ cmd.append(self.gw4)
+ if self.ip6 is not None:
+ cmd.append('ip6')
+ cmd.append(self.ip6)
+ if self.gw6 is not None:
+ cmd.append('gw6')
+ cmd.append(self.gw6)
+ if self.autoconnect is not None:
+ cmd.append('autoconnect')
+ cmd.append(self.bool_to_string(self.autoconnect))
+ return cmd
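+        # Illustrative command assembled above (example values, not defaults):
+        # nmcli con add type team con-name team0 ifname team0 ip4 192.0.2.10/24 gw4 192.0.2.1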
+
+ def modify_connection_team(self):
+ cmd=[self.module.get_bin_path('nmcli', True)]
+ # format for modifying team interface
+ cmd.append('con')
+ cmd.append('mod')
+ cmd.append(self.conn_name)
+ if self.ip4 is not None:
+ cmd.append('ipv4.address')
+ cmd.append(self.ip4)
+ if self.gw4 is not None:
+ cmd.append('ipv4.gateway')
+ cmd.append(self.gw4)
+ if self.dns4 is not None:
+ cmd.append('ipv4.dns')
+ cmd.append(self.dns4)
+ if self.ip6 is not None:
+ cmd.append('ipv6.address')
+ cmd.append(self.ip6)
+ if self.gw6 is not None:
+ cmd.append('ipv6.gateway')
+ cmd.append(self.gw6)
+ if self.dns6 is not None:
+ cmd.append('ipv6.dns')
+ cmd.append(self.dns6)
+ if self.autoconnect is not None:
+ cmd.append('autoconnect')
+ cmd.append(self.bool_to_string(self.autoconnect))
+ # Can't use MTU with team
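+        # Illustrative command (example values):
+        # nmcli con mod team0 ipv4.address 192.0.2.10/24 ipv4.dns 192.0.2.53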
+ return cmd
+
+ def create_connection_team_slave(self):
+ cmd=[self.module.get_bin_path('nmcli', True)]
+ # format for creating team-slave interface
+ cmd.append('connection')
+ cmd.append('add')
+ cmd.append('type')
+ cmd.append(self.type)
+ cmd.append('con-name')
+ if self.conn_name is not None:
+ cmd.append(self.conn_name)
+ elif self.ifname is not None:
+ cmd.append(self.ifname)
+ cmd.append('ifname')
+ if self.ifname is not None:
+ cmd.append(self.ifname)
+ elif self.conn_name is not None:
+ cmd.append(self.conn_name)
+        if self.master is not None:
+            cmd.append('master')
+            cmd.append(self.master)
+ # if self.mtu is not None:
+ # cmd.append('802-3-ethernet.mtu')
+ # cmd.append(self.mtu)
+ return cmd
+
+ def modify_connection_team_slave(self):
+ cmd=[self.module.get_bin_path('nmcli', True)]
+ # format for modifying team-slave interface
+ cmd.append('con')
+ cmd.append('mod')
+ cmd.append(self.conn_name)
+ cmd.append('connection.master')
+ cmd.append(self.master)
+ if self.mtu is not None:
+ cmd.append('802-3-ethernet.mtu')
+ cmd.append(self.mtu)
+ return cmd
+
+ def create_connection_bond(self):
+ cmd=[self.module.get_bin_path('nmcli', True)]
+ # format for creating bond interface
+ cmd.append('con')
+ cmd.append('add')
+ cmd.append('type')
+ cmd.append('bond')
+ cmd.append('con-name')
+ if self.conn_name is not None:
+ cmd.append(self.conn_name)
+ elif self.ifname is not None:
+ cmd.append(self.ifname)
+ cmd.append('ifname')
+ if self.ifname is not None:
+ cmd.append(self.ifname)
+ elif self.conn_name is not None:
+ cmd.append(self.conn_name)
+ if self.ip4 is not None:
+ cmd.append('ip4')
+ cmd.append(self.ip4)
+ if self.gw4 is not None:
+ cmd.append('gw4')
+ cmd.append(self.gw4)
+ if self.ip6 is not None:
+ cmd.append('ip6')
+ cmd.append(self.ip6)
+ if self.gw6 is not None:
+ cmd.append('gw6')
+ cmd.append(self.gw6)
+ if self.autoconnect is not None:
+ cmd.append('autoconnect')
+ cmd.append(self.bool_to_string(self.autoconnect))
+ if self.mode is not None:
+ cmd.append('mode')
+ cmd.append(self.mode)
+ if self.miimon is not None:
+ cmd.append('miimon')
+ cmd.append(self.miimon)
+ if self.downdelay is not None:
+ cmd.append('downdelay')
+ cmd.append(self.downdelay)
+        if self.updelay is not None:
+            cmd.append('updelay')
+            cmd.append(self.updelay)
+        if self.arp_interval is not None:
+            cmd.append('arp-interval')
+            cmd.append(self.arp_interval)
+        if self.arp_ip_target is not None:
+            cmd.append('arp-ip-target')
+            cmd.append(self.arp_ip_target)
+ return cmd
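+        # Illustrative command (example values):
+        # nmcli con add type bond con-name bond0 ifname bond0 mode active-backup miimon 100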
+
+ def modify_connection_bond(self):
+ cmd=[self.module.get_bin_path('nmcli', True)]
+ # format for modifying bond interface
+ cmd.append('con')
+ cmd.append('mod')
+ cmd.append(self.conn_name)
+ if self.ip4 is not None:
+ cmd.append('ipv4.address')
+ cmd.append(self.ip4)
+ if self.gw4 is not None:
+ cmd.append('ipv4.gateway')
+ cmd.append(self.gw4)
+ if self.dns4 is not None:
+ cmd.append('ipv4.dns')
+ cmd.append(self.dns4)
+ if self.ip6 is not None:
+ cmd.append('ipv6.address')
+ cmd.append(self.ip6)
+ if self.gw6 is not None:
+ cmd.append('ipv6.gateway')
+ cmd.append(self.gw6)
+ if self.dns6 is not None:
+ cmd.append('ipv6.dns')
+ cmd.append(self.dns6)
+ if self.autoconnect is not None:
+ cmd.append('autoconnect')
+ cmd.append(self.bool_to_string(self.autoconnect))
+ return cmd
+
+ def create_connection_bond_slave(self):
+ cmd=[self.module.get_bin_path('nmcli', True)]
+ # format for creating bond-slave interface
+ cmd.append('connection')
+ cmd.append('add')
+ cmd.append('type')
+ cmd.append('bond-slave')
+ cmd.append('con-name')
+ if self.conn_name is not None:
+ cmd.append(self.conn_name)
+ elif self.ifname is not None:
+ cmd.append(self.ifname)
+ cmd.append('ifname')
+ if self.ifname is not None:
+ cmd.append(self.ifname)
+ elif self.conn_name is not None:
+ cmd.append(self.conn_name)
+        if self.master is not None:
+            cmd.append('master')
+            cmd.append(self.master)
+ return cmd
+
+ def modify_connection_bond_slave(self):
+ cmd=[self.module.get_bin_path('nmcli', True)]
+ # format for modifying bond-slave interface
+ cmd.append('con')
+ cmd.append('mod')
+ cmd.append(self.conn_name)
+ cmd.append('connection.master')
+ cmd.append(self.master)
+ return cmd
+
+ def create_connection_ethernet(self):
+ cmd=[self.module.get_bin_path('nmcli', True)]
+ # format for creating ethernet interface
+        # To add an Ethernet connection with static IP configuration, issue a command as follows
+        # - nmcli: conn_name=my-eth1 ifname=eth1 type=ethernet ip4=192.0.2.100/24 gw4=192.0.2.1 state=present
+        # nmcli con add con-name my-eth1 ifname eth1 type ethernet ip4 192.0.2.100/24 gw4 192.0.2.1
+ cmd.append('con')
+ cmd.append('add')
+ cmd.append('type')
+ cmd.append('ethernet')
+ cmd.append('con-name')
+ if self.conn_name is not None:
+ cmd.append(self.conn_name)
+ elif self.ifname is not None:
+ cmd.append(self.ifname)
+ cmd.append('ifname')
+ if self.ifname is not None:
+ cmd.append(self.ifname)
+ elif self.conn_name is not None:
+ cmd.append(self.conn_name)
+ if self.ip4 is not None:
+ cmd.append('ip4')
+ cmd.append(self.ip4)
+ if self.gw4 is not None:
+ cmd.append('gw4')
+ cmd.append(self.gw4)
+ if self.ip6 is not None:
+ cmd.append('ip6')
+ cmd.append(self.ip6)
+ if self.gw6 is not None:
+ cmd.append('gw6')
+ cmd.append(self.gw6)
+ if self.autoconnect is not None:
+ cmd.append('autoconnect')
+ cmd.append(self.bool_to_string(self.autoconnect))
+ return cmd
+
+ def modify_connection_ethernet(self):
+ cmd=[self.module.get_bin_path('nmcli', True)]
+        # format for modifying ethernet interface
+        # To modify an Ethernet connection with static IP configuration, issue a command as follows
+        # - nmcli: conn_name=my-eth1 ifname=eth1 type=ethernet ip4=192.0.2.100/24 gw4=192.0.2.1 state=present
+        # nmcli con mod my-eth1 ipv4.address 192.0.2.100/24 ipv4.gateway 192.0.2.1
+ cmd.append('con')
+ cmd.append('mod')
+ cmd.append(self.conn_name)
+ if self.ip4 is not None:
+ cmd.append('ipv4.address')
+ cmd.append(self.ip4)
+ if self.gw4 is not None:
+ cmd.append('ipv4.gateway')
+ cmd.append(self.gw4)
+ if self.dns4 is not None:
+ cmd.append('ipv4.dns')
+ cmd.append(self.dns4)
+ if self.ip6 is not None:
+ cmd.append('ipv6.address')
+ cmd.append(self.ip6)
+ if self.gw6 is not None:
+ cmd.append('ipv6.gateway')
+ cmd.append(self.gw6)
+ if self.dns6 is not None:
+ cmd.append('ipv6.dns')
+ cmd.append(self.dns6)
+ if self.mtu is not None:
+ cmd.append('802-3-ethernet.mtu')
+ cmd.append(self.mtu)
+ if self.autoconnect is not None:
+ cmd.append('autoconnect')
+ cmd.append(self.bool_to_string(self.autoconnect))
+ return cmd
+
+ def create_connection_bridge(self):
+ cmd=[self.module.get_bin_path('nmcli', True)]
+ # format for creating bridge interface
+ return cmd
+
+ def modify_connection_bridge(self):
+ cmd=[self.module.get_bin_path('nmcli', True)]
+ # format for modifying bridge interface
+ return cmd
+
+ def create_connection_vlan(self):
+ cmd=[self.module.get_bin_path('nmcli', True)]
+        # format for creating vlan interface
+ return cmd
+
+ def modify_connection_vlan(self):
+ cmd=[self.module.get_bin_path('nmcli', True)]
+        # format for modifying vlan interface
+ return cmd
+
+ def create_connection(self):
+ cmd=[]
+        if self.type=='team':
+            if (self.dns4 is not None) or (self.dns6 is not None):
+                cmd=self.create_connection_team()
+                self.execute_command(cmd)
+                cmd=self.modify_connection_team()
+                self.execute_command(cmd)
+                # up_connection() already runs the command, so return its result directly
+                return self.up_connection()
+            else:
+                cmd=self.create_connection_team()
+                return self.execute_command(cmd)
+ elif self.type=='team-slave':
+            if self.mtu is not None:
+                cmd=self.create_connection_team_slave()
+                self.execute_command(cmd)
+                # apply the MTU via a modify call and return that result
+                cmd=self.modify_connection_team_slave()
+                return self.execute_command(cmd)
+            else:
+                cmd=self.create_connection_team_slave()
+                return self.execute_command(cmd)
+ elif self.type=='bond':
+ if (self.mtu is not None) or (self.dns4 is not None) or (self.dns6 is not None):
+ cmd=self.create_connection_bond()
+ self.execute_command(cmd)
+ cmd=self.modify_connection_bond()
+ self.execute_command(cmd)
+                return self.up_connection()
+ else:
+ cmd=self.create_connection_bond()
+ return self.execute_command(cmd)
+ elif self.type=='bond-slave':
+ cmd=self.create_connection_bond_slave()
+ elif self.type=='ethernet':
+ if (self.mtu is not None) or (self.dns4 is not None) or (self.dns6 is not None):
+ cmd=self.create_connection_ethernet()
+ self.execute_command(cmd)
+ cmd=self.modify_connection_ethernet()
+ self.execute_command(cmd)
+                return self.up_connection()
+ else:
+ cmd=self.create_connection_ethernet()
+ return self.execute_command(cmd)
+ elif self.type=='bridge':
+ cmd=self.create_connection_bridge()
+ elif self.type=='vlan':
+ cmd=self.create_connection_vlan()
+ return self.execute_command(cmd)
+
+ def remove_connection(self):
+ cmd=[self.module.get_bin_path('nmcli', True)]
+ cmd.append('con')
+ cmd.append('del')
+ cmd.append(self.conn_name)
+ return self.execute_command(cmd)
+
+ def modify_connection(self):
+ cmd=[]
+ if self.type=='team':
+ cmd=self.modify_connection_team()
+ elif self.type=='team-slave':
+ cmd=self.modify_connection_team_slave()
+ elif self.type=='bond':
+ cmd=self.modify_connection_bond()
+ elif self.type=='bond-slave':
+ cmd=self.modify_connection_bond_slave()
+ elif self.type=='ethernet':
+ cmd=self.modify_connection_ethernet()
+ elif self.type=='bridge':
+ cmd=self.modify_connection_bridge()
+ elif self.type=='vlan':
+ cmd=self.modify_connection_vlan()
+ return self.execute_command(cmd)
+
+
+def main():
+ # Parsing argument file
+ module=AnsibleModule(
+ argument_spec=dict(
+ autoconnect=dict(required=False, default=None, type='bool'),
+ state=dict(required=True, choices=['present', 'absent'], type='str'),
+ conn_name=dict(required=True, type='str'),
+ master=dict(required=False, default=None, type='str'),
+ ifname=dict(required=False, default=None, type='str'),
+ type=dict(required=False, default=None, choices=['ethernet', 'team', 'team-slave', 'bond', 'bond-slave', 'bridge', 'vlan'], type='str'),
+ ip4=dict(required=False, default=None, type='str'),
+ gw4=dict(required=False, default=None, type='str'),
+ dns4=dict(required=False, default=None, type='str'),
+ ip6=dict(required=False, default=None, type='str'),
+ gw6=dict(required=False, default=None, type='str'),
+ dns6=dict(required=False, default=None, type='str'),
+ # Bond Specific vars
+            mode=dict(required=False, default="balance-rr", choices=["balance-rr", "active-backup", "balance-xor", "broadcast", "802.3ad", "balance-tlb", "balance-alb"], type='str'),
+ miimon=dict(required=False, default=None, type='str'),
+ downdelay=dict(required=False, default=None, type='str'),
+ updelay=dict(required=False, default=None, type='str'),
+ arp_interval=dict(required=False, default=None, type='str'),
+ arp_ip_target=dict(required=False, default=None, type='str'),
+ # general usage
+ mtu=dict(required=False, default=None, type='str'),
+ mac=dict(required=False, default=None, type='str'),
+ # bridge specific vars
+ stp=dict(required=False, default=True, type='bool'),
+ priority=dict(required=False, default="128", type='str'),
+ slavepriority=dict(required=False, default="32", type='str'),
+ forwarddelay=dict(required=False, default="15", type='str'),
+ hellotime=dict(required=False, default="2", type='str'),
+ maxage=dict(required=False, default="20", type='str'),
+ ageingtime=dict(required=False, default="300", type='str'),
+ # vlan specific vars
+ vlanid=dict(required=False, default=None, type='str'),
+ vlandev=dict(required=False, default=None, type='str'),
+ flags=dict(required=False, default=None, type='str'),
+ ingress=dict(required=False, default=None, type='str'),
+ egress=dict(required=False, default=None, type='str'),
+ ),
+ supports_check_mode=True
+ )
+
+ if not HAVE_DBUS:
+ module.fail_json(msg="This module requires dbus python bindings")
+
+ if not HAVE_NM_CLIENT:
+ module.fail_json(msg="This module requires NetworkManager glib API")
+
+ nmcli=Nmcli(module)
+
+ rc=None
+ out=''
+ err=''
+ result={}
+ result['conn_name']=nmcli.conn_name
+ result['state']=nmcli.state
+
+ # check for issues
+ if nmcli.conn_name is None:
+ nmcli.module.fail_json(msg="You haven't specified a name for the connection")
+ # team-slave checks
+ if nmcli.type=='team-slave' and nmcli.master is None:
+        nmcli.module.fail_json(msg="You haven't specified a master, so no changes will be made")
+ if nmcli.type=='team-slave' and nmcli.ifname is None:
+        nmcli.module.fail_json(msg="You haven't specified an interface name (ifname) for the connection")
+
+ if nmcli.state=='absent':
+ if nmcli.connection_exists():
+ if module.check_mode:
+ module.exit_json(changed=True)
+ (rc, out, err)=nmcli.down_connection()
+ (rc, out, err)=nmcli.remove_connection()
+ if rc!=0:
+                module.fail_json(name=('No Connection named %s exists' % nmcli.conn_name), msg=err, rc=rc)
+
+ elif nmcli.state=='present':
+ if nmcli.connection_exists():
+ # modify connection (note: this function is check mode aware)
+            result['Exists']='Connection already exists, so it is being modified'
+ if module.check_mode:
+ module.exit_json(changed=True)
+ (rc, out, err)=nmcli.modify_connection()
+ if not nmcli.connection_exists():
+ result['Connection']=('Connection %s of Type %s is being added' % (nmcli.conn_name, nmcli.type))
+ if module.check_mode:
+ module.exit_json(changed=True)
+ (rc, out, err)=nmcli.create_connection()
+ if rc is not None and rc!=0:
+ module.fail_json(name=nmcli.conn_name, msg=err, rc=rc)
+
+ if rc is None:
+ result['changed']=False
+ else:
+ result['changed']=True
+ if out:
+ result['stdout']=out
+ if err:
+ result['stderr']=err
+
+ module.exit_json(**result)
+
+# import module snippets
+from ansible.module_utils.basic import *
+
+main()
diff --git a/lib/ansible/modules/extras/network/openvswitch_bridge.py b/lib/ansible/modules/extras/network/openvswitch_bridge.py
new file mode 100644
index 0000000000..68528dd478
--- /dev/null
+++ b/lib/ansible/modules/extras/network/openvswitch_bridge.py
@@ -0,0 +1,270 @@
+#!/usr/bin/python
+#coding: utf-8 -*-
+
+# (c) 2013, David Stygstra <david.stygstra@gmail.com>
+#
+# Portions copyright @ 2015 VMware, Inc.
+#
+# This file is part of Ansible
+#
+# This module is free software: you can redistribute it and/or modify
+# it under the terms of the GNU General Public License as published by
+# the Free Software Foundation, either version 3 of the License, or
+# (at your option) any later version.
+#
+# This software is distributed in the hope that it will be useful,
+# but WITHOUT ANY WARRANTY; without even the implied warranty of
+# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+# GNU General Public License for more details.
+#
+# You should have received a copy of the GNU General Public License
+# along with this software. If not, see <http://www.gnu.org/licenses/>.
+
+# pylint: disable=C0111
+
+DOCUMENTATION = '''
+---
+module: openvswitch_bridge
+version_added: 1.4
+author: "David Stygstra (@stygstra)"
+short_description: Manage Open vSwitch bridges
+requirements: [ ovs-vsctl ]
+description:
+ - Manage Open vSwitch bridges
+options:
+ bridge:
+ required: true
+ description:
+ - Name of bridge to manage
+ state:
+ required: false
+ default: "present"
+ choices: [ present, absent ]
+ description:
+ - Whether the bridge should exist
+ timeout:
+ required: false
+ default: 5
+ description:
+ - How long to wait for ovs-vswitchd to respond
+ external_ids:
+ version_added: 2.0
+ required: false
+ default: None
+ description:
+ - A dictionary of external-ids. Omitting this parameter is a No-op.
+ To clear all external-ids pass an empty value.
+ fail_mode:
+ version_added: 2.0
+ default: None
+ required: false
+ choices : [secure, standalone]
+ description:
+ - Set bridge fail-mode. The default value (None) is a No-op.
+'''
+
+EXAMPLES = '''
+# Create a bridge named br-int
+- openvswitch_bridge: bridge=br-int state=present
+
+# Create an integration bridge
+- openvswitch_bridge: bridge=br-int state=present fail_mode=secure
+ args:
+ external_ids:
+ bridge-id: "br-int"
+'''
+
+
+class OVSBridge(object):
+ """ Interface to ovs-vsctl. """
+ def __init__(self, module):
+ self.module = module
+ self.bridge = module.params['bridge']
+ self.state = module.params['state']
+ self.timeout = module.params['timeout']
+ self.fail_mode = module.params['fail_mode']
+
+ def _vsctl(self, command):
+ '''Run ovs-vsctl command'''
+ return self.module.run_command(['ovs-vsctl', '-t',
+ str(self.timeout)] + command)
+
+ def exists(self):
+ '''Check if the bridge already exists'''
+ rtc, _, err = self._vsctl(['br-exists', self.bridge])
+ if rtc == 0: # See ovs-vsctl(8) for status codes
+ return True
+ if rtc == 2:
+ return False
+ self.module.fail_json(msg=err)
+
+ def add(self):
+ '''Create the bridge'''
+ rtc, _, err = self._vsctl(['add-br', self.bridge])
+ if rtc != 0:
+ self.module.fail_json(msg=err)
+ if self.fail_mode:
+ self.set_fail_mode()
+
+ def delete(self):
+ '''Delete the bridge'''
+ rtc, _, err = self._vsctl(['del-br', self.bridge])
+ if rtc != 0:
+ self.module.fail_json(msg=err)
+
+ def check(self):
+ '''Run check mode'''
+ changed = False
+
+ # pylint: disable=W0703
+ try:
+ if self.state == 'present' and self.exists():
+ if (self.fail_mode and
+ (self.fail_mode != self.get_fail_mode())):
+ changed = True
+
+ ##
+ # Check if external ids would change.
+ current_external_ids = self.get_external_ids()
+ exp_external_ids = self.module.params['external_ids']
+ if exp_external_ids is not None:
+                    for (key, value) in exp_external_ids.items():
+                        if value != current_external_ids.get(key, None):
+                            changed = True
+
+ ##
+ # Check if external ids would be removed.
+ for (key, value) in current_external_ids.items():
+ if key not in exp_external_ids:
+ changed = True
+
+ elif self.state == 'absent' and self.exists():
+ changed = True
+ elif self.state == 'present' and not self.exists():
+ changed = True
+ except Exception, earg:
+ self.module.fail_json(msg=str(earg))
+
+ # pylint: enable=W0703
+ self.module.exit_json(changed=changed)
+
+ def run(self):
+ '''Make the necessary changes'''
+ changed = False
+ # pylint: disable=W0703
+
+ try:
+ if self.state == 'absent':
+ if self.exists():
+ self.delete()
+ changed = True
+ elif self.state == 'present':
+
+ if not self.exists():
+ self.add()
+ changed = True
+
+ current_fail_mode = self.get_fail_mode()
+ if self.fail_mode and (self.fail_mode != current_fail_mode):
+                self.module.log("changing fail mode %s to %s" % (current_fail_mode, self.fail_mode))
+ self.set_fail_mode()
+ changed = True
+
+ current_external_ids = self.get_external_ids()
+
+ ##
+ # Change and add existing external ids.
+ exp_external_ids = self.module.params['external_ids']
+ if exp_external_ids is not None:
+ for (key, value) in exp_external_ids.items():
+ if ((value != current_external_ids.get(key, None)) and
+ self.set_external_id(key, value)):
+ changed = True
+
+ ##
+ # Remove current external ids that are not passed in.
+ for (key, value) in current_external_ids.items():
+ if ((key not in exp_external_ids) and
+ self.set_external_id(key, None)):
+ changed = True
+
+ except Exception, earg:
+ self.module.fail_json(msg=str(earg))
+ # pylint: enable=W0703
+ self.module.exit_json(changed=changed)
+
+ def get_external_ids(self):
+ """ Return the bridge's external ids as a dict. """
+ results = {}
+ if self.exists():
+ rtc, out, err = self._vsctl(['br-get-external-id', self.bridge])
+ if rtc != 0:
+ self.module.fail_json(msg=err)
+ lines = out.split("\n")
+ lines = [item.split("=") for item in lines if len(item) > 0]
+ for item in lines:
+ results[item[0]] = item[1]
+
+ return results
+
+ def set_external_id(self, key, value):
+ """ Set external id. """
+ if self.exists():
+ cmd = ['br-set-external-id', self.bridge, key]
+ if value:
+ cmd += [value]
+
+ (rtc, _, err) = self._vsctl(cmd)
+ if rtc != 0:
+ self.module.fail_json(msg=err)
+ return True
+ return False
+
+ def get_fail_mode(self):
+ """ Get failure mode. """
+ value = ''
+ if self.exists():
+ rtc, out, err = self._vsctl(['get-fail-mode', self.bridge])
+ if rtc != 0:
+ self.module.fail_json(msg=err)
+ value = out.strip("\n")
+ return value
+
+ def set_fail_mode(self):
+ """ Set failure mode. """
+
+ if self.exists():
+ (rtc, _, err) = self._vsctl(['set-fail-mode', self.bridge,
+ self.fail_mode])
+ if rtc != 0:
+ self.module.fail_json(msg=err)
+
+
+# pylint: disable=E0602
+def main():
+ """ Entry point. """
+ module = AnsibleModule(
+ argument_spec={
+ 'bridge': {'required': True},
+ 'state': {'default': 'present', 'choices': ['present', 'absent']},
+ 'timeout': {'default': 5, 'type': 'int'},
+ 'external_ids': {'default': None, 'type': 'dict'},
+ 'fail_mode': {'default': None},
+ },
+ supports_check_mode=True,
+ )
+
+ bridge = OVSBridge(module)
+ if module.check_mode:
+ bridge.check()
+ else:
+ bridge.run()
+
+# pylint: disable=W0614
+# pylint: disable=W0401
+# pylint: disable=W0622
+
+# import module snippets
+from ansible.module_utils.basic import *
+main()
diff --git a/lib/ansible/modules/extras/network/openvswitch_db.py b/lib/ansible/modules/extras/network/openvswitch_db.py
new file mode 100644
index 0000000000..e6ec2658e0
--- /dev/null
+++ b/lib/ansible/modules/extras/network/openvswitch_db.py
@@ -0,0 +1,132 @@
+#!/usr/bin/python
+# coding: utf-8 -*-
+
+# pylint: disable=C0111
+
+#
+# (c) 2015, Mark Hamilton <mhamilton@vmware.com>
+#
+# Portions copyright @ 2015 VMware, Inc.
+#
+# This file is part of Ansible
+#
+# This module is free software: you can redistribute it and/or modify
+# it under the terms of the GNU General Public License as published by
+# the Free Software Foundation, either version 3 of the License, or
+# (at your option) any later version.
+#
+# This software is distributed in the hope that it will be useful,
+# but WITHOUT ANY WARRANTY; without even the implied warranty of
+# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+# GNU General Public License for more details.
+#
+# You should have received a copy of the GNU General Public License
+# along with this software. If not, see <http://www.gnu.org/licenses/>.
+
+DOCUMENTATION = """
+---
+module: openvswitch_db
+author: "Mark Hamilton (mhamilton@vmware.com)"
+version_added: 2.0
+short_description: Configure Open vSwitch database.
+requirements: [ "ovs-vsctl >= 2.3.3" ]
+description:
+    - Set column values in a record of a database table.
+options:
+ table:
+ required: true
+ description:
+ - Identifies the table in the database.
+ record:
+ required: true
+ description:
+      - Identifies the record in the table.
+  col:
+ required: true
+ description:
+ - Identifies the column in the record.
+ key:
+ required: true
+ description:
+ - Identifies the key in the record column
+ value:
+ required: true
+ description:
+ - Expected value for the table, record, column and key.
+ timeout:
+ required: false
+ default: 5
+ description:
+ - How long to wait for ovs-vswitchd to respond
+"""
+
+EXAMPLES = '''
+# Increase the maximum idle time to 50 seconds before pruning unused kernel
+# rules.
+- openvswitch_db: table=open_vswitch record=. col=other_config key=max-idle
+ value=50000
+
+# Disable in band copy
+- openvswitch_db: table=Bridge record=br-int col=other_config
+ key=disable-in-band value=true
+'''
+
+
+def cmd_run(module, cmd, check_rc=True):
+ """ Log and run ovs-vsctl command. """
+ return module.run_command(cmd.split(" "), check_rc=check_rc)
+
+
+def params_set(module):
+ """ Implement the ovs-vsctl set commands. """
+
+ changed = False
+
+ ##
+ # Place in params dictionary in order to support the string format below.
+ module.params["ovs-vsctl"] = module.get_bin_path("ovs-vsctl", True)
+
+ fmt = "%(ovs-vsctl)s -t %(timeout)s get %(table)s %(record)s " \
+ "%(col)s:%(key)s"
+
+ cmd = fmt % module.params
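+    # With the values from the first EXAMPLES entry this renders as (illustrative,
+    # the ovs-vsctl path depends on the host):
+    # ovs-vsctl -t 5 get open_vswitch . other_config:max-idle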
+
+ (_, output, _) = cmd_run(module, cmd, False)
+ if module.params['value'] not in output:
+ fmt = "%(ovs-vsctl)s -t %(timeout)s set %(table)s %(record)s " \
+ "%(col)s:%(key)s=%(value)s"
+ cmd = fmt % module.params
+        ##
+        # The current value differs, so apply the new one.
+        (rtc, _, err) = cmd_run(module, cmd)
+ if rtc != 0:
+ module.fail_json(msg=err)
+ changed = True
+ module.exit_json(changed=changed)
+
+
+# pylint: disable=E0602
+def main():
+ """ Entry point for ansible module. """
+ module = AnsibleModule(
+ argument_spec={
+ 'table': {'required': True},
+ 'record': {'required': True},
+ 'col': {'required': True},
+ 'key': {'required': True},
+ 'value': {'required': True},
+ 'timeout': {'default': 5, 'type': 'int'},
+ },
+ supports_check_mode=True,
+ )
+
+ params_set(module)
+
+
+# pylint: disable=W0614
+# pylint: disable=W0401
+# pylint: disable=W0622
+
+# import module snippets
+from ansible.module_utils.basic import *
+main()
diff --git a/lib/ansible/modules/extras/network/openvswitch_port.py b/lib/ansible/modules/extras/network/openvswitch_port.py
new file mode 100644
index 0000000000..c2224b5240
--- /dev/null
+++ b/lib/ansible/modules/extras/network/openvswitch_port.py
@@ -0,0 +1,272 @@
+#!/usr/bin/python
+#coding: utf-8 -*-
+
+# pylint: disable=C0111
+
+# (c) 2013, David Stygstra <david.stygstra@gmail.com>
+#
+# Portions copyright @ 2015 VMware, Inc.
+#
+# This file is part of Ansible
+#
+# This module is free software: you can redistribute it and/or modify
+# it under the terms of the GNU General Public License as published by
+# the Free Software Foundation, either version 3 of the License, or
+# (at your option) any later version.
+#
+# This software is distributed in the hope that it will be useful,
+# but WITHOUT ANY WARRANTY; without even the implied warranty of
+# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+# GNU General Public License for more details.
+#
+# You should have received a copy of the GNU General Public License
+# along with this software. If not, see <http://www.gnu.org/licenses/>.
+
+DOCUMENTATION = '''
+---
+module: openvswitch_port
+version_added: 1.4
+author: "David Stygstra (@stygstra)"
+short_description: Manage Open vSwitch ports
+requirements: [ ovs-vsctl ]
+description:
+ - Manage Open vSwitch ports
+options:
+ bridge:
+ required: true
+ description:
+ - Name of bridge to manage
+ port:
+ required: true
+ description:
+ - Name of port to manage on the bridge
+ tag:
+ version_added: 2.2
+ required: false
+ description:
+ - VLAN tag for this port
+ state:
+ required: false
+ default: "present"
+ choices: [ present, absent ]
+ description:
+ - Whether the port should exist
+ timeout:
+ required: false
+ default: 5
+ description:
+ - How long to wait for ovs-vswitchd to respond
+ external_ids:
+ version_added: 2.0
+ required: false
+ default: {}
+ description:
+ - Dictionary of external_ids applied to a port.
+ set:
+ version_added: 2.0
+ required: false
+ default: None
+ description:
+ - Set a single property on a port.
+'''
+
+EXAMPLES = '''
+# Creates port eth2 on bridge br-ex
+- openvswitch_port: bridge=br-ex port=eth2 state=present
+
+# Creates port eth6 and set ofport equal to 6.
+- openvswitch_port: bridge=bridge-loop port=eth6 state=present
+ set="Interface eth6 ofport_request=6"
+
+# Creates port vlan10 with tag 10 on bridge br-ex
+- openvswitch_port: bridge=br-ex port=vlan10 tag=10 state=present
+ set="Interface vlan10 type=internal"
+
+# Assign interface id server1-vifeth6 and mac address 00:00:5E:00:53:23
+# to port vifeth6 and setup port to be managed by a controller.
+- openvswitch_port: bridge=br-int port=vifeth6 state=present
+ args:
+ external_ids:
+ iface-id: "{{inventory_hostname}}-vifeth6"
+ attached-mac: "00:00:5E:00:53:23"
+ vm-id: "{{inventory_hostname}}"
+ iface-status: "active"
+'''
+
+# pylint: disable=W0703
+
+
+def truncate_before(value, srch):
+ """ Return content of str before the srch parameters. """
+
+ before_index = value.find(srch)
+ if (before_index >= 0):
+ return value[:before_index]
+ else:
+ return value
+
+
+def _set_to_get(set_cmd, module):
+ """ Convert set command to get command and set value.
+ return tuple (get command, set value)
+ """
+
+ ##
+ # If set has option: then we want to truncate just before that.
+ set_cmd = truncate_before(set_cmd, " option:")
+ get_cmd = set_cmd.split(" ")
+ (key, value) = get_cmd[-1].split("=")
+ module.log("get commands %s " % key)
+ return (["--", "get"] + get_cmd[:-1] + [key], value)
+
+
+# pylint: disable=R0902
+class OVSPort(object):
+ """ Interface to OVS port. """
+ def __init__(self, module):
+ self.module = module
+ self.bridge = module.params['bridge']
+ self.port = module.params['port']
+ self.tag = module.params['tag']
+ self.state = module.params['state']
+ self.timeout = module.params['timeout']
+ self.set_opt = module.params.get('set', None)
+
+ def _vsctl(self, command, check_rc=True):
+ '''Run ovs-vsctl command'''
+
+ cmd = ['ovs-vsctl', '-t', str(self.timeout)] + command
+ return self.module.run_command(cmd, check_rc=check_rc)
+
+ def exists(self):
+ '''Check if the port already exists'''
+
+ (rtc, out, err) = self._vsctl(['list-ports', self.bridge])
+
+ if rtc != 0:
+ self.module.fail_json(msg=err)
+
+ return any(port.rstrip() == self.port for port in out.split('\n'))
+
+ def set(self, set_opt):
+ """ Set attributes on a port. """
+ self.module.log("set called %s" % set_opt)
+ if (not set_opt):
+ return False
+
+ (get_cmd, set_value) = _set_to_get(set_opt, self.module)
+ (rtc, out, err) = self._vsctl(get_cmd, False)
+ if rtc != 0:
+ ##
+ # ovs-vsctl -t 5 -- get Interface port external_ids:key
+ # returns failure if key does not exist.
+ out = None
+ else:
+ out = out.strip("\n")
+ out = out.strip('"')
+
+ if (out == set_value):
+ return False
+
+ (rtc, out, err) = self._vsctl(["--", "set"] + set_opt.split(" "))
+ if rtc != 0:
+ self.module.fail_json(msg=err)
+
+ return True
+
+ def add(self):
+ '''Add the port'''
+ cmd = ['add-port', self.bridge, self.port]
+ if self.tag:
+ cmd += ["tag=" + self.tag]
+        if self.set_opt:
+ cmd += ["--", "set"]
+ cmd += self.set_opt.split(" ")
+
+ (rtc, _, err) = self._vsctl(cmd)
+ if rtc != 0:
+ self.module.fail_json(msg=err)
+
+ return True
+
+ def delete(self):
+ '''Remove the port'''
+ (rtc, _, err) = self._vsctl(['del-port', self.bridge, self.port])
+ if rtc != 0:
+ self.module.fail_json(msg=err)
+
+ def check(self):
+ '''Run check mode'''
+ try:
+ if self.state == 'absent' and self.exists():
+ changed = True
+ elif self.state == 'present' and not self.exists():
+ changed = True
+ else:
+ changed = False
+ except Exception, earg:
+ self.module.fail_json(msg=str(earg))
+ self.module.exit_json(changed=changed)
+
+ def run(self):
+ '''Make the necessary changes'''
+ changed = False
+ try:
+ if self.state == 'absent':
+ if self.exists():
+ self.delete()
+ changed = True
+ elif self.state == 'present':
+ ##
+ # Add any missing ports.
+ if (not self.exists()):
+ self.add()
+ changed = True
+
+ ##
+ # If the -- set changed check here and make changes
+ # but this only makes sense when state=present.
+ if (not changed):
+ changed = self.set(self.set_opt) or changed
+ items = self.module.params['external_ids'].items()
+ for (key, value) in items:
+ value = value.replace('"', '')
+ fmt_opt = "Interface %s external_ids:%s=%s"
+ external_id = fmt_opt % (self.port, key, value)
+ changed = self.set(external_id) or changed
+ ##
+ except Exception, earg:
+ self.module.fail_json(msg=str(earg))
+ self.module.exit_json(changed=changed)
+
+
+# pylint: disable=E0602
+def main():
+ """ Entry point. """
+ module = AnsibleModule(
+ argument_spec={
+ 'bridge': {'required': True},
+ 'port': {'required': True},
+ 'tag': {'required': False},
+ 'state': {'default': 'present', 'choices': ['present', 'absent']},
+ 'timeout': {'default': 5, 'type': 'int'},
+ 'set': {'required': False, 'default': None},
+ 'external_ids': {'default': {}, 'required': False, 'type': 'dict'},
+ },
+ supports_check_mode=True,
+ )
+
+ port = OVSPort(module)
+ if module.check_mode:
+ port.check()
+ else:
+ port.run()
+
+
+# pylint: disable=W0614
+# pylint: disable=W0401
+# pylint: disable=W0622
+
+# import module snippets
+from ansible.module_utils.basic import *
+main()
diff --git a/lib/ansible/modules/extras/network/snmp_facts.py b/lib/ansible/modules/extras/network/snmp_facts.py
new file mode 100644
index 0000000000..28546dfc71
--- /dev/null
+++ b/lib/ansible/modules/extras/network/snmp_facts.py
@@ -0,0 +1,367 @@
+#!/usr/bin/python
+
+# This file is part of Networklore's snmp library for Ansible
+#
+# The module is free software: you can redistribute it and/or modify
+# it under the terms of the GNU General Public License as published by
+# the Free Software Foundation, either version 3 of the License, or
+# (at your option) any later version.
+#
+# The module is distributed in the hope that it will be useful,
+# but WITHOUT ANY WARRANTY; without even the implied warranty of
+# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+# GNU General Public License for more details.
+#
+# You should have received a copy of the GNU General Public License
+# along with Ansible. If not, see <http://www.gnu.org/licenses/>.
+
+
+DOCUMENTATION = '''
+---
+module: snmp_facts
+version_added: "1.9"
+author: "Patrick Ogenstad (@ogenstad)"
+short_description: Retrieve facts for a device using SNMP.
+description:
+    - Retrieve facts for a device using SNMP; the facts will be
+      inserted into the ansible_facts key.
+requirements:
+ - pysnmp
+options:
+ host:
+ description:
+ - Set to target snmp server (normally {{inventory_hostname}})
+ required: true
+ version:
+ description:
+ - SNMP Version to use, v2/v2c or v3
+ choices: [ 'v2', 'v2c', 'v3' ]
+ required: true
+ community:
+ description:
+ - The SNMP community string, required if version is v2/v2c
+ required: false
+ level:
+ description:
+ - Authentication level, required if version is v3
+ choices: [ 'authPriv', 'authNoPriv' ]
+ required: false
+ username:
+ description:
+ - Username for SNMPv3, required if version is v3
+ required: false
+ integrity:
+ description:
+            - Hashing algorithm, required if version is v3
+ choices: [ 'md5', 'sha' ]
+ required: false
+ authkey:
+ description:
+ - Authentication key, required if version is v3
+ required: false
+ privacy:
+ description:
+            - Encryption algorithm, required if level is authPriv
+ choices: [ 'des', 'aes' ]
+ required: false
+ privkey:
+ description:
+            - Encryption key, required if level is authPriv
+ required: false
+'''
+
+EXAMPLES = '''
+# Gather facts with SNMP version 2
+- snmp_facts: host={{ inventory_hostname }} version=v2c community=public
+ connection: local
+
+# Gather facts using SNMP version 3
+- snmp_facts:
+ host={{ inventory_hostname }}
+ version=v3
+ level=authPriv
+ integrity=sha
+ privacy=aes
+ username=snmp-user
+ authkey=abc12345
+ privkey=def6789
+ delegate_to: localhost
+'''
+
+from ansible.module_utils.basic import *
+from collections import defaultdict
+
+try:
+ from pysnmp.entity.rfc3413.oneliner import cmdgen
+ has_pysnmp = True
+except ImportError:
+ has_pysnmp = False
+
+class DefineOid(object):
+
+ def __init__(self,dotprefix=False):
+ if dotprefix:
+ dp = "."
+ else:
+ dp = ""
+
+ # From SNMPv2-MIB
+ self.sysDescr = dp + "1.3.6.1.2.1.1.1.0"
+ self.sysObjectId = dp + "1.3.6.1.2.1.1.2.0"
+ self.sysUpTime = dp + "1.3.6.1.2.1.1.3.0"
+ self.sysContact = dp + "1.3.6.1.2.1.1.4.0"
+ self.sysName = dp + "1.3.6.1.2.1.1.5.0"
+ self.sysLocation = dp + "1.3.6.1.2.1.1.6.0"
+
+ # From IF-MIB
+ self.ifIndex = dp + "1.3.6.1.2.1.2.2.1.1"
+ self.ifDescr = dp + "1.3.6.1.2.1.2.2.1.2"
+ self.ifMtu = dp + "1.3.6.1.2.1.2.2.1.4"
+ self.ifSpeed = dp + "1.3.6.1.2.1.2.2.1.5"
+ self.ifPhysAddress = dp + "1.3.6.1.2.1.2.2.1.6"
+ self.ifAdminStatus = dp + "1.3.6.1.2.1.2.2.1.7"
+ self.ifOperStatus = dp + "1.3.6.1.2.1.2.2.1.8"
+ self.ifAlias = dp + "1.3.6.1.2.1.31.1.1.1.18"
+
+ # From IP-MIB
+ self.ipAdEntAddr = dp + "1.3.6.1.2.1.4.20.1.1"
+ self.ipAdEntIfIndex = dp + "1.3.6.1.2.1.4.20.1.2"
+ self.ipAdEntNetMask = dp + "1.3.6.1.2.1.4.20.1.3"
+
+
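+# decode_hex converts SNMP octet-string output such as "0x4c6f6e646f6e" into
+# readable text ("London", via Python 2's str.decode("hex")); short or
+# unprefixed values pass through unchanged.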
+def decode_hex(hexstring):
+
+ if len(hexstring) < 3:
+ return hexstring
+ if hexstring[:2] == "0x":
+ return hexstring[2:].decode("hex")
+ else:
+ return hexstring
+
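+# decode_mac strips the "0x" prefix from the 14-character form of a MAC address,
+# e.g. "0x001122334455" becomes "001122334455"; anything else passes through.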
+def decode_mac(hexstring):
+
+ if len(hexstring) != 14:
+ return hexstring
+ if hexstring[:2] == "0x":
+ return hexstring[2:]
+ else:
+ return hexstring
+
+def lookup_adminstatus(int_adminstatus):
+ adminstatus_options = {
+ 1: 'up',
+ 2: 'down',
+ 3: 'testing'
+ }
+ if int_adminstatus in adminstatus_options.keys():
+ return adminstatus_options[int_adminstatus]
+ else:
+ return ""
+
+def lookup_operstatus(int_operstatus):
+ operstatus_options = {
+ 1: 'up',
+ 2: 'down',
+ 3: 'testing',
+ 4: 'unknown',
+ 5: 'dormant',
+ 6: 'notPresent',
+ 7: 'lowerLayerDown'
+ }
+ if int_operstatus in operstatus_options.keys():
+ return operstatus_options[int_operstatus]
+ else:
+ return ""
+
+def main():
+ module = AnsibleModule(
+ argument_spec=dict(
+ host=dict(required=True),
+ version=dict(required=True, choices=['v2', 'v2c', 'v3']),
+ community=dict(required=False, default=False),
+ username=dict(required=False),
+ level=dict(required=False, choices=['authNoPriv', 'authPriv']),
+ integrity=dict(required=False, choices=['md5', 'sha']),
+ privacy=dict(required=False, choices=['des', 'aes']),
+ authkey=dict(required=False),
+ privkey=dict(required=False),
+ removeplaceholder=dict(required=False)),
+ required_together = ( ['username','level','integrity','authkey'],['privacy','privkey'],),
+ supports_check_mode=False)
+
+ m_args = module.params
+
+ if not has_pysnmp:
+ module.fail_json(msg='Missing required pysnmp module (check docs)')
+
+ cmdGen = cmdgen.CommandGenerator()
+
+ # Verify that we receive a community when using snmp v2
+ if m_args['version'] == "v2" or m_args['version'] == "v2c":
+        if m_args['community'] is False:
+ module.fail_json(msg='Community not set when using snmp version 2')
+
+ if m_args['version'] == "v3":
+        if m_args['username'] is None:
+ module.fail_json(msg='Username not set when using snmp version 3')
+
+        if m_args['level'] == "authPriv" and m_args['privacy'] is None:
+ module.fail_json(msg='Privacy algorithm not set when using authPriv')
+
+
+ if m_args['integrity'] == "sha":
+ integrity_proto = cmdgen.usmHMACSHAAuthProtocol
+ elif m_args['integrity'] == "md5":
+ integrity_proto = cmdgen.usmHMACMD5AuthProtocol
+
+ if m_args['privacy'] == "aes":
+ privacy_proto = cmdgen.usmAesCfb128Protocol
+ elif m_args['privacy'] == "des":
+ privacy_proto = cmdgen.usmDESPrivProtocol
+
+ # Use SNMP Version 2
+ if m_args['version'] == "v2" or m_args['version'] == "v2c":
+ snmp_auth = cmdgen.CommunityData(m_args['community'])
+
+ # Use SNMP Version 3 with authNoPriv
+ elif m_args['level'] == "authNoPriv":
+ snmp_auth = cmdgen.UsmUserData(m_args['username'], authKey=m_args['authkey'], authProtocol=integrity_proto)
+
+ # Use SNMP Version 3 with authPriv
+ else:
+ snmp_auth = cmdgen.UsmUserData(m_args['username'], authKey=m_args['authkey'], privKey=m_args['privkey'], authProtocol=integrity_proto, privProtocol=privacy_proto)
+
+ # Use p to prefix OIDs with a dot for polling
+ p = DefineOid(dotprefix=True)
+ # Use v without a prefix to use with return values
+ v = DefineOid(dotprefix=False)
+
+ Tree = lambda: defaultdict(Tree)
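+    # Tree() produces arbitrarily nested defaultdicts, which is what allows the
+    # results['ansible_interfaces'][ifIndex]['mtu'] style assignments below
+    # without explicit key checks.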
+
+ results = Tree()
+
+ errorIndication, errorStatus, errorIndex, varBinds = cmdGen.getCmd(
+ snmp_auth,
+ cmdgen.UdpTransportTarget((m_args['host'], 161)),
+ cmdgen.MibVariable(p.sysDescr,),
+ cmdgen.MibVariable(p.sysObjectId,),
+ cmdgen.MibVariable(p.sysUpTime,),
+ cmdgen.MibVariable(p.sysContact,),
+ cmdgen.MibVariable(p.sysName,),
+ cmdgen.MibVariable(p.sysLocation,),
+ lookupMib=False
+ )
+
+
+ if errorIndication:
+ module.fail_json(msg=str(errorIndication))
+
+ for oid, val in varBinds:
+ current_oid = oid.prettyPrint()
+ current_val = val.prettyPrint()
+ if current_oid == v.sysDescr:
+ results['ansible_sysdescr'] = decode_hex(current_val)
+ elif current_oid == v.sysObjectId:
+ results['ansible_sysobjectid'] = current_val
+ elif current_oid == v.sysUpTime:
+ results['ansible_sysuptime'] = current_val
+ elif current_oid == v.sysContact:
+ results['ansible_syscontact'] = current_val
+ elif current_oid == v.sysName:
+ results['ansible_sysname'] = current_val
+ elif current_oid == v.sysLocation:
+ results['ansible_syslocation'] = current_val
+
+ errorIndication, errorStatus, errorIndex, varTable = cmdGen.nextCmd(
+ snmp_auth,
+ cmdgen.UdpTransportTarget((m_args['host'], 161)),
+ cmdgen.MibVariable(p.ifIndex,),
+ cmdgen.MibVariable(p.ifDescr,),
+ cmdgen.MibVariable(p.ifMtu,),
+ cmdgen.MibVariable(p.ifSpeed,),
+ cmdgen.MibVariable(p.ifPhysAddress,),
+ cmdgen.MibVariable(p.ifAdminStatus,),
+ cmdgen.MibVariable(p.ifOperStatus,),
+ cmdgen.MibVariable(p.ipAdEntAddr,),
+ cmdgen.MibVariable(p.ipAdEntIfIndex,),
+ cmdgen.MibVariable(p.ipAdEntNetMask,),
+
+ cmdgen.MibVariable(p.ifAlias,),
+ lookupMib=False
+ )
+
+
+ if errorIndication:
+ module.fail_json(msg=str(errorIndication))
+
+ interface_indexes = []
+
+ all_ipv4_addresses = []
+ ipv4_networks = Tree()
+
+ for varBinds in varTable:
+ for oid, val in varBinds:
+ current_oid = oid.prettyPrint()
+ current_val = val.prettyPrint()
+ if v.ifIndex in current_oid:
+ ifIndex = int(current_oid.rsplit('.', 1)[-1])
+ results['ansible_interfaces'][ifIndex]['ifindex'] = current_val
+ interface_indexes.append(ifIndex)
+ if v.ifDescr in current_oid:
+ ifIndex = int(current_oid.rsplit('.', 1)[-1])
+ results['ansible_interfaces'][ifIndex]['name'] = current_val
+ if v.ifMtu in current_oid:
+ ifIndex = int(current_oid.rsplit('.', 1)[-1])
+ results['ansible_interfaces'][ifIndex]['mtu'] = current_val
+            if v.ifSpeed in current_oid:
+ ifIndex = int(current_oid.rsplit('.', 1)[-1])
+ results['ansible_interfaces'][ifIndex]['speed'] = current_val
+ if v.ifPhysAddress in current_oid:
+ ifIndex = int(current_oid.rsplit('.', 1)[-1])
+ results['ansible_interfaces'][ifIndex]['mac'] = decode_mac(current_val)
+ if v.ifAdminStatus in current_oid:
+ ifIndex = int(current_oid.rsplit('.', 1)[-1])
+ results['ansible_interfaces'][ifIndex]['adminstatus'] = lookup_adminstatus(int(current_val))
+ if v.ifOperStatus in current_oid:
+ ifIndex = int(current_oid.rsplit('.', 1)[-1])
+ results['ansible_interfaces'][ifIndex]['operstatus'] = lookup_operstatus(int(current_val))
+ if v.ipAdEntAddr in current_oid:
+ curIPList = current_oid.rsplit('.', 4)[-4:]
+ curIP = ".".join(curIPList)
+ ipv4_networks[curIP]['address'] = current_val
+ all_ipv4_addresses.append(current_val)
+ if v.ipAdEntIfIndex in current_oid:
+ curIPList = current_oid.rsplit('.', 4)[-4:]
+ curIP = ".".join(curIPList)
+ ipv4_networks[curIP]['interface'] = current_val
+ if v.ipAdEntNetMask in current_oid:
+ curIPList = current_oid.rsplit('.', 4)[-4:]
+ curIP = ".".join(curIPList)
+ ipv4_networks[curIP]['netmask'] = current_val
+
+ if v.ifAlias in current_oid:
+ ifIndex = int(current_oid.rsplit('.', 1)[-1])
+ results['ansible_interfaces'][ifIndex]['description'] = current_val
+
+ interface_to_ipv4 = {}
+ for ipv4_network in ipv4_networks:
+ current_interface = ipv4_networks[ipv4_network]['interface']
+ current_network = {
+ 'address': ipv4_networks[ipv4_network]['address'],
+ 'netmask': ipv4_networks[ipv4_network]['netmask']
+ }
+        if current_interface not in interface_to_ipv4:
+            interface_to_ipv4[current_interface] = []
+        interface_to_ipv4[current_interface].append(current_network)
+
+ for interface in interface_to_ipv4:
+ results['ansible_interfaces'][int(interface)]['ipv4'] = interface_to_ipv4[interface]
+
+ results['ansible_all_ipv4_addresses'] = all_ipv4_addresses
+
+ module.exit_json(ansible_facts=results)
+
+
+main()
diff --git a/lib/ansible/modules/extras/network/wakeonlan.py b/lib/ansible/modules/extras/network/wakeonlan.py
new file mode 100644
index 0000000000..e7aa6ee7f4
--- /dev/null
+++ b/lib/ansible/modules/extras/network/wakeonlan.py
@@ -0,0 +1,126 @@
+#!/usr/bin/python
+# -*- coding: utf-8 -*-
+
+# (c) 2016, Dag Wieers <dag@wieers.com>
+#
+# This file is part of Ansible
+#
+# Ansible is free software: you can redistribute it and/or modify
+# it under the terms of the GNU General Public License as published by
+# the Free Software Foundation, either version 3 of the License, or
+# (at your option) any later version.
+#
+# Ansible is distributed in the hope that it will be useful,
+# but WITHOUT ANY WARRANTY; without even the implied warranty of
+# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+# GNU General Public License for more details.
+#
+# You should have received a copy of the GNU General Public License
+# along with Ansible. If not, see <http://www.gnu.org/licenses/>.
+
+DOCUMENTATION = '''
+---
+module: wakeonlan
+version_added: 2.2
+short_description: Send a magic Wake-on-LAN (WoL) broadcast packet
+description:
+ - The M(wakeonlan) module sends magic Wake-on-LAN (WoL) broadcast packets.
+options:
+ mac:
+ description:
+ - MAC address to send Wake-on-LAN broadcast packet for
+ required: true
+ default: null
+ broadcast:
+ description:
+ - Network broadcast address to use for broadcasting magic Wake-on-LAN packet
+ required: false
+ default: 255.255.255.255
+ port:
+ description:
+ - UDP port to use for magic Wake-on-LAN packet
+ required: false
+ default: 7
+author: "Dag Wieers (@dagwieers)"
+todo:
+ - Add arping support to check whether the system is up (before and after)
+ - Enable check-mode support (when we have arping support)
+ - Does not have SecureOn password support
+notes:
+ - This module sends a magic packet, without knowing whether it worked
+ - Only works if the target system was properly configured for Wake-on-LAN (in the BIOS and/or the OS)
+ - Some BIOSes have a different (configurable) Wake-on-LAN boot order (i.e. PXE first) when turned off
+'''
+
+EXAMPLES = '''
+# Send a magic Wake-on-LAN packet to 00:00:5E:00:53:66
+- local_action: wakeonlan mac=00:00:5E:00:53:66 broadcast=192.0.2.23
+
+- wakeonlan: mac=00:00:5E:00:53:66 port=9
+ delegate_to: localhost
+'''
+
+RETURN='''
+# Default return values
+'''
+
+from ansible.module_utils.basic import AnsibleModule
+from ansible.module_utils.pycompat24 import get_exception
+import socket
+import struct
+
+
+def wakeonlan(module, mac, broadcast, port):
+ """ Send a magic Wake-on-LAN packet. """
+
+ mac_orig = mac
+
+    # Remove possible separator (e.g. ':' or '-') from the MAC address
+ if len(mac) == 12 + 5:
+ mac = mac.replace(mac[2], '')
+
+ # If we don't end up with 12 hexadecimal characters, fail
+ if len(mac) != 12:
+ module.fail_json(msg="Incorrect MAC address length: %s" % mac_orig)
+
+ # Test if it converts to an integer, otherwise fail
+ try:
+ int(mac, 16)
+ except ValueError:
+ module.fail_json(msg="Incorrect MAC address format: %s" % mac_orig)
+
+ # Create payload for magic packet
+ data = ''
+ padding = ''.join(['FFFFFFFFFFFF', mac * 20])
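+    # Magic packet layout: 6 bytes of 0xFF followed by repetitions of the target
+    # MAC (the standard calls for 16 copies; the extra copies here are harmless).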
+ for i in range(0, len(padding), 2):
+ data = ''.join([data, struct.pack('B', int(padding[i: i + 2], 16))])
+
+ # Broadcast payload to network
+ sock = socket.socket(socket.AF_INET, socket.SOCK_DGRAM)
+ sock.setsockopt(socket.SOL_SOCKET, socket.SO_BROADCAST, 1)
+ try:
+ sock.sendto(data, (broadcast, port))
+ except socket.error:
+ e = get_exception()
+ module.fail_json(msg=str(e))
+
+
+def main():
+ module = AnsibleModule(
+ argument_spec = dict(
+ mac = dict(required=True, type='str'),
+ broadcast = dict(required=False, default='255.255.255.255'),
+ port = dict(required=False, type='int', default=7),
+ ),
+ )
+
+ mac = module.params.get('mac')
+ broadcast = module.params.get('broadcast')
+ port = module.params.get('port')
+
+ wakeonlan(module, mac, broadcast, port)
+ module.exit_json(changed=True)
+
+
+if __name__ == '__main__':
+ main()
diff --git a/lib/ansible/modules/extras/notification/__init__.py b/lib/ansible/modules/extras/notification/__init__.py
new file mode 100644
index 0000000000..e69de29bb2
--- /dev/null
+++ b/lib/ansible/modules/extras/notification/__init__.py
diff --git a/lib/ansible/modules/extras/notification/campfire.py b/lib/ansible/modules/extras/notification/campfire.py
new file mode 100644
index 0000000000..3d003e1363
--- /dev/null
+++ b/lib/ansible/modules/extras/notification/campfire.py
@@ -0,0 +1,138 @@
+#!/usr/bin/python
+# -*- coding: utf-8 -*-
+#
+# This file is part of Ansible
+#
+# Ansible is free software: you can redistribute it and/or modify
+# it under the terms of the GNU General Public License as published by
+# the Free Software Foundation, either version 3 of the License, or
+# (at your option) any later version.
+#
+# Ansible is distributed in the hope that it will be useful,
+# but WITHOUT ANY WARRANTY; without even the implied warranty of
+# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+# GNU General Public License for more details.
+#
+# You should have received a copy of the GNU General Public License
+# along with Ansible. If not, see <http://www.gnu.org/licenses/>.
+DOCUMENTATION = '''
+---
+module: campfire
+version_added: "1.2"
+short_description: Send a message to Campfire
+description:
+ - Send a message to Campfire.
+ - Messages with newlines will result in a "Paste" message being sent.
+options:
+ subscription:
+ description:
+ - The subscription name to use.
+ required: true
+ token:
+ description:
+ - API token.
+ required: true
+ room:
+ description:
+ - Room number to which the message should be sent.
+ required: true
+ msg:
+ description:
+ - The message body.
+ required: true
+ notify:
+ description:
+ - Send a notification sound before the message.
+ required: false
+ choices: ["56k", "bell", "bezos", "bueller", "clowntown",
+ "cottoneyejoe", "crickets", "dadgummit", "dangerzone",
+ "danielsan", "deeper", "drama", "greatjob", "greyjoy",
+ "guarantee", "heygirl", "horn", "horror",
+ "inconceivable", "live", "loggins", "makeitso", "noooo",
+ "nyan", "ohmy", "ohyeah", "pushit", "rimshot",
+ "rollout", "rumble", "sax", "secret", "sexyback",
+ "story", "tada", "tmyk", "trololo", "trombone", "unix",
+ "vuvuzela", "what", "whoomp", "yeah", "yodel"]
+
+# informational: requirements for nodes
+requirements: [ ]
+author: "Adam Garside (@fabulops)"
+'''
+
+EXAMPLES = '''
+- campfire: subscription=foo token=12345 room=123 msg="Task completed."
+
+- campfire: subscription=foo token=12345 room=123 notify=loggins
+ msg="Task completed ... with feeling."
+'''
+
+import cgi
+
+def main():
+
+ module = AnsibleModule(
+ argument_spec=dict(
+ subscription=dict(required=True),
+ token=dict(required=True, no_log=True),
+ room=dict(required=True),
+ msg=dict(required=True),
+ notify=dict(required=False,
+ choices=["56k", "bell", "bezos", "bueller",
+ "clowntown", "cottoneyejoe",
+ "crickets", "dadgummit", "dangerzone",
+ "danielsan", "deeper", "drama",
+ "greatjob", "greyjoy", "guarantee",
+ "heygirl", "horn", "horror",
+ "inconceivable", "live", "loggins",
+ "makeitso", "noooo", "nyan", "ohmy",
+ "ohyeah", "pushit", "rimshot",
+ "rollout", "rumble", "sax", "secret",
+ "sexyback", "story", "tada", "tmyk",
+ "trololo", "trombone", "unix",
+ "vuvuzela", "what", "whoomp", "yeah",
+ "yodel"]),
+ ),
+ supports_check_mode=False
+ )
+
+ subscription = module.params["subscription"]
+ token = module.params["token"]
+ room = module.params["room"]
+ msg = module.params["msg"]
+ notify = module.params["notify"]
+
+ URI = "https://%s.campfirenow.com" % subscription
+ NSTR = "<message><type>SoundMessage</type><body>%s</body></message>"
+ MSTR = "<message><body>%s</body></message>"
+ AGENT = "Ansible/1.2"
+
+ # Hack to add basic auth username and password the way fetch_url expects
+ module.params['url_username'] = token
+ module.params['url_password'] = 'X'
+
+ target_url = '%s/room/%s/speak.xml' % (URI, room)
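+    # e.g. for subscription "foo" and room "123" (as in EXAMPLES) this becomes
+    # https://foo.campfirenow.com/room/123/speak.xml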
+ headers = {'Content-Type': 'application/xml',
+ 'User-agent': AGENT}
+
+ # Send some audible notification if requested
+ if notify:
+ response, info = fetch_url(module, target_url, data=NSTR % cgi.escape(notify), headers=headers)
+ if info['status'] not in [200, 201]:
+ module.fail_json(msg="unable to send msg: '%s', campfire api"
+ " returned error code: '%s'" %
+ (notify, info['status']))
+
+ # Send the message
+    response, info = fetch_url(module, target_url, data=MSTR % cgi.escape(msg), headers=headers)
+ if info['status'] not in [200, 201]:
+ module.fail_json(msg="unable to send msg: '%s', campfire api"
+ " returned error code: '%s'" %
+ (msg, info['status']))
+
+ module.exit_json(changed=True, room=room, msg=msg, notify=notify)
+
+# import module snippets
+from ansible.module_utils.basic import *
+from ansible.module_utils.urls import *
+if __name__ == '__main__':
+ main()
diff --git a/lib/ansible/modules/extras/notification/flowdock.py b/lib/ansible/modules/extras/notification/flowdock.py
new file mode 100644
index 0000000000..24fee07af1
--- /dev/null
+++ b/lib/ansible/modules/extras/notification/flowdock.py
@@ -0,0 +1,193 @@
+#!/usr/bin/python
+# -*- coding: utf-8 -*-
+
+# Copyright 2013 Matt Coddington <coddington@gmail.com>
+#
+# This file is part of Ansible
+#
+# Ansible is free software: you can redistribute it and/or modify
+# it under the terms of the GNU General Public License as published by
+# the Free Software Foundation, either version 3 of the License, or
+# (at your option) any later version.
+#
+# Ansible is distributed in the hope that it will be useful,
+# but WITHOUT ANY WARRANTY; without even the implied warranty of
+# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+# GNU General Public License for more details.
+#
+# You should have received a copy of the GNU General Public License
+# along with Ansible. If not, see <http://www.gnu.org/licenses/>.
+
+DOCUMENTATION = '''
+---
+module: flowdock
+version_added: "1.2"
+author: "Matt Coddington (@mcodd)"
+short_description: Send a message to a flowdock
+description:
+ - Send a message to a flowdock team inbox or chat using the push API (see https://www.flowdock.com/api/team-inbox and https://www.flowdock.com/api/chat)
+options:
+ token:
+ description:
+ - API token.
+ required: true
+ type:
+ description:
+ - Whether to post to 'inbox' or 'chat'
+ required: true
+ choices: [ "inbox", "chat" ]
+ msg:
+ description:
+ - Content of the message
+ required: true
+ tags:
+ description:
+      - Tags of the message, separated by commas
+ required: false
+ external_user_name:
+ description:
+ - (chat only - required) Name of the "user" sending the message
+ required: false
+ from_address:
+ description:
+ - (inbox only - required) Email address of the message sender
+ required: false
+ source:
+ description:
+ - (inbox only - required) Human readable identifier of the application that uses the Flowdock API
+ required: false
+ subject:
+ description:
+ - (inbox only - required) Subject line of the message
+ required: false
+ from_name:
+ description:
+ - (inbox only) Name of the message sender
+ required: false
+ reply_to:
+ description:
+ - (inbox only) Email address for replies
+ required: false
+ project:
+ description:
+ - (inbox only) Human readable identifier for more detailed message categorization
+ required: false
+ link:
+ description:
+ - (inbox only) Link associated with the message. This will be used to link the message subject in Team Inbox.
+ required: false
+ validate_certs:
+ description:
+ - If C(no), SSL certificates will not be validated. This should only be used
+ on personally controlled sites using self-signed certificates.
+ required: false
+ default: 'yes'
+ choices: ['yes', 'no']
+ version_added: 1.5.1
+
+requirements: [ ]
+'''
+
+EXAMPLES = '''
+- flowdock: type=inbox
+ token=AAAAAA
+ from_address=user@example.com
+ source='my cool app'
+ msg='test from ansible'
+ subject='test subject'
+
+- flowdock: type=chat
+ token=AAAAAA
+ external_user_name=testuser
+ msg='test from ansible'
+ tags=tag1,tag2,tag3
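+
+# A hypothetical equivalent in YAML dict syntax (untested sketch of the same options):
+- flowdock:
+    type: chat
+    token: AAAAAA
+    external_user_name: testuser
+    msg: "test from ansible"
+    tags: tag1,tag2,tag3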
+'''
+
+import urllib
+
+# ===========================================
+# Module execution.
+#
+
+def main():
+
+ module = AnsibleModule(
+ argument_spec=dict(
+ token=dict(required=True, no_log=True),
+ msg=dict(required=True),
+ type=dict(required=True, choices=["inbox","chat"]),
+ external_user_name=dict(required=False),
+ from_address=dict(required=False),
+ source=dict(required=False),
+ subject=dict(required=False),
+ from_name=dict(required=False),
+ reply_to=dict(required=False),
+ project=dict(required=False),
+ tags=dict(required=False),
+ link=dict(required=False),
+ validate_certs = dict(default='yes', type='bool'),
+ ),
+ supports_check_mode=True
+ )
+
+ type = module.params["type"]
+ token = module.params["token"]
+ if type == 'inbox':
+ url = "https://api.flowdock.com/v1/messages/team_inbox/%s" % (token)
+ else:
+ url = "https://api.flowdock.com/v1/messages/chat/%s" % (token)
+
+ params = {}
+
+ # required params
+ params['content'] = module.params["msg"]
+
+ # required params for the 'chat' type
+ if module.params['external_user_name']:
+ if type == 'inbox':
+ module.fail_json(msg="external_user_name is not valid for the 'inbox' type")
+ else:
+ params['external_user_name'] = module.params["external_user_name"]
+ elif type == 'chat':
+        module.fail_json(msg="external_user_name is required for the 'chat' type")
+
+ # required params for the 'inbox' type
+ for item in [ 'from_address', 'source', 'subject' ]:
+ if module.params[item]:
+ if type == 'chat':
+ module.fail_json(msg="%s is not valid for the 'chat' type" % item)
+ else:
+ params[item] = module.params[item]
+ elif type == 'inbox':
+ module.fail_json(msg="%s is required for the 'inbox' type" % item)
+
+ # optional params
+ if module.params["tags"]:
+ params['tags'] = module.params["tags"]
+
+ # optional params for the 'inbox' type
+ for item in [ 'from_name', 'reply_to', 'project', 'link' ]:
+ if module.params[item]:
+ if type == 'chat':
+ module.fail_json(msg="%s is not valid for the 'chat' type" % item)
+ else:
+ params[item] = module.params[item]
+
+ # If we're in check mode, just exit pretending like we succeeded
+ if module.check_mode:
+ module.exit_json(changed=False)
+
+ # Send the data to Flowdock
+ data = urllib.urlencode(params)
+ response, info = fetch_url(module, url, data=data)
+ if info['status'] != 200:
+ module.fail_json(msg="unable to send msg: %s" % info['msg'])
+
+ module.exit_json(changed=True, msg=module.params["msg"])
+
+# import module snippets
+from ansible.module_utils.basic import *
+from ansible.module_utils.urls import *
+
+main()
+
diff --git a/lib/ansible/modules/extras/notification/grove.py b/lib/ansible/modules/extras/notification/grove.py
new file mode 100644
index 0000000000..5e6db30d9a
--- /dev/null
+++ b/lib/ansible/modules/extras/notification/grove.py
@@ -0,0 +1,117 @@
+#!/usr/bin/python
+# -*- coding: utf-8 -*-
+#
+# This file is part of Ansible
+#
+# Ansible is free software: you can redistribute it and/or modify
+# it under the terms of the GNU General Public License as published by
+# the Free Software Foundation, either version 3 of the License, or
+# (at your option) any later version.
+#
+# Ansible is distributed in the hope that it will be useful,
+# but WITHOUT ANY WARRANTY; without even the implied warranty of
+# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+# GNU General Public License for more details.
+#
+# You should have received a copy of the GNU General Public License
+# along with Ansible. If not, see <http://www.gnu.org/licenses/>.
+
+DOCUMENTATION = '''
+---
+module: grove
+version_added: 1.4
+short_description: Sends a notification to a grove.io channel
+description:
+ - The M(grove) module sends a message for a service to a Grove.io
+ channel.
+options:
+ channel_token:
+ description:
+ - Token of the channel to post to.
+ required: true
+ service:
+ description:
+ - Name of the service (displayed as the "user" in the message)
+ required: false
+ default: ansible
+ message:
+ description:
+ - Message content
+ required: true
+ url:
+ description:
+ - Service URL for the web client
+ required: false
+ icon_url:
+ description:
+ - Icon for the service
+ required: false
+ validate_certs:
+ description:
+ - If C(no), SSL certificates will not be validated. This should only be used
+ on personally controlled sites using self-signed certificates.
+ required: false
+ default: 'yes'
+ choices: ['yes', 'no']
+ version_added: 1.5.1
+author: "Jonas Pfenniger (@zimbatm)"
+'''
+
+EXAMPLES = '''
+- grove: >
+ channel_token=6Ph62VBBJOccmtTPZbubiPzdrhipZXtg
+ service=my-app
+ message=deployed {{ target }}
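+
+# A hypothetical equivalent in YAML dict syntax (untested sketch of the same options):
+- grove:
+    channel_token: 6Ph62VBBJOccmtTPZbubiPzdrhipZXtg
+    service: my-app
+    message: "deployed {{ target }}"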
+'''
+
+import urllib
+
+BASE_URL = 'https://grove.io/api/notice/%s/'
+
+# ==============================================================
+# do_notify_grove
+
+def do_notify_grove(module, channel_token, service, message, url=None, icon_url=None):
+ my_url = BASE_URL % (channel_token,)
+
+ my_data = dict(service=service, message=message)
+ if url is not None:
+ my_data['url'] = url
+ if icon_url is not None:
+ my_data['icon_url'] = icon_url
+
+ data = urllib.urlencode(my_data)
+ response, info = fetch_url(module, my_url, data=data)
+ if info['status'] != 200:
+ module.fail_json(msg="failed to send notification: %s" % info['msg'])
+
+# ==============================================================
+# main
+
+def main():
+ module = AnsibleModule(
+ argument_spec = dict(
+ channel_token = dict(type='str', required=True),
+ message = dict(type='str', required=True),
+ service = dict(type='str', default='ansible'),
+ url = dict(type='str', default=None),
+ icon_url = dict(type='str', default=None),
+ validate_certs = dict(default='yes', type='bool'),
+ )
+ )
+
+ channel_token = module.params['channel_token']
+ service = module.params['service']
+ message = module.params['message']
+ url = module.params['url']
+ icon_url = module.params['icon_url']
+
+ do_notify_grove(module, channel_token, service, message, url, icon_url)
+
+ # Mission complete
+ module.exit_json(msg="OK")
+
+# import module snippets
+from ansible.module_utils.basic import *
+from ansible.module_utils.urls import *
+main()
diff --git a/lib/ansible/modules/extras/notification/hall.py b/lib/ansible/modules/extras/notification/hall.py
new file mode 100755
index 0000000000..05c1a981b7
--- /dev/null
+++ b/lib/ansible/modules/extras/notification/hall.py
@@ -0,0 +1,97 @@
+#!/usr/bin/python
+# -*- coding: utf-8 -*-
+
+# (c) 2015, Billy Kimble <basslines@gmail.com>
+#
+# This file is part of Ansible
+#
+# Ansible is free software: you can redistribute it and/or modify
+# it under the terms of the GNU General Public License as published by
+# the Free Software Foundation, either version 3 of the License, or
+# (at your option) any later version.
+#
+# Ansible is distributed in the hope that it will be useful,
+# but WITHOUT ANY WARRANTY; without even the implied warranty of
+# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+# GNU General Public License for more details.
+#
+# You should have received a copy of the GNU General Public License
+# along with Ansible. If not, see <http://www.gnu.org/licenses/>.
+
+
+DOCUMENTATION = """
+module: hall
+short_description: Send notification to Hall
+description:
+  - "The M(hall) module connects to the U(https://hall.com) messaging API and allows you to deliver notification messages to rooms."
+version_added: "2.0"
+author: Billy Kimble (@bkimble) <basslines@gmail.com>
+options:
+ room_token:
+ description:
+      - "Room token provided to you by setting up the Ansible room integration on U(https://hall.com)"
+ required: true
+ msg:
+ description:
+      - The message you wish to deliver as a notification
+ required: true
+ title:
+ description:
+ - The title of the message
+ required: true
+ picture:
+ description:
+ - "The full URL to the image you wish to use for the Icon of the message. Defaults to U(http://cdn2.hubspot.net/hub/330046/file-769078210-png/Official_Logos/ansible_logo_black_square_small.png?t=1421076128627)"
+ required: false
+"""
+
+EXAMPLES = """
+- name: Send Hall notification
+ local_action:
+ module: hall
+ room_token: <hall room integration token>
+ title: Nginx
+ msg: Created virtual host file on {{ inventory_hostname }}
+
+- name: Send Hall notification if EC2 servers were created.
+ when: ec2.instances|length > 0
+ local_action:
+ module: hall
+ room_token: <hall room integration token>
+ title: Server Creation
+ msg: "Created EC2 instance {{ item.id }} of type {{ item.instance_type }}.\\nInstance can be reached at {{ item.public_ip }} in the {{ item.region }} region."
+ with_items: ec2.instances
+"""
+
+HALL_API_ENDPOINT = 'https://hall.com/api/1/services/generic/%s'
+
+def send_request_to_hall(module, room_token, payload):
+ headers = {'Content-Type': 'application/json'}
+    payload = module.jsonify(payload)
+ api_endpoint = HALL_API_ENDPOINT % (room_token)
+ response, info = fetch_url(module, api_endpoint, data=payload, headers=headers)
+ if info['status'] != 200:
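+        # Report a redacted endpoint so the room token never appears in failure output.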
+ secure_url = HALL_API_ENDPOINT % ('[redacted]')
+        module.fail_json(msg="failed to send %s to %s: %s" % (payload, secure_url, info['msg']))
+
+def main():
+ module = AnsibleModule(
+ argument_spec = dict(
+ room_token = dict(type='str', required=True),
+ msg = dict(type='str', required=True),
+ title = dict(type='str', required=True),
+ picture = dict(type='str', default='http://cdn2.hubspot.net/hub/330046/file-769078210-png/Official_Logos/ansible_logo_black_square_small.png?t=1421076128627'),
+ )
+ )
+
+ room_token = module.params['room_token']
+ message = module.params['msg']
+ title = module.params['title']
+ picture = module.params['picture']
+ payload = {'title': title, 'message': message, 'picture': picture}
+ send_request_to_hall(module, room_token, payload)
+ module.exit_json(msg="OK")
+
+from ansible.module_utils.basic import *
+from ansible.module_utils.urls import *
+main()
diff --git a/lib/ansible/modules/extras/notification/hipchat.py b/lib/ansible/modules/extras/notification/hipchat.py
new file mode 100644
index 0000000000..a07042bc3f
--- /dev/null
+++ b/lib/ansible/modules/extras/notification/hipchat.py
@@ -0,0 +1,219 @@
+#!/usr/bin/python
+# -*- coding: utf-8 -*-
+#
+# This file is part of Ansible
+#
+# Ansible is free software: you can redistribute it and/or modify
+# it under the terms of the GNU General Public License as published by
+# the Free Software Foundation, either version 3 of the License, or
+# (at your option) any later version.
+#
+# Ansible is distributed in the hope that it will be useful,
+# but WITHOUT ANY WARRANTY; without even the implied warranty of
+# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+# GNU General Public License for more details.
+#
+# You should have received a copy of the GNU General Public License
+# along with Ansible. If not, see <http://www.gnu.org/licenses/>.
+DOCUMENTATION = '''
+---
+module: hipchat
+version_added: "1.2"
+short_description: Send a message to HipChat
+description:
+    - Send a message to HipChat.
+options:
+ token:
+ description:
+ - API token.
+ required: true
+ room:
+ description:
+ - ID or name of the room.
+ required: true
+ from:
+ description:
+      - Name the message will appear to be sent from. Maximum of 15
+        characters; longer names are truncated.
+ required: false
+ default: Ansible
+ msg:
+ description:
+ - The message body.
+ required: true
+ default: null
+ color:
+ description:
+      - Background color for the message.
+ required: false
+ default: yellow
+ choices: [ "yellow", "red", "green", "purple", "gray", "random" ]
+ msg_format:
+ description:
+      - Message format, either C(html) or C(text).
+ required: false
+ default: text
+ choices: [ "text", "html" ]
+ notify:
+ description:
+ - notify or not (change the tab color, play a sound, etc)
+ required: false
+ default: 'yes'
+ choices: [ "yes", "no" ]
+ validate_certs:
+ description:
+ - If C(no), SSL certificates will not be validated. This should only be used
+ on personally controlled sites using self-signed certificates.
+ required: false
+ default: 'yes'
+ choices: ['yes', 'no']
+ version_added: 1.5.1
+ api:
+ description:
+      - API URL if using a self-hosted HipChat server. For HipChat API version 2,
+        use the C(/v2) path in the URI.
+ required: false
+ default: 'https://api.hipchat.com/v1'
+ version_added: 1.6.0
+
+
+requirements: [ ]
+author: "WAKAYAMA Shirou (@shirou), BOURDEL Paul (@pb8226)"
+'''
+
+EXAMPLES = '''
+- hipchat: room=notify msg="Ansible task finished"
+
+# Use Hipchat API version 2
+
+- hipchat:
+ api: "https://api.hipchat.com/v2/"
+ token: OAUTH2_TOKEN
+ room: notify
+ msg: "Ansible task finished"
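+
+# Hypothetical self-hosted server example (example hostname; the v1 API is used unless the URL contains /v2):
+- hipchat:
+    api: "https://hipchat.example.com/v1"
+    token: API_TOKEN
+    room: notify
+    msg: "Ansible task finished"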
+'''
+
+# ===========================================
+# HipChat module specific support methods.
+#
+
+import urllib
+try:
+ import json
+except ImportError:
+ import simplejson as json
+
+# import module snippets
+from ansible.module_utils.basic import AnsibleModule
+from ansible.module_utils.pycompat24 import get_exception
+from ansible.module_utils.urls import fetch_url
+
+DEFAULT_URI = "https://api.hipchat.com/v1"
+
+MSG_URI_V1 = "/rooms/message"
+
+NOTIFY_URI_V2 = "/room/{id_or_name}/notification"
+
+
+def send_msg_v1(module, token, room, msg_from, msg, msg_format='text',
+ color='yellow', notify=False, api=MSG_URI_V1):
+ '''sending message to hipchat v1 server'''
+
+ params = {}
+ params['room_id'] = room
+ params['from'] = msg_from[:15] # max length is 15
+ params['message'] = msg
+ params['message_format'] = msg_format
+ params['color'] = color
+ params['api'] = api
+ params['notify'] = int(notify)
+
+ url = api + MSG_URI_V1 + "?auth_token=%s" % (token)
+ data = urllib.urlencode(params)
+
+ if module.check_mode:
+ # In check mode, exit before actually sending the message
+ module.exit_json(changed=False)
+
+ response, info = fetch_url(module, url, data=data)
+ if info['status'] == 200:
+ return response.read()
+ else:
+ module.fail_json(msg="failed to send message, return status=%s" % str(info['status']))
+
+
+def send_msg_v2(module, token, room, msg_from, msg, msg_format='text',
+ color='yellow', notify=False, api=NOTIFY_URI_V2):
+ '''sending message to hipchat v2 server'''
+
+ headers = {'Authorization': 'Bearer %s' % token, 'Content-Type': 'application/json'}
+
+ body = dict()
+ body['message'] = msg
+ body['color'] = color
+ body['message_format'] = msg_format
+ body['notify'] = notify
+
+ POST_URL = api + NOTIFY_URI_V2
+
+ url = POST_URL.replace('{id_or_name}', urllib.pathname2url(room))
+ data = json.dumps(body)
+
+ if module.check_mode:
+ # In check mode, exit before actually sending the message
+ module.exit_json(changed=False)
+
+ response, info = fetch_url(module, url, data=data, headers=headers, method='POST')
+
+ # https://www.hipchat.com/docs/apiv2/method/send_room_notification shows
+ # 204 to be the expected result code.
+ if info['status'] in [200, 204]:
+ return response.read()
+ else:
+ module.fail_json(msg="failed to send message, return status=%s" % str(info['status']))
+
+
+# ===========================================
+# Module execution.
+#
+
+def main():
+
+ module = AnsibleModule(
+ argument_spec=dict(
+ token=dict(required=True, no_log=True),
+ room=dict(required=True),
+ msg=dict(required=True),
+ msg_from=dict(default="Ansible", aliases=['from']),
+ color=dict(default="yellow", choices=["yellow", "red", "green",
+ "purple", "gray", "random"]),
+ msg_format=dict(default="text", choices=["text", "html"]),
+ notify=dict(default=True, type='bool'),
+ validate_certs=dict(default='yes', type='bool'),
+ api=dict(default=DEFAULT_URI),
+ ),
+ supports_check_mode=True
+ )
+
+ token = module.params["token"]
+ room = str(module.params["room"])
+ msg = module.params["msg"]
+ msg_from = module.params["msg_from"]
+ color = module.params["color"]
+ msg_format = module.params["msg_format"]
+ notify = module.params["notify"]
+ api = module.params["api"]
+
+ try:
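+        # Crude version detection: any '/v2' in the API URL selects the v2
+        # notification endpoint; anything else falls back to the v1 message API.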
+ if api.find('/v2') != -1:
+ send_msg_v2(module, token, room, msg_from, msg, msg_format, color, notify, api)
+ else:
+ send_msg_v1(module, token, room, msg_from, msg, msg_format, color, notify, api)
+ except Exception:
+ e = get_exception()
+ module.fail_json(msg="unable to send msg: %s" % e)
+
+ changed = True
+ module.exit_json(changed=changed, room=room, msg_from=msg_from, msg=msg)
+
+if __name__ == '__main__':
+ main()
diff --git a/lib/ansible/modules/extras/notification/irc.py b/lib/ansible/modules/extras/notification/irc.py
new file mode 100644
index 0000000000..92f285df24
--- /dev/null
+++ b/lib/ansible/modules/extras/notification/irc.py
@@ -0,0 +1,300 @@
+#!/usr/bin/python
+# -*- coding: utf-8 -*-
+
+# (c) 2013, Jan-Piet Mens <jpmens () gmail.com>
+#
+# This file is part of Ansible
+#
+# Ansible is free software: you can redistribute it and/or modify
+# it under the terms of the GNU General Public License as published by
+# the Free Software Foundation, either version 3 of the License, or
+# (at your option) any later version.
+#
+# Ansible is distributed in the hope that it will be useful,
+# but WITHOUT ANY WARRANTY; without even the implied warranty of
+# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+# GNU General Public License for more details.
+#
+# You should have received a copy of the GNU General Public License
+# along with Ansible. If not, see <http://www.gnu.org/licenses/>.
+#
+
+DOCUMENTATION = '''
+---
+module: irc
+version_added: "1.2"
+short_description: Send a message to an IRC channel
+description:
+ - Send a message to an IRC channel. This is a very simplistic implementation.
+options:
+ server:
+ description:
+ - IRC server name/address
+ required: false
+ default: localhost
+ port:
+ description:
+ - IRC server port number
+ required: false
+ default: 6667
+ nick:
+ description:
+ - Nickname to send the message from. May be shortened, depending on server's NICKLEN setting.
+ required: false
+ default: ansible
+ msg:
+ description:
+ - The message body.
+ required: true
+ default: null
+ topic:
+ description:
+ - Set the channel topic
+ required: false
+ default: null
+ version_added: "2.0"
+ color:
+ description:
+      - Text color for the message. ("none" is a valid option in 1.6 or later;
+        in prior versions the default color was black, not "none".)
+        Added 11 more colors in version 2.0.
+ required: false
+ default: "none"
+ choices: [ "none", "white", "black", "blue", "green", "red", "brown", "purple", "orange", "yellow", "light_green", "teal", "light_cyan",
+ "light_blue", "pink", "gray", "light_gray"]
+ channel:
+ description:
+ - Channel name. One of nick_to or channel needs to be set. When both are set, the message will be sent to both of them.
+    required: false
+ nick_to:
+ description:
+ - A list of nicknames to send the message to. One of nick_to or channel needs to be set. When both are defined, the message will be sent to both of them.
+ required: false
+ default: null
+ version_added: "2.0"
+ key:
+ description:
+ - Channel key
+ required: false
+ version_added: "1.7"
+ passwd:
+ description:
+ - Server password
+ required: false
+ timeout:
+ description:
+ - Timeout to use while waiting for successful registration and join
+ messages, this is to prevent an endless loop
+ default: 30
+ version_added: "1.5"
+ use_ssl:
+ description:
+ - Designates whether TLS/SSL should be used when connecting to the IRC server
+ default: False
+ version_added: "1.8"
+ part:
+ description:
+ - Designates whether user should part from channel after sending message or not.
+ Useful for when using a faux bot and not wanting join/parts between messages.
+ default: True
+ version_added: "2.0"
+ style:
+ description:
+ - Text style for the message. Note italic does not work on some clients
+ default: None
+ required: False
+ choices: [ "bold", "underline", "reverse", "italic" ]
+ version_added: "2.0"
+
+# informational: requirements for nodes
+requirements: [ socket ]
+author:
+ - '"Jan-Piet Mens (@jpmens)"'
+ - '"Matt Martz (@sivel)"'
+'''
+
+EXAMPLES = '''
+- irc: server=irc.example.net channel="#t1" msg="Hello world"
+
+- local_action: irc port=6669
+ server="irc.example.net"
+ channel="#t1"
+ msg="All finished at {{ ansible_date_time.iso8601 }}"
+ color=red
+ nick=ansibleIRC
+
+- local_action: irc port=6669
+ server="irc.example.net"
+ channel="#t1"
+ nick_to=["nick1", "nick2"]
+ msg="All finished at {{ ansible_date_time.iso8601 }}"
+ color=red
+ nick=ansibleIRC
+'''
+
+# ===========================================
+# IRC module support methods.
+#
+
+import re
+import socket
+import ssl
+import time
+
+from time import sleep
+
+
+def send_msg(msg, server='localhost', port='6667', channel=None, nick_to=[], key=None, topic=None,
+ nick="ansible", color='none', passwd=False, timeout=30, use_ssl=False, part=True, style=None):
+ '''send message to IRC'''
+
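+    # mIRC-style formatting: the message text is prefixed with an optional style
+    # control code (e.g. \x02 for bold) and \x03 plus a two-digit color number.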
+ colornumbers = {
+ 'white': "00",
+ 'black': "01",
+ 'blue': "02",
+ 'green': "03",
+ 'red': "04",
+ 'brown': "05",
+ 'purple': "06",
+ 'orange': "07",
+ 'yellow': "08",
+ 'light_green': "09",
+ 'teal': "10",
+ 'light_cyan': "11",
+ 'light_blue': "12",
+ 'pink': "13",
+ 'gray': "14",
+ 'light_gray': "15",
+ }
+
+ stylechoices = {
+ 'bold': "\x02",
+ 'underline': "\x1F",
+ 'reverse': "\x16",
+ 'italic': "\x1D",
+ }
+
+    try:
+        styletext = stylechoices[style]
+    except KeyError:
+        styletext = ""
+
+    try:
+        colornumber = colornumbers[color]
+        colortext = "\x03" + colornumber
+    except KeyError:
+        colortext = ""
+
+ message = styletext + colortext + msg
+
+ irc = socket.socket(socket.AF_INET, socket.SOCK_STREAM)
+ if use_ssl:
+ irc = ssl.wrap_socket(irc)
+ irc.connect((server, int(port)))
+ if passwd:
+ irc.send('PASS %s\r\n' % passwd)
+ irc.send('NICK %s\r\n' % nick)
+ irc.send('USER %s %s %s :ansible IRC\r\n' % (nick, nick, nick))
+ motd = ''
+ start = time.time()
+ while 1:
+ motd += irc.recv(1024)
+ # The server might send back a shorter nick than we specified (due to NICKLEN),
+ # so grab that and use it from now on (assuming we find the 00[1-4] response).
+ match = re.search('^:\S+ 00[1-4] (?P<nick>\S+) :', motd, flags=re.M)
+ if match:
+ nick = match.group('nick')
+ break
+ elif time.time() - start > timeout:
+ raise Exception('Timeout waiting for IRC server welcome response')
+ sleep(0.5)
+
+ if key:
+ irc.send('JOIN %s %s\r\n' % (channel, key))
+ else:
+ irc.send('JOIN %s\r\n' % channel)
+
+ join = ''
+ start = time.time()
+ while 1:
+ join += irc.recv(1024)
+ if re.search('^:\S+ 366 %s %s :' % (nick, channel), join, flags=re.M):
+ break
+ elif time.time() - start > timeout:
+ raise Exception('Timeout waiting for IRC JOIN response')
+ sleep(0.5)
+
+ if topic is not None:
+ irc.send('TOPIC %s :%s\r\n' % (channel, topic))
+ sleep(1)
+
+ if nick_to:
+ for nick in nick_to:
+ irc.send('PRIVMSG %s :%s\r\n' % (nick, message))
+ if channel:
+ irc.send('PRIVMSG %s :%s\r\n' % (channel, message))
+ sleep(1)
+ if part:
+ irc.send('PART %s\r\n' % channel)
+ irc.send('QUIT\r\n')
+ sleep(1)
+ irc.close()
+
+# ===========================================
+# Main
+#
+
+
+def main():
+ module = AnsibleModule(
+ argument_spec=dict(
+ server=dict(default='localhost'),
+ port=dict(type='int', default=6667),
+ nick=dict(default='ansible'),
+ nick_to=dict(required=False, type='list'),
+ msg=dict(required=True),
+ color=dict(default="none", aliases=['colour'], choices=["white", "black", "blue",
+ "green", "red", "brown",
+ "purple", "orange", "yellow",
+ "light_green", "teal", "light_cyan",
+ "light_blue", "pink", "gray",
+ "light_gray", "none"]),
+ style=dict(default="none", choices=["underline", "reverse", "bold", "italic", "none"]),
+ channel=dict(required=False),
+ key=dict(no_log=True),
+ topic=dict(),
+ passwd=dict(no_log=True),
+ timeout=dict(type='int', default=30),
+ part=dict(type='bool', default=True),
+ use_ssl=dict(type='bool', default=False)
+ ),
+ supports_check_mode=True,
+ required_one_of=[['channel', 'nick_to']]
+ )
+
+ server = module.params["server"]
+ port = module.params["port"]
+ nick = module.params["nick"]
+ nick_to = module.params["nick_to"]
+ msg = module.params["msg"]
+ color = module.params["color"]
+ channel = module.params["channel"]
+ topic = module.params["topic"]
+ if topic and not channel:
+ module.fail_json(msg="When topic is specified, a channel is required.")
+ key = module.params["key"]
+ passwd = module.params["passwd"]
+ timeout = module.params["timeout"]
+ use_ssl = module.params["use_ssl"]
+ part = module.params["part"]
+ style = module.params["style"]
+
+ try:
+ send_msg(msg, server, port, channel, nick_to, key, topic, nick, color, passwd, timeout, use_ssl, part, style)
+ except Exception, e:
+ module.fail_json(msg="unable to send to IRC: %s" % e)
+
+ module.exit_json(changed=False, channel=channel, nick=nick,
+ msg=msg)
+
+# import module snippets
+from ansible.module_utils.basic import *
+main()
diff --git a/lib/ansible/modules/extras/notification/jabber.py b/lib/ansible/modules/extras/notification/jabber.py
new file mode 100644
index 0000000000..840954658f
--- /dev/null
+++ b/lib/ansible/modules/extras/notification/jabber.py
@@ -0,0 +1,165 @@
+#!/usr/bin/python
+# -*- coding: utf-8 -*-
+#
+# (c) 2015, Brian Coca <bcoca@ansible.com>
+#
+# This file is part of Ansible
+#
+# Ansible is free software: you can redistribute it and/or modify
+# it under the terms of the GNU General Public License as published by
+# the Free Software Foundation, either version 3 of the License, or
+# (at your option) any later version.
+#
+# Ansible is distributed in the hope that it will be useful,
+# but WITHOUT ANY WARRANTY; without even the implied warranty of
+# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+# GNU General Public License for more details.
+#
+# You should have received a copy of the GNU General Public License
+# along with Ansible. If not, see <http://www.gnu.org/licenses/>
+
+
+DOCUMENTATION = '''
+---
+version_added: "1.2"
+module: jabber
+short_description: Send a message to jabber user or chat room
+description:
+ - Send a message to jabber
+options:
+ user:
+ description:
+ - User as which to connect
+ required: true
+ password:
+ description:
+ - password for user to connect
+ required: true
+ to:
+ description:
+      - User ID or name of the room; when sending to a room, append a slash and your nick.
+ required: true
+ msg:
+ description:
+ - The message body.
+ required: true
+ default: null
+ host:
+ description:
+ - host to connect, overrides user info
+ required: false
+ port:
+ description:
+ - port to connect to, overrides default
+ required: false
+ default: 5222
+ encoding:
+ description:
+ - message encoding
+ required: false
+
+# informational: requirements for nodes
+requirements:
+ - python xmpp (xmpppy)
+author: "Brian Coca (@bcoca)"
+'''
+
+EXAMPLES = '''
+# send a message to a user
+- jabber: user=mybot@example.net
+ password=secret
+ to=friend@example.net
+ msg="Ansible task finished"
+
+# send a message to a room
+- jabber: user=mybot@example.net
+ password=secret
+ to=mychaps@conference.example.net/ansiblebot
+ msg="Ansible task finished"
+
+# send a message, specifying the host and port
+- jabber: user=mybot@example.net
+ host=talk.example.net
+ port=5223
+ password=secret
+ to=mychaps@example.net
+ msg="Ansible task finished"
+'''
+
+import os
+import re
+import time
+
+HAS_XMPP = True
+try:
+ import xmpp
+except ImportError:
+ HAS_XMPP = False
+
+def main():
+
+ module = AnsibleModule(
+ argument_spec=dict(
+ user=dict(required=True),
+ password=dict(required=True, no_log=True),
+ to=dict(required=True),
+ msg=dict(required=True),
+ host=dict(required=False),
+ port=dict(required=False,default=5222),
+ encoding=dict(required=False),
+ ),
+ supports_check_mode=True
+ )
+
+ if not HAS_XMPP:
+ module.fail_json(msg="The required python xmpp library (xmpppy) is not installed")
+
+ jid = xmpp.JID(module.params['user'])
+ user = jid.getNode()
+ server = jid.getDomain()
+ port = module.params['port']
+ password = module.params['password']
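+    # 'to' may be 'room@conference.example.net/nick'; anything after the first
+    # slash is treated as the nick to use in the room (see groupchat handling below).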
+ try:
+ to, nick = module.params['to'].split('/', 1)
+ except ValueError:
+ to, nick = module.params['to'], None
+
+ if module.params['host']:
+ host = module.params['host']
+ else:
+ host = server
+ if module.params['encoding']:
+        xmpp.simplexml.ENCODING = module.params['encoding']
+
+ msg = xmpp.protocol.Message(body=module.params['msg'])
+
+ try:
+ conn=xmpp.Client(server, debug=[])
+ if not conn.connect(server=(host,port)):
+ module.fail_json(rc=1, msg='Failed to connect to server: %s' % (server))
+ if not conn.auth(user,password,'Ansible'):
+ module.fail_json(rc=1, msg='Failed to authorize %s on: %s' % (user,server))
+ # some old servers require this, also the sleep following send
+ conn.sendInitPresence(requestRoster=0)
+
+ if nick: # sending to room instead of user, need to join
+ msg.setType('groupchat')
+ msg.setTag('x', namespace='http://jabber.org/protocol/muc#user')
+ conn.send(xmpp.Presence(to=module.params['to']))
+ time.sleep(1)
+ else:
+ msg.setType('chat')
+
+ msg.setTo(to)
+ if not module.check_mode:
+ conn.send(msg)
+ time.sleep(1)
+ conn.disconnect()
+ except Exception, e:
+ module.fail_json(msg="unable to send msg: %s" % e)
+
+ module.exit_json(changed=False, to=to, user=user, msg=msg.getBody())
+
+# import module snippets
+from ansible.module_utils.basic import *
+main()
diff --git a/lib/ansible/modules/extras/notification/mail.py b/lib/ansible/modules/extras/notification/mail.py
new file mode 100644
index 0000000000..c8b2bb30c7
--- /dev/null
+++ b/lib/ansible/modules/extras/notification/mail.py
@@ -0,0 +1,302 @@
+#!/usr/bin/python
+# -*- coding: utf-8 -*-
+
+# Copyright 2012 Dag Wieers <dag@wieers.com>
+#
+# This file is part of Ansible
+#
+# Ansible is free software: you can redistribute it and/or modify
+# it under the terms of the GNU General Public License as published by
+# the Free Software Foundation, either version 3 of the License, or
+# (at your option) any later version.
+#
+# Ansible is distributed in the hope that it will be useful,
+# but WITHOUT ANY WARRANTY; without even the implied warranty of
+# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+# GNU General Public License for more details.
+#
+# You should have received a copy of the GNU General Public License
+# along with Ansible. If not, see <http://www.gnu.org/licenses/>.
+
+DOCUMENTATION = """
+---
+author: "Dag Wieers (@dagwieers)"
+module: mail
+short_description: Send an email
+description:
+ - This module is useful for sending emails from playbooks.
+ - One may wonder why automate sending emails? In complex environments
+ there are from time to time processes that cannot be automated, either
+ because you lack the authority to make it so, or because not everyone
+ agrees to a common approach.
+ - If you cannot automate a specific step, but the step is non-blocking,
+ sending out an email to the responsible party to make him perform his
+ part of the bargain is an elegant way to put the responsibility in
+ someone else's lap.
+ - Of course sending out a mail can be equally useful as a way to notify
+ one or more people in a team that a specific action has been
+ (successfully) taken.
+version_added: "0.8"
+options:
+ from:
+ description:
+ - The email-address the mail is sent from. May contain address and phrase.
+ default: root
+ required: false
+ to:
+ description:
+ - The email-address(es) the mail is being sent to. This is
+ a comma-separated list, which may contain address and phrase portions.
+ default: root
+ required: false
+ cc:
+ description:
+ - The email-address(es) the mail is being copied to. This is
+ a comma-separated list, which may contain address and phrase portions.
+ required: false
+ bcc:
+ description:
+ - The email-address(es) the mail is being 'blind' copied to. This is
+ a comma-separated list, which may contain address and phrase portions.
+ required: false
+ subject:
+ description:
+ - The subject of the email being sent.
+ required: true
+ body:
+ description:
+ - The body of the email being sent.
+ default: $subject
+ required: false
+ username:
+ description:
+ - If SMTP requires username
+ default: null
+ required: false
+ version_added: "1.9"
+ password:
+ description:
+ - If SMTP requires password
+ default: null
+ required: false
+ version_added: "1.9"
+ host:
+ description:
+ - The mail server
+ default: 'localhost'
+ required: false
+ port:
+ description:
+ - The mail server port
+ default: '25'
+ required: false
+ version_added: "1.0"
+ attach:
+ description:
+ - A space-separated list of pathnames of files to attach to the message.
+ Attached files will have their content-type set to C(application/octet-stream).
+ default: null
+ required: false
+ version_added: "1.0"
+ headers:
+ description:
+ - A vertical-bar-separated list of headers which should be added to the message.
+ Each individual header is specified as C(header=value) (see example below).
+ default: null
+ required: false
+ version_added: "1.0"
+ charset:
+ description:
+ - The character set of email being sent
+ default: 'us-ascii'
+ required: false
+ subtype:
+ description:
+      - The minor MIME type; can be either C(text) or C(html). The major type is always C(text).
+ default: 'plain'
+ required: false
+ version_added: "2.0"
+"""
+
+EXAMPLES = '''
+# Example playbook sending mail to root
+- local_action: mail subject='System {{ ansible_hostname }} has been successfully provisioned.'
+
+# Sending an e-mail using Gmail SMTP servers
+- local_action: mail
+ host='smtp.gmail.com'
+ port=587
+ username=username@gmail.com
+ password='mysecret'
+ to="John Smith <john.smith@example.com>"
+ subject='Ansible-report'
+ body='System {{ ansible_hostname }} has been successfully provisioned.'
+
+# Send e-mail to a bunch of users, attaching files
+- local_action: mail
+ host='127.0.0.1'
+ port=2025
+ subject="Ansible-report"
+ body="Hello, this is an e-mail. I hope you like it ;-)"
+ from="jane@example.net (Jane Jolie)"
+ to="John Doe <j.d@example.org>, Suzie Something <sue@example.com>"
+ cc="Charlie Root <root@localhost>"
+ attach="/etc/group /tmp/pavatar2.png"
+ headers=Reply-To=john@example.com|X-Special="Something or other"
+    charset=utf8
+
+# Sending an e-mail using the remote machine, not the Ansible controller node
+- mail:
+ host='localhost'
+ port=25
+ to="John Smith <john.smith@example.com>"
+ subject='Ansible-report'
+ body='System {{ ansible_hostname }} has been successfully provisioned.'
+'''
+
+import os
+import sys
+import smtplib
+import ssl
+
+try:
+ from email import encoders
+ import email.utils
+ from email.utils import parseaddr, formataddr
+ from email.mime.base import MIMEBase
+    from email.mime.multipart import MIMEMultipart
+ from email.mime.text import MIMEText
+except ImportError:
+ from email import Encoders as encoders
+ import email.Utils
+ from email.Utils import parseaddr, formataddr
+ from email.MIMEBase import MIMEBase
+ from email.MIMEMultipart import MIMEMultipart
+ from email.MIMEText import MIMEText
+
+def main():
+
+ module = AnsibleModule(
+ argument_spec = dict(
+ username = dict(default=None),
+ password = dict(default=None, no_log=True),
+ host = dict(default='localhost'),
+ port = dict(default='25'),
+ sender = dict(default='root', aliases=['from']),
+ to = dict(default='root', aliases=['recipients']),
+ cc = dict(default=None),
+ bcc = dict(default=None),
+ subject = dict(required=True, aliases=['msg']),
+ body = dict(default=None),
+ attach = dict(default=None),
+ headers = dict(default=None),
+ charset = dict(default='us-ascii'),
+ subtype = dict(default='plain')
+ )
+ )
+
+ username = module.params.get('username')
+ password = module.params.get('password')
+ host = module.params.get('host')
+ port = module.params.get('port')
+ sender = module.params.get('sender')
+ recipients = module.params.get('to')
+ copies = module.params.get('cc')
+ blindcopies = module.params.get('bcc')
+ subject = module.params.get('subject')
+ body = module.params.get('body')
+ attach_files = module.params.get('attach')
+ headers = module.params.get('headers')
+ charset = module.params.get('charset')
+ subtype = module.params.get('subtype')
+ sender_phrase, sender_addr = parseaddr(sender)
+
+ if not body:
+ body = subject
+
+ try:
+ try:
+ smtp = smtplib.SMTP_SSL(host, port=int(port))
+ except (smtplib.SMTPException, ssl.SSLError):
+ smtp = smtplib.SMTP(host, port=int(port))
+ except Exception, e:
+ module.fail_json(rc=1, msg='Failed to send mail to server %s on port %s: %s' % (host, port, e))
+
+ smtp.ehlo()
+ if username and password:
+ if smtp.has_extn('STARTTLS'):
+ smtp.starttls()
+ try:
+ smtp.login(username, password)
+ except smtplib.SMTPAuthenticationError:
+ module.fail_json(msg="Authentication to %s:%s failed, please check your username and/or password" % (host, port))
+
+ msg = MIMEMultipart()
+ msg['Subject'] = subject
+ msg['From'] = formataddr((sender_phrase, sender_addr))
+ msg.preamble = "Multipart message"
+
+ if headers is not None:
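+        # headers is a '|'-separated list of 'Header=Value' pairs; split each
+        # pair on the first '=' only so values may themselves contain '='.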
+ for hdr in [x.strip() for x in headers.split('|')]:
+ try:
+                h_key, h_val = hdr.split('=', 1)
+                msg.add_header(h_key, h_val)
+            except ValueError:
+                pass
+
+ if 'X-Mailer' not in msg:
+ msg.add_header('X-Mailer', "Ansible")
+
+ to_list = []
+ cc_list = []
+ addr_list = []
+
+ if recipients is not None:
+ for addr in [x.strip() for x in recipients.split(',')]:
+ to_list.append( formataddr( parseaddr(addr)) )
+ addr_list.append( parseaddr(addr)[1] ) # address only, w/o phrase
+ if copies is not None:
+ for addr in [x.strip() for x in copies.split(',')]:
+ cc_list.append( formataddr( parseaddr(addr)) )
+ addr_list.append( parseaddr(addr)[1] ) # address only, w/o phrase
+ if blindcopies is not None:
+ for addr in [x.strip() for x in blindcopies.split(',')]:
+ addr_list.append( parseaddr(addr)[1] )
+
+ if len(to_list) > 0:
+ msg['To'] = ", ".join(to_list)
+ if len(cc_list) > 0:
+ msg['Cc'] = ", ".join(cc_list)
+
+ part = MIMEText(body + "\n\n", _subtype=subtype, _charset=charset)
+ msg.attach(part)
+
+ if attach_files is not None:
+ for file in attach_files.split():
+ try:
+ fp = open(file, 'rb')
+
+ part = MIMEBase('application', 'octet-stream')
+ part.set_payload(fp.read())
+ fp.close()
+
+ encoders.encode_base64(part)
+
+ part.add_header('Content-disposition', 'attachment', filename=os.path.basename(file))
+ msg.attach(part)
+ except Exception, e:
+ module.fail_json(rc=1, msg="Failed to send mail: can't attach file %s: %s" % (file, e))
+
+ composed = msg.as_string()
+
+ try:
+ smtp.sendmail(sender_addr, set(addr_list), composed)
+ except Exception, e:
+ module.fail_json(rc=1, msg='Failed to send mail to %s: %s' % (", ".join(addr_list), e))
+
+ smtp.quit()
+
+ module.exit_json(changed=False)
+
+# import module snippets
+from ansible.module_utils.basic import *
+main()
diff --git a/lib/ansible/modules/extras/notification/mqtt.py b/lib/ansible/modules/extras/notification/mqtt.py
new file mode 100644
index 0000000000..14713c2b1e
--- /dev/null
+++ b/lib/ansible/modules/extras/notification/mqtt.py
@@ -0,0 +1,166 @@
+#!/usr/bin/python
+# -*- coding: utf-8 -*-
+
+# (c) 2013, 2014, Jan-Piet Mens <jpmens () gmail.com>
+#
+# This file is part of Ansible
+#
+# Ansible is free software: you can redistribute it and/or modify
+# it under the terms of the GNU General Public License as published by
+# the Free Software Foundation, either version 3 of the License, or
+# (at your option) any later version.
+#
+# Ansible is distributed in the hope that it will be useful,
+# but WITHOUT ANY WARRANTY; without even the implied warranty of
+# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+# GNU General Public License for more details.
+#
+# You should have received a copy of the GNU General Public License
+# along with Ansible. If not, see <http://www.gnu.org/licenses/>.
+#
+
+DOCUMENTATION = '''
+---
+module: mqtt
+short_description: Publish a message on an MQTT topic for the IoT
+version_added: "1.2"
+description:
+ - Publish a message on an MQTT topic.
+options:
+ server:
+ description:
+ - MQTT broker address/name
+ required: false
+ default: localhost
+ port:
+ description:
+ - MQTT broker port number
+ required: false
+ default: 1883
+ username:
+ description:
+ - Username to authenticate against the broker.
+ required: false
+ password:
+ description:
+ - Password for C(username) to authenticate against the broker.
+ required: false
+ client_id:
+ description:
+ - MQTT client identifier
+ required: false
+ default: hostname + pid
+ topic:
+ description:
+ - MQTT topic name
+ required: true
+ default: null
+ payload:
+ description:
+ - Payload. The special string C("None") may be used to send a NULL
+ (i.e. empty) payload which is useful to simply notify with the I(topic)
+ or to clear previously retained messages.
+ required: true
+ default: null
+ qos:
+ description:
+ - QoS (Quality of Service)
+ required: false
+ default: 0
+ choices: [ "0", "1", "2" ]
+ retain:
+ description:
+      - Setting this flag causes the broker to retain (i.e. keep) the message so that
+        applications that subsequently subscribe to the topic can receive the last
+        retained message immediately.
+ required: false
+ default: False
+
+# informational: requirements for nodes
+requirements: [ mosquitto ]
+notes:
+ - This module requires a connection to an MQTT broker such as Mosquitto
+ U(http://mosquitto.org) and the I(Paho) C(mqtt) Python client (U(https://pypi.python.org/pypi/paho-mqtt)).
+author: "Jan-Piet Mens (@jpmens)"
+'''
+
+EXAMPLES = '''
+- local_action: mqtt
+ topic=service/ansible/{{ ansible_hostname }}
+ payload="Hello at {{ ansible_date_time.iso8601 }}"
+ qos=0
+ retain=false
+ client_id=ans001
+'''
+
+# ===========================================
+# MQTT module support methods.
+#
+
+HAS_PAHOMQTT = True
+try:
+ import socket
+ import paho.mqtt.publish as mqtt
+except ImportError:
+ HAS_PAHOMQTT = False
+
+# ===========================================
+# Main
+#
+
+def main():
+
+ module = AnsibleModule(
+ argument_spec=dict(
+ server = dict(default = 'localhost'),
+ port = dict(default = 1883, type='int'),
+ topic = dict(required = True),
+ payload = dict(required = True),
+ client_id = dict(default = None),
+ qos = dict(default="0", choices=["0", "1", "2"]),
+ retain = dict(default=False, type='bool'),
+ username = dict(default = None),
+ password = dict(default = None, no_log=True),
+ ),
+ supports_check_mode=True
+ )
+
+ if not HAS_PAHOMQTT:
+ module.fail_json(msg="Paho MQTT is not installed")
+
+ server = module.params.get("server", 'localhost')
+ port = module.params.get("port", 1883)
+ topic = module.params.get("topic")
+ payload = module.params.get("payload")
+ client_id = module.params.get("client_id", '')
+ qos = int(module.params.get("qos", 0))
+ retain = module.params.get("retain")
+ username = module.params.get("username", None)
+ password = module.params.get("password", None)
+
+ if client_id is None:
+ client_id = "%s_%s" % (socket.getfqdn(), os.getpid())
+
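+    # The literal string 'None' requests a NULL (empty) payload, which can be
+    # used to notify on the topic or to clear a previously retained message.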
+ if payload and payload == 'None':
+ payload = None
+
+ auth=None
+ if username is not None:
+ auth = { 'username' : username, 'password' : password }
+
+ try:
+ rc = mqtt.single(topic, payload,
+ qos=qos,
+ retain=retain,
+ client_id=client_id,
+ hostname=server,
+ port=port,
+ auth=auth)
+ except Exception, e:
+ module.fail_json(msg="unable to publish to MQTT broker %s" % (e))
+
+ module.exit_json(changed=False, topic=topic)
+
+# import module snippets
+from ansible.module_utils.basic import *
+main()
diff --git a/lib/ansible/modules/extras/notification/nexmo.py b/lib/ansible/modules/extras/notification/nexmo.py
new file mode 100644
index 0000000000..89a246c0d9
--- /dev/null
+++ b/lib/ansible/modules/extras/notification/nexmo.py
@@ -0,0 +1,141 @@
+#!/usr/bin/python
+# -*- coding: utf-8 -*-
+
+# (c) 2014, Matt Martz <matt@sivel.net>
+#
+# This file is part of Ansible
+#
+# Ansible is free software: you can redistribute it and/or modify
+# it under the terms of the GNU General Public License as published by
+# the Free Software Foundation, either version 3 of the License, or
+# (at your option) any later version.
+#
+# Ansible is distributed in the hope that it will be useful,
+# but WITHOUT ANY WARRANTY; without even the implied warranty of
+# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+# GNU General Public License for more details.
+#
+# You should have received a copy of the GNU General Public License
+# along with Ansible. If not, see <http://www.gnu.org/licenses/>.
+
+DOCUMENTATION = """
+module: nexmo
+short_description: Send a SMS via nexmo
+description:
+ - Send a SMS message via nexmo
+version_added: 1.6
+author: "Matt Martz (@sivel)"
+options:
+ api_key:
+ description:
+ - Nexmo API Key
+ required: true
+ api_secret:
+ description:
+ - Nexmo API Secret
+ required: true
+ src:
+ description:
+ - Nexmo Number to send from
+ required: true
+ dest:
+ description:
+ - Phone number(s) to send SMS message to
+ required: true
+ msg:
+ description:
+      - Message text to send. Messages longer than 160 characters will be
+        split into multiple messages.
+ required: true
+ validate_certs:
+ description:
+ - If C(no), SSL certificates will not be validated. This should only be used
+ on personally controlled sites using self-signed certificates.
+ required: false
+ default: 'yes'
+ choices:
+ - 'yes'
+ - 'no'
+"""
+
+EXAMPLES = """
+- name: Send notification message via Nexmo
+ local_action:
+ module: nexmo
+ api_key: 640c8a53
+ api_secret: 0ce239a6
+ src: 12345678901
+ dest:
+ - 10987654321
+ - 16789012345
+ msg: "{{ inventory_hostname }} completed"
+"""
+
+import urllib
+
+NEXMO_API = 'https://rest.nexmo.com/sms/json'
+
+
+def send_msg(module):
+ failed = list()
+ responses = dict()
+ msg = {
+ 'api_key': module.params.get('api_key'),
+ 'api_secret': module.params.get('api_secret'),
+ 'from': module.params.get('src'),
+ 'text': module.params.get('msg')
+ }
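+    # One API request is sent per destination number; Nexmo itself splits
+    # messages longer than 160 characters into multiple SMS parts.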
+ for number in module.params.get('dest'):
+ msg['to'] = number
+ url = "%s?%s" % (NEXMO_API, urllib.urlencode(msg))
+
+ headers = dict(Accept='application/json')
+ response, info = fetch_url(module, url, headers=headers)
+ if info['status'] != 200:
+ failed.append(number)
+ responses[number] = dict(failed=True)
+
+ try:
+ responses[number] = json.load(response)
+ except:
+ failed.append(number)
+ responses[number] = dict(failed=True)
+ else:
+ for message in responses[number]['messages']:
+ if int(message['status']) != 0:
+ failed.append(number)
+ responses[number] = dict(failed=True, **responses[number])
+
+ if failed:
+        msg = 'One or more messages failed to send'
+ else:
+ msg = ''
+
+ module.exit_json(failed=bool(failed), msg=msg, changed=False,
+ responses=responses)
+
+
+def main():
+ argument_spec = url_argument_spec()
+ argument_spec.update(
+ dict(
+ api_key=dict(required=True, no_log=True),
+ api_secret=dict(required=True, no_log=True),
+ src=dict(required=True, type='int'),
+ dest=dict(required=True, type='list'),
+ msg=dict(required=True),
+ ),
+ )
+
+ module = AnsibleModule(
+ argument_spec=argument_spec
+ )
+
+ send_msg(module)
+
+
+# import module snippets
+from ansible.module_utils.basic import *
+from ansible.module_utils.urls import *
+
+main()
diff --git a/lib/ansible/modules/extras/notification/osx_say.py b/lib/ansible/modules/extras/notification/osx_say.py
new file mode 100644
index 0000000000..7c0ba84458
--- /dev/null
+++ b/lib/ansible/modules/extras/notification/osx_say.py
@@ -0,0 +1,76 @@
+#!/usr/bin/python
+# -*- coding: utf-8 -*-
+
+# (c) 2013, Michael DeHaan <michael@ansible.com>
+#
+# This file is part of Ansible
+#
+# Ansible is free software: you can redistribute it and/or modify
+# it under the terms of the GNU General Public License as published by
+# the Free Software Foundation, either version 3 of the License, or
+# (at your option) any later version.
+#
+# Ansible is distributed in the hope that it will be useful,
+# but WITHOUT ANY WARRANTY; without even the implied warranty of
+# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+# GNU General Public License for more details.
+#
+# You should have received a copy of the GNU General Public License
+# along with Ansible. If not, see <http://www.gnu.org/licenses/>.
+
+DOCUMENTATION = '''
+---
+module: osx_say
+version_added: "1.2"
+short_description: Makes an OS X computer speak.
+description:
+    - Makes an OS X computer speak! Amuse your friends, annoy your coworkers!
+notes:
+ - If you like this module, you may also be interested in the osx_say callback in the plugins/ directory of the source checkout.
+options:
+  msg:
+    description:
+      - What to say
+    required: true
+  voice:
+    description:
+      - What voice to use
+    required: false
+requirements: [ say ]
+author:
+ - "Ansible Core Team"
+ - "Michael DeHaan (@mpdehaan)"
+'''
+
+EXAMPLES = '''
+- local_action: osx_say msg="{{inventory_hostname}} is all done" voice=Zarvox
+'''
+
+DEFAULT_VOICE='Trinoids'
+
+def say(module, msg, voice):
+ module.run_command(["/usr/bin/say", msg, "--voice=%s" % (voice)], check_rc=True)
+
+def main():
+
+ module = AnsibleModule(
+ argument_spec=dict(
+ msg=dict(required=True),
+ voice=dict(required=False, default=DEFAULT_VOICE),
+ ),
+ supports_check_mode=False
+ )
+
+ if not os.path.exists("/usr/bin/say"):
+ module.fail_json(msg="/usr/bin/say is not installed")
+
+ msg = module.params['msg']
+ voice = module.params['voice']
+
+ say(module, msg, voice)
+
+ module.exit_json(msg=msg, changed=False)
+
+# import module snippets
+from ansible.module_utils.basic import *
+main()
diff --git a/lib/ansible/modules/extras/notification/pushbullet.py b/lib/ansible/modules/extras/notification/pushbullet.py
new file mode 100644
index 0000000000..0d5ab7c4d4
--- /dev/null
+++ b/lib/ansible/modules/extras/notification/pushbullet.py
@@ -0,0 +1,187 @@
+#!/usr/bin/python
+# -*- coding: utf-8 -*-
+
+# This file is part of Ansible
+#
+# Ansible is free software: you can redistribute it and/or modify
+# it under the terms of the GNU General Public License as published by
+# the Free Software Foundation, either version 3 of the License, or
+# (at your option) any later version.
+#
+# Ansible is distributed in the hope that it will be useful,
+# but WITHOUT ANY WARRANTY; without even the implied warranty of
+# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+# GNU General Public License for more details.
+#
+# You should have received a copy of the GNU General Public License
+# along with Ansible. If not, see <http://www.gnu.org/licenses/>.
+
+DOCUMENTATION = '''
+---
+author: "Willy Barro (@willybarro)"
+requirements: [ pushbullet.py ]
+module: pushbullet
+short_description: Sends notifications to Pushbullet
+description:
+ - This module sends push notifications via Pushbullet to channels or devices.
+version_added: "2.0"
+options:
+ api_key:
+ description:
+      - Pushbullet API token
+ required: true
+ channel:
+ description:
+      - The tag of the channel to which you wish to broadcast a push notification,
+        as seen under "My Channels" > "Edit your channel" on the
+        Pushbullet page.
+ required: false
+ default: null
+ device:
+ description:
+      - The name of the device to which you wish to send a push notification,
+        as seen on the Pushbullet main page.
+ required: false
+ default: null
+ push_type:
+ description:
+      - The type of push you wish to send, either a note or a link.
+ required: false
+ default: note
+ choices: [ "note", "link" ]
+ title:
+ description:
+ - Title of the notification.
+ required: true
+  body:
+    description:
+      - Body of the notification, e.g. details of the fault you are alerting about.
+    required: false
+  url:
+    description:
+      - URL field, used when I(push_type) is C(link).
+    required: false
+
+notes:
+ - Requires pushbullet.py Python package on the remote host.
+ You can install it via pip with ($ pip install pushbullet.py).
+ See U(https://github.com/randomchars/pushbullet.py)
+'''
+
+EXAMPLES = '''
+# Sends a push notification to a device
+- pushbullet:
+ api_key: "ABC123abc123ABC123abc123ABC123ab"
+ device: "Chrome"
+ title: "You may see this on Google Chrome"
+
+# Sends a link to a device
+- pushbullet:
+ api_key: "ABC123abc123ABC123abc123ABC123ab"
+ device: "Chrome"
+ push_type: "link"
+ title: "Ansible Documentation"
+ body: "http://docs.ansible.com/"
+
+# Sends a push notification to a channel
+- pushbullet:
+ api_key: "ABC123abc123ABC123abc123ABC123ab"
+ channel: "my-awesome-channel"
+ title: "Broadcasting a message to the #my-awesome-channel folks"
+
+# Sends a push notification with title and body to a channel
+- pushbullet:
+ api_key: "ABC123abc123ABC123abc123ABC123ab"
+ channel: "my-awesome-channel"
+ title: "ALERT! Signup service is down"
+ body: "Error rate on signup service is over 90% for more than 2 minutes"
+'''
+
+try:
+ from pushbullet import PushBullet
+ from pushbullet.errors import InvalidKeyError, PushError
+except ImportError:
+ pushbullet_found = False
+else:
+ pushbullet_found = True
+
+# ===========================================
+# Main
+#
+
+def main():
+ module = AnsibleModule(
+ argument_spec = dict(
+ api_key = dict(type='str', required=True, no_log=True),
+ channel = dict(type='str', default=None),
+ device = dict(type='str', default=None),
+ push_type = dict(type='str', default="note", choices=['note', 'link']),
+ title = dict(type='str', required=True),
+ body = dict(type='str', default=None),
+ url = dict(type='str', default=None),
+ ),
+ mutually_exclusive = (
+ ['channel', 'device'],
+ ),
+ supports_check_mode=True
+ )
+
+ api_key = module.params['api_key']
+ channel = module.params['channel']
+ device = module.params['device']
+ push_type = module.params['push_type']
+ title = module.params['title']
+ body = module.params['body']
+ url = module.params['url']
+
+ if not pushbullet_found:
+ module.fail_json(msg="Python 'pushbullet.py' module is required. Install via: $ pip install pushbullet.py")
+
+ # Init pushbullet
+ try:
+ pb = PushBullet(api_key)
+ target = None
+ except InvalidKeyError:
+ module.fail_json(msg="Invalid api_key")
+
+ # Checks for channel/device
+ if device is None and channel is None:
+ module.fail_json(msg="You need to provide a channel or a device.")
+
+ # Search for given device
+ if device is not None:
+ devices_by_nickname = {}
+ for d in pb.devices:
+ devices_by_nickname[d.nickname] = d
+
+ if device in devices_by_nickname:
+ target = devices_by_nickname[device]
+ else:
+ module.fail_json(msg="Device '%s' not found. Available devices: '%s'" % (device, "', '".join(devices_by_nickname.keys())))
+
+ # Search for given channel
+ if channel is not None:
+ channels_by_tag = {}
+ for c in pb.channels:
+ channels_by_tag[c.channel_tag] = c
+
+ if channel in channels_by_tag:
+ target = channels_by_tag[channel]
+ else:
+ module.fail_json(msg="Channel '%s' not found. Available channels: '%s'" % (channel, "', '".join(channels_by_tag.keys())))
+
+ # If in check mode, exit saying that we succeeded
+ if module.check_mode:
+ module.exit_json(changed=False, msg="OK")
+
+ # Send push notification
+ try:
+ if push_type == "link":
+ target.push_link(title, url, body)
+ else:
+ target.push_note(title, body)
+ module.exit_json(changed=False, msg="OK")
+ except PushError as e:
+ module.fail_json(msg="An error occurred, Pushbullet's response: %s" % str(e))
+
+ module.fail_json(msg="An unknown error has occurred")
+
+# import module snippets
+from ansible.module_utils.basic import *
+main()
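For context, a minimal standalone sketch of the lookup-and-push flow this module wraps, assuming the pushbullet.py API used above (PushBullet, .devices, .push_note); the key and device name are placeholders:

    from pushbullet import PushBullet

    pb = PushBullet("ABC123abc123ABC123abc123ABC123ab")  # placeholder API key
    # Index devices by nickname, as main() does, then push a note.
    devices_by_nickname = dict((d.nickname, d) for d in pb.devices)
    target = devices_by_nickname.get("Chrome")
    if target is not None:
        target.push_note("Deploy finished", "All hosts updated")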
diff --git a/lib/ansible/modules/extras/notification/pushover.py b/lib/ansible/modules/extras/notification/pushover.py
new file mode 100644
index 0000000000..2cd973b1bc
--- /dev/null
+++ b/lib/ansible/modules/extras/notification/pushover.py
@@ -0,0 +1,116 @@
+#!/usr/bin/python
+# -*- coding: utf-8 -*-
+# Copyright (c) 2012, Jim Richardson <weaselkeeper@gmail.com>
+# All rights reserved.
+#
+# This file is part of Ansible
+#
+# Ansible is free software: you can redistribute it and/or modify
+# it under the terms of the GNU General Public License as published by
+# the Free Software Foundation, either version 3 of the License, or
+# (at your option) any later version.
+#
+# Ansible is distributed in the hope that it will be useful,
+# but WITHOUT ANY WARRANTY; without even the implied warranty of
+# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+# GNU General Public License for more details.
+#
+# You should have received a copy of the GNU General Public License
+# along with Ansible. If not, see <http://www.gnu.org/licenses/>.
+
+###
+
+DOCUMENTATION = '''
+---
+module: pushover
+version_added: "2.0"
+short_description: Send notifications via U(https://pushover.net)
+description:
+ - Send notifications via pushover, to subscriber list of devices, and email
+ addresses. Requires pushover app on devices.
+notes:
+ - You will require a pushover.net account to use this module. No account
+ is required to receive messages, however.
+options:
+ msg:
+ description:
+ - What message you wish to send.
+ required: true
+ app_token:
+ description:
+ - Pushover issued token identifying your pushover app.
+ required: true
+ user_key:
+ description:
+ - Pushover issued authentication key for your user.
+ required: true
+ pri:
+ description:
+ - Message priority (see U(https://pushover.net) for details).
+ required: false
+
+author: "Jim Richardson (@weaselkeeper)"
+'''
+
+EXAMPLES = '''
+- local_action: pushover msg="{{inventory_hostname}} has exploded in flames,
+ It is now time to panic" app_token=wxfdksl user_key=baa5fe97f2c5ab3ca8f0bb59
+'''
+
+import urllib
+
+
+class Pushover(object):
+ ''' Instantiates a pushover object, use it to send notifications '''
+ base_uri = 'https://api.pushover.net'
+ port = 443
+
+ def __init__(self, module, user, token):
+ self.module = module
+ self.user = user
+ self.token = token
+
+ def run(self, priority, msg):
+ ''' POST the message to the Pushover API. '''
+
+ url = '%s:%s/1/messages.json' % (self.base_uri, self.port)
+
+ # parse config
+ options = dict(user=self.user,
+ token=self.token,
+ priority=priority,
+ message=msg)
+ data = urllib.urlencode(options)
+
+ headers = { "Content-type": "application/x-www-form-urlencoded"}
+ r, info = fetch_url(self.module, url, method='POST', data=data, headers=headers)
+ if info['status'] != 200:
+ raise Exception(info)
+
+ return r.read()
+
+
+def main():
+
+ module = AnsibleModule(
+ argument_spec=dict(
+ msg=dict(required=True),
+ app_token=dict(required=True, no_log=True),
+ user_key=dict(required=True, no_log=True),
+ pri=dict(required=False, default='0', choices=['-2','-1','0','1','2']),
+ ),
+ )
+
+ msg_object = Pushover(module, module.params['user_key'], module.params['app_token'])
+ try:
+ response = msg_object.run(module.params['pri'], module.params['msg'])
+ except Exception:
+ module.fail_json(msg='Unable to send msg via pushover')
+
+ module.exit_json(msg='message sent successfully: %s' % response, changed=False)
+
+# import module snippets
+from ansible.module_utils.basic import *
+from ansible.module_utils.urls import *
+if __name__ == '__main__':
+ main()
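A standalone sketch of the POST that Pushover.run() issues through fetch_url, using only the Python 2 standard library; the token and user key are placeholders:

    import urllib
    import urllib2

    data = urllib.urlencode(dict(token='wxfdksl',                  # app_token
                                 user='baa5fe97f2c5ab3ca8f0bb59',  # user_key
                                 priority='0', message='hello'))
    req = urllib2.Request('https://api.pushover.net:443/1/messages.json', data,
                          {'Content-type': 'application/x-www-form-urlencoded'})
    print(urllib2.urlopen(req).read())  # JSON body on HTTP 200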
diff --git a/lib/ansible/modules/extras/notification/rocketchat.py b/lib/ansible/modules/extras/notification/rocketchat.py
new file mode 100644
index 0000000000..ffce79712b
--- /dev/null
+++ b/lib/ansible/modules/extras/notification/rocketchat.py
@@ -0,0 +1,251 @@
+#!/usr/bin/python
+# -*- coding: utf-8 -*-
+
+# (c) 2016, Deepak Kothandan <deepak.kothandan@outlook.com>
+# (c) 2015, Stefan Berggren <nsg@nsg.cc>
+# (c) 2014, Ramon de la Fuente <ramon@delafuente.nl>
+#
+# This file is part of Ansible
+#
+# Ansible is free software: you can redistribute it and/or modify
+# it under the terms of the GNU General Public License as published by
+# the Free Software Foundation, either version 3 of the License, or
+# (at your option) any later version.
+#
+# Ansible is distributed in the hope that it will be useful,
+# but WITHOUT ANY WARRANTY; without even the implied warranty of
+# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+# GNU General Public License for more details.
+#
+# You should have received a copy of the GNU General Public License
+# along with Ansible. If not, see <http://www.gnu.org/licenses/>.
+
+DOCUMENTATION = """
+module: rocketchat
+short_description: Send notifications to Rocket Chat
+description:
+ - The M(rocketchat) module sends notifications to Rocket Chat via the Incoming WebHook integration
+version_added: "2.2"
+author: "Ramon de la Fuente (@ramondelafuente)"
+options:
+ domain:
+ description:
+ - The domain for your environment without protocol (e.g.
+ C(subdomain.domain.com) or C(chat.domain.tld))
+ required: true
+ token:
+ description:
+ - Rocket Chat Incoming Webhook integration token. This provides
+ authentication to Rocket Chat's Incoming webhook for posting
+ messages.
+ required: true
+ protocol:
+ description:
+ - Specify the protocol used for the webhook URL, either C(http) or C(https).
+ required: false
+ default: https
+ choices:
+ - 'http'
+ - 'https'
+ msg:
+ description:
+ - Message to be sent.
+ required: false
+ default: None
+ channel:
+ description:
+ - Channel to send the message to. If absent, the message goes to the channel selected for the I(token)
+ specified during the creation of the webhook.
+ required: false
+ default: None
+ username:
+ description:
+ - This is the sender of the message.
+ required: false
+ default: "Ansible"
+ icon_url:
+ description:
+ - URL for the message sender's icon.
+ required: false
+ default: "https://www.ansible.com/favicon.ico"
+ icon_emoji:
+ description:
+ - Emoji for the message sender. The representations of the available emoji can be
+ obtained from Rocket Chat (for example C(:thumbsup:)). (If I(icon_emoji) is set, I(icon_url) will not be used.)
+ required: false
+ default: None
+ link_names:
+ description:
+ - Automatically create links for channels and usernames in I(msg).
+ required: false
+ default: 1
+ choices:
+ - 1
+ - 0
+ validate_certs:
+ description:
+ - If C(no), SSL certificates will not be validated. This should only be used
+ on personally controlled sites using self-signed certificates.
+ required: false
+ default: 'yes'
+ choices:
+ - 'yes'
+ - 'no'
+ color:
+ description:
+ - Allow text to use default colors. Use the default of C(normal) to avoid sending a custom color bar at the start of the message.
+ required: false
+ default: 'normal'
+ choices:
+ - 'normal'
+ - 'good'
+ - 'warning'
+ - 'danger'
+ attachments:
+ description:
+ - Define a list of attachments.
+ required: false
+ default: None
+"""
+
+EXAMPLES = """
+- name: Send notification message via Rocket Chat
+ local_action:
+ module: rocketchat
+ token: thetoken/generatedby/rocketchat
+ domain: chat.example.com
+ msg: "{{ inventory_hostname }} completed"
+
+- name: Send notification message via Rocket Chat all options
+ local_action:
+ module: rocketchat
+ domain: chat.example.com
+ token: thetoken/generatedby/rocketchat
+ msg: "{{ inventory_hostname }} completed"
+ channel: "#ansible"
+ username: "Ansible on {{ inventory_hostname }}"
+ icon_url: "http://www.example.com/some-image-file.png"
+ link_names: 0
+
+- name: insert a color bar in front of the message for visibility purposes and use the default webhook icon and name configured in rocketchat
+ rocketchat:
+ token: thetoken/generatedby/rocketchat
+ domain: chat.example.com
+ msg: "{{ inventory_hostname }} is alive!"
+ color: good
+ username: ""
+ icon_url: ""
+
+- name: Use the attachments API
+ rocketchat:
+ token: thetoken/generatedby/rocketchat
+ domain: chat.example.com
+ attachments:
+ - text: "Display my system load on host A and B"
+ color: "#ff00dd"
+ title: "System load"
+ fields:
+ - title: "System A"
+ value: "load average: 0,74, 0,66, 0,63"
+ short: "true"
+ - title: "System B"
+ value: "load average: 5,16, 4,64, 2,43"
+ short: "true"
+
+"""
+
+RETURN = """
+changed:
+ description: A flag indicating if any change was made or not.
+ returned: success
+ type: boolean
+ sample: false
+"""
+
+ROCKETCHAT_INCOMING_WEBHOOK = '%s://%s/hooks/%s'
+
+def build_payload_for_rocketchat(module, text, channel, username, icon_url, icon_emoji, link_names, color, attachments):
+ payload = {}
+ if color == "normal" and text is not None:
+ payload = dict(text=text)
+ elif text is not None:
+ payload = dict(attachments=[dict(text=text, color=color)])
+ if channel is not None:
+ if (channel[0] == '#') or (channel[0] == '@'):
+ payload['channel'] = channel
+ else:
+ payload['channel'] = '#' + channel
+ if username is not None:
+ payload['username'] = username
+ if icon_emoji is not None:
+ payload['icon_emoji'] = icon_emoji
+ else:
+ payload['icon_url'] = icon_url
+ if link_names is not None:
+ payload['link_names'] = link_names
+
+ if attachments is not None:
+ if 'attachments' not in payload:
+ payload['attachments'] = []
+
+ for attachment in attachments:
+ if 'fallback' not in attachment:
+ attachment['fallback'] = attachment['text']
+ payload['attachments'].append(attachment)
+
+ payload = "payload=" + module.jsonify(payload)
+ return payload
+
+def do_notify_rocketchat(module, domain, token, protocol, payload):
+
+ if token.count('/') < 1:
+ module.fail_json(msg="Invalid Token specified, provide a valid token")
+
+ rocketchat_incoming_webhook = ROCKETCHAT_INCOMING_WEBHOOK % (protocol, domain, token)
+
+ response, info = fetch_url(module, rocketchat_incoming_webhook, data=payload)
+ if info['status'] != 200:
+ module.fail_json(msg="failed to send message, return status=%s" % str(info['status']))
+
+def main():
+ module = AnsibleModule(
+ argument_spec = dict(
+ domain = dict(type='str', required=True),
+ token = dict(type='str', required=True, no_log=True),
+ protocol = dict(type='str', default='https', choices=['http', 'https']),
+ msg = dict(type='str', required=False, default=None),
+ channel = dict(type='str', default=None),
+ username = dict(type='str', default='Ansible'),
+ icon_url = dict(type='str', default='https://www.ansible.com/favicon.ico'),
+ icon_emoji = dict(type='str', default=None),
+ link_names = dict(type='int', default=1, choices=[0,1]),
+ validate_certs = dict(default='yes', type='bool'),
+ color = dict(type='str', default='normal', choices=['normal', 'good', 'warning', 'danger']),
+ attachments = dict(type='list', required=False, default=None)
+ )
+ )
+
+ domain = module.params['domain']
+ token = module.params['token']
+ protocol = module.params['protocol']
+ text = module.params['msg']
+ channel = module.params['channel']
+ username = module.params['username']
+ icon_url = module.params['icon_url']
+ icon_emoji = module.params['icon_emoji']
+ link_names = module.params['link_names']
+ color = module.params['color']
+ attachments = module.params['attachments']
+
+ payload = build_payload_for_rocketchat(module, text, channel, username, icon_url, icon_emoji, link_names, color, attachments)
+ do_notify_rocketchat(module, domain, token, protocol, payload)
+
+ module.exit_json(msg="OK")
+
+# import module snippets
+from ansible.module_utils.basic import *
+from ansible.module_utils.urls import *
+
+if __name__ == '__main__':
+ main()
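For reference, a sketch of the wire format build_payload_for_rocketchat() produces for a plain message; the domain and token in the target URL are placeholders:

    import json

    payload = dict(text='deploy finished', channel='#ansible',
                   username='Ansible', link_names=1,
                   icon_url='https://www.ansible.com/favicon.ico')
    body = 'payload=' + json.dumps(payload)
    # POSTed by do_notify_rocketchat() to
    # https://chat.example.com/hooks/thetoken/generatedby/rocketchat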
diff --git a/lib/ansible/modules/extras/notification/sendgrid.py b/lib/ansible/modules/extras/notification/sendgrid.py
new file mode 100644
index 0000000000..ac2db6b1ce
--- /dev/null
+++ b/lib/ansible/modules/extras/notification/sendgrid.py
@@ -0,0 +1,271 @@
+#!/usr/bin/python
+# -*- coding: utf-8 -*-
+
+# (c) 2015, Matt Makai <matthew.makai@gmail.com>
+#
+# This file is part of Ansible
+#
+# Ansible is free software: you can redistribute it and/or modify
+# it under the terms of the GNU General Public License as published by
+# the Free Software Foundation, either version 3 of the License, or
+# (at your option) any later version.
+#
+# Ansible is distributed in the hope that it will be useful,
+# but WITHOUT ANY WARRANTY; without even the implied warranty of
+# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+# GNU General Public License for more details.
+#
+# You should have received a copy of the GNU General Public License
+# along with Ansible. If not, see <http://www.gnu.org/licenses/>.
+
+DOCUMENTATION = '''
+---
+version_added: "2.0"
+module: sendgrid
+short_description: Sends an email with the SendGrid API
+description:
+ - "Sends an email with a SendGrid account through their API, not through
+ the SMTP service."
+notes:
+ - "This module is non-idempotent because it sends an email through the
+ external API. It is idempotent only in the case that the module fails."
+ - "Like the other notification modules, this one requires an external
+ dependency to work. In this case, you'll need an active SendGrid
+ account."
+ - "In order to use api_key, cc, bcc, attachments, from_name, html_body, headers
+ you must pip install sendgrid"
+ - "since 2.2 username and password are not required if you supply an api_key"
+requirements:
+ - sendgrid python library
+options:
+ username:
+ description:
+ - username for logging into the SendGrid account.
+ - Since 2.2 it is only required if api_key is not supplied.
+ required: false
+ default: null
+ password:
+ description:
+ - password that corresponds to the username
+ - Since 2.2 it is only required if api_key is not supplied.
+ required: false
+ default: null
+ from_address:
+ description:
+ - the address in the "from" field for the email
+ required: true
+ to_addresses:
+ description:
+ - a list with one or more recipient email addresses
+ required: true
+ subject:
+ description:
+ - the desired subject for the email
+ required: true
+ body:
+ description:
+ - the body of the email being sent
+ required: true
+ api_key:
+ description:
+ - sendgrid API key to use instead of username/password
+ version_added: 2.2
+ required: false
+ default: null
+ cc:
+ description:
+ - a list of email addresses to cc
+ version_added: 2.2
+ required: false
+ default: null
+ bcc:
+ description:
+ - a list of email addresses to bcc
+ version_added: 2.2
+ required: false
+ default: null
+ attachments:
+ description:
+ - a list of relative or explicit paths of files you want to attach (7MB limit as per SendGrid docs)
+ version_added: 2.2
+ required: false
+ default: null
+ from_name:
+ description:
+ - the name you want to appear in the from field, e.g. C(John Doe)
+ version_added: 2.2
+ required: false
+ default: null
+ html_body:
+ description:
+ - whether the body is html content that should be rendered
+ version_added: 2.2
+ required: false
+ default: false
+ headers:
+ description:
+ - a dict to pass on as headers
+ version_added: 2.2
+ required: false
+ default: null
+author: "Matt Makai (@makaimc)"
+'''
+
+EXAMPLES = '''
+# send an email to a single recipient that the deployment was successful
+- sendgrid:
+ username: "{{ sendgrid_username }}"
+ password: "{{ sendgrid_password }}"
+ from_address: "ansible@mycompany.com"
+ to_addresses:
+ - "ops@mycompany.com"
+ subject: "Deployment success."
+ body: "The most recent Ansible deployment was successful."
+ delegate_to: localhost
+
+# send an email to more than one recipient that the build failed
+- sendgrid:
+ username: "{{ sendgrid_username }}"
+ password: "{{ sendgrid_password }}"
+ from_address: "build@mycompany.com"
+ to_addresses:
+ - "ops@mycompany.com"
+ - "devteam@mycompany.com"
+ subject: "Build failure!."
+ body: "Unable to pull source repository from Git server."
+ delegate_to: localhost
+'''
+
+# =======================================
+# sendgrid module support methods
+#
+import urllib
+
+try:
+ import sendgrid
+ HAS_SENDGRID = True
+except ImportError:
+ HAS_SENDGRID = False
+
+def post_sendgrid_api(module, username, password, from_address, to_addresses,
+ subject, body, api_key=None, cc=None, bcc=None, attachments=None,
+ html_body=False, from_name=None, headers=None):
+
+ if not HAS_SENDGRID:
+ SENDGRID_URI = "https://api.sendgrid.com/api/mail.send.json"
+ AGENT = "Ansible"
+ data = {'api_user': username, 'api_key':password,
+ 'from':from_address, 'subject': subject, 'text': body}
+ encoded_data = urllib.urlencode(data)
+ to_addresses_api = ''
+ for recipient in to_addresses:
+ if isinstance(recipient, unicode):
+ recipient = recipient.encode('utf-8')
+ to_addresses_api += '&to[]=%s' % recipient
+ encoded_data += to_addresses_api
+
+ headers = { 'User-Agent': AGENT,
+ 'Content-type': 'application/x-www-form-urlencoded',
+ 'Accept': 'application/json'}
+ return fetch_url(module, SENDGRID_URI, data=encoded_data, headers=headers, method='POST')
+ else:
+
+ if api_key:
+ sg = sendgrid.SendGridClient(api_key)
+ else:
+ sg = sendgrid.SendGridClient(username, password)
+
+ message = sendgrid.Mail()
+ message.set_subject(subject)
+
+ for recip in to_addresses:
+ message.add_to(recip)
+
+ if cc:
+ for recip in cc:
+ message.add_cc(recip)
+ if bcc:
+ for recip in bcc:
+ message.add_bcc(recip)
+
+ if headers:
+ message.set_headers(headers)
+
+ if attachments:
+ for f in attachments:
+ name = os.path.basename(f)
+ message.add_attachment(name, f)
+
+ if from_name:
+ message.set_from('%s <%s>' % (from_name, from_address))
+ else:
+ message.set_from(from_address)
+
+ if html_body:
+ message.set_html(body)
+ else:
+ message.set_text(body)
+
+ return sg.send(message)
+# =======================================
+# Main
+#
+
+def main():
+ module = AnsibleModule(
+ argument_spec=dict(
+ username=dict(required=False),
+ password=dict(required=False, no_log=True),
+ api_key=dict(required=False, no_log=True),
+ bcc=dict(required=False, type='list'),
+ cc=dict(required=False, type='list'),
+ headers=dict(required=False, type='dict'),
+ from_address=dict(required=True),
+ from_name=dict(required=False),
+ to_addresses=dict(required=True, type='list'),
+ subject=dict(required=True),
+ body=dict(required=True),
+ html_body=dict(required=False, default=False, type='bool'),
+ attachments=dict(required=False, type='list')
+ ),
+ supports_check_mode=True,
+ mutually_exclusive = [
+ ['api_key', 'password'],
+ ['api_key', 'username']
+ ],
+ required_together = [['username', 'password']],
+ )
+
+ username = module.params['username']
+ password = module.params['password']
+ api_key = module.params['api_key']
+ bcc = module.params['bcc']
+ cc = module.params['cc']
+ headers = module.params['headers']
+ from_name = module.params['from_name']
+ from_address = module.params['from_address']
+ to_addresses = module.params['to_addresses']
+ subject = module.params['subject']
+ body = module.params['body']
+ html_body = module.params['html_body']
+ attachments = module.params['attachments']
+
+ sendgrid_lib_args = [api_key, bcc, cc, headers, from_name, html_body, attachments]
+
+ if any(lib_arg is not None for lib_arg in sendgrid_lib_args) and not HAS_SENDGRID:
+ module.fail_json(msg='You must install the sendgrid python library if you want to use any of the following arguments: api_key, bcc, cc, headers, from_name, html_body, attachments')
+
+ response, info = post_sendgrid_api(module, username, password,
+ from_address, to_addresses, subject, body, attachments=attachments,
+ bcc=bcc, cc=cc, headers=headers, html_body=html_body, api_key=api_key)
+
+ if not HAS_SENDGRID:
+ if info['status'] != 200:
+ module.fail_json(msg="unable to send email through SendGrid API: %s" % info['msg'])
+ else:
+ if response != 200:
+ module.fail_json(msg="unable to send email through SendGrid API: %s" % info['message'])
+
+ module.exit_json(msg=subject, changed=False)
+
+# import module snippets
+from ansible.module_utils.basic import *
+from ansible.module_utils.urls import *
+if __name__ == '__main__':
+ main()
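A sketch of the form data the library-free branch of post_sendgrid_api() sends to the legacy mail.send.json endpoint; credentials and addresses are placeholders:

    import urllib

    data = urllib.urlencode({'api_user': 'sendgrid_user', 'api_key': 'secret',
                             'from': 'ansible@mycompany.com',
                             'subject': 'Deployment success.',
                             'text': 'The most recent deployment succeeded.'})
    # Recipients are appended as repeated to[] fields, one per address.
    data += '&to[]=ops@mycompany.com&to[]=devteam@mycompany.com'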
diff --git a/lib/ansible/modules/extras/notification/slack.py b/lib/ansible/modules/extras/notification/slack.py
new file mode 100644
index 0000000000..2ac609d451
--- /dev/null
+++ b/lib/ansible/modules/extras/notification/slack.py
@@ -0,0 +1,259 @@
+#!/usr/bin/python
+# -*- coding: utf-8 -*-
+
+# (c) 2015, Stefan Berggren <nsg@nsg.cc>
+# (c) 2014, Ramon de la Fuente <ramon@delafuente.nl>
+#
+# This file is part of Ansible
+#
+# Ansible is free software: you can redistribute it and/or modify
+# it under the terms of the GNU General Public License as published by
+# the Free Software Foundation, either version 3 of the License, or
+# (at your option) any later version.
+#
+# Ansible is distributed in the hope that it will be useful,
+# but WITHOUT ANY WARRANTY; without even the implied warranty of
+# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+# GNU General Public License for more details.
+#
+# You should have received a copy of the GNU General Public License
+# along with Ansible. If not, see <http://www.gnu.org/licenses/>.
+
+DOCUMENTATION = """
+module: slack
+short_description: Send Slack notifications
+description:
+ - The M(slack) module sends notifications to U(http://slack.com) via the Incoming WebHook integration
+version_added: "1.6"
+author: "Ramon de la Fuente (@ramondelafuente)"
+options:
+ domain:
+ description:
+ - Slack (sub)domain for your environment without protocol. (i.e.
+ C(future500.slack.com)) In 1.8 and beyond, this is deprecated and may
+ be ignored. See token documentation for information.
+ required: false
+ default: None
+ token:
+ description:
+ - Slack integration token. This authenticates you to the slack service.
+ Prior to 1.8, a token looked like C(3Ffe373sfhRE6y42Fg3rvf4GlK). In
+ 1.8 and above, ansible adapts to the new slack API where tokens look
+ like C(G922VJP24/D921DW937/3Ffe373sfhRE6y42Fg3rvf4GlK). If tokens
+ are in the new format then slack will ignore any value of domain. If
+ the token is in the old format the domain is required. Ansible has no
+ control of when slack will get rid of the old API. When slack does
+ that the old format will stop working.
+ required: true
+ msg:
+ description:
+ - Message to send.
+ required: false
+ default: None
+ channel:
+ description:
+ - Channel to send the message to. If absent, the message goes to the channel selected for the I(token).
+ required: false
+ default: None
+ username:
+ description:
+ - This is the sender of the message.
+ required: false
+ default: "Ansible"
+ icon_url:
+ description:
+ - URL for the message sender's icon (default C(https://www.ansible.com/favicon.ico))
+ required: false
+ icon_emoji:
+ description:
+ - Emoji for the message sender. See Slack documentation for options.
+ (if I(icon_emoji) is set, I(icon_url) will not be used)
+ required: false
+ default: None
+ link_names:
+ description:
+ - Automatically create links for channels and usernames in I(msg).
+ required: false
+ default: 1
+ choices:
+ - 1
+ - 0
+ parse:
+ description:
+ - Setting for the message parser at Slack
+ required: false
+ default: None
+ choices:
+ - 'full'
+ - 'none'
+ validate_certs:
+ description:
+ - If C(no), SSL certificates will not be validated. This should only be used
+ on personally controlled sites using self-signed certificates.
+ required: false
+ default: 'yes'
+ choices:
+ - 'yes'
+ - 'no'
+ color:
+ version_added: "2.0"
+ description:
+ - Allow text to use default colors. Use the default of C(normal) to avoid sending a custom color bar at the start of the message.
+ required: false
+ default: 'normal'
+ choices:
+ - 'normal'
+ - 'good'
+ - 'warning'
+ - 'danger'
+ attachments:
+ description:
+ - Define a list of attachments. This list mirrors the Slack JSON API. For more information, see U(https://api.slack.com/docs/attachments)
+ required: false
+ default: None
+"""
+
+EXAMPLES = """
+- name: Send notification message via Slack
+ local_action:
+ module: slack
+ token: thetoken/generatedby/slack
+ msg: "{{ inventory_hostname }} completed"
+
+- name: Send notification message via Slack all options
+ local_action:
+ module: slack
+ token: thetoken/generatedby/slack
+ msg: "{{ inventory_hostname }} completed"
+ channel: "#ansible"
+ username: "Ansible on {{ inventory_hostname }}"
+ icon_url: "http://www.example.com/some-image-file.png"
+ link_names: 0
+ parse: 'none'
+
+- name: insert a color bar in front of the message for visibility purposes and use the default webhook icon and name configured in Slack
+ slack:
+ token: thetoken/generatedby/slack
+ msg: "{{ inventory_hostname }} is alive!"
+ color: good
+ username: ""
+ icon_url: ""
+
+- name: Use the attachments API
+ slack:
+ token: thetoken/generatedby/slack
+ attachments:
+ - text: "Display my system load on host A and B"
+ color: "#ff00dd"
+ title: "System load"
+ fields:
+ - title: "System A"
+ value: "load average: 0,74, 0,66, 0,63"
+ short: "true"
+ - title: "System B"
+ value: "load average: 5,16, 4,64, 2,43"
+ short: "true"
+
+- name: Send notification message via Slack (deprecated API using domain)
+ local_action:
+ module: slack
+ domain: future500.slack.com
+ token: thetokengeneratedbyslack
+ msg: "{{ inventory_hostname }} completed"
+
+"""
+
+OLD_SLACK_INCOMING_WEBHOOK = 'https://%s/services/hooks/incoming-webhook?token=%s'
+SLACK_INCOMING_WEBHOOK = 'https://hooks.slack.com/services/%s'
+
+def build_payload_for_slack(module, text, channel, username, icon_url, icon_emoji, link_names, parse, color, attachments):
+ payload = {}
+ if color == "normal" and text is not None:
+ payload = dict(text=text)
+ elif text is not None:
+ # With a custom color we have to set the message as an attachment, and explicitly turn markdown parsing on for it.
+ payload = dict(attachments=[dict(text=text, color=color, mrkdwn_in=["text"])])
+ if channel is not None:
+ if (channel[0] == '#') or (channel[0] == '@'):
+ payload['channel'] = channel
+ else:
+ payload['channel'] = '#'+channel
+ if username is not None:
+ payload['username'] = username
+ if icon_emoji is not None:
+ payload['icon_emoji'] = icon_emoji
+ else:
+ payload['icon_url'] = icon_url
+ if link_names is not None:
+ payload['link_names'] = link_names
+ if parse is not None:
+ payload['parse'] = parse
+
+ if attachments is not None:
+ if 'attachments' not in payload:
+ payload['attachments'] = []
+
+ for attachment in attachments:
+ if 'fallback' not in attachment:
+ attachment['fallback'] = attachment['text']
+ payload['attachments'].append(attachment)
+
+ payload = "payload=" + module.jsonify(payload)
+ return payload
+
+def do_notify_slack(module, domain, token, payload):
+ if token.count('/') >= 2:
+ # New style token
+ slack_incoming_webhook = SLACK_INCOMING_WEBHOOK % (token)
+ else:
+ if not domain:
+ module.fail_json(msg="Slack has updated its webhook API. You need to specify a token of the form XXXX/YYYY/ZZZZ in your playbook")
+ slack_incoming_webhook = OLD_SLACK_INCOMING_WEBHOOK % (domain, token)
+
+ response, info = fetch_url(module, slack_incoming_webhook, data=payload)
+ if info['status'] != 200:
+ obscured_incoming_webhook = SLACK_INCOMING_WEBHOOK % ('[obscured]')
+ module.fail_json(msg=" failed to send %s to %s: %s" % (payload, obscured_incoming_webhook, info['msg']))
+
+def main():
+ module = AnsibleModule(
+ argument_spec = dict(
+ domain = dict(type='str', required=False, default=None),
+ token = dict(type='str', required=True, no_log=True),
+ msg = dict(type='str', required=False, default=None),
+ channel = dict(type='str', default=None),
+ username = dict(type='str', default='Ansible'),
+ icon_url = dict(type='str', default='https://www.ansible.com/favicon.ico'),
+ icon_emoji = dict(type='str', default=None),
+ link_names = dict(type='int', default=1, choices=[0,1]),
+ parse = dict(type='str', default=None, choices=['none', 'full']),
+ validate_certs = dict(default='yes', type='bool'),
+ color = dict(type='str', default='normal', choices=['normal', 'good', 'warning', 'danger']),
+ attachments = dict(type='list', required=False, default=None)
+ )
+ )
+
+ domain = module.params['domain']
+ token = module.params['token']
+ text = module.params['msg']
+ channel = module.params['channel']
+ username = module.params['username']
+ icon_url = module.params['icon_url']
+ icon_emoji = module.params['icon_emoji']
+ link_names = module.params['link_names']
+ parse = module.params['parse']
+ color = module.params['color']
+ attachments = module.params['attachments']
+
+ payload = build_payload_for_slack(module, text, channel, username, icon_url, icon_emoji, link_names, parse, color, attachments)
+ do_notify_slack(module, domain, token, payload)
+
+ module.exit_json(msg="OK")
+
+# import module snippets
+from ansible.module_utils.basic import *
+from ansible.module_utils.urls import *
+
+if __name__ == '__main__':
+ main()
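The token-format decision in do_notify_slack() is worth isolating; a standalone sketch with placeholder tokens:

    def webhook_url(domain, token):
        if token.count('/') >= 2:   # new-style XXXX/YYYY/ZZZZ token
            return 'https://hooks.slack.com/services/%s' % token
        if not domain:              # old-style tokens need the (sub)domain
            raise ValueError('domain is required with an old-style token')
        return ('https://%s/services/hooks/incoming-webhook?token=%s'
                % (domain, token))

    print(webhook_url(None, 'G922VJP24/D921DW937/3Ffe373sfhRE6y42Fg3rvf4GlK'))
    print(webhook_url('future500.slack.com', 'thetokengeneratedbyslack'))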
diff --git a/lib/ansible/modules/extras/notification/sns.py b/lib/ansible/modules/extras/notification/sns.py
new file mode 100644
index 0000000000..4eb79e13ad
--- /dev/null
+++ b/lib/ansible/modules/extras/notification/sns.py
@@ -0,0 +1,201 @@
+#!/usr/bin/python
+# -*- coding: utf-8 -*-
+
+# (c) 2014, Michael J. Schultz <mjschultz@gmail.com>
+#
+# This file is part of Ansible
+#
+# Ansible is free software: you can redistribute it and/or modify
+# it under the terms of the GNU General Public License as published by
+# the Free Software Foundation, either version 3 of the License, or
+# (at your option) any later version.
+#
+# Ansible is distributed in the hope that it will be useful,
+# but WITHOUT ANY WARRANTY; without even the implied warranty of
+# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+# GNU General Public License for more details.
+#
+# You should have received a copy of the GNU General Public License
+# along with Ansible. If not, see <http://www.gnu.org/licenses/>.
+
+DOCUMENTATION = """
+module: sns
+short_description: Send Amazon Simple Notification Service (SNS) messages
+description:
+ - The M(sns) module sends notifications to a topic on your Amazon SNS account
+version_added: 1.6
+author: "Michael J. Schultz (@mjschultz)"
+options:
+ msg:
+ description:
+ - Default message to send.
+ required: true
+ aliases: [ "default" ]
+ subject:
+ description:
+ - Subject line for email delivery.
+ required: false
+ topic:
+ description:
+ - The topic you want to publish to.
+ required: true
+ email:
+ description:
+ - Message to send to email-only subscription
+ required: false
+ sqs:
+ description:
+ - Message to send to SQS-only subscription
+ required: false
+ sms:
+ description:
+ - Message to send to SMS-only subscription
+ required: false
+ http:
+ description:
+ - Message to send to HTTP-only subscription
+ required: false
+ https:
+ description:
+ - Message to send to HTTPS-only subscription
+ required: false
+ aws_secret_key:
+ description:
+ - AWS secret key. If not set then the value of the AWS_SECRET_KEY environment variable is used.
+ required: false
+ default: None
+ aliases: ['ec2_secret_key', 'secret_key']
+ aws_access_key:
+ description:
+ - AWS access key. If not set then the value of the AWS_ACCESS_KEY environment variable is used.
+ required: false
+ default: None
+ aliases: ['ec2_access_key', 'access_key']
+ region:
+ description:
+ - The AWS region to use. If not specified then the value of the EC2_REGION environment variable, if any, is used.
+ required: false
+ aliases: ['aws_region', 'ec2_region']
+
+requirements:
+ - "boto"
+"""
+
+EXAMPLES = """
+- name: Send default notification message via SNS
+ local_action:
+ module: sns
+ msg: "{{ inventory_hostname }} has completed the play."
+ subject: "Deploy complete!"
+ topic: "deploy"
+
+- name: Send notification messages via SNS with short message for SMS
+ local_action:
+ module: sns
+ msg: "{{ inventory_hostname }} has completed the play."
+ sms: "deployed!"
+ subject: "Deploy complete!"
+ topic: "deploy"
+"""
+
+try:
+ import json
+except ImportError:
+ import simplejson as json
+
+from ansible.module_utils.basic import AnsibleModule
+from ansible.module_utils.ec2 import ec2_argument_spec, connect_to_aws, get_aws_connection_info
+from ansible.module_utils.pycompat24 import get_exception
+
+try:
+ import boto
+ import boto.ec2
+ import boto.sns
+ HAS_BOTO = True
+except ImportError:
+ HAS_BOTO = False
+
+
+def arn_topic_lookup(connection, short_topic):
+ response = connection.get_all_topics()
+ result = response[u'ListTopicsResponse'][u'ListTopicsResult']
+ # topic names cannot have colons, so this captures the full topic name
+ lookup_topic = ':{}'.format(short_topic)
+ for topic in result[u'Topics']:
+ if topic[u'TopicArn'].endswith(lookup_topic):
+ return topic[u'TopicArn']
+ return None
+
+
+def main():
+ argument_spec = ec2_argument_spec()
+ argument_spec.update(
+ dict(
+ msg=dict(type='str', required=True, aliases=['default']),
+ subject=dict(type='str', default=None),
+ topic=dict(type='str', required=True),
+ email=dict(type='str', default=None),
+ sqs=dict(type='str', default=None),
+ sms=dict(type='str', default=None),
+ http=dict(type='str', default=None),
+ https=dict(type='str', default=None),
+ )
+ )
+
+ module = AnsibleModule(argument_spec=argument_spec)
+
+ if not HAS_BOTO:
+ module.fail_json(msg='boto required for this module')
+
+ msg = module.params['msg']
+ subject = module.params['subject']
+ topic = module.params['topic']
+ email = module.params['email']
+ sqs = module.params['sqs']
+ sms = module.params['sms']
+ http = module.params['http']
+ https = module.params['https']
+
+ region, ec2_url, aws_connect_params = get_aws_connection_info(module)
+ if not region:
+ module.fail_json(msg="region must be specified")
+ try:
+ connection = connect_to_aws(boto.sns, region, **aws_connect_params)
+ except boto.exception.NoAuthHandlerFound:
+ e = get_exception()
+ module.fail_json(msg=str(e))
+
+ # .publish() takes a full ARN topic id, but short names are easier to type,
+ # so do a lookup (topic names cannot contain ':', so that's the decider)
+ if ':' in topic:
+ arn_topic = topic
+ else:
+ arn_topic = arn_topic_lookup(connection, topic)
+
+ if not arn_topic:
+ module.fail_json(msg='Could not find topic: {}'.format(topic))
+
+ dict_msg = {'default': msg}
+ if email:
+ dict_msg.update(email=email)
+ if sqs:
+ dict_msg.update(sqs=sqs)
+ if sms:
+ dict_msg.update(sms=sms)
+ if http:
+ dict_msg.update(http=http)
+ if https:
+ dict_msg.update(https=https)
+
+ json_msg = json.dumps(dict_msg)
+ try:
+ connection.publish(topic=arn_topic, subject=subject,
+ message_structure='json', message=json_msg)
+ except boto.exception.BotoServerError:
+ e = get_exception()
+ module.fail_json(msg=str(e))
+
+ module.exit_json(msg="OK")
+
+if __name__ == '__main__':
+ main()
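A sketch of the per-protocol JSON document main() publishes with message_structure='json'; subscribers on each overridden transport receive their variant, everyone else gets 'default':

    import json

    dict_msg = {'default': 'host01 has completed the play.',
                'sms': 'deployed!'}  # shorter text for SMS subscribers
    json_msg = json.dumps(dict_msg)
    # connection.publish(topic=arn_topic, subject='Deploy complete!',
    #                    message_structure='json', message=json_msg)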
diff --git a/lib/ansible/modules/extras/notification/telegram.py b/lib/ansible/modules/extras/notification/telegram.py
new file mode 100644
index 0000000000..254a1bf12f
--- /dev/null
+++ b/lib/ansible/modules/extras/notification/telegram.py
@@ -0,0 +1,103 @@
+#!/usr/bin/python
+# -*- coding: utf-8 -*-
+
+# (c) 2016, Artem Feofanov <artem.feofanov@gmail.com>
+#
+# This file is part of Ansible
+#
+# Ansible is free software: you can redistribute it and/or modify
+# it under the terms of the GNU General Public License as published by
+# the Free Software Foundation, either version 3 of the License, or
+# (at your option) any later version.
+#
+# Ansible is distributed in the hope that it will be useful,
+# but WITHOUT ANY WARRANTY; without even the implied warranty of
+# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+# GNU General Public License for more details.
+#
+# You should have received a copy of the GNU General Public License
+# along with Ansible. If not, see <http://www.gnu.org/licenses/>.
+#
+
+
+DOCUMENTATION = """
+
+module: telegram
+version_added: "2.2"
+author: "Artem Feofanov (@tyouxa)"
+
+short_description: Send notifications via Telegram
+
+description:
+ - Send notifications via a Telegram bot, to a verified group or user.
+notes:
+ - You will require a Telegram account and need to create a Telegram bot to use this module.
+options:
+ msg:
+ description:
+ - What message you wish to send.
+ required: true
+ token:
+ description:
+ - Token identifying your Telegram bot, including the leading C(bot) prefix (as in the example below).
+ required: true
+ chat_id:
+ description:
+ - Telegram group or user chat_id
+ required: true
+
+"""
+
+EXAMPLES = """
+
+# Send a message to a chat from a playbook
+- telegram: token=bot9999999:XXXXXXXXXXXXXXXXXXXXXXX
+ chat_id=000000
+ msg="Ansible task finished"
+
+"""
+
+RETURN = """
+
+msg:
+ description: The message you attempted to send
+ returned: success
+ type: string
+ sample: "Ansible task finished"
+
+
+"""
+
+import urllib
+
+def main():
+
+ module = AnsibleModule(
+ argument_spec = dict(
+ token = dict(type='str',required=True,no_log=True),
+ chat_id = dict(type='str',required=True,no_log=True),
+ msg = dict(type='str',required=True)),
+ supports_check_mode=True
+ )
+
+ token = urllib.quote(module.params.get('token'))
+ chat_id = urllib.quote(module.params.get('chat_id'))
+ msg = urllib.quote(module.params.get('msg'))
+
+ url = 'https://api.telegram.org/' + token + '/sendMessage?text=' + msg + '&chat_id=' + chat_id
+
+ if module.check_mode:
+ module.exit_json(changed=False)
+
+ response, info = fetch_url(module, url)
+ if info['status'] == 200:
+ module.exit_json(changed=True)
+ else:
+ module.fail_json(msg="failed to send message, return status=%s" % str(info['status']))
+
+
+# import module snippets
+from ansible.module_utils.basic import *
+from ansible.module_utils.urls import *
+if __name__ == '__main__':
+ main()
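The request main() builds, reconstructed standalone; note the token already carries its leading 'bot' prefix, as in the example above (all values are placeholders):

    import urllib

    token = urllib.quote('bot9999999:XXXXXXXXXXXXXXXXXXXXXXX')
    chat_id = urllib.quote('000000')
    msg = urllib.quote('Ansible task finished')
    url = ('https://api.telegram.org/' + token + '/sendMessage?text=' + msg
           + '&chat_id=' + chat_id)
    # fetch_url() then issues a GET against this URL; HTTP 200 means sent.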
diff --git a/lib/ansible/modules/extras/notification/twilio.py b/lib/ansible/modules/extras/notification/twilio.py
new file mode 100644
index 0000000000..2c7275a3e9
--- /dev/null
+++ b/lib/ansible/modules/extras/notification/twilio.py
@@ -0,0 +1,177 @@
+#!/usr/bin/python
+# -*- coding: utf-8 -*-
+
+# (c) 2015, Matt Makai <matthew.makai@gmail.com>
+#
+# This file is part of Ansible
+#
+# Ansible is free software: you can redistribute it and/or modify
+# it under the terms of the GNU General Public License as published by
+# the Free Software Foundation, either version 3 of the License, or
+# (at your option) any later version.
+#
+# Ansible is distributed in the hope that it will be useful,
+# but WITHOUT ANY WARRANTY; without even the implied warranty of
+# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+# GNU General Public License for more details.
+#
+# You should have received a copy of the GNU General Public License
+# along with Ansible. If not, see <http://www.gnu.org/licenses/>.
+
+DOCUMENTATION = '''
+---
+version_added: "1.6"
+module: twilio
+short_description: Sends a text message to a mobile phone through Twilio.
+description:
+ - Sends a text message to a phone number through the Twilio messaging API.
+notes:
+ - This module is non-idempotent because it sends a text message through the
+ external API. It is idempotent only in the case that the module fails.
+ - Like the other notification modules, this one requires an external
+ dependency to work. In this case, you'll need a Twilio account with
+ a purchased or verified phone number to send the text message.
+options:
+ account_sid:
+ description:
+ user's Twilio account SID found on the account page
+ required: true
+ auth_token:
+ description: user's Twilio authentication token
+ required: true
+ msg:
+ description:
+ the body of the text message
+ required: true
+ to_number:
+ description:
+ one or more phone numbers to send the text message to,
+ format +15551112222
+ required: true
+ from_number:
+ description:
+ the Twilio number to send the text message from, format +15551112222
+ required: true
+ media_url:
+ description:
+ a URL with a picture, video or sound clip to send with an MMS
+ (multimedia message) instead of a plain SMS
+ required: false
+
+author: "Matt Makai (@makaimc)"
+'''
+
+EXAMPLES = '''
+# send an SMS about the build status to (555) 303 5681
+# note: replace account_sid and auth_token values with your credentials
+# and you have to have the 'from_number' on your Twilio account
+- twilio:
+ msg: "All servers with webserver role are now configured."
+ account_sid: "ACXXXXXXXXXXXXXXXXX"
+ auth_token: "ACXXXXXXXXXXXXXXXXX"
+ from_number: "+15552014545"
+ to_number: "+15553035681"
+ delegate_to: localhost
+
+# send an SMS to multiple phone numbers about the deployment
+# note: replace account_sid and auth_token values with your credentials
+# and you have to have the 'from_number' on your Twilio account
+- twilio:
+ msg: "This server's configuration is now complete."
+ account_sid: "ACXXXXXXXXXXXXXXXXX"
+ auth_token: "ACXXXXXXXXXXXXXXXXX"
+ from_number: "+15553258899"
+ to_number:
+ - "+15551113232"
+ - "+12025551235"
+ - "+19735559010"
+ delegate_to: localhost
+
+# send an MMS to a single recipient with an update on the deployment
+# and an image of the results
+# note: replace account_sid and auth_token values with your credentials
+# and you have to have the 'from_number' on your Twilio account
+- twilio:
+ msg: "Deployment complete!"
+ account_sid: "ACXXXXXXXXXXXXXXXXX"
+ auth_token: "ACXXXXXXXXXXXXXXXXX"
+ from_number: "+15552014545"
+ to_number: "+15553035681"
+ media_url: "https://demo.twilio.com/logo.png"
+ delegate_to: localhost
+'''
+
+# =======================================
+# twilio module support methods
+#
+import urllib
+
+
+def post_twilio_api(module, account_sid, auth_token, msg, from_number,
+ to_number, media_url=None):
+ URI = "https://api.twilio.com/2010-04-01/Accounts/%s/Messages.json" \
+ % (account_sid,)
+ AGENT = "Ansible"
+
+ data = {'From':from_number, 'To':to_number, 'Body':msg}
+ if media_url:
+ data['MediaUrl'] = media_url
+ encoded_data = urllib.urlencode(data)
+
+ headers = {'User-Agent': AGENT,
+ 'Content-type': 'application/x-www-form-urlencoded',
+ 'Accept': 'application/json',
+ }
+
+ # Hack module params to have the Basic auth params that fetch_url expects
+ module.params['url_username'] = account_sid.replace('\n', '')
+ module.params['url_password'] = auth_token.replace('\n', '')
+
+ return fetch_url(module, URI, data=encoded_data, headers=headers)
+
+
+# =======================================
+# Main
+#
+
+def main():
+
+ module = AnsibleModule(
+ argument_spec=dict(
+ account_sid=dict(required=True),
+ auth_token=dict(required=True, no_log=True),
+ msg=dict(required=True),
+ from_number=dict(required=True),
+ to_number=dict(required=True),
+ media_url=dict(default=None, required=False),
+ ),
+ supports_check_mode=True
+ )
+
+ account_sid = module.params['account_sid']
+ auth_token = module.params['auth_token']
+ msg = module.params['msg']
+ from_number = module.params['from_number']
+ to_number = module.params['to_number']
+ media_url = module.params['media_url']
+
+ if not isinstance(to_number, list):
+ to_number = [to_number]
+
+ for number in to_number:
+ r, info = post_twilio_api(module, account_sid, auth_token, msg,
+ from_number, number, media_url)
+ if info['status'] not in [200, 201]:
+ body_message = "unknown error"
+ if 'body' in info:
+ body = json.loads(info['body'])
+ body_message = body['message']
+ module.fail_json(msg="unable to send message to %s: %s" % (number, body_message))
+
+ module.exit_json(msg=msg, changed=False)
+
+# import module snippets
+from ansible.module_utils.basic import *
+from ansible.module_utils.urls import *
+if __name__ == '__main__':
+ main()
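A standalone sketch of the REST call post_twilio_api() makes; fetch_url derives HTTP Basic auth from url_username/url_password, which urllib2 needs spelled out (SID, token and numbers are placeholders):

    import urllib
    import urllib2

    sid, token = 'ACXXXXXXXXXXXXXXXXX', 'XXXXXXXXXXXXXXXXX'
    uri = 'https://api.twilio.com/2010-04-01/Accounts/%s/Messages.json' % sid
    data = urllib.urlencode({'From': '+15552014545', 'To': '+15553035681',
                             'Body': 'Deployment complete!'})
    req = urllib2.Request(uri, data)
    auth = ('%s:%s' % (sid, token)).encode('base64').strip()
    req.add_header('Authorization', 'Basic ' + auth)
    urllib2.urlopen(req)  # 201 Created on success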
diff --git a/lib/ansible/modules/extras/notification/typetalk.py b/lib/ansible/modules/extras/notification/typetalk.py
new file mode 100644
index 0000000000..2f91022936
--- /dev/null
+++ b/lib/ansible/modules/extras/notification/typetalk.py
@@ -0,0 +1,137 @@
+#!/usr/bin/python
+# -*- coding: utf-8 -*-
+#
+# This file is part of Ansible
+#
+# Ansible is free software: you can redistribute it and/or modify
+# it under the terms of the GNU General Public License as published by
+# the Free Software Foundation, either version 3 of the License, or
+# (at your option) any later version.
+#
+# Ansible is distributed in the hope that it will be useful,
+# but WITHOUT ANY WARRANTY; without even the implied warranty of
+# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+# GNU General Public License for more details.
+#
+# You should have received a copy of the GNU General Public License
+# along with Ansible. If not, see <http://www.gnu.org/licenses/>.
+DOCUMENTATION = '''
+---
+module: typetalk
+version_added: "1.6"
+short_description: Send a message to Typetalk
+description:
+ - Send a message to Typetalk using the Typetalk API (U(http://developers.typetalk.in/))
+options:
+ client_id:
+ description:
+ - OAuth2 client ID
+ required: true
+ client_secret:
+ description:
+ - OAuth2 client secret
+ required: true
+ topic:
+ description:
+ - topic id to post message
+ required: true
+ msg:
+ description:
+ - message body
+ required: true
+requirements: [ json ]
+author: "Takashi Someda (@tksmd)"
+'''
+
+EXAMPLES = '''
+- typetalk: client_id=12345 client_secret=12345 topic=1 msg="install completed"
+'''
+
+import urllib
+
+try:
+ import json
+except ImportError:
+ try:
+ import simplejson as json
+ except ImportError:
+ json = None
+
+# import module snippets
+from ansible.module_utils.basic import AnsibleModule
+from ansible.module_utils.pycompat24 import get_exception
+from ansible.module_utils.urls import fetch_url, ConnectionError
+
+
+def do_request(module, url, params, headers=None):
+ data = urllib.urlencode(params)
+ if headers is None:
+ headers = dict()
+ headers = dict(headers, **{
+ 'User-Agent': 'Ansible/typetalk module',
+ })
+ r, info = fetch_url(module, url, data=data, headers=headers)
+ if info['status'] != 200:
+ exc = ConnectionError(info['msg'])
+ exc.code = info['status']
+ raise exc
+ return r
+
+
+def get_access_token(module, client_id, client_secret):
+ params = {
+ 'client_id': client_id,
+ 'client_secret': client_secret,
+ 'grant_type': 'client_credentials',
+ 'scope': 'topic.post'
+ }
+ res = do_request(module, 'https://typetalk.in/oauth2/access_token', params)
+ return json.load(res)['access_token']
+
+
+def send_message(module, client_id, client_secret, topic, msg):
+ """
+ send message to typetalk
+ """
+ try:
+ access_token = get_access_token(module, client_id, client_secret)
+ url = 'https://typetalk.in/api/v1/topics/%d' % topic
+ headers = {
+ 'Authorization': 'Bearer %s' % access_token,
+ }
+ do_request(module, url, {'message': msg}, headers)
+ return True, {'access_token': access_token}
+ except ConnectionError:
+ e = get_exception()
+ return False, e
+
+
+def main():
+
+ module = AnsibleModule(
+ argument_spec=dict(
+ client_id=dict(required=True),
+ client_secret=dict(required=True, no_log=True),
+ topic=dict(required=True, type='int'),
+ msg=dict(required=True),
+ ),
+ supports_check_mode=False
+ )
+
+ if not json:
+ module.fail_json(msg="json module is required")
+
+ client_id = module.params["client_id"]
+ client_secret = module.params["client_secret"]
+ topic = module.params["topic"]
+ msg = module.params["msg"]
+
+ res, error = send_message(module, client_id, client_secret, topic, msg)
+ if not res:
+ module.fail_json(msg='failed to send message, response code: %s' % error.code)
+
+ module.exit_json(changed=True, topic=topic, msg=msg)
+
+
+if __name__ == '__main__':
+ main()
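The two-step flow send_message() performs, the client_credentials grant first and the topic post second, sketched with the Python 2 stdlib (credentials and topic id are placeholders):

    import json
    import urllib
    import urllib2

    params = urllib.urlencode(dict(client_id='12345', client_secret='12345',
                                   grant_type='client_credentials',
                                   scope='topic.post'))
    res = urllib2.urlopen('https://typetalk.in/oauth2/access_token', params)
    access_token = json.load(res)['access_token']
    req = urllib2.Request('https://typetalk.in/api/v1/topics/1',
                          urllib.urlencode({'message': 'install completed'}),
                          {'Authorization': 'Bearer %s' % access_token})
    urllib2.urlopen(req)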
diff --git a/lib/ansible/modules/extras/packaging/__init__.py b/lib/ansible/modules/extras/packaging/__init__.py
new file mode 100644
index 0000000000..e69de29bb2
--- /dev/null
+++ b/lib/ansible/modules/extras/packaging/__init__.py
diff --git a/lib/ansible/modules/extras/packaging/dpkg_selections.py b/lib/ansible/modules/extras/packaging/dpkg_selections.py
new file mode 100644
index 0000000000..fa0f73a713
--- /dev/null
+++ b/lib/ansible/modules/extras/packaging/dpkg_selections.py
@@ -0,0 +1,76 @@
+#!/usr/bin/python
+# -*- coding: utf-8 -*-
+#
+# This file is part of Ansible
+#
+# Ansible is free software: you can redistribute it and/or modify
+# it under the terms of the GNU General Public License as published by
+# the Free Software Foundation, either version 3 of the License, or
+# (at your option) any later version.
+#
+# Ansible is distributed in the hope that it will be useful,
+# but WITHOUT ANY WARRANTY; without even the implied warranty of
+# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+# GNU General Public License for more details.
+#
+# You should have received a copy of the GNU General Public License
+# along with Ansible. If not, see <http://www.gnu.org/licenses/>.
+
+DOCUMENTATION = '''
+---
+module: dpkg_selections
+short_description: Manage dpkg package selections
+description:
+ - Change dpkg package selection state via --get-selections and --set-selections.
+version_added: "2.0"
+author: Brian Brazil <brian.brazil@boxever.com>
+options:
+ name:
+ description:
+ - Name of the package
+ required: true
+ selection:
+ description:
+ - The selection state to set the package to.
+ choices: [ 'install', 'hold', 'deinstall', 'purge' ]
+ required: true
+notes:
+ - This module won't cause any packages to be installed/removed/purged, use the C(apt) module for that.
+'''
+EXAMPLES = '''
+# Prevent python from being upgraded.
+- dpkg_selections: name=python selection=hold
+'''
+
+def main():
+ module = AnsibleModule(
+ argument_spec = dict(
+ name = dict(required=True),
+ selection = dict(required=True, choices=['install', 'hold', 'deinstall', 'purge'])
+ ),
+ supports_check_mode=True,
+ )
+
+ dpkg = module.get_bin_path('dpkg', True)
+
+ name = module.params['name']
+ selection = module.params['selection']
+
+ # Get current settings.
+ rc, out, err = module.run_command([dpkg, '--get-selections', name], check_rc=True)
+ if not out:
+ current = 'not present'
+ else:
+ current = out.split()[1]
+
+ changed = current != selection
+
+ if module.check_mode or not changed:
+ module.exit_json(changed=changed, before=current, after=selection)
+
+ module.run_command([dpkg, '--set-selections'], data="%s %s" % (name, selection), check_rc=True)
+ module.exit_json(changed=changed, before=current, after=selection)
+
+
+from ansible.module_utils.basic import *
+main()
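The two dpkg invocations the module wraps, shown with subprocess for clarity; a sketch to run as root on a Debian-family host:

    import subprocess

    out = subprocess.check_output(['dpkg', '--get-selections', 'python'])
    current = out.split()[1] if out else 'not present'
    if current != 'hold':
        p = subprocess.Popen(['dpkg', '--set-selections'],
                             stdin=subprocess.PIPE)
        p.communicate('python hold\n')  # same data main() feeds via run_command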
diff --git a/lib/ansible/modules/extras/packaging/elasticsearch_plugin.py b/lib/ansible/modules/extras/packaging/elasticsearch_plugin.py
new file mode 100644
index 0000000000..7e01b4a4d5
--- /dev/null
+++ b/lib/ansible/modules/extras/packaging/elasticsearch_plugin.py
@@ -0,0 +1,208 @@
+#!/usr/bin/python
+# -*- coding: utf-8 -*-
+
+import os
+
+"""
+Ansible module to manage elasticsearch plugins
+(c) 2015, Mathew Davies <thepixeldeveloper@googlemail.com>
+
+This file is part of Ansible
+
+Ansible is free software: you can redistribute it and/or modify
+it under the terms of the GNU General Public License as published by
+the Free Software Foundation, either version 3 of the License, or
+(at your option) any later version.
+
+Ansible is distributed in the hope that it will be useful,
+but WITHOUT ANY WARRANTY; without even the implied warranty of
+MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+GNU General Public License for more details.
+You should have received a copy of the GNU General Public License
+along with Ansible. If not, see <http://www.gnu.org/licenses/>.
+"""
+
+DOCUMENTATION = '''
+---
+module: elasticsearch_plugin
+short_description: Manage Elasticsearch plugins
+description:
+ - Manages Elasticsearch plugins.
+version_added: "2.0"
+author: Mathew Davies (@ThePixelDeveloper)
+options:
+ name:
+ description:
+ - Name of the plugin to install. In ES 2.x, the name can be a URL or file location
+ required: True
+ state:
+ description:
+ - Desired state of a plugin.
+ required: False
+ choices: ["present", "absent"]
+ default: present
+ url:
+ description:
+ - Set exact URL to download the plugin from (Only works for ES 1.x)
+ required: False
+ default: None
+ timeout:
+ description:
+ - "Timeout setting: 30s, 1m, 1h..."
+ required: False
+ default: 1m
+ plugin_bin:
+ description:
+ - Location of the plugin binary
+ required: False
+ default: /usr/share/elasticsearch/bin/plugin
+ plugin_dir:
+ description:
+ - Your configured plugin directory specified in Elasticsearch
+ required: False
+ default: /usr/share/elasticsearch/plugins/
+ proxy_host:
+ description:
+ - Proxy host to use during plugin installation
+ required: False
+ default: None
+ version_added: "2.1"
+ proxy_port:
+ description:
+ - Proxy port to use during plugin installation
+ required: False
+ default: None
+ version_added: "2.1"
+ version:
+ description:
+ - Version of the plugin to be installed.
+ If plugin exists with previous version, it will NOT be updated
+ required: False
+ default: None
+'''
+
+EXAMPLES = '''
+# Install Elasticsearch head plugin
+- elasticsearch_plugin: state=present name="mobz/elasticsearch-head"
+
+# Install specific version of a plugin
+- elasticsearch_plugin: state=present name="com.github.kzwang/elasticsearch-image" version="1.2.0"
+
+# Uninstall Elasticsearch head plugin
+- elasticsearch_plugin: state=absent name="mobz/elasticsearch-head"
+'''
+
+PACKAGE_STATE_MAP = dict(
+ present="install",
+ absent="remove"
+)
+
+def parse_plugin_repo(string):
+ elements = string.split("/")
+
+ # We first consider the simplest form: pluginname
+ repo = elements[0]
+
+ # We consider the form: username/pluginname
+ if len(elements) > 1:
+ repo = elements[1]
+
+ # remove elasticsearch- prefix
+ # remove es- prefix
+ for prefix in ("elasticsearch-", "es-"):
+ if repo.startswith(prefix):
+ return repo[len(prefix):]
+
+ return repo
+
+def is_plugin_present(plugin_dir, working_dir):
+ return os.path.isdir(os.path.join(working_dir, plugin_dir))
+
+def parse_error(string):
+ reason = "reason: "
+ try:
+ return string[string.index(reason) + len(reason):].strip()
+ except ValueError:
+ return string
+
+def install_plugin(module, plugin_bin, plugin_name, version, url, proxy_host, proxy_port, timeout):
+ cmd_args = [plugin_bin, PACKAGE_STATE_MAP["present"], plugin_name]
+
+ if version:
+ plugin_name = plugin_name + '/' + version
+ cmd_args[2] = plugin_name
+
+ if proxy_host and proxy_port:
+ cmd_args.append("-DproxyHost=%s -DproxyPort=%s" % (proxy_host, proxy_port))
+
+ if url:
+ cmd_args.append("--url %s" % url)
+
+ if timeout:
+ cmd_args.append("--timeout %s" % timeout)
+
+ cmd = " ".join(cmd_args)
+
+ rc, out, err = module.run_command(cmd)
+
+ if rc != 0:
+ reason = parse_error(out)
+ module.fail_json(msg=reason)
+
+ return True, cmd, out, err
+
+def remove_plugin(module, plugin_bin, plugin_name):
+ cmd_args = [plugin_bin, PACKAGE_STATE_MAP["absent"], parse_plugin_repo(plugin_name)]
+
+ cmd = " ".join(cmd_args)
+
+ rc, out, err = module.run_command(cmd)
+
+ if rc != 0:
+ reason = parse_error(out)
+ module.fail_json(msg=reason)
+
+ return True, cmd, out, err
+
+def main():
+ module = AnsibleModule(
+ argument_spec=dict(
+ name=dict(required=True),
+ state=dict(default="present", choices=PACKAGE_STATE_MAP.keys()),
+ url=dict(default=None),
+ timeout=dict(default="1m"),
+ plugin_bin=dict(default="/usr/share/elasticsearch/bin/plugin", type="path"),
+ plugin_dir=dict(default="/usr/share/elasticsearch/plugins/", type="path"),
+ proxy_host=dict(default=None),
+ proxy_port=dict(default=None),
+ version=dict(default=None)
+ )
+ )
+
+ name = module.params["name"]
+ state = module.params["state"]
+ url = module.params["url"]
+ timeout = module.params["timeout"]
+ plugin_bin = module.params["plugin_bin"]
+ plugin_dir = module.params["plugin_dir"]
+ proxy_host = module.params["proxy_host"]
+ proxy_port = module.params["proxy_port"]
+ version = module.params["version"]
+
+ present = is_plugin_present(parse_plugin_repo(name), plugin_dir)
+
+ # skip if the state is correct
+ if (present and state == "present") or (state == "absent" and not present):
+ module.exit_json(changed=False, name=name, state=state)
+
+ if state == "present":
+ changed, cmd, out, err = install_plugin(module, plugin_bin, name, version, url, proxy_host, proxy_port, timeout)
+
+ elif state == "absent":
+ changed, cmd, out, err = remove_plugin(module, plugin_bin, name)
+
+ module.exit_json(changed=changed, cmd=cmd, name=name, state=state, url=url, timeout=timeout, stdout=out, stderr=err)
+
+from ansible.module_utils.basic import *
+
+if __name__ == '__main__':
+ main()
diff --git a/lib/ansible/modules/extras/packaging/kibana_plugin.py b/lib/ansible/modules/extras/packaging/kibana_plugin.py
new file mode 100644
index 0000000000..f0ffcd9ddf
--- /dev/null
+++ b/lib/ansible/modules/extras/packaging/kibana_plugin.py
@@ -0,0 +1,237 @@
+#!/usr/bin/python
+# -*- coding: utf-8 -*-
+
+"""
+Ansible module to manage Kibana plugins
+(c) 2016, Thierno IB. BARRY @barryib
+Sponsored by Polyconseil http://polyconseil.fr.
+
+This file is part of Ansible
+
+Ansible is free software: you can redistribute it and/or modify
+it under the terms of the GNU General Public License as published by
+the Free Software Foundation, either version 3 of the License, or
+(at your option) any later version.
+
+Ansible is distributed in the hope that it will be useful,
+but WITHOUT ANY WARRANTY; without even the implied warranty of
+MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+GNU General Public License for more details.
+You should have received a copy of the GNU General Public License
+along with Ansible. If not, see <http://www.gnu.org/licenses/>.
+"""
+
+import os
+
+DOCUMENTATION = '''
+---
+module: kibana_plugin
+short_description: Manage Kibana plugins
+description:
+ - Manages Kibana plugins.
+version_added: "2.2"
+author: Thierno IB. BARRY (@barryib)
+options:
+ name:
+ description:
+ - Name of the plugin to install
+ required: True
+ state:
+ description:
+ - Desired state of a plugin.
+ required: False
+ choices: ["present", "absent"]
+ default: present
+ url:
+ description:
+ - Set exact URL to download the plugin from.
+ For local file, prefix its absolute path with file://
+ required: False
+ default: None
+ timeout:
+ description:
+ - "Timeout setting: 30s, 1m, 1h..."
+ required: False
+ default: 1m
+ plugin_bin:
+ description:
+ - Location of the plugin binary
+ required: False
+ default: /opt/kibana/bin/kibana
+ plugin_dir:
+ description:
+ - Your configured plugin directory specified in Kibana
+ required: False
+ default: /opt/kibana/installedPlugins/
+ version:
+ description:
+ - Version of the plugin to be installed.
+ If the plugin is already installed with a previous version, it will NOT be updated unless C(force) is set to yes
+ required: False
+ default: None
+ force:
+ description:
+ - Delete and re-install the plugin. Can be useful to update a plugin
+ required: False
+ choices: ["yes", "no"]
+ default: no
+'''
+
+EXAMPLES = '''
+# Install Kibana Marvel plugin
+- kibana_plugin: state=present name="elasticsearch/marvel"
+
+# Install specific version of a plugin
+- kibana_plugin: state=present name="elasticsearch/marvel" version="2.3.3"
+
+# Uninstall Kibana Marvel plugin
+- kibana_plugin: state=absent name="elasticsearch/marvel"
+'''
+
+RETURN = '''
+cmd:
+ description: the command launched during plugin management (install or remove)
+ returned: success
+ type: string
+name:
+ description: the plugin name to install or remove
+ returned: success
+ type: string
+url:
+ description: the URL from which the plugin was installed
+ returned: success
+ type: string
+timeout:
+ description: the timeout for the plugin download
+ returned: success
+ type: string
+stdout:
+ description: the command stdout
+ returned: success
+ type: string
+stderr:
+ description: the command stderr
+ returned: success
+ type: string
+state:
+ description: the state for the managed plugin
+ returned: success
+ type: string
+'''
+
+PACKAGE_STATE_MAP = dict(
+ present="--install",
+ absent="--remove"
+)
+
+def parse_plugin_repo(string):
+ elements = string.split("/")
+
+ # We first consider the simplest form: pluginname
+ repo = elements[0]
+
+ # We consider the form: username/pluginname
+ if len(elements) > 1:
+ repo = elements[1]
+
+ # remove the elasticsearch- and es- prefixes
+ for prefix in ("elasticsearch-", "es-"):
+ if repo.startswith(prefix):
+ return repo[len(prefix):]
+
+ return repo
+
+def is_plugin_present(plugin_dir, working_dir):
+ return os.path.isdir(os.path.join(working_dir, plugin_dir))
+
+def parse_error(string):
+ reason = "reason: "
+ try:
+ return string[string.index(reason) + len(reason):].strip()
+ except ValueError:
+ return string
+
+def install_plugin(module, plugin_bin, plugin_name, url, timeout):
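+ """Run "<plugin_bin> plugin --install <plugin_name>" and return
+ (changed, cmd, out, err); in check mode the command is only built,
+ never executed."""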
+ cmd_args = [plugin_bin, "plugin", PACKAGE_STATE_MAP["present"], plugin_name]
+
+ if url:
+ cmd_args.append("--url %s" % url)
+
+ if timeout:
+ cmd_args.append("--timeout %s" % timeout)
+
+ cmd = " ".join(cmd_args)
+
+ if module.check_mode:
+ return True, cmd, "check mode", ""
+
+ rc, out, err = module.run_command(cmd)
+ if rc != 0:
+ reason = parse_error(out)
+ module.fail_json(msg=reason)
+
+ return True, cmd, out, err
+
+def remove_plugin(module, plugin_bin, plugin_name):
+ cmd_args = [plugin_bin, "plugin", PACKAGE_STATE_MAP["absent"], plugin_name]
+
+ cmd = " ".join(cmd_args)
+
+ if module.check_mode:
+ return True, cmd, "check mode", ""
+
+ rc, out, err = module.run_command(cmd)
+ if rc != 0:
+ reason = parse_error(out)
+ module.fail_json(msg=reason)
+
+ return True, cmd, out, err
+
+def main():
+ module = AnsibleModule(
+ argument_spec=dict(
+ name=dict(required=True),
+ state=dict(default="present", choices=PACKAGE_STATE_MAP.keys()),
+ url=dict(default=None),
+ timeout=dict(default="1m"),
+ plugin_bin=dict(default="/opt/kibana/bin/kibana", type="path"),
+ plugin_dir=dict(default="/opt/kibana/installedPlugins/", type="path"),
+ version=dict(default=None),
+ force=dict(default="no", type="bool")
+ ),
+ supports_check_mode=True,
+ )
+
+ name = module.params["name"]
+ state = module.params["state"]
+ url = module.params["url"]
+ timeout = module.params["timeout"]
+ plugin_bin = module.params["plugin_bin"]
+ plugin_dir = module.params["plugin_dir"]
+ version = module.params["version"]
+ force = module.params["force"]
+
+ present = is_plugin_present(parse_plugin_repo(name), plugin_dir)
+
+ # skip if the state is correct
+ if (present and state == "present" and not force) or (state == "absent" and not present and not force):
+ module.exit_json(changed=False, name=name, state=state)
+
+ if version:
+ name = name + '/' + version
+
+ if state == "present":
+ if force:
+ remove_plugin(module, plugin_bin, name)
+ changed, cmd, out, err = install_plugin(module, plugin_bin, name, url, timeout)
+
+ elif state == "absent":
+ changed, cmd, out, err = remove_plugin(module, plugin_bin, name)
+
+ module.exit_json(changed=changed, cmd=cmd, name=name, state=state, url=url, timeout=timeout, stdout=out, stderr=err)
+
+from ansible.module_utils.basic import *
+
+if __name__ == '__main__':
+ main()
diff --git a/lib/ansible/modules/extras/packaging/language/__init__.py b/lib/ansible/modules/extras/packaging/language/__init__.py
new file mode 100644
index 0000000000..e69de29bb2
--- /dev/null
+++ b/lib/ansible/modules/extras/packaging/language/__init__.py
diff --git a/lib/ansible/modules/extras/packaging/language/bower.py b/lib/ansible/modules/extras/packaging/language/bower.py
new file mode 100644
index 0000000000..2b58b1ce1f
--- /dev/null
+++ b/lib/ansible/modules/extras/packaging/language/bower.py
@@ -0,0 +1,227 @@
+#!/usr/bin/python
+# -*- coding: utf-8 -*-
+
+# (c) 2014, Michael Warkentin <mwarkentin@gmail.com>
+#
+# This file is part of Ansible
+#
+# Ansible is free software: you can redistribute it and/or modify
+# it under the terms of the GNU General Public License as published by
+# the Free Software Foundation, either version 3 of the License, or
+# (at your option) any later version.
+#
+# Ansible is distributed in the hope that it will be useful,
+# but WITHOUT ANY WARRANTY; without even the implied warranty of
+# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+# GNU General Public License for more details.
+#
+# You should have received a copy of the GNU General Public License
+# along with Ansible. If not, see <http://www.gnu.org/licenses/>.
+
+DOCUMENTATION = '''
+---
+module: bower
+short_description: Manage bower packages with bower
+description:
+ - Manage bower packages with bower
+version_added: 1.9
+author: "Michael Warkentin (@mwarkentin)"
+options:
+ name:
+ description:
+ - The name of a bower package to install
+ required: false
+ offline:
+ description:
+ - Install packages from local cache, if the packages were installed before
+ required: false
+ default: no
+ choices: [ "yes", "no" ]
+ production:
+ description:
+ - Install with --production flag
+ required: false
+ default: no
+ choices: [ "yes", "no" ]
+ version_added: "2.0"
+ path:
+ description:
+ - The base path where the bower packages will be installed
+ required: true
+ relative_execpath:
+ description:
+ - Relative path to bower executable from install path
+ default: null
+ required: false
+ version_added: "2.1"
+ state:
+ description:
+ - The state of the bower package
+ required: false
+ default: present
+ choices: [ "present", "absent", "latest" ]
+ version:
+ description:
+ - The version to be installed
+ required: false
+'''
+
+EXAMPLES = '''
+description: Install "bootstrap" bower package.
+- bower: name=bootstrap
+
+description: Install "bootstrap" bower package on version 3.1.1.
+- bower: name=bootstrap version=3.1.1
+
+description: Remove the "bootstrap" bower package.
+- bower: name=bootstrap state=absent
+
+description: Install packages based on bower.json.
+- bower: path=/app/location
+
+description: Update packages based on bower.json to their latest version.
+- bower: path=/app/location state=latest
+
+description: install bower locally and run from there
+- npm: path=/app/location name=bower global=no
+- bower: path=/app/location relative_execpath=node_modules/.bin
+'''
+
+import json
+import os
+
+class Bower(object):
+ def __init__(self, module, **kwargs):
+ self.module = module
+ self.name = kwargs['name']
+ self.offline = kwargs['offline']
+ self.production = kwargs['production']
+ self.path = kwargs['path']
+ self.relative_execpath = kwargs['relative_execpath']
+ self.version = kwargs['version']
+
+ if kwargs['version']:
+ self.name_version = self.name + '#' + self.version
+ else:
+ self.name_version = self.name
+
+ def _exec(self, args, run_in_check_mode=False, check_rc=True):
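+ """Run a bower command with the common flags appended; in check mode the
+ command is skipped (unless run_in_check_mode is set) and '' is returned."""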
+ if not self.module.check_mode or (self.module.check_mode and run_in_check_mode):
+ cmd = []
+
+ if self.relative_execpath:
+ cmd.append(os.path.join(self.path, self.relative_execpath, "bower"))
+ if not os.path.isfile(cmd[-1]):
+ self.module.fail_json(msg="bower not found at relative path %s" % self.relative_execpath)
+ else:
+ cmd.append("bower")
+
+ cmd.extend(args)
+ cmd.extend(['--config.interactive=false', '--allow-root'])
+
+ if self.name:
+ cmd.append(self.name_version)
+
+ if self.offline:
+ cmd.append('--offline')
+
+ if self.production:
+ cmd.append('--production')
+
+ # If path is specified, cd into that path and run the command.
+ cwd = None
+ if self.path:
+ if not os.path.exists(self.path):
+ os.makedirs(self.path)
+ if not os.path.isdir(self.path):
+ self.module.fail_json(msg="path %s is not a directory" % self.path)
+ cwd = self.path
+
+ rc, out, err = self.module.run_command(cmd, check_rc=check_rc, cwd=cwd)
+ return out
+ return ''
+
+ def list(self):
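+ """Run "bower list --json" and bucket dependencies into installed,
+ missing, and outdated name lists."""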
+ cmd = ['list', '--json']
+
+ installed = list()
+ missing = list()
+ outdated = list()
+ data = json.loads(self._exec(cmd, True, False))
+ if 'dependencies' in data:
+ for dep in data['dependencies']:
+ dep_data = data['dependencies'][dep]
+ if dep_data.get('missing', False):
+ missing.append(dep)
+ elif ('version' in dep_data['pkgMeta'] and
+ 'update' in dep_data and
+ dep_data['pkgMeta']['version'] != dep_data['update']['latest']):
+ outdated.append(dep)
+ elif dep_data.get('incompatible', False):
+ outdated.append(dep)
+ else:
+ installed.append(dep)
+ # Named dependency not installed
+ else:
+ missing.append(self.name)
+
+ return installed, missing, outdated
+
+ def install(self):
+ return self._exec(['install'])
+
+ def update(self):
+ return self._exec(['update'])
+
+ def uninstall(self):
+ return self._exec(['uninstall'])
+
+
+def main():
+ arg_spec = dict(
+ name=dict(default=None),
+ offline=dict(default='no', type='bool'),
+ production=dict(default='no', type='bool'),
+ path=dict(required=True, type='path'),
+ relative_execpath=dict(default=None, required=False, type='path'),
+ state=dict(default='present', choices=['present', 'absent', 'latest', ]),
+ version=dict(default=None),
+ )
+ module = AnsibleModule(
+ argument_spec=arg_spec
+ )
+
+ name = module.params['name']
+ offline = module.params['offline']
+ production = module.params['production']
+ path = os.path.expanduser(module.params['path'])
+ relative_execpath = module.params['relative_execpath']
+ state = module.params['state']
+ version = module.params['version']
+
+ if state == 'absent' and not name:
+ module.fail_json(msg='uninstalling a package is only available for named packages')
+
+ bower = Bower(module, name=name, offline=offline, production=production, path=path, relative_execpath=relative_execpath, version=version)
+
+ changed = False
+ if state == 'present':
+ installed, missing, outdated = bower.list()
+ if len(missing):
+ changed = True
+ bower.install()
+ elif state == 'latest':
+ installed, missing, outdated = bower.list()
+ if len(missing) or len(outdated):
+ changed = True
+ bower.update()
+ else: # Absent
+ installed, missing, outdated = bower.list()
+ if name in installed:
+ changed = True
+ bower.uninstall()
+
+ module.exit_json(changed=changed)
+
+# Import module snippets
+from ansible.module_utils.basic import *
+if __name__ == '__main__':
+ main()
diff --git a/lib/ansible/modules/extras/packaging/language/bundler.py b/lib/ansible/modules/extras/packaging/language/bundler.py
new file mode 100644
index 0000000000..152b51810a
--- /dev/null
+++ b/lib/ansible/modules/extras/packaging/language/bundler.py
@@ -0,0 +1,211 @@
+#!/usr/bin/python
+# -*- coding: utf-8 -*-
+
+# (c) 2015, Tim Hoiberg <tim.hoiberg@gmail.com>
+#
+# This file is part of Ansible
+#
+# Ansible is free software: you can redistribute it and/or modify
+# it under the terms of the GNU General Public License as published by
+# the Free Software Foundation, either version 3 of the License, or
+# (at your option) any later version.
+#
+# Ansible is distributed in the hope that it will be useful,
+# but WITHOUT ANY WARRANTY; without even the implied warranty of
+# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+# GNU General Public License for more details.
+#
+# You should have received a copy of the GNU General Public License
+# along with Ansible. If not, see <http://www.gnu.org/licenses/>.
+#
+
+DOCUMENTATION = '''
+---
+module: bundler
+short_description: Manage Ruby Gem dependencies with Bundler
+description:
+ - Manage installation and Gem version dependencies for Ruby using the Bundler gem
+version_added: "2.0.0"
+options:
+ executable:
+ description:
+ - The path to the bundler executable
+ required: false
+ default: null
+ state:
+ description:
+ - The desired state of the Gem bundle. C(latest) updates gems to the most recent acceptable version
+ required: false
+ choices: [present, latest]
+ default: present
+ chdir:
+ description:
+ - The directory to execute the bundler commands from. This directory
+ needs to contain a valid Gemfile or .bundle/ directory
+ required: false
+ default: temporary working directory
+ exclude_groups:
+ description:
+ - A list of Gemfile groups to exclude during operations. This only
+ applies when state is C(present). Bundler considers this
+ a 'remembered' property for the Gemfile and will automatically exclude
+ groups in future operations even if C(exclude_groups) is not set
+ required: false
+ default: null
+ clean:
+ description:
+ - Only applies if state is C(present). If set, removes any gems on the
+ target host that are not in the gemfile
+ required: false
+ choices: [yes, no]
+ default: "no"
+ gemfile:
+ description:
+ - Only applies if state is C(present). The path to the gemfile to use to install gems.
+ required: false
+ default: Gemfile in current directory
+ local:
+ description:
+ - If set, only installs gems from the cache on the target host
+ required: false
+ choices: [yes, no]
+ default: "no"
+ deployment_mode:
+ description:
+ - Only applies if state is C(present). If set, it will only install gems
+ that are in the default or production groups. Requires a Gemfile.lock
+ file to have been created beforehand
+ required: false
+ choices: [yes, no]
+ default: "no"
+ user_install:
+ description:
+ - Only applies if state is C(present). Installs gems in the local user's cache or for all users
+ required: false
+ choices: [yes, no]
+ default: "yes"
+ gem_path:
+ description:
+ - Only applies if state is C(present). Specifies the directory to
+ install the gems into. If C(chdir) is set then this path is relative to
+ C(chdir)
+ required: false
+ default: RubyGems gem paths
+ binstub_directory:
+ description:
+ - Only applies if state is C(present). Specifies the directory to
+ install any gem bin files to. When executed, the bin files will run
+ within the context of the Gemfile and fail if any required gem
+ dependencies are not installed. If C(chdir) is set then this path is
+ relative to C(chdir)
+ required: false
+ default: null
+ extra_args:
+ description:
+ - A space separated string of additional arguments that can be passed to
+ the Bundler command. Refer to the Bundler documentation for more
+ information
+ required: false
+ default: null
+author: "Tim Hoiberg (@thoiberg)"
+'''
+
+EXAMPLES = '''
+# Installs gems from a Gemfile in the current directory
+- bundler: state=present executable=~/.rvm/gems/2.1.5/bin/bundle
+
+# Excludes the production group from installing
+- bundler: state=present exclude_groups=production
+
+# Only install gems from the default and production groups
+- bundler: state=present deployment_mode=yes
+
+# Installs gems using a Gemfile in another directory
+- bundler: state=present gemfile=../rails_project/Gemfile
+
+# Updates Gemfile in another directory
+- bundler: state=latest chdir=~/rails_project
+'''
+
+
+def get_bundler_executable(module):
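+ """Return the bundler command as an argv list; an explicit 'executable'
+ option may carry its own arguments, hence the split."""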
+ if module.params.get('executable'):
+ return module.params.get('executable').split(' ')
+ else:
+ return [module.get_bin_path('bundle', True)]
+
+
+def main():
+ module = AnsibleModule(
+ argument_spec=dict(
+ executable=dict(default=None, required=False),
+ state=dict(default='present', required=False, choices=['present', 'latest']),
+ chdir=dict(default=None, required=False, type='path'),
+ exclude_groups=dict(default=None, required=False, type='list'),
+ clean=dict(default=False, required=False, type='bool'),
+ gemfile=dict(default=None, required=False, type='path'),
+ local=dict(default=False, required=False, type='bool'),
+ deployment_mode=dict(default=False, required=False, type='bool'),
+ user_install=dict(default=True, required=False, type='bool'),
+ gem_path=dict(default=None, required=False, type='path'),
+ binstub_directory=dict(default=None, required=False, type='path'),
+ extra_args=dict(default=None, required=False),
+ ),
+ supports_check_mode=True
+ )
+
+ executable = module.params.get('executable')
+ state = module.params.get('state')
+ chdir = module.params.get('chdir')
+ exclude_groups = module.params.get('exclude_groups')
+ clean = module.params.get('clean')
+ gemfile = module.params.get('gemfile')
+ local = module.params.get('local')
+ deployment_mode = module.params.get('deployment_mode')
+ user_install = module.params.get('user_install')
+ gem_path = module.params.get('gem_path')
+ binstub_directory = module.params.get('binstub_directory')
+ extra_args = module.params.get('extra_args')
+
+ cmd = get_bundler_executable(module)
+
+ if module.check_mode:
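+ # "bundle check" exits non-zero when gems from the Gemfile are missing,
+ # so a non-zero rc means a real run would install something.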
+ cmd.append('check')
+ rc, out, err = module.run_command(cmd, cwd=chdir, check_rc=False)
+
+ module.exit_json(changed=rc != 0, state=state, stdout=out, stderr=err)
+
+ if state == 'present':
+ cmd.append('install')
+ if exclude_groups:
+ cmd.extend(['--without', ':'.join(exclude_groups)])
+ if clean:
+ cmd.append('--clean')
+ if gemfile:
+ cmd.extend(['--gemfile', gemfile])
+ if local:
+ cmd.append('--local')
+ if deployment_mode:
+ cmd.append('--deployment')
+ if not user_install:
+ cmd.append('--system')
+ if gem_path:
+ cmd.extend(['--path', gem_path])
+ if binstub_directory:
+ cmd.extend(['--binstubs', binstub_directory])
+ else:
+ cmd.append('update')
+ if local:
+ cmd.append('--local')
+
+ if extra_args:
+ cmd.extend(extra_args.split(' '))
+
+ rc, out, err = module.run_command(cmd, cwd=chdir, check_rc=True)
+
+ module.exit_json(changed='Installing' in out, state=state, stdout=out, stderr=err)
+
+
+from ansible.module_utils.basic import *
+if __name__ == '__main__':
+ main()
diff --git a/lib/ansible/modules/extras/packaging/language/composer.py b/lib/ansible/modules/extras/packaging/language/composer.py
new file mode 100644
index 0000000000..4c5f8518be
--- /dev/null
+++ b/lib/ansible/modules/extras/packaging/language/composer.py
@@ -0,0 +1,233 @@
+#!/usr/bin/python
+# -*- coding: utf-8 -*-
+
+# (c) 2014, Dimitrios Tydeas Mengidis <tydeas.dr@gmail.com>
+
+# This file is part of Ansible
+#
+# Ansible is free software: you can redistribute it and/or modify
+# it under the terms of the GNU General Public License as published by
+# the Free Software Foundation, either version 3 of the License, or
+# (at your option) any later version.
+#
+# Ansible is distributed in the hope that it will be useful,
+# but WITHOUT ANY WARRANTY; without even the implied warranty of
+# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+# GNU General Public License for more details.
+#
+# You should have received a copy of the GNU General Public License
+# along with Ansible. If not, see <http://www.gnu.org/licenses/>.
+#
+
+DOCUMENTATION = '''
+---
+module: composer
+author:
+ - "Dimitrios Tydeas Mengidis (@dmtrs)"
+ - "René Moser (@resmo)"
+short_description: Dependency Manager for PHP
+version_added: "1.6"
+description:
+ - Composer is a tool for dependency management in PHP. It allows you to declare the dependent libraries your project needs, and it will install them in your project for you.
+options:
+ command:
+ version_added: "1.8"
+ description:
+ - Composer command like "install", "update" and so on
+ required: false
+ default: install
+ arguments:
+ version_added: "2.0"
+ description:
+ - Composer arguments like required package, version and so on
+ required: false
+ default: null
+ working_dir:
+ description:
+ - Directory of your project (see --working-dir)
+ required: true
+ default: null
+ aliases: [ "working-dir" ]
+ prefer_source:
+ description:
+ - Forces installation from package sources when possible (see --prefer-source)
+ required: false
+ default: "no"
+ choices: [ "yes", "no" ]
+ aliases: [ "prefer-source" ]
+ prefer_dist:
+ description:
+ - Forces installation from package dist even for dev versions (see --prefer-dist)
+ required: false
+ default: "no"
+ choices: [ "yes", "no" ]
+ aliases: [ "prefer-dist" ]
+ no_dev:
+ description:
+ - Disables installation of require-dev packages (see --no-dev)
+ required: false
+ default: "yes"
+ choices: [ "yes", "no" ]
+ aliases: [ "no-dev" ]
+ no_scripts:
+ description:
+ - Skips the execution of all scripts defined in composer.json (see --no-scripts)
+ required: false
+ default: "no"
+ choices: [ "yes", "no" ]
+ aliases: [ "no-scripts" ]
+ no_plugins:
+ description:
+ - Disables all plugins (see --no-plugins)
+ required: false
+ default: "no"
+ choices: [ "yes", "no" ]
+ aliases: [ "no-plugins" ]
+ optimize_autoloader:
+ description:
+ - Optimize autoloader during autoloader dump (see --optimize-autoloader). Convert PSR-0/4 autoloading to classmap to get a faster autoloader. This is recommended especially for production, but can take a bit of time to run so it is currently not done by default.
+ required: false
+ default: "yes"
+ choices: [ "yes", "no" ]
+ aliases: [ "optimize-autoloader" ]
+ ignore_platform_reqs:
+ version_added: "2.0"
+ description:
+ - Ignore php, hhvm, lib-* and ext-* requirements and force the installation even if the local machine does not fulfill these.
+ required: false
+ default: "no"
+ choices: [ "yes", "no" ]
+ aliases: [ "ignore-platform-reqs" ]
+requirements:
+ - php
+ - composer installed in bin path (recommended /usr/local/bin)
+notes:
+ - Default options that are always appended in each execution are --no-ansi, --no-interaction and --no-progress if available.
+ - We have received reports of issues on macOS when composer is installed by Homebrew. Please use the official install method to avoid these issues.
+'''
+
+EXAMPLES = '''
+# Downloads and installs all the libs and dependencies outlined in the /path/to/project/composer.lock
+- composer: command=install working_dir=/path/to/project
+
+- composer:
+ command: "require"
+ arguments: "my/package"
+ working_dir: "/path/to/project"
+
+# Clone project and install with all dependencies
+- composer:
+ command: "create-project"
+ arguments: "package/package /path/to/project ~1.0"
+ working_dir: "/path/to/project"
+ prefer_dist: "yes"
+'''
+
+import os
+import re
+
+try:
+ import json
+except ImportError:
+ try:
+ import simplejson as json
+ except ImportError:
+ # Let snippet from module_utils/basic.py return a proper error in this case
+ pass
+
+
+def parse_out(string):
+ return re.sub(r"\s+", " ", string).strip()
+
+def has_changed(string):
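+ """Composer prints "Nothing to install or update" when everything is in
+ sync, so any other output indicates a change."""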
+ return "Nothing to install or update" not in string
+
+def get_available_options(module, command='install'):
+ # get all available options for a composer command from "composer help --format=json"
+ rc, out, err = composer_command(module, "help %s --format=json" % command)
+ if rc != 0:
+ output = parse_out(err)
+ module.fail_json(msg=output)
+
+ command_help_json = json.loads(out)
+ return command_help_json['definition']['options']
+
+def composer_command(module, command, arguments="", options=None):
+ """Run a composer command through the php binary and return (rc, stdout, stderr)."""
+ if options is None:
+ options = []
+ php_path = module.get_bin_path("php", True, ["/usr/local/bin"])
+ composer_path = module.get_bin_path("composer", True, ["/usr/local/bin"])
+ cmd = "%s %s %s %s %s" % (php_path, composer_path, command, " ".join(options), arguments)
+ return module.run_command(cmd)
+
+def main():
+ module = AnsibleModule(
+ argument_spec = dict(
+ command = dict(default="install", type="str", required=False),
+ arguments = dict(default="", type="str", required=False),
+ working_dir = dict(aliases=["working-dir"], required=True),
+ prefer_source = dict(default="no", type="bool", aliases=["prefer-source"]),
+ prefer_dist = dict(default="no", type="bool", aliases=["prefer-dist"]),
+ no_dev = dict(default="yes", type="bool", aliases=["no-dev"]),
+ no_scripts = dict(default="no", type="bool", aliases=["no-scripts"]),
+ no_plugins = dict(default="no", type="bool", aliases=["no-plugins"]),
+ optimize_autoloader = dict(default="yes", type="bool", aliases=["optimize-autoloader"]),
+ ignore_platform_reqs = dict(default="no", type="bool", aliases=["ignore-platform-reqs"]),
+ ),
+ supports_check_mode=True
+ )
+
+ # Get composer command with fallback to default
+ command = module.params['command']
+ if re.search(r"\s", command):
+ module.fail_json(msg="Use the 'arguments' param for passing arguments with the 'command'")
+
+ arguments = module.params['arguments']
+ available_options = get_available_options(module=module, command=command)
+
+ options = []
+
+ # Default options
+ default_options = [
+ 'no-ansi',
+ 'no-interaction',
+ 'no-progress',
+ ]
+
+ for option in default_options:
+ if option in available_options:
+ option = "--%s" % option
+ options.append(option)
+
+ options.extend(['--working-dir', os.path.abspath(module.params['working_dir'])])
+
+ option_params = {
+ 'prefer_source': 'prefer-source',
+ 'prefer_dist': 'prefer-dist',
+ 'no_dev': 'no-dev',
+ 'no_scripts': 'no-scripts',
+ 'no_plugins': 'no-plugins',
+ 'optimize_autoloader': 'optimize-autoloader',
+ 'ignore_platform_reqs': 'ignore-platform-reqs',
+ }
+
+ for param, option in option_params.iteritems():
+ if module.params.get(param) and option in available_options:
+ option = "--%s" % option
+ options.append(option)
+
+ if module.check_mode:
+ options.append('--dry-run')
+
+ rc, out, err = composer_command(module, command, arguments, options)
+
+ if rc != 0:
+ output = parse_out(err)
+ module.fail_json(msg=output, stdout=err)
+ else:
+ # Composer version > 1.0.0-alpha9 now use stderr for standard notification messages
+ output = parse_out(out + err)
+ module.exit_json(changed=has_changed(output), msg=output, stdout=out+err)
+
+# import module snippets
+from ansible.module_utils.basic import *
+if __name__ == '__main__':
+ main()
diff --git a/lib/ansible/modules/extras/packaging/language/cpanm.py b/lib/ansible/modules/extras/packaging/language/cpanm.py
new file mode 100644
index 0000000000..790a493915
--- /dev/null
+++ b/lib/ansible/modules/extras/packaging/language/cpanm.py
@@ -0,0 +1,220 @@
+#!/usr/bin/python
+# -*- coding: utf-8 -*-
+
+# (c) 2012, Franck Cuny <franck@lumberjaph.net>
+#
+# This file is part of Ansible
+#
+# Ansible is free software: you can redistribute it and/or modify
+# it under the terms of the GNU General Public License as published by
+# the Free Software Foundation, either version 3 of the License, or
+# (at your option) any later version.
+#
+# Ansible is distributed in the hope that it will be useful,
+# but WITHOUT ANY WARRANTY; without even the implied warranty of
+# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+# GNU General Public License for more details.
+#
+# You should have received a copy of the GNU General Public License
+# along with Ansible. If not, see <http://www.gnu.org/licenses/>.
+#
+
+DOCUMENTATION = '''
+---
+module: cpanm
+short_description: Manages Perl library dependencies.
+description:
+ - Manage Perl library dependencies.
+version_added: "1.6"
+options:
+ name:
+ description:
+ - The name of the Perl library to install. You may use the "full distribution path", e.g. MIYAGAWA/Plack-0.99_05.tar.gz
+ required: false
+ default: null
+ aliases: ["pkg"]
+ from_path:
+ description:
+ - The local directory from where to install
+ required: false
+ default: null
+ notest:
+ description:
+ - Do not run unit tests
+ required: false
+ default: false
+ locallib:
+ description:
+ - Specify the install base to install modules
+ required: false
+ default: null
+ mirror:
+ description:
+ - Specifies the base URL for the CPAN mirror to use
+ required: false
+ default: null
+ mirror_only:
+ description:
+ - Use the mirror's index file instead of the CPAN Meta DB
+ required: false
+ default: false
+ installdeps:
+ description:
+ - Only install dependencies
+ required: false
+ default: false
+ version_added: "2.0"
+ version:
+ description:
+ - Minimum version of the Perl module to consider acceptable
+ required: false
+ default: null
+ version_added: "2.1"
+ system_lib:
+ description:
+ - Use this if you want to install modules to the system perl include path. You must be root or have "passwordless" sudo for this to work.
+ - This uses the cpanm commandline option '--sudo', which has nothing to do with ansible privilege escalation.
+ required: false
+ default: false
+ version_added: "2.0"
+ aliases: ['use_sudo']
+ executable:
+ description:
+ - Override the path to the cpanm executable
+ required: false
+ default: null
+ version_added: "2.1"
+notes:
+ - Please note that C(cpanm) must be installed on the remote host. See U(http://search.cpan.org/dist/App-cpanminus/bin/cpanm).
+author: "Franck Cuny (@franckcuny)"
+'''
+
+EXAMPLES = '''
+# install Dancer perl package
+- cpanm: name=Dancer
+
+# install version 0.99_05 of the Plack perl package
+- cpanm: name=MIYAGAWA/Plack-0.99_05.tar.gz
+
+# install Dancer into the specified locallib
+- cpanm: name=Dancer locallib=/srv/webapps/my_app/extlib
+
+# install perl dependencies from local directory
+- cpanm: from_path=/srv/webapps/my_app/src/
+
+# install Dancer perl package without running the unit tests in indicated locallib
+- cpanm: name=Dancer notest=True locallib=/srv/webapps/my_app/extlib
+
+# install Dancer perl package from a specific mirror
+- cpanm: name=Dancer mirror=http://cpan.cpantesters.org/
+
+# install Dancer perl package into the system root path
+- cpanm: name=Dancer system_lib=yes
+
+# install Dancer if it's not already installed
+# OR the installed version is older than version 1.0
+- cpanm: name=Dancer version=1.0
+'''
+
+import os
+
+def _is_package_installed(module, name, locallib, cpanm, version):
+ """Probe for the module with a perl one-liner, e.g. perl -e 'use Dancer 1.0;',
+ which exits non-zero when the module is missing or older than the requested
+ minimum version."""
+ if locallib:
+ os.environ["PERL5LIB"] = "%s/lib/perl5" % locallib
+ cmd = "perl -e 'use %s" % name
+ if version:
+ cmd = "%s %s;'" % (cmd, version)
+ else:
+ cmd = "%s;'" % cmd
+ res, stdout, stderr = module.run_command(cmd, check_rc=False)
+ return res == 0
+
+def _build_cmd_line(name, from_path, notest, locallib, mirror, mirror_only, installdeps, cpanm, use_sudo):
+ # this code should use "%s" like everything else and just return early but not fixing all of it now.
+ # don't copy stuff like this
+ if from_path:
+ cmd = cpanm + " " + from_path
+ else:
+ cmd = cpanm + " " + name
+
+ if notest is True:
+ cmd = cmd + " -n"
+
+ if locallib is not None:
+ cmd = cmd + " -l " + locallib
+
+ if mirror is not None:
+ cmd = cmd + " --mirror " + mirror
+
+ if mirror_only is True:
+ cmd = cmd + " --mirror-only"
+
+ if installdeps is True:
+ cmd = cmd + " --installdeps"
+
+ if use_sudo is True:
+ cmd = cmd + " --sudo"
+
+ return cmd
+
+
+def _get_cpanm_path(module):
+ if module.params['executable']:
+ return module.params['executable']
+ else:
+ return module.get_bin_path('cpanm', True)
+
+
+def main():
+ arg_spec = dict(
+ name=dict(default=None, required=False, aliases=['pkg']),
+ from_path=dict(default=None, required=False, type='path'),
+ notest=dict(default=False, type='bool'),
+ locallib=dict(default=None, required=False, type='path'),
+ mirror=dict(default=None, required=False),
+ mirror_only=dict(default=False, type='bool'),
+ installdeps=dict(default=False, type='bool'),
+ system_lib=dict(default=False, type='bool', aliases=['use_sudo']),
+ version=dict(default=None, required=False),
+ executable=dict(required=False, type='path'),
+ )
+
+ module = AnsibleModule(
+ argument_spec=arg_spec,
+ required_one_of=[['name', 'from_path']],
+ )
+
+ cpanm = _get_cpanm_path(module)
+ name = module.params['name']
+ from_path = module.params['from_path']
+ notest = module.boolean(module.params.get('notest', False))
+ locallib = module.params['locallib']
+ mirror = module.params['mirror']
+ mirror_only = module.params['mirror_only']
+ installdeps = module.params['installdeps']
+ use_sudo = module.params['system_lib']
+ version = module.params['version']
+
+ changed = False
+
+ installed = _is_package_installed(module, name, locallib, cpanm, version)
+
+ if not installed:
+ cmd = _build_cmd_line(name, from_path, notest, locallib, mirror, mirror_only, installdeps, cpanm, use_sudo)
+
+ rc_cpanm, out_cpanm, err_cpanm = module.run_command(cmd, check_rc=False)
+
+ if rc_cpanm != 0:
+ module.fail_json(msg=err_cpanm, cmd=cmd)
+
+ if (err_cpanm.find('is up to date') == -1 and out_cpanm.find('is up to date') == -1):
+ changed = True
+
+ module.exit_json(changed=changed, binary=cpanm, name=name)
+
+# import module snippets
+from ansible.module_utils.basic import *
+
+if __name__ == '__main__':
+ main()
diff --git a/lib/ansible/modules/extras/packaging/language/maven_artifact.py b/lib/ansible/modules/extras/packaging/language/maven_artifact.py
new file mode 100644
index 0000000000..1136f7aaaf
--- /dev/null
+++ b/lib/ansible/modules/extras/packaging/language/maven_artifact.py
@@ -0,0 +1,390 @@
+#!/usr/bin/python
+# -*- coding: utf-8 -*-
+
+# Copyright (c) 2014, Chris Schmidt <chris.schmidt () contrastsecurity.com>
+#
+# Built using https://github.com/hamnis/useful-scripts/blob/master/python/download-maven-artifact
+# as a reference and starting point.
+#
+# This module is free software: you can redistribute it and/or modify
+# it under the terms of the GNU General Public License as published by
+# the Free Software Foundation, either version 3 of the License, or
+# (at your option) any later version.
+#
+# This software is distributed in the hope that it will be useful,
+# but WITHOUT ANY WARRANTY; without even the implied warranty of
+# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+# GNU General Public License for more details.
+#
+# You should have received a copy of the GNU General Public License
+# along with this software. If not, see <http://www.gnu.org/licenses/>.
+
+__author__ = 'cschmidt'
+
+from lxml import etree
+import os
+import hashlib
+import sys
+import posixpath
+from urlparse import urlparse
+from ansible.module_utils.basic import *
+from ansible.module_utils.urls import *
+try:
+ import boto3
+ HAS_BOTO = True
+except ImportError:
+ HAS_BOTO = False
+
+DOCUMENTATION = '''
+---
+module: maven_artifact
+short_description: Downloads an Artifact from a Maven Repository
+version_added: "2.0"
+description:
+ - Downloads an artifact from a maven repository given the maven coordinates provided to the module.
+ - Can retrieve snapshots or release versions of the artifact, and will resolve the latest available version if one is not available.
+author: "Chris Schmidt (@chrisisbeef)"
+requirements:
+ - "python >= 2.6"
+ - lxml
+ - boto3 if using an S3 repository (s3://...)
+options:
+ group_id:
+ description:
+ - The Maven groupId coordinate
+ required: true
+ artifact_id:
+ description:
+ - The maven artifactId coordinate
+ required: true
+ version:
+ description:
+ - The maven version coordinate
+ required: false
+ default: latest
+ classifier:
+ description:
+ - The maven classifier coordinate
+ required: false
+ default: null
+ extension:
+ description:
+ - The maven type/extension coordinate
+ required: false
+ default: jar
+ repository_url:
+ description:
+ - The URL of the Maven Repository to download from.
+ - Use s3://... if the repository is hosted on Amazon S3, added in version 2.2.
+ required: false
+ default: http://repo1.maven.org/maven2
+ username:
+ description:
+ - The username to authenticate as to the Maven Repository. Use the AWS secret key if the repository is hosted on S3
+ required: false
+ default: null
+ aliases: [ "aws_secret_key" ]
+ password:
+ description:
+ - The password to authenticate with to the Maven Repository. Use the AWS secret access key if the repository is hosted on S3
+ required: false
+ default: null
+ aliases: [ "aws_secret_access_key" ]
+ dest:
+ description:
+ - The path where the artifact should be written to
+ required: true
+ state:
+ description:
+ - The desired state of the artifact
+ required: false
+ default: present
+ choices: [present,absent]
+ validate_certs:
+ description:
+ - If C(no), SSL certificates will not be validated. This should only be set to C(no) when no other option exists.
+ required: false
+ default: 'yes'
+ choices: ['yes', 'no']
+ version_added: "1.9.3"
+'''
+
+EXAMPLES = '''
+# Download the latest version of the JUnit framework artifact from Maven Central
+- maven_artifact: group_id=junit artifact_id=junit dest=/tmp/junit-latest.jar
+
+# Download JUnit 4.11 from Maven Central
+- maven_artifact: group_id=junit artifact_id=junit version=4.11 dest=/tmp/junit-4.11.jar
+
+# Download an artifact from a private repository requiring authentication
+- maven_artifact: group_id=com.company artifact_id=library-name repository_url=https://repo.company.com/maven username=user password=pass dest=/tmp/library-name-latest.jar
+
+# Download a WAR File to the Tomcat webapps directory to be deployed
+- maven_artifact: group_id=com.company artifact_id=web-app extension=war repository_url=https://repo.company.com/maven dest=/var/lib/tomcat7/webapps/web-app.war
+'''
+
+class Artifact(object):
+ def __init__(self, group_id, artifact_id, version, classifier=None, extension='jar'):
+ if not group_id:
+ raise ValueError("group_id must be set")
+ if not artifact_id:
+ raise ValueError("artifact_id must be set")
+
+ self.group_id = group_id
+ self.artifact_id = artifact_id
+ self.version = version
+ self.classifier = classifier
+
+ if not extension:
+ self.extension = "jar"
+ else:
+ self.extension = extension
+
+ def is_snapshot(self):
+ return self.version and self.version.endswith("SNAPSHOT")
+
+ def path(self, with_version=True):
+ base = posixpath.join(self.group_id.replace(".", "/"), self.artifact_id)
+ if with_version and self.version:
+ return posixpath.join(base, self.version)
+ else:
+ return base
+
+ def _generate_filename(self):
+ if not self.classifier:
+ return self.artifact_id + "." + self.extension
+ else:
+ return self.artifact_id + "-" + self.classifier + "." + self.extension
+
+ def get_filename(self, filename=None):
+ if not filename:
+ filename = self._generate_filename()
+ elif os.path.isdir(filename):
+ filename = os.path.join(filename, self._generate_filename())
+ return filename
+
+ def __str__(self):
+ if self.classifier:
+ return "%s:%s:%s:%s:%s" % (self.group_id, self.artifact_id, self.extension, self.classifier, self.version)
+ elif self.extension != "jar":
+ return "%s:%s:%s:%s" % (self.group_id, self.artifact_id, self.extension, self.version)
+ else:
+ return "%s:%s:%s" % (self.group_id, self.artifact_id, self.version)
+
+ @staticmethod
+ def parse(input):
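+ """Parse "groupId:artifactId[:extension[:classifier]]:version" maven
+ coordinates, e.g. "junit:junit:4.11" -> Artifact("junit", "junit", "4.11")."""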
+ parts = input.split(":")
+ if len(parts) >= 3:
+ g = parts[0]
+ a = parts[1]
+ v = parts[len(parts) - 1]
+ t = None
+ c = None
+ if len(parts) == 4:
+ t = parts[2]
+ if len(parts) == 5:
+ t = parts[2]
+ c = parts[3]
+ return Artifact(g, a, v, c, t)
+ else:
+ return None
+
+
+class MavenDownloader:
+ def __init__(self, module, base="http://repo1.maven.org/maven2"):
+ self.module = module
+ if base.endswith("/"):
+ base = base.rstrip("/")
+ self.base = base
+ self.user_agent = "Maven Artifact Downloader/1.0"
+
+ def _find_latest_version_available(self, artifact):
+ path = "/%s/maven-metadata.xml" % (artifact.path(False))
+ xml = self._request(self.base + path, "Failed to download maven-metadata.xml", lambda r: etree.parse(r))
+ v = xml.xpath("/metadata/versioning/versions/version[last()]/text()")
+ if v:
+ return v[0]
+
+ def find_uri_for_artifact(self, artifact):
+ if artifact.version == "latest":
+ artifact.version = self._find_latest_version_available(artifact)
+
+ if artifact.is_snapshot():
+ path = "/%s/maven-metadata.xml" % (artifact.path())
+ xml = self._request(self.base + path, "Failed to download maven-metadata.xml", lambda r: etree.parse(r))
+ timestamp = xml.xpath("/metadata/versioning/snapshot/timestamp/text()")[0]
+ buildNumber = xml.xpath("/metadata/versioning/snapshot/buildNumber/text()")[0]
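+ # e.g. version 1.0-SNAPSHOT resolves to 1.0-<timestamp>-<buildNumber>,
+ # as published in the repository's maven-metadata.xml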
+ return self._uri_for_artifact(artifact, artifact.version.replace("SNAPSHOT", timestamp + "-" + buildNumber))
+
+ return self._uri_for_artifact(artifact, artifact.version)
+
+ def _uri_for_artifact(self, artifact, version=None):
+ if artifact.is_snapshot() and not version:
+ raise ValueError("Expected uniqueversion for snapshot artifact " + str(artifact))
+ elif not artifact.is_snapshot():
+ version = artifact.version
+ if artifact.classifier:
+ return posixpath.join(self.base, artifact.path(), artifact.artifact_id + "-" + version + "-" + artifact.classifier + "." + artifact.extension)
+
+ return posixpath.join(self.base, artifact.path(), artifact.artifact_id + "-" + version + "." + artifact.extension)
+
+ def _request(self, url, failmsg, f):
+ url_to_use = url
+ parsed_url = urlparse(url)
+ if parsed_url.scheme == 's3':
+ bucket_name = parsed_url.netloc
+ key_name = parsed_url.path[1:]
+ client = boto3.client('s3', aws_access_key_id=self.module.params.get('username', ''), aws_secret_access_key=self.module.params.get('password', ''))
+ url_to_use = client.generate_presigned_url('get_object', Params={'Bucket': bucket_name, 'Key': key_name}, ExpiresIn=10)
+
+ # Hack to add parameters in the way that fetch_url expects
+ self.module.params['url_username'] = self.module.params.get('username', '')
+ self.module.params['url_password'] = self.module.params.get('password', '')
+ self.module.params['http_agent'] = self.module.params.get('user_agent', None)
+
+ response, info = fetch_url(self.module, url_to_use)
+ if info['status'] != 200:
+ raise ValueError(failmsg + " because of " + info['msg'] + " for URL " + url_to_use)
+ else:
+ return f(response)
+
+
+ def download(self, artifact, filename=None):
+ filename = artifact.get_filename(filename)
+ if not artifact.version or artifact.version == "latest":
+ artifact = Artifact(artifact.group_id, artifact.artifact_id, self._find_latest_version_available(artifact),
+ artifact.classifier, artifact.extension)
+
+ url = self.find_uri_for_artifact(artifact)
+ if not self.verify_md5(filename, url + ".md5"):
+ response = self._request(url, "Failed to download artifact " + str(artifact), lambda r: r)
+ if response:
+ f = open(filename, 'wb')
+ self._write_chunks(response, f, report_hook=self.chunk_report)
+ f.close()
+ return True
+ else:
+ return False
+ else:
+ return True
+
+ def chunk_report(self, bytes_so_far, chunk_size, total_size):
+ percent = float(bytes_so_far) / total_size
+ percent = round(percent * 100, 2)
+ sys.stdout.write("Downloaded %d of %d bytes (%0.2f%%)\r" %
+ (bytes_so_far, total_size, percent))
+
+ if bytes_so_far >= total_size:
+ sys.stdout.write('\n')
+
+ def _write_chunks(self, response, file, chunk_size=8192, report_hook=None):
+ total_size = response.info().getheader('Content-Length').strip()
+ total_size = int(total_size)
+ bytes_so_far = 0
+
+ while True:
+ chunk = response.read(chunk_size)
+ bytes_so_far += len(chunk)
+
+ if not chunk:
+ break
+
+ file.write(chunk)
+ if report_hook:
+ report_hook(bytes_so_far, chunk_size, total_size)
+
+ return bytes_so_far
+
+ def verify_md5(self, file, remote_md5):
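+ """Compare the local file's MD5 with the .md5 checksum published next to
+ the artifact in the repository; returns False if the file does not exist."""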
+ if not os.path.exists(file):
+ return False
+ else:
+ local_md5 = self._local_md5(file)
+ remote = self._request(remote_md5, "Failed to download MD5", lambda r: r.read())
+ return local_md5 == remote
+
+ def _local_md5(self, file):
+ md5 = hashlib.md5()
+ f = open(file, 'rb')
+ for chunk in iter(lambda: f.read(8192), ''):
+ md5.update(chunk)
+ f.close()
+ return md5.hexdigest()
+
+
+def main():
+ module = AnsibleModule(
+ argument_spec = dict(
+ group_id = dict(required=True),
+ artifact_id = dict(required=True),
+ version = dict(default="latest"),
+ classifier = dict(default=None),
+ extension = dict(default='jar'),
+ repository_url = dict(default=None),
+ username = dict(default=None,aliases=['aws_secret_key']),
+ password = dict(default=None, no_log=True,aliases=['aws_secret_access_key']),
+ state = dict(default="present", choices=["present","absent"]), # TODO - Implement a "latest" state
+ dest = dict(type="path", default=None),
+ validate_certs = dict(required=False, default=True, type='bool'),
+ )
+ )
+
+ try:
+ parsed_url = urlparse(module.params["repository_url"])
+ except AttributeError as e:
+ module.fail_json(msg='url parsing went wrong %s' % e)
+
+ if parsed_url.scheme=='s3' and not HAS_BOTO:
+ module.fail_json(msg='boto3 required for this module, when using s3:// repository URLs')
+
+ group_id = module.params["group_id"]
+ artifact_id = module.params["artifact_id"]
+ version = module.params["version"]
+ classifier = module.params["classifier"]
+ extension = module.params["extension"]
+ repository_url = module.params["repository_url"]
+ repository_username = module.params["username"]
+ repository_password = module.params["password"]
+ state = module.params["state"]
+ dest = module.params["dest"]
+
+ if not repository_url:
+ repository_url = "http://repo1.maven.org/maven2"
+
+ #downloader = MavenDownloader(module, repository_url, repository_username, repository_password)
+ downloader = MavenDownloader(module, repository_url)
+
+ try:
+ artifact = Artifact(group_id, artifact_id, version, classifier, extension)
+ except ValueError as e:
+ module.fail_json(msg=e.args[0])
+
+ prev_state = "absent"
+ if os.path.isdir(dest):
+ dest = posixpath.join(dest, artifact_id + "-" + version + "." + extension)
+ if os.path.lexists(dest) and downloader.verify_md5(dest, downloader.find_uri_for_artifact(artifact) + '.md5'):
+ prev_state = "present"
+ else:
+ path = os.path.dirname(dest)
+ if not os.path.exists(path):
+ os.makedirs(path)
+
+ if prev_state == "present":
+ module.exit_json(dest=dest, state=state, changed=False)
+
+ try:
+ if downloader.download(artifact, dest):
+ module.exit_json(state=state, dest=dest, group_id=group_id, artifact_id=artifact_id, version=version, classifier=classifier, extension=extension, repository_url=repository_url, changed=True)
+ else:
+ module.fail_json(msg="Unable to download the artifact")
+ except ValueError as e:
+ module.fail_json(msg=e.args[0])
+
+
+
+if __name__ == '__main__':
+ main()
diff --git a/lib/ansible/modules/extras/packaging/language/npm.py b/lib/ansible/modules/extras/packaging/language/npm.py
new file mode 100644
index 0000000000..e15bbea903
--- /dev/null
+++ b/lib/ansible/modules/extras/packaging/language/npm.py
@@ -0,0 +1,271 @@
+#!/usr/bin/python
+# -*- coding: utf-8 -*-
+
+# (c) 2013, Chris Hoffman <christopher.hoffman@gmail.com>
+#
+# This file is part of Ansible
+#
+# Ansible is free software: you can redistribute it and/or modify
+# it under the terms of the GNU General Public License as published by
+# the Free Software Foundation, either version 3 of the License, or
+# (at your option) any later version.
+#
+# Ansible is distributed in the hope that it will be useful,
+# but WITHOUT ANY WARRANTY; without even the implied warranty of
+# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+# GNU General Public License for more details.
+#
+# You should have received a copy of the GNU General Public License
+# along with Ansible. If not, see <http://www.gnu.org/licenses/>.
+
+DOCUMENTATION = '''
+---
+module: npm
+short_description: Manage node.js packages with npm
+description:
+ - Manage node.js packages with Node Package Manager (npm)
+version_added: 1.2
+author: "Chris Hoffman (@chrishoffman)"
+options:
+ name:
+ description:
+ - The name of a node.js library to install
+ required: false
+ path:
+ description:
+ - The base path where the node.js libraries will be installed
+ required: false
+ version:
+ description:
+ - The version to be installed
+ required: false
+ global:
+ description:
+ - Install the node.js library globally
+ required: false
+ default: no
+ choices: [ "yes", "no" ]
+ executable:
+ description:
+ - The executable location for npm.
+ - This is useful if you are using a version manager, such as nvm
+ required: false
+ ignore_scripts:
+ description:
+ - Use the --ignore-scripts flag when installing.
+ required: false
+ choices: [ "yes", "no" ]
+ default: no
+ version_added: "1.8"
+ production:
+ description:
+ - Install dependencies in production mode, excluding devDependencies
+ required: false
+ choices: [ "yes", "no" ]
+ default: no
+ registry:
+ description:
+ - The registry to install modules from.
+ required: false
+ version_added: "1.6"
+ state:
+ description:
+ - The state of the node.js library
+ required: false
+ default: present
+ choices: [ "present", "absent", "latest" ]
+'''
+
+EXAMPLES = '''
+description: Install "coffee-script" node.js package.
+- npm: name=coffee-script path=/app/location
+
+description: Install "coffee-script" node.js package on version 1.6.1.
+- npm: name=coffee-script version=1.6.1 path=/app/location
+
+description: Install "coffee-script" node.js package globally.
+- npm: name=coffee-script global=yes
+
+description: Remove the globally package "coffee-script".
+- npm: name=coffee-script global=yes state=absent
+
+description: Install "coffee-script" node.js package from custom registry.
+- npm: name=coffee-script registry=http://registry.mysite.com
+
+description: Install packages based on package.json.
+- npm: path=/app/location
+
+description: Update packages based on package.json to their latest version.
+- npm: path=/app/location state=latest
+
+description: Install packages based on package.json using the npm installed with nvm v0.10.1.
+- npm: path=/app/location executable=/opt/nvm/v0.10.1/bin/npm state=present
+'''
+
+import os
+import re
+
+try:
+ import json
+except ImportError:
+ try:
+ import simplejson as json
+ except ImportError:
+ # Let snippet from module_utils/basic.py return a proper error in this case
+ pass
+
+
+class Npm(object):
+ def __init__(self, module, **kwargs):
+ self.module = module
+ self.glbl = kwargs['glbl']
+ self.name = kwargs['name']
+ self.version = kwargs['version']
+ self.path = kwargs['path']
+ self.registry = kwargs['registry']
+ self.production = kwargs['production']
+ self.ignore_scripts = kwargs['ignore_scripts']
+
+ if kwargs['executable']:
+ self.executable = kwargs['executable'].split(' ')
+ else:
+ self.executable = [module.get_bin_path('npm', True)]
+
+ if kwargs['version']:
+ self.name_version = self.name + '@' + str(self.version)
+ else:
+ self.name_version = self.name
+
+ def _exec(self, args, run_in_check_mode=False, check_rc=True):
+ if not self.module.check_mode or (self.module.check_mode and run_in_check_mode):
+ cmd = self.executable + args
+
+ if self.glbl:
+ cmd.append('--global')
+ if self.production:
+ cmd.append('--production')
+ if self.ignore_scripts:
+ cmd.append('--ignore-scripts')
+ if self.name:
+ cmd.append(self.name_version)
+ if self.registry:
+ cmd.append('--registry')
+ cmd.append(self.registry)
+
+ # If path is specified, cd into that path and run the command.
+ cwd = None
+ if self.path:
+ if not os.path.exists(self.path):
+ os.makedirs(self.path)
+ if not os.path.isdir(self.path):
+ self.module.fail_json(msg="path %s is not a directory" % self.path)
+ cwd = self.path
+
+ rc, out, err = self.module.run_command(cmd, check_rc=check_rc, cwd=cwd)
+ return out
+ return ''
+
+ def list(self):
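+ """Parse "npm list --json" and return (installed, missing) package name
+ lists; dependencies marked missing or invalid count as missing."""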
+ cmd = ['list', '--json']
+
+ installed = list()
+ missing = list()
+ data = json.loads(self._exec(cmd, True, False))
+ if 'dependencies' in data:
+ for dep in data['dependencies']:
+ if 'missing' in data['dependencies'][dep] and data['dependencies'][dep]['missing']:
+ missing.append(dep)
+ elif 'invalid' in data['dependencies'][dep] and data['dependencies'][dep]['invalid']:
+ missing.append(dep)
+ else:
+ installed.append(dep)
+ if self.name and self.name not in installed:
+ missing.append(self.name)
+ # Named dependency not installed
+ else:
+ missing.append(self.name)
+
+ return installed, missing
+
+ def install(self):
+ return self._exec(['install'])
+
+ def update(self):
+ return self._exec(['update'])
+
+ def uninstall(self):
+ return self._exec(['uninstall'])
+
+ def list_outdated(self):
+ outdated = list()
+ data = self._exec(['outdated'], True, False)
+ for dep in data.splitlines():
+ if dep:
+ # node.js v0.10.22 changed the `npm outdated` module separator
+ # from "@" to " ". Split on both for backwards compatibility.
+ pkg, other = re.split(r'\s|@', dep, 1)
+ outdated.append(pkg)
+
+ return outdated
+
+
+def main():
+ arg_spec = dict(
+ name=dict(default=None),
+ path=dict(default=None, type='path'),
+ version=dict(default=None),
+ production=dict(default='no', type='bool'),
+ executable=dict(default=None, type='path'),
+ registry=dict(default=None),
+ state=dict(default='present', choices=['present', 'absent', 'latest']),
+ ignore_scripts=dict(default=False, type='bool'),
+ )
+ arg_spec['global'] = dict(default='no', type='bool')
+ module = AnsibleModule(
+ argument_spec=arg_spec,
+ supports_check_mode=True
+ )
+
+ name = module.params['name']
+ path = module.params['path']
+ version = module.params['version']
+ glbl = module.params['global']
+ production = module.params['production']
+ executable = module.params['executable']
+ registry = module.params['registry']
+ state = module.params['state']
+ ignore_scripts = module.params['ignore_scripts']
+
+ if not path and not glbl:
+ module.fail_json(msg='path must be specified when not using global')
+ if state == 'absent' and not name:
+ module.fail_json(msg='uninstalling a package is only available for named packages')
+
+ npm = Npm(module, name=name, path=path, version=version, glbl=glbl, production=production, \
+ executable=executable, registry=registry, ignore_scripts=ignore_scripts)
+
+ changed = False
+ if state == 'present':
+ installed, missing = npm.list()
+ if len(missing):
+ changed = True
+ npm.install()
+ elif state == 'latest':
+ installed, missing = npm.list()
+ outdated = npm.list_outdated()
+ if len(missing):
+ changed = True
+ npm.install()
+ if len(outdated):
+ changed = True
+ npm.update()
+    else:  # absent
+ installed, missing = npm.list()
+ if name in installed:
+ changed = True
+ npm.uninstall()
+
+ module.exit_json(changed=changed)
+
+# import module snippets
+from ansible.module_utils.basic import *
+
+if __name__ == '__main__':
+    main()
diff --git a/lib/ansible/modules/extras/packaging/language/pear.py b/lib/ansible/modules/extras/packaging/language/pear.py
new file mode 100644
index 0000000000..5762f9c815
--- /dev/null
+++ b/lib/ansible/modules/extras/packaging/language/pear.py
@@ -0,0 +1,227 @@
+#!/usr/bin/python -tt
+# -*- coding: utf-8 -*-
+
+# (c) 2012, Afterburn <http://github.com/afterburn>
+# (c) 2013, Aaron Bull Schaefer <aaron@elasticdog.com>
+# (c) 2015, Jonathan Lestrelin <jonathan.lestrelin@gmail.com>
+#
+# This file is part of Ansible
+#
+# Ansible is free software: you can redistribute it and/or modify
+# it under the terms of the GNU General Public License as published by
+# the Free Software Foundation, either version 3 of the License, or
+# (at your option) any later version.
+#
+# Ansible is distributed in the hope that it will be useful,
+# but WITHOUT ANY WARRANTY; without even the implied warranty of
+# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+# GNU General Public License for more details.
+#
+# You should have received a copy of the GNU General Public License
+# along with Ansible. If not, see <http://www.gnu.org/licenses/>.
+
+DOCUMENTATION = '''
+---
+module: pear
+short_description: Manage pear/pecl packages
+description:
+ - Manage PHP packages with the pear package manager.
+version_added: 2.0
+author:
+ - "'jonathan.lestrelin' <jonathan.lestrelin@gmail.com>"
+options:
+ name:
+ description:
+ - Name of the package to install, upgrade, or remove.
+ required: true
+
+ state:
+ description:
+ - Desired state of the package.
+ required: false
+ default: "present"
+ choices: ["present", "absent", "latest"]
+'''
+
+EXAMPLES = '''
+# Install pear package
+- pear: name=Net_URL2 state=present
+
+# Install pecl package
+- pear: name=pecl/json_post state=present
+
+# Upgrade package
+- pear: name=Net_URL2 state=latest
+
+# Remove packages
+- pear: name=Net_URL2,pecl/json_post state=absent
+'''
+
+import os
+
+def get_local_version(pear_output):
+ """Take pear remoteinfo output and get the installed version"""
+ lines = pear_output.split('\n')
+ for line in lines:
+ if 'Installed ' in line:
+ installed = line.rsplit(None, 1)[-1].strip()
+ if installed == '-': continue
+ return installed
+ return None
+
+def get_repository_version(pear_output):
+ """Take pear remote-info output and get the latest version"""
+ lines = pear_output.split('\n')
+ for line in lines:
+ if 'Latest ' in line:
+ return line.rsplit(None, 1)[-1].strip()
+ return None
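+
+# The two parsers above scan `pear remote-info <pkg>` output, which contains
+# lines of the (illustrative, assumed) form:
+#   Latest      2.2.1
+#   Installed   2.2.0   (or '-' when the package is not installed locally)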
+
+def query_package(module, name, state="present"):
+ """Query the package status in both the local system and the repository.
+ Returns a boolean to indicate if the package is installed,
+ and a second boolean to indicate if the package is up-to-date."""
+ if state == "present":
+ lcmd = "pear info %s" % (name)
+ lrc, lstdout, lstderr = module.run_command(lcmd, check_rc=False)
+ if lrc != 0:
+ # package is not installed locally
+ return False, False
+
+ rcmd = "pear remote-info %s" % (name)
+ rrc, rstdout, rstderr = module.run_command(rcmd, check_rc=False)
+
+ # get the version installed locally (if any)
+ lversion = get_local_version(rstdout)
+
+ # get the version in the repository
+ rversion = get_repository_version(rstdout)
+
+ if rrc == 0:
+ # Return True to indicate that the package is installed locally,
+ # and the result of the version number comparison
+ # to determine if the package is up-to-date.
+ return True, (lversion == rversion)
+
+ return False, False
+
+
+def remove_packages(module, packages):
+ remove_c = 0
+    # Remove packages one at a time so that, in case of error, we can report
+    # which package failed
+ for package in packages:
+ # Query the package first, to see if we even need to remove
+ installed, updated = query_package(module, package)
+ if not installed:
+ continue
+
+ cmd = "pear uninstall %s" % (package)
+ rc, stdout, stderr = module.run_command(cmd, check_rc=False)
+
+ if rc != 0:
+ module.fail_json(msg="failed to remove %s" % (package))
+
+ remove_c += 1
+
+    if remove_c > 0:
+        module.exit_json(changed=True, msg="removed %s package(s)" % remove_c)
+
+ module.exit_json(changed=False, msg="package(s) already absent")
+
+
+def install_packages(module, state, packages):
+ install_c = 0
+
+ for i, package in enumerate(packages):
+ # if the package is installed and state == present
+ # or state == latest and is up-to-date then skip
+ installed, updated = query_package(module, package)
+ if installed and (state == 'present' or (state == 'latest' and updated)):
+ continue
+
+ if state == 'present':
+ command = 'install'
+
+ if state == 'latest':
+ command = 'upgrade'
+
+ cmd = "pear %s %s" % (command, package)
+ rc, stdout, stderr = module.run_command(cmd, check_rc=False)
+
+ if rc != 0:
+ module.fail_json(msg="failed to install %s" % (package))
+
+ install_c += 1
+
+ if install_c > 0:
+ module.exit_json(changed=True, msg="installed %s package(s)" % (install_c))
+
+ module.exit_json(changed=False, msg="package(s) already installed")
+
+
+def check_packages(module, packages, state):
+ would_be_changed = []
+ for package in packages:
+ installed, updated = query_package(module, package)
+ if ((state in ["present", "latest"] and not installed) or
+ (state == "absent" and installed) or
+ (state == "latest" and not updated)):
+ would_be_changed.append(package)
+ if would_be_changed:
+ if state == "absent":
+ state = "removed"
+ module.exit_json(changed=True, msg="%s package(s) would be %s" % (
+ len(would_be_changed), state))
+ else:
+        module.exit_json(changed=False, msg="package(s) already %s" % state)
+
+
+def exe_exists(program):
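+    """Return True if `program` is an executable file on the PATH."""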
+ for path in os.environ["PATH"].split(os.pathsep):
+ path = path.strip('"')
+ exe_file = os.path.join(path, program)
+ if os.path.isfile(exe_file) and os.access(exe_file, os.X_OK):
+ return True
+
+ return False
+
+
+def main():
+ module = AnsibleModule(
+ argument_spec = dict(
+ name = dict(aliases=['pkg']),
+ state = dict(default='present', choices=['present', 'installed', "latest", 'absent', 'removed'])),
+ required_one_of = [['name']],
+ supports_check_mode = True)
+
+ if not exe_exists("pear"):
+ module.fail_json(msg="cannot find pear executable in PATH")
+
+ p = module.params
+
+ # normalize the state parameter
+ if p['state'] in ['present', 'installed']:
+ p['state'] = 'present'
+ elif p['state'] in ['absent', 'removed']:
+ p['state'] = 'absent'
+
+ if p['name']:
+ pkgs = p['name'].split(',')
+
+ pkg_files = []
+ for i, pkg in enumerate(pkgs):
+ pkg_files.append(None)
+
+ if module.check_mode:
+ check_packages(module, pkgs, p['state'])
+
+ if p['state'] in ['present', 'latest']:
+ install_packages(module, p['state'], pkgs)
+ elif p['state'] == 'absent':
+ remove_packages(module, pkgs)
+
+# import module snippets
+from ansible.module_utils.basic import *
+
+if __name__ == '__main__':
+    main()
diff --git a/lib/ansible/modules/extras/packaging/os/__init__.py b/lib/ansible/modules/extras/packaging/os/__init__.py
new file mode 100644
index 0000000000..e69de29bb2
--- /dev/null
+++ b/lib/ansible/modules/extras/packaging/os/__init__.py
diff --git a/lib/ansible/modules/extras/packaging/os/apk.py b/lib/ansible/modules/extras/packaging/os/apk.py
new file mode 100644
index 0000000000..911e50e094
--- /dev/null
+++ b/lib/ansible/modules/extras/packaging/os/apk.py
@@ -0,0 +1,248 @@
+#!/usr/bin/python
+# -*- coding: utf-8 -*-
+
+# (c) 2015, Kevin Brebanov <https://github.com/kbrebanov>
+# Based on pacman (Afterburn <http://github.com/afterburn>, Aaron Bull Schaefer <aaron@elasticdog.com>)
+# and apt (Matthew Williams <matthew@flowroute.com>) modules.
+#
+# This module is free software: you can redistribute it and/or modify
+# it under the terms of the GNU General Public License as published by
+# the Free Software Foundation, either version 3 of the License, or
+# (at your option) any later version.
+#
+# This software is distributed in the hope that it will be useful,
+# but WITHOUT ANY WARRANTY; without even the implied warranty of
+# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+# GNU General Public License for more details.
+#
+# You should have received a copy of the GNU General Public License
+# along with this software. If not, see <http://www.gnu.org/licenses/>.
+
+DOCUMENTATION = '''
+---
+module: apk
+short_description: Manages apk packages
+description:
+ - Manages I(apk) packages for Alpine Linux.
+version_added: "2.0"
+options:
+ name:
+ description:
+      - A package name, like C(foo), or multiple packages, like C(foo,bar).
+ required: false
+ default: null
+ state:
+ description:
+ - Indicates the desired package(s) state.
+ - C(present) ensures the package(s) is/are present.
+ - C(absent) ensures the package(s) is/are absent.
+ - C(latest) ensures the package(s) is/are present and the latest version(s).
+ required: false
+ default: present
+ choices: [ "present", "absent", "latest" ]
+ update_cache:
+ description:
+      - Update repository indexes. Can be run with other steps or on its own.
+ required: false
+ default: no
+ choices: [ "yes", "no" ]
+ upgrade:
+ description:
+ - Upgrade all installed packages to their latest version.
+ required: false
+ default: no
+ choices: [ "yes", "no" ]
+notes:
+ - '"name" and "upgrade" are mutually exclusive.'
+'''
+
+EXAMPLES = '''
+# Update repositories and install "foo" package
+- apk: name=foo update_cache=yes
+
+# Update repositories and install "foo" and "bar" packages
+- apk: name=foo,bar update_cache=yes
+
+# Remove "foo" package
+- apk: name=foo state=absent
+
+# Remove "foo" and "bar" packages
+- apk: name=foo,bar state=absent
+
+# Install the package "foo"
+- apk: name=foo state=present
+
+# Install the packages "foo" and "bar"
+- apk: name=foo,bar state=present
+
+# Update repositories and update package "foo" to latest version
+- apk: name=foo state=latest update_cache=yes
+
+# Update repositories and update packages "foo" and "bar" to latest versions
+- apk: name=foo,bar state=latest update_cache=yes
+
+# Update all installed packages to the latest versions
+- apk: upgrade=yes
+
+# Update repositories as a separate step
+- apk: update_cache=yes
+'''
+
+import os
+import re
+
+def update_package_db(module):
+ cmd = "%s update" % (APK_PATH)
+ rc, stdout, stderr = module.run_command(cmd, check_rc=False)
+ if rc == 0:
+ return True
+ else:
+ module.fail_json(msg="could not update package db")
+
+def query_package(module, name):
+ cmd = "%s -v info --installed %s" % (APK_PATH, name)
+ rc, stdout, stderr = module.run_command(cmd, check_rc=False)
+ if rc == 0:
+ return True
+ else:
+ return False
+
+def query_latest(module, name):
+ cmd = "%s version %s" % (APK_PATH, name)
+ rc, stdout, stderr = module.run_command(cmd, check_rc=False)
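+    # Relies on `apk version <name>` printing lines of the form
+    # "<name>-<ver>-<rel>  <status>  <candidate>"; a "<" status means a newer
+    # version is available in the repositories.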
+ search_pattern = "(%s)-[\d\.\w]+-[\d\w]+\s+(.)\s+[\d\.\w]+-[\d\w]+\s+" % (name)
+ match = re.search(search_pattern, stdout)
+ if match and match.group(2) == "<":
+ return False
+ return True
+
+def query_virtual(module, name):
+ cmd = "%s -v info --description %s" % (APK_PATH, name)
+ rc, stdout, stderr = module.run_command(cmd, check_rc=False)
+ search_pattern = "^%s: virtual meta package" % (name)
+ if re.search(search_pattern, stdout):
+ return True
+ return False
+
+def get_dependencies(module, name):
+ cmd = "%s -v info --depends %s" % (APK_PATH, name)
+ rc, stdout, stderr = module.run_command(cmd, check_rc=False)
+ dependencies = stdout.split()
+ if len(dependencies) > 1:
+ return dependencies[1:]
+ else:
+ return []
+
+def upgrade_packages(module):
+ if module.check_mode:
+ cmd = "%s upgrade --simulate" % (APK_PATH)
+ else:
+ cmd = "%s upgrade" % (APK_PATH)
+ rc, stdout, stderr = module.run_command(cmd, check_rc=False)
+ if rc != 0:
+ module.fail_json(msg="failed to upgrade packages")
+ if re.search('^OK', stdout):
+ module.exit_json(changed=False, msg="packages already upgraded")
+ module.exit_json(changed=True, msg="upgraded packages")
+
+def install_packages(module, names, state):
+ upgrade = False
+ to_install = []
+ to_upgrade = []
+ for name in names:
+ # Check if virtual package
+ if query_virtual(module, name):
+ # Get virtual package dependencies
+ dependencies = get_dependencies(module, name)
+ for dependency in dependencies:
+ if state == 'latest' and not query_latest(module, dependency):
+ to_upgrade.append(dependency)
+ else:
+ if not query_package(module, name):
+ to_install.append(name)
+ elif state == 'latest' and not query_latest(module, name):
+ to_upgrade.append(name)
+ if to_upgrade:
+ upgrade = True
+ if not to_install and not upgrade:
+ module.exit_json(changed=False, msg="package(s) already installed")
+ packages = " ".join(to_install) + " ".join(to_upgrade)
+ if upgrade:
+ if module.check_mode:
+ cmd = "%s add --upgrade --simulate %s" % (APK_PATH, packages)
+ else:
+ cmd = "%s add --upgrade %s" % (APK_PATH, packages)
+ else:
+ if module.check_mode:
+ cmd = "%s add --simulate %s" % (APK_PATH, packages)
+ else:
+ cmd = "%s add %s" % (APK_PATH, packages)
+ rc, stdout, stderr = module.run_command(cmd, check_rc=False)
+ if rc != 0:
+ module.fail_json(msg="failed to install %s" % (packages))
+ module.exit_json(changed=True, msg="installed %s package(s)" % (packages))
+
+def remove_packages(module, names):
+ installed = []
+ for name in names:
+ if query_package(module, name):
+ installed.append(name)
+ if not installed:
+ module.exit_json(changed=False, msg="package(s) already removed")
+ names = " ".join(installed)
+ if module.check_mode:
+ cmd = "%s del --purge --simulate %s" % (APK_PATH, names)
+ else:
+ cmd = "%s del --purge %s" % (APK_PATH, names)
+ rc, stdout, stderr = module.run_command(cmd, check_rc=False)
+ if rc != 0:
+ module.fail_json(msg="failed to remove %s package(s)" % (names))
+ module.exit_json(changed=True, msg="removed %s package(s)" % (names))
+
+# ==========================================
+# Main control flow.
+
+def main():
+ module = AnsibleModule(
+ argument_spec = dict(
+ state = dict(default='present', choices=['present', 'installed', 'absent', 'removed', 'latest']),
+ name = dict(type='list'),
+ update_cache = dict(default='no', type='bool'),
+ upgrade = dict(default='no', type='bool'),
+ ),
+ required_one_of = [['name', 'update_cache', 'upgrade']],
+ mutually_exclusive = [['name', 'upgrade']],
+ supports_check_mode = True
+ )
+
+ # Set LANG env since we parse stdout
+ module.run_command_environ_update = dict(LANG='C', LC_ALL='C', LC_MESSAGES='C', LC_CTYPE='C')
+
+ global APK_PATH
+ APK_PATH = module.get_bin_path('apk', required=True)
+
+ p = module.params
+
+ # normalize the state parameter
+ if p['state'] in ['present', 'installed']:
+ p['state'] = 'present'
+ if p['state'] in ['absent', 'removed']:
+ p['state'] = 'absent'
+
+ if p['update_cache']:
+ update_package_db(module)
+ if not p['name']:
+ module.exit_json(changed=True, msg='updated repository indexes')
+
+ if p['upgrade']:
+ upgrade_packages(module)
+
+ if p['state'] in ['present', 'latest']:
+ install_packages(module, p['name'], p['state'])
+ elif p['state'] == 'absent':
+ remove_packages(module, p['name'])
+
+# Import module snippets.
+from ansible.module_utils.basic import *
+if __name__ == '__main__':
+ main()
diff --git a/lib/ansible/modules/extras/packaging/os/dnf.py b/lib/ansible/modules/extras/packaging/os/dnf.py
new file mode 100644
index 0000000000..8df9401fa1
--- /dev/null
+++ b/lib/ansible/modules/extras/packaging/os/dnf.py
@@ -0,0 +1,355 @@
+#!/usr/bin/python
+# -*- coding: utf-8 -*-
+
+# Copyright 2015 Cristian van Ee <cristian at cvee.org>
+# Copyright 2015 Igor Gnatenko <i.gnatenko.brain@gmail.com>
+#
+# This file is part of Ansible
+#
+# Ansible is free software: you can redistribute it and/or modify
+# it under the terms of the GNU General Public License as published by
+# the Free Software Foundation, either version 3 of the License, or
+# (at your option) any later version.
+#
+# Ansible is distributed in the hope that it will be useful,
+# but WITHOUT ANY WARRANTY; without even the implied warranty of
+# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+# GNU General Public License for more details.
+#
+# You should have received a copy of the GNU General Public License
+# along with Ansible. If not, see <http://www.gnu.org/licenses/>.
+#
+
+DOCUMENTATION = '''
+---
+module: dnf
+version_added: 1.9
+short_description: Manages packages with the I(dnf) package manager
+description:
+  - Installs, upgrades, removes, and lists packages and groups with the I(dnf) package manager.
+options:
+ name:
+ description:
+ - "Package name, or package specifier with version, like C(name-1.0). When using state=latest, this can be '*' which means run: dnf -y update. You can also pass a url or a local path to a rpm file."
+ required: true
+ default: null
+ aliases: []
+
+ list:
+ description:
+ - Various (non-idempotent) commands for usage with C(/usr/bin/ansible) and I(not) playbooks. See examples.
+ required: false
+ default: null
+
+ state:
+ description:
+ - Whether to install (C(present), C(latest)), or remove (C(absent)) a package.
+ required: false
+ choices: [ "present", "latest", "absent" ]
+ default: "present"
+
+ enablerepo:
+ description:
+ - I(Repoid) of repositories to enable for the install/update operation.
+ These repos will not persist beyond the transaction.
+ When specifying multiple repos, separate them with a ",".
+ required: false
+ default: null
+ aliases: []
+
+ disablerepo:
+ description:
+ - I(Repoid) of repositories to disable for the install/update operation.
+ These repos will not persist beyond the transaction.
+ When specifying multiple repos, separate them with a ",".
+ required: false
+ default: null
+ aliases: []
+
+ conf_file:
+ description:
+ - The remote dnf configuration file to use for the transaction.
+ required: false
+ default: null
+ aliases: []
+
+ disable_gpg_check:
+ description:
+ - Whether to disable the GPG checking of signatures of packages being
+ installed. Has an effect only if state is I(present) or I(latest).
+ required: false
+ default: "no"
+ choices: ["yes", "no"]
+ aliases: []
+
+notes: []
+# informational: requirements for nodes
+requirements:
+ - "python >= 2.6"
+ - python-dnf
+author:
+ - '"Igor Gnatenko (@ignatenkobrain)" <i.gnatenko.brain@gmail.com>'
+ - '"Cristian van Ee (@DJMuggs)" <cristian at cvee.org>'
+'''
+
+EXAMPLES = '''
+- name: install the latest version of Apache
+ dnf: name=httpd state=latest
+
+- name: remove the Apache package
+ dnf: name=httpd state=absent
+
+- name: install the latest version of Apache from the testing repo
+ dnf: name=httpd enablerepo=testing state=present
+
+- name: upgrade all packages
+ dnf: name=* state=latest
+
+- name: install the nginx rpm from a remote repo
+ dnf: name=http://nginx.org/packages/centos/6/noarch/RPMS/nginx-release-centos-6-0.el6.ngx.noarch.rpm state=present
+
+- name: install nginx rpm from a local file
+ dnf: name=/usr/local/src/nginx-release-centos-6-0.el6.ngx.noarch.rpm state=present
+
+- name: install the 'Development tools' package group
+ dnf: name="@Development tools" state=present
+
+'''
+import os
+
+try:
+ import dnf
+ from dnf import cli, const, exceptions, subject, util
+ HAS_DNF = True
+except ImportError:
+ HAS_DNF = False
+
+
+def _fail_if_no_dnf(module):
+ """Fail if unable to import dnf."""
+ if not HAS_DNF:
+ module.fail_json(
+ msg="`python2-dnf` is not installed, but it is required for the Ansible dnf module.")
+
+
+def _configure_base(module, base, conf_file, disable_gpg_check):
+ """Configure the dnf Base object."""
+ conf = base.conf
+
+ # Turn off debug messages in the output
+ conf.debuglevel = 0
+
+ # Set whether to check gpg signatures
+ conf.gpgcheck = not disable_gpg_check
+
+ # Don't prompt for user confirmations
+ conf.assumeyes = True
+
+ # Change the configuration file path if provided
+ if conf_file:
+ # Fail if we can't read the configuration file.
+ if not os.access(conf_file, os.R_OK):
+ module.fail_json(
+ msg="cannot read configuration file", conf_file=conf_file)
+ else:
+ conf.config_file_path = conf_file
+
+ # Read the configuration file
+ conf.read()
+
+
+def _specify_repositories(base, disablerepo, enablerepo):
+ """Enable and disable repositories matching the provided patterns."""
+ base.read_all_repos()
+ repos = base.repos
+
+ # Disable repositories
+ for repo_pattern in disablerepo:
+ for repo in repos.get_matching(repo_pattern):
+ repo.disable()
+
+ # Enable repositories
+ for repo_pattern in enablerepo:
+ for repo in repos.get_matching(repo_pattern):
+ repo.enable()
+
+
+def _base(module, conf_file, disable_gpg_check, disablerepo, enablerepo):
+ """Return a fully configured dnf Base object."""
+ base = dnf.Base()
+ _configure_base(module, base, conf_file, disable_gpg_check)
+ _specify_repositories(base, disablerepo, enablerepo)
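+    # fill_sack() loads the repository metadata into the sack; this is the
+    # slow step of setting up the Base object.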
+ base.fill_sack()
+ return base
+
+
+def _package_dict(package):
+ """Return a dictionary of information for the package."""
+ # NOTE: This no longer contains the 'dnfstate' field because it is
+ # already known based on the query type.
+ result = {
+ 'name': package.name,
+ 'arch': package.arch,
+ 'epoch': str(package.epoch),
+ 'release': package.release,
+ 'version': package.version,
+ 'repo': package.repoid}
+ result['nevra'] = '{epoch}:{name}-{version}-{release}.{arch}'.format(
+ **result)
+
+ return result
+
+
+def list_items(module, base, command):
+ """List package info based on the command."""
+ # Rename updates to upgrades
+ if command == 'updates':
+ command = 'upgrades'
+
+ # Return the corresponding packages
+ if command in ['installed', 'upgrades', 'available']:
+ results = [
+ _package_dict(package)
+ for package in getattr(base.sack.query(), command)()]
+ # Return the enabled repository ids
+ elif command in ['repos', 'repositories']:
+ results = [
+ {'repoid': repo.id, 'state': 'enabled'}
+ for repo in base.repos.iter_enabled()]
+ # Return any matching packages
+ else:
+ packages = subject.Subject(command).get_best_query(base.sack)
+ results = [_package_dict(package) for package in packages]
+
+ module.exit_json(results=results)
+
+
+def _mark_package_install(module, base, pkg_spec):
+ """Mark the package for install."""
+ try:
+ base.install(pkg_spec)
+ except exceptions.MarkingError:
+ module.fail_json(msg="No package {} available.".format(pkg_spec))
+
+
+def ensure(module, base, state, names):
+ allow_erasing = False
+ if names == ['*'] and state == 'latest':
+ base.upgrade_all()
+ else:
+ pkg_specs, group_specs, filenames = cli.commands.parse_spec_group_file(
+ names)
+ if group_specs:
+ base.read_comps()
+
+ groups = []
+ for group_spec in group_specs:
+ group = base.comps.group_by_pattern(group_spec)
+ if group:
+ groups.append(group)
+ else:
+ module.fail_json(
+ msg="No group {} available.".format(group_spec))
+
+ if state in ['installed', 'present']:
+ # Install files.
+ for filename in filenames:
+ base.package_install(base.add_remote_rpm(filename))
+ # Install groups.
+ for group in groups:
+ base.group_install(group, const.GROUP_PACKAGE_TYPES)
+ # Install packages.
+ for pkg_spec in pkg_specs:
+ _mark_package_install(module, base, pkg_spec)
+
+ elif state == 'latest':
+ # "latest" is same as "installed" for filenames.
+ for filename in filenames:
+ base.package_install(base.add_remote_rpm(filename))
+ for group in groups:
+ try:
+ base.group_upgrade(group)
+ except exceptions.CompsError:
+ # If not already installed, try to install.
+ base.group_install(group, const.GROUP_PACKAGE_TYPES)
+ for pkg_spec in pkg_specs:
+                # conf.best=True makes dnf pick the latest available version,
+                # even for packages that were not previously installed
+ base.conf.best = True
+ base.install(pkg_spec)
+
+ else:
+ # state == absent
+ if filenames:
+ module.fail_json(
+ msg="Cannot remove paths -- please specify package name.")
+
+ installed = base.sack.query().installed()
+ for group in groups:
+ if installed.filter(name=group.name):
+ base.group_remove(group)
+ for pkg_spec in pkg_specs:
+ if installed.filter(name=pkg_spec):
+ base.remove(pkg_spec)
+ # Like the dnf CLI we want to allow recursive removal of dependent
+ # packages
+ allow_erasing = True
+
+ if not base.resolve(allow_erasing=allow_erasing):
+ module.exit_json(msg="Nothing to do")
+ else:
+ if module.check_mode:
+ module.exit_json(changed=True)
+ base.download_packages(base.transaction.install_set)
+ base.do_transaction()
+ response = {'changed': True, 'results': []}
+ for package in base.transaction.install_set:
+ response['results'].append("Installed: {}".format(package))
+ for package in base.transaction.remove_set:
+ response['results'].append("Removed: {}".format(package))
+
+ module.exit_json(**response)
+
+
+def main():
+ """The main function."""
+ module = AnsibleModule(
+ argument_spec=dict(
+ name=dict(aliases=['pkg'], type='list'),
+ state=dict(
+ default='installed',
+ choices=[
+ 'absent', 'present', 'installed', 'removed', 'latest']),
+ enablerepo=dict(type='list', default=[]),
+ disablerepo=dict(type='list', default=[]),
+ list=dict(),
+ conf_file=dict(default=None, type='path'),
+ disable_gpg_check=dict(default=False, type='bool'),
+ ),
+ required_one_of=[['name', 'list']],
+ mutually_exclusive=[['name', 'list']],
+ supports_check_mode=True)
+ params = module.params
+
+ _fail_if_no_dnf(module)
+ if params['list']:
+ base = _base(
+ module, params['conf_file'], params['disable_gpg_check'],
+ params['disablerepo'], params['enablerepo'])
+ list_items(module, base, params['list'])
+ else:
+ # Note: base takes a long time to run so we want to check for failure
+ # before running it.
+ if not util.am_i_root():
+ module.fail_json(msg="This command has to be run under the root user.")
+ base = _base(
+ module, params['conf_file'], params['disable_gpg_check'],
+ params['disablerepo'], params['enablerepo'])
+
+ ensure(module, base, params['state'], params['name'])
+
+
+# import module snippets
+from ansible.module_utils.basic import *
+if __name__ == '__main__':
+ main()
diff --git a/lib/ansible/modules/extras/packaging/os/homebrew.py b/lib/ansible/modules/extras/packaging/os/homebrew.py
new file mode 100755
index 0000000000..fa61984e0f
--- /dev/null
+++ b/lib/ansible/modules/extras/packaging/os/homebrew.py
@@ -0,0 +1,870 @@
+#!/usr/bin/python
+# -*- coding: utf-8 -*-
+
+# (c) 2013, Andrew Dunham <andrew@du.nham.ca>
+# (c) 2013, Daniel Jaouen <dcj24@cornell.edu>
+# (c) 2015, Indrajit Raychaudhuri <irc+code@indrajit.com>
+#
+# Based on macports (Jimmy Tang <jcftang@gmail.com>)
+#
+# This module is free software: you can redistribute it and/or modify
+# it under the terms of the GNU General Public License as published by
+# the Free Software Foundation, either version 3 of the License, or
+# (at your option) any later version.
+#
+# This software is distributed in the hope that it will be useful,
+# but WITHOUT ANY WARRANTY; without even the implied warranty of
+# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+# GNU General Public License for more details.
+#
+# You should have received a copy of the GNU General Public License
+# along with this software. If not, see <http://www.gnu.org/licenses/>.
+
+DOCUMENTATION = '''
+---
+module: homebrew
+author:
+ - "Indrajit Raychaudhuri (@indrajitr)"
+ - "Daniel Jaouen (@danieljaouen)"
+ - "Andrew Dunham (@andrew-d)"
+requirements:
+ - "python >= 2.6"
+short_description: Package manager for Homebrew
+description:
+ - Manages Homebrew packages
+version_added: "1.1"
+options:
+ name:
+ description:
+ - name of package to install/remove
+ required: false
+ default: None
+ aliases: ['pkg', 'package', 'formula']
+ path:
+ description:
+ - "':' separated list of paths to search for 'brew' executable. Since A package (I(formula) in homebrew parlance) location is prefixed relative to the actual path of I(brew) command, providing an alternative I(brew) path enables managing different set of packages in an alternative location in the system."
+ required: false
+ default: '/usr/local/bin'
+ state:
+ description:
+ - state of the package
+ choices: [ 'head', 'latest', 'present', 'absent', 'linked', 'unlinked' ]
+ required: false
+ default: present
+ update_homebrew:
+ description:
+ - update homebrew itself first
+ required: false
+ default: no
+ choices: [ "yes", "no" ]
+ aliases: ['update-brew']
+ upgrade_all:
+ description:
+ - upgrade all homebrew packages
+ required: false
+ default: no
+ choices: [ "yes", "no" ]
+ aliases: ['upgrade']
+ install_options:
+ description:
+ - options flags to install a package
+ required: false
+ default: null
+ aliases: ['options']
+ version_added: "1.4"
+notes: []
+'''
+EXAMPLES = '''
+# Install formula foo with 'brew' in default path (C(/usr/local/bin))
+- homebrew: name=foo state=present
+
+# Install formula foo with 'brew' in alternate path C(/my/other/location/bin)
+- homebrew: name=foo path=/my/other/location/bin state=present
+
+# Update homebrew first and install formula foo with 'brew' in default path
+- homebrew: name=foo state=present update_homebrew=yes
+
+# Update homebrew first and upgrade formula foo to latest available with 'brew' in default path
+- homebrew: name=foo state=latest update_homebrew=yes
+
+# Update homebrew and upgrade all packages
+- homebrew: update_homebrew=yes upgrade_all=yes
+
+# Miscellaneous other examples
+- homebrew: name=foo state=head
+- homebrew: name=foo state=linked
+- homebrew: name=foo state=absent
+- homebrew: name=foo,bar state=absent
+- homebrew: name=foo state=present install_options=with-baz,enable-debug
+'''
+
+import os.path
+import re
+
+
+# exceptions -------------------------------------------------------------- {{{
+class HomebrewException(Exception):
+ pass
+# /exceptions ------------------------------------------------------------- }}}
+
+
+# utils ------------------------------------------------------------------- {{{
+def _create_regex_group(s):
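+    # Build a character class matching anything NOT listed in `s`; each line
+    # of `s` may carry a '#' comment, which is stripped first.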
+ lines = (line.strip() for line in s.split('\n') if line.strip())
+ chars = filter(None, (line.split('#')[0].strip() for line in lines))
+ group = r'[^' + r''.join(chars) + r']'
+ return re.compile(group)
+# /utils ------------------------------------------------------------------ }}}
+
+
+class Homebrew(object):
+ '''A class to manage Homebrew packages.'''
+
+ # class regexes ------------------------------------------------ {{{
+ VALID_PATH_CHARS = r'''
+ \w # alphanumeric characters (i.e., [a-zA-Z0-9_])
+ \s # spaces
+ : # colons
+ {sep} # the OS-specific path separator
+ . # dots
+ - # dashes
+ '''.format(sep=os.path.sep)
+
+ VALID_BREW_PATH_CHARS = r'''
+ \w # alphanumeric characters (i.e., [a-zA-Z0-9_])
+ \s # spaces
+ {sep} # the OS-specific path separator
+ . # dots
+ - # dashes
+ '''.format(sep=os.path.sep)
+
+ VALID_PACKAGE_CHARS = r'''
+ \w # alphanumeric characters (i.e., [a-zA-Z0-9_])
+ . # dots
+ / # slash (for taps)
+ \+ # plusses
+ - # dashes
+ : # colons (for URLs)
+ '''
+
+ INVALID_PATH_REGEX = _create_regex_group(VALID_PATH_CHARS)
+ INVALID_BREW_PATH_REGEX = _create_regex_group(VALID_BREW_PATH_CHARS)
+ INVALID_PACKAGE_REGEX = _create_regex_group(VALID_PACKAGE_CHARS)
+ # /class regexes ----------------------------------------------- }}}
+
+ # class validations -------------------------------------------- {{{
+ @classmethod
+ def valid_path(cls, path):
+ '''
+ `path` must be one of:
+ - list of paths
+ - a string containing only:
+ - alphanumeric characters
+ - dashes
+ - dots
+ - spaces
+ - colons
+ - os.path.sep
+ '''
+
+ if isinstance(path, basestring):
+ return not cls.INVALID_PATH_REGEX.search(path)
+
+ try:
+ iter(path)
+ except TypeError:
+ return False
+ else:
+ paths = path
+ return all(cls.valid_brew_path(path_) for path_ in paths)
+
+ @classmethod
+ def valid_brew_path(cls, brew_path):
+ '''
+ `brew_path` must be one of:
+ - None
+ - a string containing only:
+ - alphanumeric characters
+ - dashes
+ - dots
+ - spaces
+ - os.path.sep
+ '''
+
+ if brew_path is None:
+ return True
+
+ return (
+ isinstance(brew_path, basestring)
+ and not cls.INVALID_BREW_PATH_REGEX.search(brew_path)
+ )
+
+ @classmethod
+ def valid_package(cls, package):
+        '''A valid package is either None or made up of the package characters
+        above (alphanumerics, dots, slashes, plusses, dashes, colons).'''
+
+ if package is None:
+ return True
+
+ return (
+ isinstance(package, basestring)
+ and not cls.INVALID_PACKAGE_REGEX.search(package)
+ )
+
+ @classmethod
+ def valid_state(cls, state):
+ '''
+ A valid state is one of:
+ - None
+ - installed
+ - upgraded
+ - head
+ - linked
+ - unlinked
+ - absent
+ '''
+
+ if state is None:
+ return True
+ else:
+ return (
+ isinstance(state, basestring)
+ and state.lower() in (
+ 'installed',
+ 'upgraded',
+ 'head',
+ 'linked',
+ 'unlinked',
+ 'absent',
+ )
+ )
+
+ @classmethod
+ def valid_module(cls, module):
+ '''A valid module is an instance of AnsibleModule.'''
+
+ return isinstance(module, AnsibleModule)
+
+ # /class validations ------------------------------------------- }}}
+
+ # class properties --------------------------------------------- {{{
+ @property
+ def module(self):
+ return self._module
+
+ @module.setter
+ def module(self, module):
+ if not self.valid_module(module):
+ self._module = None
+ self.failed = True
+ self.message = 'Invalid module: {0}.'.format(module)
+ raise HomebrewException(self.message)
+
+ else:
+ self._module = module
+ return module
+
+ @property
+ def path(self):
+ return self._path
+
+ @path.setter
+ def path(self, path):
+ if not self.valid_path(path):
+ self._path = []
+ self.failed = True
+ self.message = 'Invalid path: {0}.'.format(path)
+ raise HomebrewException(self.message)
+
+ else:
+ if isinstance(path, basestring):
+ self._path = path.split(':')
+ else:
+ self._path = path
+
+ return path
+
+ @property
+ def brew_path(self):
+ return self._brew_path
+
+ @brew_path.setter
+ def brew_path(self, brew_path):
+ if not self.valid_brew_path(brew_path):
+ self._brew_path = None
+ self.failed = True
+ self.message = 'Invalid brew_path: {0}.'.format(brew_path)
+ raise HomebrewException(self.message)
+
+ else:
+ self._brew_path = brew_path
+ return brew_path
+
+ @property
+ def params(self):
+ return self._params
+
+ @params.setter
+ def params(self, params):
+ self._params = self.module.params
+ return self._params
+
+ @property
+ def current_package(self):
+ return self._current_package
+
+ @current_package.setter
+ def current_package(self, package):
+ if not self.valid_package(package):
+ self._current_package = None
+ self.failed = True
+ self.message = 'Invalid package: {0}.'.format(package)
+ raise HomebrewException(self.message)
+
+ else:
+ self._current_package = package
+ return package
+ # /class properties -------------------------------------------- }}}
+
+ def __init__(self, module, path, packages=None, state=None,
+ update_homebrew=False, upgrade_all=False,
+ install_options=None):
+ if not install_options:
+ install_options = list()
+ self._setup_status_vars()
+ self._setup_instance_vars(module=module, path=path, packages=packages,
+ state=state, update_homebrew=update_homebrew,
+ upgrade_all=upgrade_all,
+ install_options=install_options, )
+
+ self._prep()
+
+ # prep --------------------------------------------------------- {{{
+ def _setup_status_vars(self):
+ self.failed = False
+ self.changed = False
+ self.changed_count = 0
+ self.unchanged_count = 0
+ self.message = ''
+
+ def _setup_instance_vars(self, **kwargs):
+ for key, val in kwargs.iteritems():
+ setattr(self, key, val)
+
+ def _prep(self):
+ self._prep_brew_path()
+
+ def _prep_brew_path(self):
+ if not self.module:
+ self.brew_path = None
+ self.failed = True
+ self.message = 'AnsibleModule not set.'
+ raise HomebrewException(self.message)
+
+ self.brew_path = self.module.get_bin_path(
+ 'brew',
+ required=True,
+ opt_dirs=self.path,
+ )
+ if not self.brew_path:
+ self.brew_path = None
+ self.failed = True
+ self.message = 'Unable to locate homebrew executable.'
+ raise HomebrewException('Unable to locate homebrew executable.')
+
+ return self.brew_path
+
+ def _status(self):
+ return (self.failed, self.changed, self.message)
+ # /prep -------------------------------------------------------- }}}
+
+ def run(self):
+ try:
+ self._run()
+ except HomebrewException:
+ pass
+
+ if not self.failed and (self.changed_count + self.unchanged_count > 1):
+ self.message = "Changed: %d, Unchanged: %d" % (
+ self.changed_count,
+ self.unchanged_count,
+ )
+ (failed, changed, message) = self._status()
+
+ return (failed, changed, message)
+
+ # checks ------------------------------------------------------- {{{
+ def _current_package_is_installed(self):
+ if not self.valid_package(self.current_package):
+ self.failed = True
+ self.message = 'Invalid package: {0}.'.format(self.current_package)
+ raise HomebrewException(self.message)
+
+ cmd = [
+ "{brew_path}".format(brew_path=self.brew_path),
+ "info",
+ self.current_package,
+ ]
+ rc, out, err = self.module.run_command(cmd)
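+        # Relies on `brew info <formula>` mentioning "Built from source" or
+        # "Poured from bottle" only for formulae that are actually installed.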
+ for line in out.split('\n'):
+ if (
+ re.search(r'Built from source', line)
+ or re.search(r'Poured from bottle', line)
+ ):
+ return True
+
+ return False
+
+ def _current_package_is_outdated(self):
+ if not self.valid_package(self.current_package):
+ return False
+
+ rc, out, err = self.module.run_command([
+ self.brew_path,
+ 'outdated',
+ self.current_package,
+ ])
+
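+        # Relies on `brew outdated <formula>` exiting non-zero when an
+        # upgrade is available.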
+ return rc != 0
+
+ def _current_package_is_installed_from_head(self):
+ if not Homebrew.valid_package(self.current_package):
+ return False
+ elif not self._current_package_is_installed():
+ return False
+
+ rc, out, err = self.module.run_command([
+ self.brew_path,
+ 'info',
+ self.current_package,
+ ])
+
+ try:
+ version_info = [line for line in out.split('\n') if line][0]
+ except IndexError:
+ return False
+
+ return version_info.split(' ')[-1] == 'HEAD'
+ # /checks ------------------------------------------------------ }}}
+
+ # commands ----------------------------------------------------- {{{
+ def _run(self):
+ if self.update_homebrew:
+ self._update_homebrew()
+
+ if self.upgrade_all:
+ self._upgrade_all()
+
+ if self.packages:
+ if self.state == 'installed':
+ return self._install_packages()
+ elif self.state == 'upgraded':
+ return self._upgrade_packages()
+ elif self.state == 'head':
+ return self._install_packages()
+ elif self.state == 'linked':
+ return self._link_packages()
+ elif self.state == 'unlinked':
+ return self._unlink_packages()
+ elif self.state == 'absent':
+ return self._uninstall_packages()
+
+ # updated -------------------------------- {{{
+ def _update_homebrew(self):
+ rc, out, err = self.module.run_command([
+ self.brew_path,
+ 'update',
+ ])
+ if rc == 0:
+ if out and isinstance(out, basestring):
+ already_updated = any(
+ re.search(r'Already up-to-date.', s.strip(), re.IGNORECASE)
+ for s in out.split('\n')
+ if s
+ )
+ if not already_updated:
+ self.changed = True
+ self.message = 'Homebrew updated successfully.'
+ else:
+ self.message = 'Homebrew already up-to-date.'
+
+ return True
+ else:
+ self.failed = True
+ self.message = err.strip()
+ raise HomebrewException(self.message)
+ # /updated ------------------------------- }}}
+
+ # _upgrade_all --------------------------- {{{
+ def _upgrade_all(self):
+ rc, out, err = self.module.run_command([
+ self.brew_path,
+ 'upgrade',
+ ])
+ if rc == 0:
+ if not out:
+ self.message = 'Homebrew packages already upgraded.'
+
+ else:
+ self.changed = True
+ self.message = 'Homebrew upgraded.'
+
+ return True
+ else:
+ self.failed = True
+ self.message = err.strip()
+ raise HomebrewException(self.message)
+ # /_upgrade_all -------------------------- }}}
+
+ # installed ------------------------------ {{{
+ def _install_current_package(self):
+ if not self.valid_package(self.current_package):
+ self.failed = True
+ self.message = 'Invalid package: {0}.'.format(self.current_package)
+ raise HomebrewException(self.message)
+
+ if self._current_package_is_installed():
+ self.unchanged_count += 1
+ self.message = 'Package already installed: {0}'.format(
+ self.current_package,
+ )
+ return True
+
+ if self.module.check_mode:
+ self.changed = True
+ self.message = 'Package would be installed: {0}'.format(
+ self.current_package
+ )
+ raise HomebrewException(self.message)
+
+ if self.state == 'head':
+ head = '--HEAD'
+ else:
+ head = None
+
+ opts = (
+ [self.brew_path, 'install']
+ + self.install_options
+ + [self.current_package, head]
+ )
+ cmd = [opt for opt in opts if opt]
+ rc, out, err = self.module.run_command(cmd)
+
+ if self._current_package_is_installed():
+ self.changed_count += 1
+ self.changed = True
+ self.message = 'Package installed: {0}'.format(self.current_package)
+ return True
+ else:
+ self.failed = True
+ self.message = err.strip()
+ raise HomebrewException(self.message)
+
+ def _install_packages(self):
+ for package in self.packages:
+ self.current_package = package
+ self._install_current_package()
+
+ return True
+ # /installed ----------------------------- }}}
+
+ # upgraded ------------------------------- {{{
+ def _upgrade_current_package(self):
+ command = 'upgrade'
+
+ if not self.valid_package(self.current_package):
+ self.failed = True
+ self.message = 'Invalid package: {0}.'.format(self.current_package)
+ raise HomebrewException(self.message)
+
+ if not self._current_package_is_installed():
+ command = 'install'
+
+ if self._current_package_is_installed() and not self._current_package_is_outdated():
+ self.message = 'Package is already upgraded: {0}'.format(
+ self.current_package,
+ )
+ self.unchanged_count += 1
+ return True
+
+ if self.module.check_mode:
+ self.changed = True
+ self.message = 'Package would be upgraded: {0}'.format(
+ self.current_package
+ )
+ raise HomebrewException(self.message)
+
+ opts = (
+ [self.brew_path, command]
+ + self.install_options
+ + [self.current_package]
+ )
+ cmd = [opt for opt in opts if opt]
+ rc, out, err = self.module.run_command(cmd)
+
+ if self._current_package_is_installed() and not self._current_package_is_outdated():
+ self.changed_count += 1
+ self.changed = True
+ self.message = 'Package upgraded: {0}'.format(self.current_package)
+ return True
+ else:
+ self.failed = True
+ self.message = err.strip()
+ raise HomebrewException(self.message)
+
+ def _upgrade_all_packages(self):
+ opts = (
+ [self.brew_path, 'upgrade']
+ + self.install_options
+ )
+ cmd = [opt for opt in opts if opt]
+ rc, out, err = self.module.run_command(cmd)
+
+ if rc == 0:
+ self.changed = True
+ self.message = 'All packages upgraded.'
+ return True
+ else:
+ self.failed = True
+ self.message = err.strip()
+ raise HomebrewException(self.message)
+
+ def _upgrade_packages(self):
+ if not self.packages:
+ self._upgrade_all_packages()
+ else:
+ for package in self.packages:
+ self.current_package = package
+ self._upgrade_current_package()
+ return True
+ # /upgraded ------------------------------ }}}
+
+ # uninstalled ---------------------------- {{{
+ def _uninstall_current_package(self):
+ if not self.valid_package(self.current_package):
+ self.failed = True
+ self.message = 'Invalid package: {0}.'.format(self.current_package)
+ raise HomebrewException(self.message)
+
+ if not self._current_package_is_installed():
+ self.unchanged_count += 1
+ self.message = 'Package already uninstalled: {0}'.format(
+ self.current_package,
+ )
+ return True
+
+ if self.module.check_mode:
+ self.changed = True
+ self.message = 'Package would be uninstalled: {0}'.format(
+ self.current_package
+ )
+ raise HomebrewException(self.message)
+
+ opts = (
+ [self.brew_path, 'uninstall']
+ + self.install_options
+ + [self.current_package]
+ )
+ cmd = [opt for opt in opts if opt]
+ rc, out, err = self.module.run_command(cmd)
+
+ if not self._current_package_is_installed():
+ self.changed_count += 1
+ self.changed = True
+ self.message = 'Package uninstalled: {0}'.format(self.current_package)
+ return True
+ else:
+ self.failed = True
+ self.message = err.strip()
+ raise HomebrewException(self.message)
+
+ def _uninstall_packages(self):
+ for package in self.packages:
+ self.current_package = package
+ self._uninstall_current_package()
+
+ return True
+ # /uninstalled ----------------------------- }}}
+
+ # linked --------------------------------- {{{
+ def _link_current_package(self):
+ if not self.valid_package(self.current_package):
+ self.failed = True
+ self.message = 'Invalid package: {0}.'.format(self.current_package)
+ raise HomebrewException(self.message)
+
+ if not self._current_package_is_installed():
+ self.failed = True
+ self.message = 'Package not installed: {0}.'.format(self.current_package)
+ raise HomebrewException(self.message)
+
+ if self.module.check_mode:
+ self.changed = True
+ self.message = 'Package would be linked: {0}'.format(
+ self.current_package
+ )
+ raise HomebrewException(self.message)
+
+ opts = (
+ [self.brew_path, 'link']
+ + self.install_options
+ + [self.current_package]
+ )
+ cmd = [opt for opt in opts if opt]
+ rc, out, err = self.module.run_command(cmd)
+
+ if rc == 0:
+ self.changed_count += 1
+ self.changed = True
+ self.message = 'Package linked: {0}'.format(self.current_package)
+
+ return True
+ else:
+ self.failed = True
+ self.message = 'Package could not be linked: {0}.'.format(self.current_package)
+ raise HomebrewException(self.message)
+
+ def _link_packages(self):
+ for package in self.packages:
+ self.current_package = package
+ self._link_current_package()
+
+ return True
+ # /linked -------------------------------- }}}
+
+ # unlinked ------------------------------- {{{
+ def _unlink_current_package(self):
+ if not self.valid_package(self.current_package):
+ self.failed = True
+ self.message = 'Invalid package: {0}.'.format(self.current_package)
+ raise HomebrewException(self.message)
+
+ if not self._current_package_is_installed():
+ self.failed = True
+ self.message = 'Package not installed: {0}.'.format(self.current_package)
+ raise HomebrewException(self.message)
+
+ if self.module.check_mode:
+ self.changed = True
+ self.message = 'Package would be unlinked: {0}'.format(
+ self.current_package
+ )
+ raise HomebrewException(self.message)
+
+ opts = (
+ [self.brew_path, 'unlink']
+ + self.install_options
+ + [self.current_package]
+ )
+ cmd = [opt for opt in opts if opt]
+ rc, out, err = self.module.run_command(cmd)
+
+ if rc == 0:
+ self.changed_count += 1
+ self.changed = True
+ self.message = 'Package unlinked: {0}'.format(self.current_package)
+
+ return True
+ else:
+ self.failed = True
+ self.message = 'Package could not be unlinked: {0}.'.format(self.current_package)
+ raise HomebrewException(self.message)
+
+ def _unlink_packages(self):
+ for package in self.packages:
+ self.current_package = package
+ self._unlink_current_package()
+
+ return True
+ # /unlinked ------------------------------ }}}
+ # /commands ---------------------------------------------------- }}}
+
+
+def main():
+ module = AnsibleModule(
+ argument_spec=dict(
+ name=dict(
+ aliases=["pkg", "package", "formula"],
+ required=False,
+ type='list',
+ ),
+ path=dict(
+ default="/usr/local/bin",
+ required=False,
+ type='path',
+ ),
+ state=dict(
+ default="present",
+ choices=[
+ "present", "installed",
+ "latest", "upgraded", "head",
+ "linked", "unlinked",
+ "absent", "removed", "uninstalled",
+ ],
+ ),
+ update_homebrew=dict(
+ default=False,
+ aliases=["update-brew"],
+ type='bool',
+ ),
+ upgrade_all=dict(
+ default=False,
+ aliases=["upgrade"],
+ type='bool',
+ ),
+ install_options=dict(
+ default=None,
+ aliases=['options'],
+ type='list',
+ )
+ ),
+ supports_check_mode=True,
+ )
+
+ module.run_command_environ_update = dict(LANG='C', LC_ALL='C', LC_MESSAGES='C', LC_CTYPE='C')
+
+ p = module.params
+
+ if p['name']:
+ packages = p['name']
+ else:
+ packages = None
+
+ path = p['path']
+ if path:
+ path = path.split(':')
+
+ state = p['state']
+    if state in ('present', 'installed'):
+        state = 'installed'
+    elif state in ('latest', 'upgraded'):
+        state = 'upgraded'
+    elif state in ('absent', 'removed', 'uninstalled'):
+        state = 'absent'
+    # 'head', 'linked' and 'unlinked' are already in canonical form
+
+ update_homebrew = p['update_homebrew']
+ upgrade_all = p['upgrade_all']
+ p['install_options'] = p['install_options'] or []
+ install_options = ['--{0}'.format(install_option)
+ for install_option in p['install_options']]
+
+ brew = Homebrew(module=module, path=path, packages=packages,
+ state=state, update_homebrew=update_homebrew,
+ upgrade_all=upgrade_all, install_options=install_options)
+ (failed, changed, message) = brew.run()
+ if failed:
+ module.fail_json(msg=message)
+ else:
+ module.exit_json(changed=changed, msg=message)
+
+# this is magic, see lib/ansible/module_common.py
+from ansible.module_utils.basic import *
+
+if __name__ == '__main__':
+ main()
diff --git a/lib/ansible/modules/extras/packaging/os/homebrew_cask.py b/lib/ansible/modules/extras/packaging/os/homebrew_cask.py
new file mode 100755
index 0000000000..debcb788ea
--- /dev/null
+++ b/lib/ansible/modules/extras/packaging/os/homebrew_cask.py
@@ -0,0 +1,586 @@
+#!/usr/bin/python
+# -*- coding: utf-8 -*-
+
+# (c) 2013, Daniel Jaouen <dcj24@cornell.edu>
+# (c) 2016, Indrajit Raychaudhuri <irc+code@indrajit.com>
+#
+# This module is free software: you can redistribute it and/or modify
+# it under the terms of the GNU General Public License as published by
+# the Free Software Foundation, either version 3 of the License, or
+# (at your option) any later version.
+#
+# This software is distributed in the hope that it will be useful,
+# but WITHOUT ANY WARRANTY; without even the implied warranty of
+# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+# GNU General Public License for more details.
+#
+# You should have received a copy of the GNU General Public License
+# along with this software. If not, see <http://www.gnu.org/licenses/>.
+
+DOCUMENTATION = '''
+---
+module: homebrew_cask
+author:
+ - "Indrajit Raychaudhuri (@indrajitr)"
+ - "Daniel Jaouen (@danieljaouen)"
+ - "Enric Lluelles (@enriclluelles)"
+requirements:
+ - "python >= 2.6"
+short_description: Install/uninstall homebrew casks.
+description:
+ - Manages Homebrew casks.
+version_added: "1.6"
+options:
+ name:
+ description:
+ - name of cask to install/remove
+ required: true
+ aliases: ['pkg', 'package', 'cask']
+ path:
+ description:
+ - "':' separated list of paths to search for 'brew' executable."
+ required: false
+ default: '/usr/local/bin'
+ state:
+ description:
+ - state of the cask
+ choices: [ 'present', 'absent' ]
+ required: false
+ default: present
+ update_homebrew:
+ description:
+ - update homebrew itself first. Note that C(brew cask update) is
+ a synonym for C(brew update).
+ required: false
+ default: no
+ choices: [ "yes", "no" ]
+ aliases: ['update-brew']
+ version_added: "2.2"
+ install_options:
+ description:
+ - options flags to install a package
+ required: false
+ default: null
+ aliases: ['options']
+ version_added: "2.2"
+'''
+EXAMPLES = '''
+- homebrew_cask: name=alfred state=present
+- homebrew_cask: name=alfred state=absent
+- homebrew_cask: name=alfred state=present install_options="appdir=/Applications"
+- homebrew_cask: name=alfred state=present install_options="debug,appdir=/Applications"
+- homebrew_cask: name=alfred state=absent install_options="force"
+'''
+
+import os.path
+import re
+
+
+# exceptions -------------------------------------------------------------- {{{
+class HomebrewCaskException(Exception):
+ pass
+# /exceptions ------------------------------------------------------------- }}}
+
+
+# utils ------------------------------------------------------------------- {{{
+def _create_regex_group(s):
+ lines = (line.strip() for line in s.split('\n') if line.strip())
+ chars = filter(None, (line.split('#')[0].strip() for line in lines))
+ group = r'[^' + r''.join(chars) + r']'
+ return re.compile(group)
+# /utils ------------------------------------------------------------------ }}}
+
+
+class HomebrewCask(object):
+ '''A class to manage Homebrew casks.'''
+
+ # class regexes ------------------------------------------------ {{{
+ VALID_PATH_CHARS = r'''
+ \w # alphanumeric characters (i.e., [a-zA-Z0-9_])
+ \s # spaces
+ : # colons
+ {sep} # the OS-specific path separator
+ . # dots
+ - # dashes
+ '''.format(sep=os.path.sep)
+
+ VALID_BREW_PATH_CHARS = r'''
+ \w # alphanumeric characters (i.e., [a-zA-Z0-9_])
+ \s # spaces
+ {sep} # the OS-specific path separator
+ . # dots
+ - # dashes
+ '''.format(sep=os.path.sep)
+
+ VALID_CASK_CHARS = r'''
+ \w # alphanumeric characters (i.e., [a-zA-Z0-9_])
+ . # dots
+ / # slash (for taps)
+ - # dashes
+ '''
+
+ INVALID_PATH_REGEX = _create_regex_group(VALID_PATH_CHARS)
+ INVALID_BREW_PATH_REGEX = _create_regex_group(VALID_BREW_PATH_CHARS)
+ INVALID_CASK_REGEX = _create_regex_group(VALID_CASK_CHARS)
+ # /class regexes ----------------------------------------------- }}}
+
+ # class validations -------------------------------------------- {{{
+ @classmethod
+ def valid_path(cls, path):
+ '''
+ `path` must be one of:
+ - list of paths
+ - a string containing only:
+ - alphanumeric characters
+ - dashes
+ - dots
+ - spaces
+ - colons
+ - os.path.sep
+ '''
+
+ if isinstance(path, basestring):
+ return not cls.INVALID_PATH_REGEX.search(path)
+
+ try:
+ iter(path)
+ except TypeError:
+ return False
+ else:
+ paths = path
+ return all(cls.valid_brew_path(path_) for path_ in paths)
+
+ @classmethod
+ def valid_brew_path(cls, brew_path):
+ '''
+ `brew_path` must be one of:
+ - None
+ - a string containing only:
+ - alphanumeric characters
+ - dashes
+ - dots
+ - spaces
+ - os.path.sep
+ '''
+
+ if brew_path is None:
+ return True
+
+ return (
+ isinstance(brew_path, basestring)
+ and not cls.INVALID_BREW_PATH_REGEX.search(brew_path)
+ )
+
+ @classmethod
+ def valid_cask(cls, cask):
+        '''A valid cask is either None or made up of the cask characters
+        above (alphanumerics, dots, slashes, dashes).'''
+
+ if cask is None:
+ return True
+
+ return (
+ isinstance(cask, basestring)
+ and not cls.INVALID_CASK_REGEX.search(cask)
+ )
+
+ @classmethod
+ def valid_state(cls, state):
+ '''
+ A valid state is one of:
+ - installed
+ - absent
+ '''
+
+ if state is None:
+ return True
+ else:
+ return (
+ isinstance(state, basestring)
+ and state.lower() in (
+ 'installed',
+ 'absent',
+ )
+ )
+
+ @classmethod
+ def valid_module(cls, module):
+ '''A valid module is an instance of AnsibleModule.'''
+
+ return isinstance(module, AnsibleModule)
+
+ # /class validations ------------------------------------------- }}}
+
+ # class properties --------------------------------------------- {{{
+ @property
+ def module(self):
+ return self._module
+
+ @module.setter
+ def module(self, module):
+ if not self.valid_module(module):
+ self._module = None
+ self.failed = True
+ self.message = 'Invalid module: {0}.'.format(module)
+ raise HomebrewCaskException(self.message)
+
+ else:
+ self._module = module
+ return module
+
+ @property
+ def path(self):
+ return self._path
+
+ @path.setter
+ def path(self, path):
+ if not self.valid_path(path):
+ self._path = []
+ self.failed = True
+ self.message = 'Invalid path: {0}.'.format(path)
+ raise HomebrewCaskException(self.message)
+
+ else:
+ if isinstance(path, basestring):
+ self._path = path.split(':')
+ else:
+ self._path = path
+
+ return path
+
+ @property
+ def brew_path(self):
+ return self._brew_path
+
+ @brew_path.setter
+ def brew_path(self, brew_path):
+ if not self.valid_brew_path(brew_path):
+ self._brew_path = None
+ self.failed = True
+ self.message = 'Invalid brew_path: {0}.'.format(brew_path)
+ raise HomebrewCaskException(self.message)
+
+ else:
+ self._brew_path = brew_path
+ return brew_path
+
+ @property
+ def params(self):
+ return self._params
+
+ @params.setter
+ def params(self, params):
+ self._params = self.module.params
+ return self._params
+
+ @property
+ def current_cask(self):
+ return self._current_cask
+
+ @current_cask.setter
+ def current_cask(self, cask):
+ if not self.valid_cask(cask):
+ self._current_cask = None
+ self.failed = True
+ self.message = 'Invalid cask: {0}.'.format(cask)
+ raise HomebrewCaskException(self.message)
+
+ else:
+ self._current_cask = cask
+ return cask
+ # /class properties -------------------------------------------- }}}
+
+    def __init__(self, module, path=None, casks=None, state=None,
+ update_homebrew=False, install_options=None):
+ if not install_options:
+ install_options = list()
+ self._setup_status_vars()
+ self._setup_instance_vars(module=module, path=path, casks=casks,
+ state=state, update_homebrew=update_homebrew,
+ install_options=install_options,)
+
+ self._prep()
+
+ # prep --------------------------------------------------------- {{{
+ def _setup_status_vars(self):
+ self.failed = False
+ self.changed = False
+ self.changed_count = 0
+ self.unchanged_count = 0
+ self.message = ''
+
+ def _setup_instance_vars(self, **kwargs):
+ for key, val in kwargs.iteritems():
+ setattr(self, key, val)
+
+ def _prep(self):
+ self._prep_brew_path()
+
+ def _prep_brew_path(self):
+ if not self.module:
+ self.brew_path = None
+ self.failed = True
+ self.message = 'AnsibleModule not set.'
+ raise HomebrewCaskException(self.message)
+
+ self.brew_path = self.module.get_bin_path(
+ 'brew',
+ required=True,
+ opt_dirs=self.path,
+ )
+ if not self.brew_path:
+ self.brew_path = None
+ self.failed = True
+ self.message = 'Unable to locate homebrew executable.'
+            raise HomebrewCaskException(self.message)
+
+ return self.brew_path
+
+ def _status(self):
+ return (self.failed, self.changed, self.message)
+ # /prep -------------------------------------------------------- }}}
+
+ def run(self):
+ try:
+ self._run()
+ except HomebrewCaskException:
+ pass
+
+ if not self.failed and (self.changed_count + self.unchanged_count > 1):
+ self.message = "Changed: %d, Unchanged: %d" % (
+ self.changed_count,
+ self.unchanged_count,
+ )
+ (failed, changed, message) = self._status()
+
+ return (failed, changed, message)
+
+ # checks ------------------------------------------------------- {{{
+ def _current_cask_is_installed(self):
+ if not self.valid_cask(self.current_cask):
+ self.failed = True
+ self.message = 'Invalid cask: {0}.'.format(self.current_cask)
+ raise HomebrewCaskException(self.message)
+
+ cmd = [
+ "{brew_path}".format(brew_path=self.brew_path),
+ "cask",
+ "list"
+ ]
+ rc, out, err = self.module.run_command(cmd)
+
+ if 'nothing to list' in err:
+ return False
+ elif rc == 0:
+ casks = [cask_.strip() for cask_ in out.split('\n') if cask_.strip()]
+ return self.current_cask in casks
+ else:
+ self.failed = True
+ self.message = err.strip()
+ raise HomebrewCaskException(self.message)
+ # /checks ------------------------------------------------------ }}}
+
+ # commands ----------------------------------------------------- {{{
+ def _run(self):
+ if self.update_homebrew:
+ self._update_homebrew()
+
+ if self.state == 'installed':
+ return self._install_casks()
+ elif self.state == 'absent':
+ return self._uninstall_casks()
+
+ # updated -------------------------------- {{{
+ def _update_homebrew(self):
+ rc, out, err = self.module.run_command([
+ self.brew_path,
+ 'update',
+ ])
+ if rc == 0:
+ if out and isinstance(out, basestring):
+ already_updated = any(
+                    re.search(r'Already up-to-date\.', s.strip(), re.IGNORECASE)
+ for s in out.split('\n')
+ if s
+ )
+ if not already_updated:
+ self.changed = True
+ self.message = 'Homebrew updated successfully.'
+ else:
+ self.message = 'Homebrew already up-to-date.'
+
+ return True
+ else:
+ self.failed = True
+ self.message = err.strip()
+ raise HomebrewCaskException(self.message)
+ # /updated ------------------------------- }}}
+
+ # installed ------------------------------ {{{
+ def _install_current_cask(self):
+ if not self.valid_cask(self.current_cask):
+ self.failed = True
+ self.message = 'Invalid cask: {0}.'.format(self.current_cask)
+ raise HomebrewCaskException(self.message)
+
+ if self._current_cask_is_installed():
+ self.unchanged_count += 1
+ self.message = 'Cask already installed: {0}'.format(
+ self.current_cask,
+ )
+ return True
+
+ if self.module.check_mode:
+ self.changed = True
+ self.message = 'Cask would be installed: {0}'.format(
+ self.current_cask
+ )
+ raise HomebrewCaskException(self.message)
+
+ opts = (
+ [self.brew_path, 'cask', 'install', self.current_cask]
+ + self.install_options
+ )
+
+ cmd = [opt for opt in opts if opt]
+ rc, out, err = self.module.run_command(cmd)
+
+ if self._current_cask_is_installed():
+ self.changed_count += 1
+ self.changed = True
+ self.message = 'Cask installed: {0}'.format(self.current_cask)
+ return True
+ else:
+ self.failed = True
+ self.message = err.strip()
+ raise HomebrewCaskException(self.message)
+
+ def _install_casks(self):
+ for cask in self.casks:
+ self.current_cask = cask
+ self._install_current_cask()
+
+ return True
+ # /installed ----------------------------- }}}
+
+ # uninstalled ---------------------------- {{{
+ def _uninstall_current_cask(self):
+ if not self.valid_cask(self.current_cask):
+ self.failed = True
+ self.message = 'Invalid cask: {0}.'.format(self.current_cask)
+ raise HomebrewCaskException(self.message)
+
+ if not self._current_cask_is_installed():
+ self.unchanged_count += 1
+ self.message = 'Cask already uninstalled: {0}'.format(
+ self.current_cask,
+ )
+ return True
+
+ if self.module.check_mode:
+ self.changed = True
+ self.message = 'Cask would be uninstalled: {0}'.format(
+ self.current_cask
+ )
+ raise HomebrewCaskException(self.message)
+
+ cmd = [opt
+ for opt in (self.brew_path, 'cask', 'uninstall', self.current_cask)
+ if opt]
+
+ rc, out, err = self.module.run_command(cmd)
+
+ if not self._current_cask_is_installed():
+ self.changed_count += 1
+ self.changed = True
+ self.message = 'Cask uninstalled: {0}'.format(self.current_cask)
+ return True
+ else:
+ self.failed = True
+ self.message = err.strip()
+ raise HomebrewCaskException(self.message)
+
+ def _uninstall_casks(self):
+ for cask in self.casks:
+ self.current_cask = cask
+ self._uninstall_current_cask()
+
+ return True
+ # /uninstalled ----------------------------- }}}
+ # /commands ---------------------------------------------------- }}}
+
+
+def main():
+ module = AnsibleModule(
+ argument_spec=dict(
+ name=dict(
+ aliases=["pkg", "package", "cask"],
+ required=False,
+ type='list',
+ ),
+ path=dict(
+ default="/usr/local/bin",
+ required=False,
+ type='path',
+ ),
+ state=dict(
+ default="present",
+ choices=[
+ "present", "installed",
+ "absent", "removed", "uninstalled",
+ ],
+ ),
+ update_homebrew=dict(
+ default=False,
+ aliases=["update-brew"],
+ type='bool',
+ ),
+ install_options=dict(
+ default=None,
+ aliases=['options'],
+ type='list',
+ )
+ ),
+ supports_check_mode=True,
+ )
+
+ module.run_command_environ_update = dict(LANG='C', LC_ALL='C', LC_MESSAGES='C', LC_CTYPE='C')
+
+ p = module.params
+
+ if p['name']:
+ casks = p['name']
+ else:
+ casks = None
+
+ path = p['path']
+ if path:
+ path = path.split(':')
+
+ state = p['state']
+ if state in ('present', 'installed'):
+ state = 'installed'
+ if state in ('absent', 'removed', 'uninstalled'):
+ state = 'absent'
+
+ update_homebrew = p['update_homebrew']
+ p['install_options'] = p['install_options'] or []
+ install_options = ['--{0}'.format(install_option)
+ for install_option in p['install_options']]
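+    # For illustration (hypothetical value): install_options=['appdir=/Applications']
+    # is mapped by the expression above to ['--appdir=/Applications'].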
+
+ brew_cask = HomebrewCask(module=module, path=path, casks=casks,
+ state=state, update_homebrew=update_homebrew,
+ install_options=install_options)
+ (failed, changed, message) = brew_cask.run()
+ if failed:
+ module.fail_json(msg=message)
+ else:
+ module.exit_json(changed=changed, msg=message)
+
+# this is magic, see lib/ansible/module_common.py
+from ansible.module_utils.basic import *
+
+if __name__ == '__main__':
+ main()
diff --git a/lib/ansible/modules/extras/packaging/os/homebrew_tap.py b/lib/ansible/modules/extras/packaging/os/homebrew_tap.py
new file mode 100644
index 0000000000..9264db8775
--- /dev/null
+++ b/lib/ansible/modules/extras/packaging/os/homebrew_tap.py
@@ -0,0 +1,250 @@
+#!/usr/bin/python
+# -*- coding: utf-8 -*-
+
+# (c) 2013, Daniel Jaouen <dcj24@cornell.edu>
+# (c) 2016, Indrajit Raychaudhuri <irc+code@indrajit.com>
+#
+# Based on homebrew (Andrew Dunham <andrew@du.nham.ca>)
+#
+# This file is part of Ansible
+#
+# Ansible is free software: you can redistribute it and/or modify
+# it under the terms of the GNU General Public License as published by
+# the Free Software Foundation, either version 3 of the License, or
+# (at your option) any later version.
+#
+# Ansible is distributed in the hope that it will be useful,
+# but WITHOUT ANY WARRANTY; without even the implied warranty of
+# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+# GNU General Public License for more details.
+#
+# You should have received a copy of the GNU General Public License
+# along with Ansible. If not, see <http://www.gnu.org/licenses/>.
+
+import re
+
+DOCUMENTATION = '''
+---
+module: homebrew_tap
+author:
+ - "Indrajit Raychaudhuri (@indrajitr)"
+ - "Daniel Jaouen (@danieljaouen)"
+short_description: Tap a Homebrew repository.
+description:
+ - Tap external Homebrew repositories.
+version_added: "1.6"
+options:
+ name:
+ description:
+ - The GitHub user/organization repository to tap.
+ required: true
+ aliases: ['tap']
+ url:
+    description:
+      - The optional git URL of the repository to tap. The URL is not
+        assumed to be on GitHub, and the protocol doesn't have to be HTTP.
+        Any location and protocol that git can handle is fine.
+      - The I(name) option may not be a list of multiple taps (but a
+        single tap instead) when this option is provided.
+    required: false
+    version_added: "2.2"
+ state:
+ description:
+ - state of the repository.
+ choices: [ 'present', 'absent' ]
+ required: false
+ default: 'present'
+requirements: [ homebrew ]
+'''
+
+EXAMPLES = '''
+homebrew_tap: name=homebrew/dupes
+homebrew_tap: name=homebrew/dupes state=absent
+homebrew_tap: name=homebrew/dupes,homebrew/science state=present
+homebrew_tap: name=telemachus/brew url=https://bitbucket.org/telemachus/brew
+'''
+
+
+def a_valid_tap(tap):
+ '''Returns True if the tap is valid.'''
+ regex = re.compile(r'^([\w-]+)/(homebrew-)?([\w-]+)$')
+    return bool(regex.match(tap))
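+
+# For illustration, under the regex above (hypothetical tap names):
+#   a_valid_tap('homebrew/dupes')           -> True
+#   a_valid_tap('telemachus/homebrew-brew') -> True (optional 'homebrew-' prefix)
+#   a_valid_tap('not a tap')                -> False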
+
+
+def already_tapped(module, brew_path, tap):
+ '''Returns True if already tapped.'''
+
+ rc, out, err = module.run_command([
+ brew_path,
+ 'tap',
+ ])
+
+ taps = [tap_.strip().lower() for tap_ in out.split('\n') if tap_]
+ tap_name = re.sub('homebrew-', '', tap.lower())
+
+ return tap_name in taps
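+
+# Note: the lookup above is case-insensitive and drops a 'homebrew-' prefix,
+# so e.g. 'Telemachus/Homebrew-Brew' is compared as 'telemachus/brew'.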
+
+
+def add_tap(module, brew_path, tap, url=None):
+ '''Adds a single tap.'''
+ failed, changed, msg = False, False, ''
+
+ if not a_valid_tap(tap):
+ failed = True
+ msg = 'not a valid tap: %s' % tap
+
+ elif not already_tapped(module, brew_path, tap):
+ if module.check_mode:
+ module.exit_json(changed=True)
+
+        cmd = [brew_path, 'tap', tap]
+        if url:
+            cmd.append(url)
+        rc, out, err = module.run_command(cmd)
+ if already_tapped(module, brew_path, tap):
+ changed = True
+ msg = 'successfully tapped: %s' % tap
+ else:
+ failed = True
+ msg = 'failed to tap: %s' % tap
+
+ else:
+ msg = 'already tapped: %s' % tap
+
+ return (failed, changed, msg)
+
+
+def add_taps(module, brew_path, taps):
+ '''Adds one or more taps.'''
+    failed, changed, unchanged, added, msg = False, False, 0, 0, ''
+
+ for tap in taps:
+ (failed, changed, msg) = add_tap(module, brew_path, tap)
+ if failed:
+ break
+ if changed:
+ added += 1
+ else:
+ unchanged += 1
+
+ if failed:
+ msg = 'added: %d, unchanged: %d, error: ' + msg
+ msg = msg % (added, unchanged)
+ elif added:
+ changed = True
+ msg = 'added: %d, unchanged: %d' % (added, unchanged)
+ else:
+ msg = 'added: %d, unchanged: %d' % (added, unchanged)
+
+ return (failed, changed, msg)
+
+
+def remove_tap(module, brew_path, tap):
+ '''Removes a single tap.'''
+ failed, changed, msg = False, False, ''
+
+ if not a_valid_tap(tap):
+ failed = True
+ msg = 'not a valid tap: %s' % tap
+
+ elif already_tapped(module, brew_path, tap):
+ if module.check_mode:
+ module.exit_json(changed=True)
+
+ rc, out, err = module.run_command([
+ brew_path,
+ 'untap',
+ tap,
+ ])
+ if not already_tapped(module, brew_path, tap):
+ changed = True
+ msg = 'successfully untapped: %s' % tap
+ else:
+ failed = True
+ msg = 'failed to untap: %s' % tap
+
+ else:
+ msg = 'already untapped: %s' % tap
+
+ return (failed, changed, msg)
+
+
+def remove_taps(module, brew_path, taps):
+ '''Removes one or more taps.'''
+    failed, changed, unchanged, removed, msg = False, False, 0, 0, ''
+
+ for tap in taps:
+ (failed, changed, msg) = remove_tap(module, brew_path, tap)
+ if failed:
+ break
+ if changed:
+ removed += 1
+ else:
+ unchanged += 1
+
+ if failed:
+ msg = 'removed: %d, unchanged: %d, error: ' + msg
+ msg = msg % (removed, unchanged)
+ elif removed:
+ changed = True
+ msg = 'removed: %d, unchanged: %d' % (removed, unchanged)
+ else:
+ msg = 'removed: %d, unchanged: %d' % (removed, unchanged)
+
+ return (failed, changed, msg)
+
+
+def main():
+ module = AnsibleModule(
+ argument_spec=dict(
+ name=dict(aliases=['tap'], type='list', required=True),
+ url=dict(default=None, required=False),
+ state=dict(default='present', choices=['present', 'absent']),
+ ),
+ supports_check_mode=True,
+ )
+
+ brew_path = module.get_bin_path(
+ 'brew',
+ required=True,
+ opt_dirs=['/usr/local/bin']
+ )
+
+ taps = module.params['name']
+ url = module.params['url']
+
+ if module.params['state'] == 'present':
+ if url is None:
+ # No tap URL provided explicitly, continue with bulk addition
+ # of all the taps.
+ failed, changed, msg = add_taps(module, brew_path, taps)
+ else:
+            # When a tap URL is provided explicitly, we allow adding a
+            # *single* tap only. Validate and proceed to add the single tap.
+            if len(taps) > 1:
+                msg = "List of multiple taps may not be provided with 'url' option."
+ module.fail_json(msg=msg)
+ else:
+ failed, changed, msg = add_tap(module, brew_path, taps[0], url)
+
+ if failed:
+ module.fail_json(msg=msg)
+ else:
+ module.exit_json(changed=changed, msg=msg)
+
+ elif module.params['state'] == 'absent':
+ failed, changed, msg = remove_taps(module, brew_path, taps)
+
+ if failed:
+ module.fail_json(msg=msg)
+ else:
+ module.exit_json(changed=changed, msg=msg)
+
+# this is magic, see lib/ansible/module_common.py
+from ansible.module_utils.basic import *
+
+if __name__ == '__main__':
+ main()
diff --git a/lib/ansible/modules/extras/packaging/os/layman.py b/lib/ansible/modules/extras/packaging/os/layman.py
new file mode 100644
index 0000000000..ac6acd12d4
--- /dev/null
+++ b/lib/ansible/modules/extras/packaging/os/layman.py
@@ -0,0 +1,261 @@
+#!/usr/bin/python
+# -*- coding: utf-8 -*-
+
+# (c) 2014, Jakub Jirutka <jakub@jirutka.cz>
+#
+# This file is part of Ansible
+#
+# Ansible is free software: you can redistribute it and/or modify
+# it under the terms of the GNU General Public License as published by
+# the Free Software Foundation, either version 3 of the License, or
+# (at your option) any later version.
+#
+# Ansible is distributed in the hope that it will be useful,
+# but WITHOUT ANY WARRANTY; without even the implied warranty of
+# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+# GNU General Public License for more details.
+#
+# You should have received a copy of the GNU General Public License
+# along with Ansible. If not, see <http://www.gnu.org/licenses/>.
+
+import shutil
+from os import path
+
+DOCUMENTATION = '''
+---
+module: layman
+author: "Jakub Jirutka (@jirutka)"
+version_added: "1.6"
+short_description: Manage Gentoo overlays
+description:
+  - Uses Layman to manage additional repositories for the Portage package manager on Gentoo Linux.
+    Please note that Layman must be installed on a managed node prior to using this module.
+requirements:
+ - "python >= 2.6"
+ - layman python module
+options:
+ name:
+ description:
+ - The overlay id to install, synchronize, or uninstall.
+ Use 'ALL' to sync all of the installed overlays (can be used only when C(state=updated)).
+ required: true
+ list_url:
+ description:
+      - A URL of an alternative overlays list that defines the overlay to install.
+        This list will be fetched and saved under C(${overlay_defs}/${name}.xml), where
+        C(overlay_defs) is read from the Layman configuration.
+ required: false
+ state:
+ description:
+ - Whether to install (C(present)), sync (C(updated)), or uninstall (C(absent)) the overlay.
+ required: false
+ default: present
+ choices: [present, absent, updated]
+ validate_certs:
+ description:
+ - If C(no), SSL certificates will not be validated. This should only be
+ set to C(no) when no other option exists. Prior to 1.9.3 the code
+ defaulted to C(no).
+ required: false
+ default: 'yes'
+ choices: ['yes', 'no']
+ version_added: '1.9.3'
+'''
+
+EXAMPLES = '''
+# Install the overlay 'mozilla' which is on the central overlays list.
+- layman: name=mozilla
+
+# Install the overlay 'cvut' from the specified alternative list.
+- layman: name=cvut list_url=http://raw.github.com/cvut/gentoo-overlay/master/overlay.xml
+
+# Update (sync) the overlay 'cvut', or install if not installed yet.
+- layman: name=cvut list_url=http://raw.github.com/cvut/gentoo-overlay/master/overlay.xml state=updated
+
+# Update (sync) all of the installed overlays.
+- layman: name=ALL state=updated
+
+# Uninstall the overlay 'cvut'.
+- layman: name=cvut state=absent
+'''
+
+USERAGENT = 'ansible-httpget'
+
+try:
+ from layman.api import LaymanAPI
+ from layman.config import BareConfig
+ HAS_LAYMAN_API = True
+except ImportError:
+ HAS_LAYMAN_API = False
+
+
+class ModuleError(Exception):
+    pass
+
+
+def init_layman(config=None):
+ '''Returns the initialized ``LaymanAPI``.
+
+ :param config: the layman's configuration to use (optional)
+ '''
+ if config is None:
+ config = BareConfig(read_configfile=True, quietness=1)
+ return LaymanAPI(config)
+
+
+def download_url(module, url, dest):
+ '''
+ :param url: the URL to download
+ :param dest: the absolute path of where to save the downloaded content to;
+ it must be writable and not a directory
+
+ :raises ModuleError
+ '''
+
+ # Hack to add params in the form that fetch_url expects
+ module.params['http_agent'] = USERAGENT
+ response, info = fetch_url(module, url)
+ if info['status'] != 200:
+ raise ModuleError("Failed to get %s: %s" % (url, info['msg']))
+
+ try:
+ with open(dest, 'w') as f:
+ shutil.copyfileobj(response, f)
+ except IOError as e:
+ raise ModuleError("Failed to write: %s" % str(e))
+
+
+def install_overlay(module, name, list_url=None):
+    '''Installs the overlay repository. If the overlay is not on the central
+    overlays list, then the URL of an alternative list must be provided via
+    :list_url. The list will be fetched and saved under
+    ``<overlay_defs>/<name>.xml`` (the location of ``overlay_defs`` is read
+    from the Layman configuration).
+
+ :param name: the overlay id
+ :param list_url: the URL of the remote repositories list to look for the overlay
+ definition (optional, default: None)
+
+    :returns: True if the overlay was installed, or False if it already exists
+ (i.e. nothing has changed)
+ :raises ModuleError
+ '''
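+    # A sketch of intended use (values taken from the EXAMPLES above):
+    #   install_overlay(module, 'cvut',
+    #                   'http://raw.github.com/cvut/gentoo-overlay/master/overlay.xml')
+    # fetches the list into <overlay_defs>/cvut.xml before adding the overlay.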
+ # read Layman configuration
+ layman_conf = BareConfig(read_configfile=True)
+ layman = init_layman(layman_conf)
+
+ if layman.is_installed(name):
+ return False
+
+ if module.check_mode:
+ mymsg = 'Would add layman repo \'' + name + '\''
+ module.exit_json(changed=True, msg=mymsg)
+
+ if not layman.is_repo(name):
+ if not list_url:
+ raise ModuleError("Overlay '%s' is not on the list of known " \
+ "overlays and URL of the remote list was not provided." % name)
+
+ overlay_defs = layman_conf.get_option('overlay_defs')
+ dest = path.join(overlay_defs, name + '.xml')
+
+ download_url(module, list_url, dest)
+
+ # reload config
+ layman = init_layman()
+
+ if not layman.add_repos(name):
+ raise ModuleError(layman.get_errors())
+
+ return True
+
+
+def uninstall_overlay(module, name):
+ '''Uninstalls the given overlay repository from the system.
+
+ :param name: the overlay id to uninstall
+
+    :returns: True if the overlay was uninstalled, or False if it doesn't exist
+ (i.e. nothing has changed)
+ :raises ModuleError
+ '''
+ layman = init_layman()
+
+ if not layman.is_installed(name):
+ return False
+
+ if module.check_mode:
+ mymsg = 'Would remove layman repo \'' + name + '\''
+ module.exit_json(changed=True, msg=mymsg)
+
+ layman.delete_repos(name)
+    errors = layman.get_errors()
+    if errors:
+        raise ModuleError(errors)
+
+ return True
+
+
+def sync_overlay(name):
+ '''Synchronizes the specified overlay repository.
+
+ :param name: the overlay repository id to sync
+ :raises ModuleError
+ '''
+ layman = init_layman()
+
+ if not layman.sync(name):
+ messages = [ str(item[1]) for item in layman.sync_results[2] ]
+ raise ModuleError(messages)
+
+
+def sync_overlays():
+ '''Synchronize all of the installed overlays.
+
+ :raises ModuleError
+ '''
+ layman = init_layman()
+
+ for name in layman.get_installed():
+ sync_overlay(name)
+
+
+def main():
+ # define module
+ module = AnsibleModule(
+ argument_spec = dict(
+ name = dict(required=True),
+ list_url = dict(aliases=['url']),
+ state = dict(default="present", choices=['present', 'absent', 'updated']),
+ validate_certs = dict(required=False, default=True, type='bool'),
+ ),
+ supports_check_mode=True
+ )
+
+ if not HAS_LAYMAN_API:
+ module.fail_json(msg='Layman is not installed')
+
+ state, name, url = (module.params[key] for key in ['state', 'name', 'list_url'])
+
+ changed = False
+ try:
+ if state == 'present':
+ changed = install_overlay(module, name, url)
+
+ elif state == 'updated':
+ if name == 'ALL':
+ sync_overlays()
+ elif install_overlay(module, name, url):
+ changed = True
+ else:
+ sync_overlay(name)
+ else:
+ changed = uninstall_overlay(module, name)
+
+ except ModuleError as e:
+ module.fail_json(msg=e.message)
+ else:
+ module.exit_json(changed=changed, name=name)
+
+
+# import module snippets
+from ansible.module_utils.basic import *
+from ansible.module_utils.urls import *
+if __name__ == '__main__':
+ main()
diff --git a/lib/ansible/modules/extras/packaging/os/macports.py b/lib/ansible/modules/extras/packaging/os/macports.py
new file mode 100644
index 0000000000..ca3a0f9742
--- /dev/null
+++ b/lib/ansible/modules/extras/packaging/os/macports.py
@@ -0,0 +1,217 @@
+#!/usr/bin/python
+# -*- coding: utf-8 -*-
+
+# (c) 2013, Jimmy Tang <jcftang@gmail.com>
+# Based on opkg (Patrick Pelletier <pp.pelletier@gmail.com>), pacman
+# (Afterburn) and pkgin (Shaun Zinck) modules
+#
+# This module is free software: you can redistribute it and/or modify
+# it under the terms of the GNU General Public License as published by
+# the Free Software Foundation, either version 3 of the License, or
+# (at your option) any later version.
+#
+# This software is distributed in the hope that it will be useful,
+# but WITHOUT ANY WARRANTY; without even the implied warranty of
+# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+# GNU General Public License for more details.
+#
+# You should have received a copy of the GNU General Public License
+# along with this software. If not, see <http://www.gnu.org/licenses/>.
+
+DOCUMENTATION = '''
+---
+module: macports
+author: "Jimmy Tang (@jcftang)"
+short_description: Package manager for MacPorts
+description:
+ - Manages MacPorts packages
+version_added: "1.1"
+options:
+ name:
+ description:
+ - name of package to install/remove
+ required: true
+ state:
+ description:
+ - state of the package
+ choices: [ 'present', 'absent', 'active', 'inactive' ]
+ required: false
+ default: present
+ update_cache:
+ description:
+ - update the package db first
+ required: false
+ default: "no"
+ choices: [ "yes", "no" ]
+notes: []
+'''
+EXAMPLES = '''
+- macports: name=foo state=present
+- macports: name=foo state=present update_cache=yes
+- macports: name=foo state=absent
+- macports: name=foo state=active
+- macports: name=foo state=inactive
+'''
+
+import pipes
+
+def update_package_db(module, port_path):
+ """ Updates packages list. """
+
+ rc, out, err = module.run_command("%s sync" % port_path)
+
+ if rc != 0:
+ module.fail_json(msg="could not update package db")
+
+
+def query_package(module, port_path, name, state="present"):
+ """ Returns whether a package is installed or not. """
+
+ if state == "present":
+
+ rc, out, err = module.run_command("%s installed | grep -q ^.*%s" % (pipes.quote(port_path), pipes.quote(name)), use_unsafe_shell=True)
+ if rc == 0:
+ return True
+
+ return False
+
+ elif state == "active":
+
+ rc, out, err = module.run_command("%s installed %s | grep -q active" % (pipes.quote(port_path), pipes.quote(name)), use_unsafe_shell=True)
+
+ if rc == 0:
+ return True
+
+ return False
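+
+# For illustration: query_package(module, port_path, 'nmap') greps the
+# "port installed" listing for the package, while state="active" instead
+# greps "port installed nmap" output for the word "active".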
+
+
+def remove_packages(module, port_path, packages):
+ """ Uninstalls one or more packages if installed. """
+
+ remove_c = 0
+    # Using a for loop so that, in case of error, we can report the package that failed
+ for package in packages:
+ # Query the package first, to see if we even need to remove
+ if not query_package(module, port_path, package):
+ continue
+
+ rc, out, err = module.run_command("%s uninstall %s" % (port_path, package))
+
+ if query_package(module, port_path, package):
+ module.fail_json(msg="failed to remove %s: %s" % (package, out))
+
+ remove_c += 1
+
+ if remove_c > 0:
+
+ module.exit_json(changed=True, msg="removed %s package(s)" % remove_c)
+
+ module.exit_json(changed=False, msg="package(s) already absent")
+
+
+def install_packages(module, port_path, packages):
+ """ Installs one or more packages if not already installed. """
+
+ install_c = 0
+
+ for package in packages:
+ if query_package(module, port_path, package):
+ continue
+
+ rc, out, err = module.run_command("%s install %s" % (port_path, package))
+
+ if not query_package(module, port_path, package):
+ module.fail_json(msg="failed to install %s: %s" % (package, out))
+
+ install_c += 1
+
+ if install_c > 0:
+ module.exit_json(changed=True, msg="installed %s package(s)" % (install_c))
+
+ module.exit_json(changed=False, msg="package(s) already present")
+
+
+def activate_packages(module, port_path, packages):
+    """ Activates one or more packages if inactive. """
+
+ activate_c = 0
+
+ for package in packages:
+ if not query_package(module, port_path, package):
+ module.fail_json(msg="failed to activate %s, package(s) not present" % (package))
+
+ if query_package(module, port_path, package, state="active"):
+ continue
+
+ rc, out, err = module.run_command("%s activate %s" % (port_path, package))
+
+ if not query_package(module, port_path, package, state="active"):
+ module.fail_json(msg="failed to activate %s: %s" % (package, out))
+
+ activate_c += 1
+
+ if activate_c > 0:
+ module.exit_json(changed=True, msg="activated %s package(s)" % (activate_c))
+
+ module.exit_json(changed=False, msg="package(s) already active")
+
+
+def deactivate_packages(module, port_path, packages):
+    """ Deactivates one or more packages if active. """
+
+ deactivated_c = 0
+
+ for package in packages:
+ if not query_package(module, port_path, package):
+            module.fail_json(msg="failed to deactivate %s, package(s) not present" % (package))
+
+ if not query_package(module, port_path, package, state="active"):
+ continue
+
+ rc, out, err = module.run_command("%s deactivate %s" % (port_path, package))
+
+ if query_package(module, port_path, package, state="active"):
+            module.fail_json(msg="failed to deactivate %s: %s" % (package, out))
+
+ deactivated_c += 1
+
+ if deactivated_c > 0:
+ module.exit_json(changed=True, msg="deactivated %s package(s)" % (deactivated_c))
+
+ module.exit_json(changed=False, msg="package(s) already inactive")
+
+
+def main():
+ module = AnsibleModule(
+ argument_spec = dict(
+ name = dict(aliases=["pkg"], required=True),
+ state = dict(default="present", choices=["present", "installed", "absent", "removed", "active", "inactive"]),
+ update_cache = dict(default="no", aliases=["update-cache"], type='bool')
+ )
+ )
+
+ port_path = module.get_bin_path('port', True, ['/opt/local/bin'])
+
+ p = module.params
+
+ if p["update_cache"]:
+ update_package_db(module, port_path)
+
+ pkgs = p["name"].split(",")
+
+ if p["state"] in ["present", "installed"]:
+ install_packages(module, port_path, pkgs)
+
+ elif p["state"] in ["absent", "removed"]:
+ remove_packages(module, port_path, pkgs)
+
+ elif p["state"] == "active":
+ activate_packages(module, port_path, pkgs)
+
+ elif p["state"] == "inactive":
+ deactivate_packages(module, port_path, pkgs)
+
+# import module snippets
+from ansible.module_utils.basic import *
+
+main()
diff --git a/lib/ansible/modules/extras/packaging/os/openbsd_pkg.py b/lib/ansible/modules/extras/packaging/os/openbsd_pkg.py
new file mode 100644
index 0000000000..59fdd35c26
--- /dev/null
+++ b/lib/ansible/modules/extras/packaging/os/openbsd_pkg.py
@@ -0,0 +1,522 @@
+#!/usr/bin/python
+# -*- coding: utf-8 -*-
+
+# (c) 2013, Patrik Lundin <patrik@sigterm.se>
+#
+# This file is part of Ansible
+#
+# Ansible is free software: you can redistribute it and/or modify
+# it under the terms of the GNU General Public License as published by
+# the Free Software Foundation, either version 3 of the License, or
+# (at your option) any later version.
+#
+# Ansible is distributed in the hope that it will be useful,
+# but WITHOUT ANY WARRANTY; without even the implied warranty of
+# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+# GNU General Public License for more details.
+#
+# You should have received a copy of the GNU General Public License
+# along with Ansible. If not, see <http://www.gnu.org/licenses/>.
+
+import os
+import platform
+import re
+import shlex
+import sqlite3
+
+from distutils.version import StrictVersion
+
+DOCUMENTATION = '''
+---
+module: openbsd_pkg
+author: "Patrik Lundin (@eest)"
+version_added: "1.1"
+short_description: Manage packages on OpenBSD.
+description:
+ - Manage packages on OpenBSD using the pkg tools.
+requirements: [ "python >= 2.5" ]
+options:
+ name:
+ required: true
+ description:
+ - Name of the package.
+ state:
+ required: true
+ choices: [ present, latest, absent ]
+ description:
+ - C(present) will make sure the package is installed.
+ C(latest) will make sure the latest version of the package is installed.
+ C(absent) will make sure the specified package is not installed.
+ build:
+ required: false
+ choices: [ yes, no ]
+ default: no
+ description:
+ - Build the package from source instead of downloading and installing
+ a binary. Requires that the port source tree is already installed.
+ Automatically builds and installs the 'sqlports' package, if it is
+ not already installed.
+ version_added: "2.1"
+ ports_dir:
+ required: false
+ default: /usr/ports
+ description:
+ - When used in combination with the 'build' option, allows overriding
+ the default ports source directory.
+ version_added: "2.1"
+'''
+
+EXAMPLES = '''
+# Make sure nmap is installed
+- openbsd_pkg: name=nmap state=present
+
+# Make sure nmap is the latest version
+- openbsd_pkg: name=nmap state=latest
+
+# Make sure nmap is not installed
+- openbsd_pkg: name=nmap state=absent
+
+# Make sure nmap is installed, build it from source if it is not
+- openbsd_pkg: name=nmap state=present build=yes
+
+# Specify a pkg flavour with '--'
+- openbsd_pkg: name=vim--no_x11 state=present
+
+# Specify the default flavour to avoid ambiguity errors
+- openbsd_pkg: name=vim-- state=present
+
+# Specify a package branch (requires at least OpenBSD 6.0)
+- openbsd_pkg: name=python%3.5 state=present
+
+# Update all packages on the system
+- openbsd_pkg: name=* state=latest
+'''
+
+# Function used for executing commands.
+def execute_command(cmd, module):
+ # Break command line into arguments.
+ # This makes run_command() use shell=False which we need to not cause shell
+ # expansion of special characters like '*'.
+ cmd_args = shlex.split(cmd)
+ return module.run_command(cmd_args)
+
+# Function used to find out if a package is currently installed.
+def get_package_state(name, pkg_spec, module):
+ info_cmd = 'pkg_info -Iq'
+
+ command = "%s inst:%s" % (info_cmd, name)
+
+ rc, stdout, stderr = execute_command(command, module)
+
+ if stderr:
+ module.fail_json(msg="failed in get_package_state(): " + stderr)
+
+ if stdout:
+ # If the requested package name is just a stem, like "python", we may
+ # find multiple packages with that name.
+        pkg_spec['installed_names'] = stdout.splitlines()
+ module.debug("get_package_state(): installed_names = %s" % pkg_spec['installed_names'])
+ return True
+ else:
+ return False
+
+# Function used to make sure a package is present.
+def package_present(name, installed_state, pkg_spec, module):
+ build = module.params['build']
+
+ if module.check_mode:
+ install_cmd = 'pkg_add -Imn'
+ else:
+ if build is True:
+ port_dir = "%s/%s" % (module.params['ports_dir'], get_package_source_path(name, pkg_spec, module))
+ if os.path.isdir(port_dir):
+ if pkg_spec['flavor']:
+ flavors = pkg_spec['flavor'].replace('-', ' ')
+ install_cmd = "cd %s && make clean=depends && FLAVOR=\"%s\" make install && make clean=depends" % (port_dir, flavors)
+ elif pkg_spec['subpackage']:
+ install_cmd = "cd %s && make clean=depends && SUBPACKAGE=\"%s\" make install && make clean=depends" % (port_dir, pkg_spec['subpackage'])
+ else:
+ install_cmd = "cd %s && make install && make clean=depends" % (port_dir)
+ else:
+ module.fail_json(msg="the port source directory %s does not exist" % (port_dir))
+ else:
+ install_cmd = 'pkg_add -Im'
+
+ if installed_state is False:
+
+ # Attempt to install the package
+ if build is True and not module.check_mode:
+            (rc, stdout, stderr) = module.run_command(install_cmd, use_unsafe_shell=True)
+ else:
+ (rc, stdout, stderr) = execute_command("%s %s" % (install_cmd, name), module)
+
+ # The behaviour of pkg_add is a bit different depending on if a
+ # specific version is supplied or not.
+ #
+ # When a specific version is supplied the return code will be 0 when
+ # a package is found and 1 when it is not. If a version is not
+ # supplied the tool will exit 0 in both cases.
+ #
+ # It is important to note that "version" relates to the
+ # packages-specs(7) notion of a version. If using the branch syntax
+ # (like "python%3.5") the version number is considered part of the
+        # stem, and pkg_add behaves the same as if the name did
+ # not contain a version (which it strictly speaking does not).
+ if pkg_spec['version'] or build is True:
+ # Depend on the return code.
+ module.debug("package_present(): depending on return code")
+ if rc:
+ changed=False
+ else:
+ # Depend on stderr instead.
+ module.debug("package_present(): depending on stderr")
+ if stderr:
+ # There is a corner case where having an empty directory in
+ # installpath prior to the right location will result in a
+ # "file:/local/package/directory/ is empty" message on stderr
+ # while still installing the package, so we need to look for
+                # while still installing the package, so we need to look
+                # for a message like "packagename-1.0: ok" just in case.
+ if match:
+ # It turns out we were able to install the package.
+ module.debug("package_present(): we were able to install package")
+ pass
+ else:
+ # We really did fail, fake the return code.
+ module.debug("package_present(): we really did fail")
+ rc = 1
+ changed=False
+ else:
+ module.debug("package_present(): stderr was not set")
+
+ if rc == 0:
+ if module.check_mode:
+ module.exit_json(changed=True)
+
+ changed=True
+
+ else:
+ rc = 0
+ stdout = ''
+ stderr = ''
+ changed=False
+
+ return (rc, stdout, stderr, changed)
+
+# Function used to make sure a package is the latest available version.
+def package_latest(name, installed_state, pkg_spec, module):
+
+ if module.params['build'] is True:
+ module.fail_json(msg="the combination of build=%s and state=latest is not supported" % module.params['build'])
+
+ if module.check_mode:
+ upgrade_cmd = 'pkg_add -umn'
+ else:
+ upgrade_cmd = 'pkg_add -um'
+
+ pre_upgrade_name = ''
+
+ if installed_state is True:
+
+ # Attempt to upgrade the package.
+ (rc, stdout, stderr) = execute_command("%s %s" % (upgrade_cmd, name), module)
+
+ # Look for output looking something like "nmap-6.01->6.25: ok" to see if
+ # something changed (or would have changed). Use \W to delimit the match
+ # from progress meter output.
+ changed = False
+ for installed_name in pkg_spec['installed_names']:
+ module.debug("package_latest(): checking for pre-upgrade package name: %s" % installed_name)
+ match = re.search("\W%s->.+: ok\W" % installed_name, stdout)
+ if match:
+ module.debug("package_latest(): pre-upgrade package name match: %s" % installed_name)
+ if module.check_mode:
+ module.exit_json(changed=True)
+
+ changed = True
+ break
+
+ # FIXME: This part is problematic. Based on the issues mentioned (and
+ # handled) in package_present() it is not safe to blindly trust stderr
+ # as an indicator that the command failed, and in the case with
+ # empty installpath directories this will break.
+ #
+ # For now keep this safeguard here, but ignore it if we managed to
+ # parse out a successful update above. This way we will report a
+ # successful run when we actually modify something but fail
+ # otherwise.
+        if not changed:
+ if stderr:
+ rc=1
+
+ return (rc, stdout, stderr, changed)
+
+ else:
+ # If package was not installed at all just make it present.
+ module.debug("package_latest(): package is not installed, calling package_present()")
+ return package_present(name, installed_state, pkg_spec, module)
+
+# Function used to make sure a package is not installed.
+def package_absent(name, installed_state, module):
+ if module.check_mode:
+ remove_cmd = 'pkg_delete -In'
+ else:
+ remove_cmd = 'pkg_delete -I'
+
+ if installed_state is True:
+
+ # Attempt to remove the package.
+ rc, stdout, stderr = execute_command("%s %s" % (remove_cmd, name), module)
+
+ if rc == 0:
+ if module.check_mode:
+ module.exit_json(changed=True)
+
+ changed=True
+ else:
+ changed=False
+
+ else:
+ rc = 0
+ stdout = ''
+ stderr = ''
+ changed=False
+
+ return (rc, stdout, stderr, changed)
+
+# Function used to parse the package name based on packages-specs(7).
+# The general name structure is "stem-version[-flavors]".
+#
+# Names containing "%" are a special variation not part of the
+# packages-specs(7) syntax. See pkg_add(1) on OpenBSD 6.0 or later for a
+# description.
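+#
+# For illustration, given the regexes below (hypothetical names):
+#   "nmap-6.25"   -> stem="nmap", version="6.25", style="version"
+#   "vim--no_x11" -> stem="vim", version=None, flavor="no_x11", style="versionless"
+#   "nmap"        -> stem="nmap", style="stem"
+#   "python%3.5"  -> stem="python%3.5", style="branch"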
+def parse_package_name(name, pkg_spec, module):
+ module.debug("parse_package_name(): parsing name: %s" % name)
+ # Do some initial matches so we can base the more advanced regex on that.
+ version_match = re.search("-[0-9]", name)
+ versionless_match = re.search("--", name)
+
+ # Stop if someone is giving us a name that both has a version and is
+ # version-less at the same time.
+ if version_match and versionless_match:
+ module.fail_json(msg="package name both has a version and is version-less: " + name)
+
+ # If name includes a version.
+ if version_match:
+ match = re.search("^(?P<stem>.*)-(?P<version>[0-9][^-]*)(?P<flavor_separator>-)?(?P<flavor>[a-z].*)?$", name)
+ if match:
+ pkg_spec['stem'] = match.group('stem')
+ pkg_spec['version_separator'] = '-'
+ pkg_spec['version'] = match.group('version')
+ pkg_spec['flavor_separator'] = match.group('flavor_separator')
+ pkg_spec['flavor'] = match.group('flavor')
+ pkg_spec['style'] = 'version'
+ else:
+ module.fail_json(msg="unable to parse package name at version_match: " + name)
+
+ # If name includes no version but is version-less ("--").
+ elif versionless_match:
+ match = re.search("^(?P<stem>.*)--(?P<flavor>[a-z].*)?$", name)
+ if match:
+ pkg_spec['stem'] = match.group('stem')
+ pkg_spec['version_separator'] = '-'
+ pkg_spec['version'] = None
+ pkg_spec['flavor_separator'] = '-'
+ pkg_spec['flavor'] = match.group('flavor')
+ pkg_spec['style'] = 'versionless'
+ else:
+ module.fail_json(msg="unable to parse package name at versionless_match: " + name)
+
+ # If name includes no version, and is not version-less, it is all a stem.
+ else:
+ match = re.search("^(?P<stem>.*)$", name)
+ if match:
+ pkg_spec['stem'] = match.group('stem')
+ pkg_spec['version_separator'] = None
+ pkg_spec['version'] = None
+ pkg_spec['flavor_separator'] = None
+ pkg_spec['flavor'] = None
+ pkg_spec['style'] = 'stem'
+ else:
+ module.fail_json(msg="unable to parse package name at else: " + name)
+
+ # If the stem contains an "%" then it needs special treatment.
+ branch_match = re.search("%", pkg_spec['stem'])
+ if branch_match:
+
+ branch_release = "6.0"
+
+ if version_match or versionless_match:
+ module.fail_json(msg="package name using 'branch' syntax also has a version or is version-less: " + name)
+ if StrictVersion(platform.release()) < StrictVersion(branch_release):
+ module.fail_json(msg="package name using 'branch' syntax requires at least OpenBSD %s: %s" % (branch_release, name))
+
+ pkg_spec['style'] = 'branch'
+
+ # Sanity check that there are no trailing dashes in flavor.
+ # Try to stop strange stuff early so we can be strict later.
+ if pkg_spec['flavor']:
+ match = re.search("-$", pkg_spec['flavor'])
+ if match:
+ module.fail_json(msg="trailing dash in flavor: " + pkg_spec['flavor'])
+
+# Function used for figuring out the port path.
+def get_package_source_path(name, pkg_spec, module):
+ pkg_spec['subpackage'] = None
+ if pkg_spec['stem'] == 'sqlports':
+ return 'databases/sqlports'
+ else:
+ # try for an exact match first
+ sqlports_db_file = '/usr/local/share/sqlports'
+ if not os.path.isfile(sqlports_db_file):
+ module.fail_json(msg="sqlports file '%s' is missing" % sqlports_db_file)
+
+ conn = sqlite3.connect(sqlports_db_file)
+ first_part_of_query = 'SELECT fullpkgpath, fullpkgname FROM ports WHERE fullpkgname'
+ query = first_part_of_query + ' = ?'
+        module.debug("get_package_source_path(): exact query: %s" % query)
+ cursor = conn.execute(query, (name,))
+ results = cursor.fetchall()
+
+ # next, try for a fuzzier match
+ if len(results) < 1:
+ looking_for = pkg_spec['stem'] + (pkg_spec['version_separator'] or '-') + (pkg_spec['version'] or '%')
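+            # For illustration: a plain stem like "vim" yields looking_for =
+            # "vim-%", so the LIKE query below matches any "vim-<version>".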
+ query = first_part_of_query + ' LIKE ?'
+ if pkg_spec['flavor']:
+ looking_for += pkg_spec['flavor_separator'] + pkg_spec['flavor']
+                module.debug("get_package_source_path(): fuzzy flavor query: %s" % query)
+ cursor = conn.execute(query, (looking_for,))
+ elif pkg_spec['style'] == 'versionless':
+ query += ' AND fullpkgname NOT LIKE ?'
+                module.debug("get_package_source_path(): fuzzy versionless query: %s" % query)
+ cursor = conn.execute(query, (looking_for, "%s-%%" % looking_for,))
+ else:
+                module.debug("get_package_source_path(): fuzzy query: %s" % query)
+ cursor = conn.execute(query, (looking_for,))
+ results = cursor.fetchall()
+
+ # error if we don't find exactly 1 match
+ conn.close()
+ if len(results) < 1:
+ module.fail_json(msg="could not find a port by the name '%s'" % name)
+ if len(results) > 1:
+ matches = map(lambda x:x[1], results)
+ module.fail_json(msg="too many matches, unsure which to build: %s" % ' OR '.join(matches))
+
+ # there's exactly 1 match, so figure out the subpackage, if any, then return
+ fullpkgpath = results[0][0]
+ parts = fullpkgpath.split(',')
+ if len(parts) > 1 and parts[1][0] == '-':
+ pkg_spec['subpackage'] = parts[1]
+ return parts[0]
+
+# Function used for upgrading all installed packages.
+def upgrade_packages(module):
+ if module.check_mode:
+ upgrade_cmd = 'pkg_add -Imnu'
+ else:
+ upgrade_cmd = 'pkg_add -Imu'
+
+ # Attempt to upgrade all packages.
+ rc, stdout, stderr = execute_command("%s" % upgrade_cmd, module)
+
+    # Try to find any occurrence of a package changing version like:
+ # "bzip2-1.0.6->1.0.6p0: ok".
+ match = re.search("\W\w.+->.+: ok\W", stdout)
+ if match:
+ if module.check_mode:
+ module.exit_json(changed=True)
+
+ changed=True
+
+ else:
+ changed=False
+
+ # It seems we can not trust the return value, so depend on the presence of
+ # stderr to know if something failed.
+ if stderr:
+ rc = 1
+ else:
+ rc = 0
+
+ return (rc, stdout, stderr, changed)
+
+# ===========================================
+# Main control flow.
+
+def main():
+ module = AnsibleModule(
+ argument_spec = dict(
+ name = dict(required=True),
+ state = dict(required=True, choices=['absent', 'installed', 'latest', 'present', 'removed']),
+ build = dict(default='no', type='bool'),
+ ports_dir = dict(default='/usr/ports'),
+ ),
+ supports_check_mode = True
+ )
+
+ name = module.params['name']
+ state = module.params['state']
+ build = module.params['build']
+ ports_dir = module.params['ports_dir']
+
+ rc = 0
+ stdout = ''
+ stderr = ''
+ result = {}
+ result['name'] = name
+ result['state'] = state
+ result['build'] = build
+
+ if build is True:
+ if not os.path.isdir(ports_dir):
+ module.fail_json(msg="the ports source directory %s does not exist" % (ports_dir))
+
+        # build sqlports if it's not installed yet
+ pkg_spec = {}
+ parse_package_name('sqlports', pkg_spec, module)
+ installed_state = get_package_state('sqlports', pkg_spec, module)
+ if not installed_state:
+ module.debug("main(): installing 'sqlports' because build=%s" % module.params['build'])
+ package_present('sqlports', installed_state, pkg_spec, module)
+
+ if name == '*':
+ if state != 'latest':
+ module.fail_json(msg="the package name '*' is only valid when using state=latest")
+ else:
+ # Perform an upgrade of all installed packages.
+ (rc, stdout, stderr, changed) = upgrade_packages(module)
+ else:
+ # Parse package name and put results in the pkg_spec dictionary.
+ pkg_spec = {}
+ parse_package_name(name, pkg_spec, module)
+
+ # Not sure how the branch syntax is supposed to play together
+ # with build mode. Disable it for now.
+ if pkg_spec['style'] == 'branch' and module.params['build'] is True:
+ module.fail_json(msg="the combination of 'branch' syntax and build=%s is not supported: %s" % (module.params['build'], name))
+
+ # Get package state.
+ installed_state = get_package_state(name, pkg_spec, module)
+
+ # Perform requested action.
+ if state in ['installed', 'present']:
+ (rc, stdout, stderr, changed) = package_present(name, installed_state, pkg_spec, module)
+ elif state in ['absent', 'removed']:
+ (rc, stdout, stderr, changed) = package_absent(name, installed_state, module)
+ elif state == 'latest':
+ (rc, stdout, stderr, changed) = package_latest(name, installed_state, pkg_spec, module)
+
+ if rc != 0:
+ if stderr:
+ module.fail_json(msg=stderr)
+ else:
+ module.fail_json(msg=stdout)
+
+ result['changed'] = changed
+
+ module.exit_json(**result)
+
+# Import module snippets.
+from ansible.module_utils.basic import *
+main()
diff --git a/lib/ansible/modules/extras/packaging/os/opkg.py b/lib/ansible/modules/extras/packaging/os/opkg.py
new file mode 100644
index 0000000000..9ac8f99b8c
--- /dev/null
+++ b/lib/ansible/modules/extras/packaging/os/opkg.py
@@ -0,0 +1,169 @@
+#!/usr/bin/python
+# -*- coding: utf-8 -*-
+
+# (c) 2013, Patrick Pelletier <pp.pelletier@gmail.com>
+# Based on pacman (Afterburn) and pkgin (Shaun Zinck) modules
+#
+# This module is free software: you can redistribute it and/or modify
+# it under the terms of the GNU General Public License as published by
+# the Free Software Foundation, either version 3 of the License, or
+# (at your option) any later version.
+#
+# This software is distributed in the hope that it will be useful,
+# but WITHOUT ANY WARRANTY; without even the implied warranty of
+# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+# GNU General Public License for more details.
+#
+# You should have received a copy of the GNU General Public License
+# along with this software. If not, see <http://www.gnu.org/licenses/>.
+
+DOCUMENTATION = '''
+---
+module: opkg
+author: "Patrick Pelletier (@skinp)"
+short_description: Package manager for OpenWrt
+description:
+ - Manages OpenWrt packages
+version_added: "1.1"
+options:
+ name:
+ description:
+ - name of package to install/remove
+ required: true
+ state:
+ description:
+ - state of the package
+ choices: [ 'present', 'absent' ]
+ required: false
+ default: present
+ force:
+ description:
+      - The opkg C(--force) option to use, if any.
+ choices: ["", "depends", "maintainer", "reinstall", "overwrite", "downgrade", "space", "postinstall", "remove", "checksum", "removal-of-dependent-packages"]
+ required: false
+    default: ""
+ version_added: "2.0"
+ update_cache:
+ description:
+ - update the package db first
+ required: false
+ default: "no"
+ choices: [ "yes", "no" ]
+notes: []
+'''
+EXAMPLES = '''
+- opkg: name=foo state=present
+- opkg: name=foo state=present update_cache=yes
+- opkg: name=foo state=absent
+- opkg: name=foo,bar state=absent
+- opkg: name=foo state=present force=overwrite
+'''
+
+import pipes
+
+def update_package_db(module, opkg_path):
+ """ Updates packages list. """
+
+ rc, out, err = module.run_command("%s update" % opkg_path)
+
+ if rc != 0:
+ module.fail_json(msg="could not update package db")
+
+
+def query_package(module, opkg_path, name, state="present"):
+ """ Returns whether a package is installed or not. """
+
+ if state == "present":
+
+ rc, out, err = module.run_command("%s list-installed | grep -q \"^%s \"" % (pipes.quote(opkg_path), pipes.quote(name)), use_unsafe_shell=True)
+ if rc == 0:
+ return True
+
+ return False
+
+
+def remove_packages(module, opkg_path, packages):
+ """ Uninstalls one or more packages if installed. """
+
+ p = module.params
+ force = p["force"]
+ if force:
+ force = "--force-%s" % force
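+    # For illustration: force="depends" becomes the "--force-depends" flag in
+    # the opkg command below; an empty value adds no flag.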
+
+ remove_c = 0
+    # Using a for loop so that, in case of error, we can report the package that failed
+ for package in packages:
+ # Query the package first, to see if we even need to remove
+ if not query_package(module, opkg_path, package):
+ continue
+
+ rc, out, err = module.run_command("%s remove %s %s" % (opkg_path, force, package))
+
+ if query_package(module, opkg_path, package):
+ module.fail_json(msg="failed to remove %s: %s" % (package, out))
+
+ remove_c += 1
+
+ if remove_c > 0:
+
+ module.exit_json(changed=True, msg="removed %s package(s)" % remove_c)
+
+ module.exit_json(changed=False, msg="package(s) already absent")
+
+
+def install_packages(module, opkg_path, packages):
+ """ Installs one or more packages if not already installed. """
+
+ p = module.params
+ force = p["force"]
+ if force:
+ force = "--force-%s" % force
+
+ install_c = 0
+
+ for package in packages:
+ if query_package(module, opkg_path, package):
+ continue
+
+ rc, out, err = module.run_command("%s install %s %s" % (opkg_path, force, package))
+
+ if not query_package(module, opkg_path, package):
+ module.fail_json(msg="failed to install %s: %s" % (package, out))
+
+ install_c += 1
+
+ if install_c > 0:
+ module.exit_json(changed=True, msg="installed %s package(s)" % (install_c))
+
+ module.exit_json(changed=False, msg="package(s) already present")
+
+
+def main():
+ module = AnsibleModule(
+ argument_spec = dict(
+ name = dict(aliases=["pkg"], required=True),
+ state = dict(default="present", choices=["present", "installed", "absent", "removed"]),
+ force = dict(default="", choices=["", "depends", "maintainer", "reinstall", "overwrite", "downgrade", "space", "postinstall", "remove", "checksum", "removal-of-dependent-packages"]),
+ update_cache = dict(default="no", aliases=["update-cache"], type='bool')
+ )
+ )
+
+ opkg_path = module.get_bin_path('opkg', True, ['/bin'])
+
+ p = module.params
+
+ if p["update_cache"]:
+ update_package_db(module, opkg_path)
+
+ pkgs = p["name"].split(",")
+
+ if p["state"] in ["present", "installed"]:
+ install_packages(module, opkg_path, pkgs)
+
+ elif p["state"] in ["absent", "removed"]:
+ remove_packages(module, opkg_path, pkgs)
+
+# import module snippets
+from ansible.module_utils.basic import *
+
+main()
diff --git a/lib/ansible/modules/extras/packaging/os/pacman.py b/lib/ansible/modules/extras/packaging/os/pacman.py
new file mode 100644
index 0000000000..74c474ad92
--- /dev/null
+++ b/lib/ansible/modules/extras/packaging/os/pacman.py
@@ -0,0 +1,350 @@
+#!/usr/bin/python -tt
+# -*- coding: utf-8 -*-
+
+# (c) 2012, Afterburn <http://github.com/afterburn>
+# (c) 2013, Aaron Bull Schaefer <aaron@elasticdog.com>
+# (c) 2015, Indrajit Raychaudhuri <irc+code@indrajit.com>
+#
+# This file is part of Ansible
+#
+# Ansible is free software: you can redistribute it and/or modify
+# it under the terms of the GNU General Public License as published by
+# the Free Software Foundation, either version 3 of the License, or
+# (at your option) any later version.
+#
+# Ansible is distributed in the hope that it will be useful,
+# but WITHOUT ANY WARRANTY; without even the implied warranty of
+# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+# GNU General Public License for more details.
+#
+# You should have received a copy of the GNU General Public License
+# along with Ansible. If not, see <http://www.gnu.org/licenses/>.
+
+DOCUMENTATION = '''
+---
+module: pacman
+short_description: Manage packages with I(pacman)
+description:
+ - Manage packages with the I(pacman) package manager, which is used by
+ Arch Linux and its variants.
+version_added: "1.0"
+author:
+ - "Indrajit Raychaudhuri (@indrajitr)"
+ - "'Aaron Bull Schaefer (@elasticdog)' <aaron@elasticdog.com>"
+ - "Afterburn"
+notes: []
+requirements: []
+options:
+ name:
+ description:
+ - Name of the package to install, upgrade, or remove.
+ required: false
+ default: null
+ aliases: [ 'pkg', 'package' ]
+
+ state:
+ description:
+ - Desired state of the package.
+ required: false
+ default: "present"
+ choices: ["present", "absent", "latest"]
+
+ recurse:
+ description:
+ - When removing a package, also remove its dependencies, provided
+ that they are not required by other packages and were not
+ explicitly installed by a user.
+ required: false
+ default: no
+ choices: ["yes", "no"]
+ version_added: "1.3"
+
+ force:
+ description:
+      - When removing a package, force its removal without any checks.
+        When updating the cache, force a redownload of the repo databases.
+ required: false
+ default: no
+ choices: ["yes", "no"]
+ version_added: "2.0"
+
+ update_cache:
+ description:
+ - Whether or not to refresh the master package lists. This can be
+ run as part of a package installation or as a separate step.
+ required: false
+ default: no
+ choices: ["yes", "no"]
+ aliases: [ 'update-cache' ]
+
+ upgrade:
+ description:
+      - Whether or not to upgrade the whole system.
+ required: false
+ default: no
+ choices: ["yes", "no"]
+ version_added: "2.0"
+'''
+
+EXAMPLES = '''
+# Install package foo
+- pacman: name=foo state=present
+
+# Upgrade package foo
+- pacman: name=foo state=latest update_cache=yes
+
+# Remove packages foo and bar
+- pacman: name=foo,bar state=absent
+
+# Recursively remove package baz
+- pacman: name=baz state=absent recurse=yes
+
+# Run the equivalent of "pacman -Sy" as a separate step
+- pacman: update_cache=yes
+
+# Run the equivalent of "pacman -Su" as a separate step
+- pacman: upgrade=yes
+
+# Run the equivalent of "pacman -Syu" as a separate step
+- pacman: update_cache=yes upgrade=yes
+
+# Run the equivalent of "pacman -Rdd", force remove package baz
+- pacman: name=baz state=absent force=yes
+'''
+
+import shlex
+import os
+import re
+import sys
+
+def get_version(pacman_output):
+ """Take pacman -Qi or pacman -Si output and get the Version"""
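+    # For illustration: a line like "Version         : 6.47-1" in pacman -Qi
+    # or -Si output yields "6.47-1" (only the first ':' is split on, so an
+    # epoch such as "1:6.47-1" is preserved).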
+ lines = pacman_output.split('\n')
+ for line in lines:
+        if line.startswith('Version'):
+            return line.split(':', 1)[1].strip()
+ return None
+
+def query_package(module, pacman_path, name, state="present"):
+    """Query the package status in both the local system and the repository.
+
+    Returns three booleans: whether the package is installed, whether it is
+    up-to-date, and whether there was an error fetching the remote version
+    information.
+    """
+ if state == "present":
+ lcmd = "%s -Qi %s" % (pacman_path, name)
+ lrc, lstdout, lstderr = module.run_command(lcmd, check_rc=False)
+ if lrc != 0:
+ # package is not installed locally
+ return False, False, False
+
+ # get the version installed locally (if any)
+ lversion = get_version(lstdout)
+
+ rcmd = "%s -Si %s" % (pacman_path, name)
+ rrc, rstdout, rstderr = module.run_command(rcmd, check_rc=False)
+ # get the version in the repository
+ rversion = get_version(rstdout)
+
+ if rrc == 0:
+            # Return True to indicate that the package is installed locally,
+            # the result of the version comparison to indicate whether it is
+            # up-to-date, and False to indicate the remote version was
+            # fetched without error.
+            return True, (lversion == rversion), False
+
+        # The package is installed but the remote version could not be
+        # fetched; the last True flags the error.
+ return True, True, True
+
+
+def update_package_db(module, pacman_path):
+ if module.params["force"]:
+ args = "Syy"
+ else:
+ args = "Sy"
+
+ cmd = "%s -%s" % (pacman_path, args)
+ rc, stdout, stderr = module.run_command(cmd, check_rc=False)
+
+ if rc == 0:
+ return True
+ else:
+ module.fail_json(msg="could not update package db")
+
+def upgrade(module, pacman_path):
+ cmdupgrade = "%s -Suq --noconfirm" % (pacman_path)
+ cmdneedrefresh = "%s -Qqu" % (pacman_path)
+ rc, stdout, stderr = module.run_command(cmdneedrefresh, check_rc=False)
+
+ if rc == 0:
+ if module.check_mode:
+ data = stdout.split('\n')
+ module.exit_json(changed=True, msg="%s package(s) would be upgraded" % (len(data) - 1))
+ rc, stdout, stderr = module.run_command(cmdupgrade, check_rc=False)
+ if rc == 0:
+ module.exit_json(changed=True, msg='System upgraded')
+ else:
+ module.fail_json(msg="Could not upgrade")
+ else:
+ module.exit_json(changed=False, msg='Nothing to upgrade')
+
+def remove_packages(module, pacman_path, packages):
+ if module.params["recurse"] or module.params["force"]:
+ if module.params["recurse"]:
+ args = "Rs"
+ if module.params["force"]:
+ args = "Rdd"
+ if module.params["recurse"] and module.params["force"]:
+ args = "Rdds"
+ else:
+ args = "R"
+
+ remove_c = 0
+ # Use a for loop so that, in case of an error, we can report the package that failed
+ for package in packages:
+ # Query the package first, to see if we even need to remove
+ installed, updated, unknown = query_package(module, pacman_path, package)
+ if not installed:
+ continue
+
+ cmd = "%s -%s %s --noconfirm" % (pacman_path, args, package)
+ rc, stdout, stderr = module.run_command(cmd, check_rc=False)
+
+ if rc != 0:
+ module.fail_json(msg="failed to remove %s" % (package))
+
+ remove_c += 1
+
+ if remove_c > 0:
+ module.exit_json(changed=True, msg="removed %s package(s)" % remove_c)
+
+ module.exit_json(changed=False, msg="package(s) already absent")
+
+
+def install_packages(module, pacman_path, state, packages, package_files):
+ install_c = 0
+ package_err = []
+ message = ""
+
+ for i, package in enumerate(packages):
+ # if the package is installed and state == present or state == latest and is up-to-date then skip
+ installed, updated, latestError = query_package(module, pacman_path, package)
+ if latestError and state == 'latest':
+ package_err.append(package)
+
+ if installed and (state == 'present' or (state == 'latest' and updated)):
+ continue
+
+ if package_files[i]:
+ params = '-U %s' % package_files[i]
+ else:
+ params = '-S %s' % package
+
+ cmd = "%s %s --noconfirm --needed" % (pacman_path, params)
+ rc, stdout, stderr = module.run_command(cmd, check_rc=False)
+
+ if rc != 0:
+ module.fail_json(msg="failed to install %s" % (package))
+
+ install_c += 1
+
+ if state == 'latest' and len(package_err) > 0:
+ message = "But could not ensure 'latest' state for %s package(s) as remote version could not be fetched." % (package_err)
+
+ if install_c > 0:
+ module.exit_json(changed=True, msg="installed %s package(s). %s" % (install_c, message))
+
+ module.exit_json(changed=False, msg="package(s) already installed. %s" % (message))
+
+def check_packages(module, pacman_path, packages, state):
+ would_be_changed = []
+ for package in packages:
+ installed, updated, unknown = query_package(module, pacman_path, package)
+ if ((state in ["present", "latest"] and not installed) or
+ (state == "absent" and installed) or
+ (state == "latest" and not updated)):
+ would_be_changed.append(package)
+ if would_be_changed:
+ if state == "absent":
+ state = "removed"
+ module.exit_json(changed=True, msg="%s package(s) would be %s" % (
+ len(would_be_changed), state))
+ else:
+ module.exit_json(changed=False, msg="package(s) already %s" % state)
+
+
+def expand_package_groups(module, pacman_path, pkgs):
+ expanded = []
+
+ for pkg in pkgs:
+ cmd = "%s -Sgq %s" % (pacman_path, pkg)
+ rc, stdout, stderr = module.run_command(cmd, check_rc=False)
+
+ if rc == 0:
+ # A group was found matching the name, so expand it
+ for name in stdout.split('\n'):
+ name = name.strip()
+ if name:
+ expanded.append(name)
+ else:
+ expanded.append(pkg)
+
+ return expanded
+
+
+def main():
+ module = AnsibleModule(
+ argument_spec = dict(
+ name = dict(aliases=['pkg', 'package'], type='list'),
+ state = dict(default='present', choices=['present', 'installed', "latest", 'absent', 'removed']),
+ recurse = dict(default=False, type='bool'),
+ force = dict(default=False, type='bool'),
+ upgrade = dict(default=False, type='bool'),
+ update_cache = dict(default=False, aliases=['update-cache'], type='bool')
+ ),
+ required_one_of = [['name', 'update_cache', 'upgrade']],
+ supports_check_mode = True)
+
+ pacman_path = module.get_bin_path('pacman', True)
+
+ p = module.params
+
+ # normalize the state parameter
+ if p['state'] in ['present', 'installed']:
+ p['state'] = 'present'
+ elif p['state'] in ['absent', 'removed']:
+ p['state'] = 'absent'
+
+ if p["update_cache"] and not module.check_mode:
+ update_package_db(module, pacman_path)
+ if not (p['name'] or p['upgrade']):
+ module.exit_json(changed=True, msg='Updated the package master lists')
+
+ if p['update_cache'] and module.check_mode and not (p['name'] or p['upgrade']):
+ module.exit_json(changed=True, msg='Would have updated the package cache')
+
+ if p['upgrade']:
+ upgrade(module, pacman_path)
+
+ if p['name']:
+ pkgs = expand_package_groups(module, pacman_path, p['name'])
+
+ pkg_files = []
+ for i, pkg in enumerate(pkgs):
+ if pkg.endswith('.pkg.tar.xz'):
+ # The package given is a filename, extract the raw pkg name from
+ # it and store the filename
+ pkg_files.append(pkg)
+ pkgs[i] = re.sub('-[0-9].*$', '', pkgs[i].split('/')[-1])
+ else:
+ pkg_files.append(None)
+
+ if module.check_mode:
+ check_packages(module, pacman_path, pkgs, p['state'])
+
+ if p['state'] in ['present', 'latest']:
+ install_packages(module, pacman_path, p['state'], pkgs, pkg_files)
+ elif p['state'] == 'absent':
+ remove_packages(module, pacman_path, pkgs)
+
+# import module snippets
+from ansible.module_utils.basic import *
+
+if __name__ == "__main__":
+ main()
diff --git a/lib/ansible/modules/extras/packaging/os/pkg5.py b/lib/ansible/modules/extras/packaging/os/pkg5.py
new file mode 100644
index 0000000000..4fb34d7a51
--- /dev/null
+++ b/lib/ansible/modules/extras/packaging/os/pkg5.py
@@ -0,0 +1,168 @@
+#!/usr/bin/python
+# -*- coding: utf-8 -*-
+
+# Copyright 2014 Peter Oliver <ansible@mavit.org.uk>
+#
+# This program is free software: you can redistribute it and/or modify
+# it under the terms of the GNU General Public License as published by
+# the Free Software Foundation, either version 3 of the License, or
+# (at your option) any later version.
+#
+# This program is distributed in the hope that it will be useful,
+# but WITHOUT ANY WARRANTY; without even the implied warranty of
+# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+# GNU General Public License for more details.
+#
+# You should have received a copy of the GNU General Public License
+# along with this program. If not, see <http://www.gnu.org/licenses/>.
+
+DOCUMENTATION = '''
+---
+module: pkg5
+author: "Peter Oliver (@mavit)"
+short_description: Manages packages with the Solaris 11 Image Packaging System
+version_added: 1.9
+description:
+ - IPS packages are the native packages in Solaris 11 and higher.
+notes:
+ - The naming of IPS packages is explained at U(http://www.oracle.com/technetwork/articles/servers-storage-admin/ips-package-versioning-2232906.html).
+options:
+ name:
+ description:
+ - An FMRI of the package(s) to be installed/removed/updated.
+ - Multiple packages may be specified, separated by C(,).
+ required: true
+ state:
+ description:
+ - Whether to install (I(present), I(latest)), or remove (I(absent)) a
+ package.
+ required: false
+ default: present
+ choices: [ present, latest, absent ]
+ accept_licenses:
+ description:
+ - Accept any licenses.
+ required: false
+ default: false
+ choices: [ true, false ]
+ aliases: [ accept_licences, accept ]
+'''
+EXAMPLES = '''
+# Install Vim:
+- pkg5: name=editor/vim
+
+# Remove finger daemon:
+- pkg5: name=service/network/finger state=absent
+
+# Install several packages at once:
+- pkg5:
+ name:
+ - /file/gnu-findutils
+ - /text/gnu-grep
+'''
+
+import re
+
+def main():
+ module = AnsibleModule(
+ argument_spec=dict(
+ name=dict(required=True, type='list'),
+ state=dict(
+ default='present',
+ choices=[
+ 'present',
+ 'installed',
+ 'latest',
+ 'absent',
+ 'uninstalled',
+ 'removed',
+ ]
+ ),
+ accept_licenses=dict(
+ type='bool',
+ default=False,
+ aliases=['accept_licences', 'accept'],
+ ),
+ )
+ )
+
+ params = module.params
+ packages = []
+
+ # pkg(5) FMRIs include a comma before the release number, but
+ # AnsibleModule will have split this into multiple items for us.
+ # Try to spot where this has happened and fix it.
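+ # e.g. a hypothetical name "library/foo@1.0,5.11-0.175" arrives here as
+ # ['library/foo@1.0', '5.11-0.175'] and is re-joined below.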
+ for fragment in params['name']:
+ if (
+ re.search(r'^\d+(?:\.\d+)*', fragment)
+ and packages and re.search(r'@[^,]*$', packages[-1])
+ ):
+ packages[-1] += ',' + fragment
+ else:
+ packages.append(fragment)
+
+ if params['state'] in ['present', 'installed']:
+ ensure(module, 'present', packages, params)
+ elif params['state'] in ['latest']:
+ ensure(module, 'latest', packages, params)
+ elif params['state'] in ['absent', 'uninstalled', 'removed']:
+ ensure(module, 'absent', packages, params)
+
+
+def ensure(module, state, packages, params):
+ response = {
+ 'results': [],
+ 'msg': '',
+ }
+ behaviour = {
+ 'present': {
+ 'filter': lambda p: not is_installed(module, p),
+ 'subcommand': 'install',
+ },
+ 'latest': {
+ 'filter': lambda p: not is_latest(module, p),
+ 'subcommand': 'install',
+ },
+ 'absent': {
+ 'filter': lambda p: is_installed(module, p),
+ 'subcommand': 'uninstall',
+ },
+ }
+
+ if params['accept_licenses']:
+ accept_licenses = ['--accept']
+ else:
+ accept_licenses = []
+
+ to_modify = filter(behaviour[state]['filter'], packages)
+ if to_modify:
+ rc, out, err = module.run_command(
+ [
+ 'pkg', behaviour[state]['subcommand']
+ ]
+ + accept_licenses
+ + [
+ '-q', '--'
+ ] + to_modify
+ )
+ response['rc'] = rc
+ response['results'].append(out)
+ response['msg'] += err
+ response['changed'] = True
+ if rc != 0:
+ module.fail_json(**response)
+
+ module.exit_json(**response)
+
+
+def is_installed(module, package):
+ rc, out, err = module.run_command(['pkg', 'list', '--', package])
+ return not bool(int(rc))
+
+
+def is_latest(module, package):
+ rc, out, err = module.run_command(['pkg', 'list', '-u', '--', package])
+ return bool(int(rc))
+
+
+from ansible.module_utils.basic import *
+if __name__ == '__main__':
+ main()
diff --git a/lib/ansible/modules/extras/packaging/os/pkg5_publisher.py b/lib/ansible/modules/extras/packaging/os/pkg5_publisher.py
new file mode 100644
index 0000000000..79eccd2ec0
--- /dev/null
+++ b/lib/ansible/modules/extras/packaging/os/pkg5_publisher.py
@@ -0,0 +1,201 @@
+#!/usr/bin/python
+# -*- coding: utf-8 -*-
+
+# Copyright 2014 Peter Oliver <ansible@mavit.org.uk>
+#
+# This program is free software: you can redistribute it and/or modify
+# it under the terms of the GNU General Public License as published by
+# the Free Software Foundation, either version 3 of the License, or
+# (at your option) any later version.
+#
+# This program is distributed in the hope that it will be useful,
+# but WITHOUT ANY WARRANTY; without even the implied warranty of
+# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+# GNU General Public License for more details.
+#
+# You should have received a copy of the GNU General Public License
+# along with this program. If not, see <http://www.gnu.org/licenses/>.
+
+DOCUMENTATION = '''
+---
+module: pkg5_publisher
+author: "Peter Oliver (@mavit)"
+short_description: Manages Solaris 11 Image Packaging System publishers
+version_added: 1.9
+description:
+ - IPS packages are the native packages in Solaris 11 and higher.
+ - This module configures which publishers a client will download IPS
+ packages from.
+options:
+ name:
+ description:
+ - The publisher's name.
+ required: true
+ aliases: [ publisher ]
+ state:
+ description:
+ - Whether to ensure that a publisher is present or absent.
+ required: false
+ default: present
+ choices: [ present, absent ]
+ sticky:
+ description:
+ - Packages installed from a sticky repository can only receive updates
+ from that repository.
+ required: false
+ default: null
+ choices: [ true, false ]
+ enabled:
+ description:
+ - Is the repository enabled or disabled?
+ required: false
+ default: null
+ choices: [ true, false ]
+ origin:
+ description:
+ - A path or URL to the repository.
+ - Multiple values may be provided.
+ required: false
+ default: null
+ mirror:
+ description:
+ - A path or URL to the repository mirror.
+ - Multiple values may be provided.
+ required: false
+ default: null
+'''
+EXAMPLES = '''
+# Fetch packages for the solaris publisher direct from Oracle:
+- pkg5_publisher: name=solaris sticky=true origin=https://pkg.oracle.com/solaris/support/
+
+# Configure a publisher for locally-produced packages:
+- pkg5_publisher: name=site origin=https://pkg.example.com/site/
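+
+# Remove a publisher that is no longer wanted (hypothetical name):
+- pkg5_publisher: name=legacy state=absent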
+'''
+
+def main():
+ module = AnsibleModule(
+ argument_spec=dict(
+ name=dict(required=True, aliases=['publisher']),
+ state=dict(default='present', choices=['present', 'absent']),
+ sticky=dict(type='bool'),
+ enabled=dict(type='bool'),
+ # search_after=dict(),
+ # search_before=dict(),
+ origin=dict(type='list'),
+ mirror=dict(type='list'),
+ )
+ )
+
+ for option in ['origin', 'mirror']:
+ if module.params[option] == ['']:
+ module.params[option] = []
+
+ if module.params['state'] == 'present':
+ modify_publisher(module, module.params)
+ else:
+ unset_publisher(module, module.params['name'])
+
+
+def modify_publisher(module, params):
+ name = params['name']
+ existing = get_publishers(module)
+
+ if name in existing:
+ for option in ['origin', 'mirror', 'sticky', 'enabled']:
+ if params[option] is not None and params[option] != existing[name][option]:
+ return set_publisher(module, params)
+ else:
+ return set_publisher(module, params)
+
+ module.exit_json()
+
+
+def set_publisher(module, params):
+ name = params['name']
+ args = []
+
+ if params['origin'] is not None:
+ args.append('--remove-origin=*')
+ args.extend(['--add-origin=' + u for u in params['origin']])
+ if params['mirror'] is not None:
+ args.append('--remove-mirror=*')
+ args.extend(['--add-mirror=' + u for u in params['mirror']])
+
+ if params['sticky'] is not None:
+ args.append('--sticky' if params['sticky'] else '--non-sticky')
+
+ if params['enabled'] is not None:
+ args.append('--enable' if params['enabled'] else '--disable')
+
+ rc, out, err = module.run_command(
+ ["pkg", "set-publisher"] + args + [name],
+ check_rc=True
+ )
+ response = {
+ 'rc': rc,
+ 'results': [out],
+ 'msg': err,
+ 'changed': True,
+ }
+ module.exit_json(**response)
+
+
+def unset_publisher(module, publisher):
+ if publisher not in get_publishers(module):
+ module.exit_json()
+
+ rc, out, err = module.run_command(
+ ["pkg", "unset-publisher", publisher],
+ check_rc=True
+ )
+ response = {
+ 'rc': rc,
+ 'results': [out],
+ 'msg': err,
+ 'changed': True,
+ }
+ module.exit_json(**response)
+
+
+def get_publishers(module):
+ rc, out, err = module.run_command(["pkg", "publisher", "-Ftsv"], True)
+
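+ # The first -Ftsv line is a tab-separated header; it is expected to
+ # contain at least the columns publisher, sticky, enabled, type and uri.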
+ lines = out.splitlines()
+ keys = lines.pop(0).lower().split("\t")
+
+ publishers = {}
+ for line in lines:
+ values = dict(zip(keys, map(unstringify, line.split("\t"))))
+ name = values['publisher']
+
+ if name not in publishers:
+ publishers[name] = dict(
+ (k, values[k]) for k in ['sticky', 'enabled']
+ )
+ publishers[name]['origin'] = []
+ publishers[name]['mirror'] = []
+
+ if values['type'] is not None:
+ publishers[name][values['type']].append(values['uri'])
+
+ return publishers
+
+
+def unstringify(val):
+ if val == "-" or val == '':
+ return None
+ elif val == "true":
+ return True
+ elif val == "false":
+ return False
+ else:
+ return val
+
+
+from ansible.module_utils.basic import *
+if __name__ == '__main__':
+ main()
diff --git a/lib/ansible/modules/extras/packaging/os/pkgin.py b/lib/ansible/modules/extras/packaging/os/pkgin.py
new file mode 100755
index 0000000000..055891ebe0
--- /dev/null
+++ b/lib/ansible/modules/extras/packaging/os/pkgin.py
@@ -0,0 +1,371 @@
+#!/usr/bin/python
+# -*- coding: utf-8 -*-
+
+# Copyright (c) 2013 Shaun Zinck <shaun.zinck at gmail.com>
+# Copyright (c) 2015 Lawrence Leonard Gilbert <larry@L2G.to>
+# Copyright (c) 2016 Jasper Lievisse Adriaanse <j at jasper.la>
+#
+# Written by Shaun Zinck
+# Based on pacman module written by Afterburn <http://github.com/afterburn>
+# that was based on apt module written by Matthew Williams <matthew@flowroute.com>
+#
+# This module is free software: you can redistribute it and/or modify
+# it under the terms of the GNU General Public License as published by
+# the Free Software Foundation, either version 3 of the License, or
+# (at your option) any later version.
+#
+# This software is distributed in the hope that it will be useful,
+# but WITHOUT ANY WARRANTY; without even the implied warranty of
+# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+# GNU General Public License for more details.
+#
+# You should have received a copy of the GNU General Public License
+# along with this software. If not, see <http://www.gnu.org/licenses/>.
+
+
+DOCUMENTATION = '''
+---
+module: pkgin
+short_description: Package manager for SmartOS, NetBSD, et al.
+description:
+ - "The standard package manager for SmartOS, but also usable on NetBSD
+ or any OS that uses C(pkgsrc). (Home: U(http://pkgin.net/))"
+version_added: "1.0"
+author:
+ - "Larry Gilbert (L2G)"
+ - "Shaun Zinck (@szinck)"
+ - "Jasper Lievisse Adriaanse (@jasperla)"
+notes:
+ - "Known bug with pkgin < 0.8.0: if a package is removed and another
+ package depends on it, the other package will be silently removed as
+ well. New to Ansible 1.9: check-mode support."
+options:
+ name:
+ description:
+ - Name of package to install/remove.
+ - Multiple names may be given, separated by commas.
+ required: false
+ default: null
+ state:
+ description:
+ - Intended state of the package
+ choices: [ 'present', 'absent' ]
+ required: false
+ default: present
+ update_cache:
+ description:
+ - Update the repository database. Can be run with other steps or on its own.
+ required: false
+ default: no
+ choices: [ "yes", "no" ]
+ version_added: "2.1"
+ upgrade:
+ description:
+ - Upgrade main packages to their newer versions
+ required: false
+ default: no
+ choices: [ "yes", "no" ]
+ version_added: "2.1"
+ full_upgrade:
+ description:
+ - Upgrade all packages to their newer versions
+ required: false
+ default: no
+ choices: [ "yes", "no" ]
+ version_added: "2.1"
+ clean:
+ description:
+ - Clean packages cache
+ required: false
+ default: no
+ choices: [ "yes", "no" ]
+ version_added: "2.1"
+ force:
+ description:
+ - Force package reinstall
+ required: false
+ default: no
+ choices: [ "yes", "no" ]
+ version_added: "2.1"
+'''
+
+EXAMPLES = '''
+# install package foo
+- pkgin: name=foo state=present
+
+# Update database and install "foo" package
+- pkgin: name=foo update_cache=yes
+
+# remove package foo
+- pkgin: name=foo state=absent
+
+# remove packages foo and bar
+- pkgin: name=foo,bar state=absent
+
+# Update repositories as a separate step
+- pkgin: update_cache=yes
+
+# Upgrade main packages (equivalent to C(pkgin upgrade))
+- pkgin: upgrade=yes
+
+# Upgrade all packages (equivalent to C(pkgin full-upgrade))
+- pkgin: full_upgrade=yes
+
+# Force-upgrade all packages (equivalent to C(pkgin -F full-upgrade))
+- pkgin: full_upgrade=yes force=yes
+
+# clean packages cache (equivalent to C(pkgin clean))
+- pkgin: clean=yes
+'''
+
+
+import re
+
+def query_package(module, name):
+ """Search for the package by name.
+
+ Possible return values:
+ * "present" - installed, no upgrade needed
+ * "outdated" - installed, but can be upgraded
+ * False - not installed or not found
+ """
+
+ # test whether '-p' (parsable) flag is supported.
+ rc, out, err = module.run_command("%s -p -v" % PKGIN_PATH)
+
+ if rc == 0:
+ pflag = '-p'
+ splitchar = ';'
+ else:
+ pflag = ''
+ splitchar = ' '
+
+ # Use "pkgin search" to find the package. The regular expression will
+ # only match on the complete name.
+ rc, out, err = module.run_command("%s %s search \"^%s$\"" % (PKGIN_PATH, pflag, name))
+
+ # rc will not be 0 unless the search was a success
+ if rc == 0:
+
+ # Search results may contain more than one line (e.g., 'emacs'), so iterate
+ # through each line to see if we have a match.
+ packages = out.split('\n')
+
+ for package in packages:
+
+ # Break up the line at the separator. The first part will be the package with its
+ # version (e.g. 'gcc47-libs-4.7.2nb4'), and the second will be the state
+ # of the package:
+ # '' - not installed
+ # '<' - installed but out of date
+ # '=' - installed and up to date
+ # '>' - installed but newer than the repository version
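+ # e.g. (hypothetical) with parsable output: "vim-8.0;=;Vim editor";
+ # without the -p flag the fields are space-separated instead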
+ pkgname_with_version, raw_state = package.split(splitchar)[0:2]
+
+ # Search for package, stripping version
+ # (results in sth like 'gcc47-libs' or 'emacs24-nox11')
+ pkg_search_obj = re.search(r'^(.*?)\-[0-9][0-9.]*(nb[0-9]+)*', pkgname_with_version, re.M)
+
+ # Do not proceed unless we have a match
+ if not pkg_search_obj:
+ continue
+
+ # Grab matched string
+ pkgname_without_version = pkg_search_obj.group(1)
+
+ if name != pkgname_without_version:
+ continue
+
+ # The package was found; now return its state
+ if raw_state == '<':
+ return 'outdated'
+ elif raw_state == '=' or raw_state == '>':
+ return 'present'
+ else:
+ return False
+ # no fall-through
+
+ # No packages were matched, so return False
+ return False
+
+
+def format_action_message(module, action, count):
+ vars = { "actioned": action,
+ "count": count }
+
+ if module.check_mode:
+ message = "would have %(actioned)s %(count)d package" % vars
+ else:
+ message = "%(actioned)s %(count)d package" % vars
+
+ if count == 1:
+ return message
+ else:
+ return message + "s"
+
+
+def format_pkgin_command(module, command, package=None):
+ # Not all commands take a package argument, so cover this up by passing
+ # an empty string. Some commands (e.g. 'update') will ignore extra
+ # arguments, however this behaviour cannot be relied on for others.
+ if package is None:
+ package = ""
+
+ if module.params["force"]:
+ force = "-F"
+ else:
+ force = ""
+
+ vars = { "pkgin": PKGIN_PATH,
+ "command": command,
+ "package": package,
+ "force": force}
+
+ if module.check_mode:
+ return "%(pkgin)s -n %(command)s %(package)s" % vars
+ else:
+ return "%(pkgin)s -y %(force)s %(command)s %(package)s" % vars
+
+
+def remove_packages(module, packages):
+
+ remove_c = 0
+
+ # Use a for loop so that, in case of an error, we can report the package that failed
+ for package in packages:
+ # Query the package first, to see if we even need to remove
+ if not query_package(module, package):
+ continue
+
+ rc, out, err = module.run_command(
+ format_pkgin_command(module, "remove", package))
+
+ if not module.check_mode and query_package(module, package):
+ module.fail_json(msg="failed to remove %s: %s" % (package, out))
+
+ remove_c += 1
+
+ if remove_c > 0:
+ module.exit_json(changed=True, msg=format_action_message(module, "removed", remove_c))
+
+ module.exit_json(changed=False, msg="package(s) already absent")
+
+
+def install_packages(module, packages):
+
+ install_c = 0
+
+ for package in packages:
+ if query_package(module, package):
+ continue
+
+ rc, out, err = module.run_command(
+ format_pkgin_command(module, "install", package))
+
+ if not module.check_mode and not query_package(module, package):
+ module.fail_json(msg="failed to install %s: %s" % (package, out))
+
+ install_c += 1
+
+ if install_c > 0:
+ module.exit_json(changed=True, msg=format_action_message(module, "installed", install_c))
+
+ module.exit_json(changed=False, msg="package(s) already present")
+
+def update_package_db(module):
+ rc, out, err = module.run_command(
+ format_pkgin_command(module, "update"))
+
+ if rc == 0:
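+ # pkgin reports e.g. "database for <repository> is up-to-date" when
+ # there was nothing to fetch; anything else means it was refreshed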
+ if re.search('database for.*is up-to-date\n$', out):
+ return False, "datebase is up-to-date"
+ else:
+ return True, "updated repository database"
+ else:
+ module.fail_json(msg="could not update package db")
+
+def do_upgrade_packages(module, full=False):
+ if full:
+ cmd = "full-upgrade"
+ else:
+ cmd = "upgrade"
+
+ rc, out, err = module.run_command(
+ format_pkgin_command(module, cmd))
+
+ if rc == 0:
+ if re.search('^nothing to do.\n$', out):
+ module.exit_json(changed=False, msg="nothing left to upgrade")
+ else:
+ module.fail_json(msg="could not %s packages" % cmd)
+
+def upgrade_packages(module):
+ do_upgrade_packages(module)
+
+def full_upgrade_packages(module):
+ do_upgrade_packages(module, True)
+
+def clean_cache(module):
+ rc, out, err = module.run_command(
+ format_pkgin_command(module, "clean"))
+
+ if rc == 0:
+ # There's no indication if 'clean' actually removed anything,
+ # so assume it did.
+ module.exit_json(changed=True, msg="cleaned caches")
+ else:
+ module.fail_json(msg="could not clean package cache")
+
+def main():
+ module = AnsibleModule(
+ argument_spec = dict(
+ state = dict(default="present", choices=["present","absent"]),
+ name = dict(aliases=["pkg"], type='list'),
+ update_cache = dict(default='no', type='bool'),
+ upgrade = dict(default='no', type='bool'),
+ full_upgrade = dict(default='no', type='bool'),
+ clean = dict(default='no', type='bool'),
+ force = dict(default='no', type='bool')),
+ required_one_of = [['name', 'update_cache', 'upgrade', 'full_upgrade', 'clean']],
+ supports_check_mode = True)
+
+ global PKGIN_PATH
+ PKGIN_PATH = module.get_bin_path('pkgin', True, ['/opt/local/bin'])
+
+ module.run_command_environ_update = dict(LANG='C', LC_ALL='C', LC_MESSAGES='C', LC_CTYPE='C')
+
+ p = module.params
+
+ if p["update_cache"]:
+ c, msg = update_package_db(module)
+ if not (p['name'] or p["upgrade"] or p["full_upgrade"]):
+ module.exit_json(changed=c, msg=msg)
+
+ if p["upgrade"]:
+ upgrade_packages(module)
+ if not p['name']:
+ module.exit_json(changed=True, msg='upgraded packages')
+
+ if p["full_upgrade"]:
+ full_upgrade_packages(module)
+ if not p['name']:
+ module.exit_json(changed=True, msg='upgraded all packages')
+
+ if p["clean"]:
+ clean_cache(module)
+ if not p['name']:
+ module.exit_json(changed=True, msg='cleaned caches')
+
+ pkgs = p["name"]
+
+ if p["state"] == "present":
+ install_packages(module, pkgs)
+
+ elif p["state"] == "absent":
+ remove_packages(module, pkgs)
+
+# import module snippets
+from ansible.module_utils.basic import *
+
+if __name__ == '__main__':
+ main()
diff --git a/lib/ansible/modules/extras/packaging/os/pkgng.py b/lib/ansible/modules/extras/packaging/os/pkgng.py
new file mode 100644
index 0000000000..5583bb18ee
--- /dev/null
+++ b/lib/ansible/modules/extras/packaging/os/pkgng.py
@@ -0,0 +1,353 @@
+#!/usr/bin/python
+# -*- coding: utf-8 -*-
+
+# (c) 2013, bleader
+# Written by bleader <bleader@ratonland.org>
+# Based on pkgin module written by Shaun Zinck <shaun.zinck at gmail.com>
+# that was based on pacman module written by Afterburn <http://github.com/afterburn>
+# that was based on apt module written by Matthew Williams <matthew@flowroute.com>
+#
+# This module is free software: you can redistribute it and/or modify
+# it under the terms of the GNU General Public License as published by
+# the Free Software Foundation, either version 3 of the License, or
+# (at your option) any later version.
+#
+# This software is distributed in the hope that it will be useful,
+# but WITHOUT ANY WARRANTY; without even the implied warranty of
+# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+# GNU General Public License for more details.
+#
+# You should have received a copy of the GNU General Public License
+# along with this software. If not, see <http://www.gnu.org/licenses/>.
+
+
+DOCUMENTATION = '''
+---
+module: pkgng
+short_description: Package manager for FreeBSD >= 9.0
+description:
+ - Manage binary packages for FreeBSD using 'pkgng' which
+ is available in versions after 9.0.
+version_added: "1.2"
+options:
+ name:
+ description:
+ - Name of package to install/remove.
+ required: true
+ state:
+ description:
+ - State of the package.
+ choices: [ 'present', 'absent' ]
+ required: false
+ default: present
+ cached:
+ description:
+ - Use local package base instead of fetching an updated one.
+ choices: [ 'yes', 'no' ]
+ required: false
+ default: no
+ annotation:
+ description:
+ - A comma-separated list of key-value pairs of the form
+ C(<+/-/:><key>[=<value>]). A C(+) denotes adding an annotation, a
+ C(-) denotes removing an annotation, and C(:) denotes modifying an
+ annotation.
+ If setting or modifying annotations, a value must be provided.
+ required: false
+ version_added: "1.6"
+ pkgsite:
+ description:
+ - For pkgng versions before 1.1.4, specify packagesite to use
+ for downloading packages. If not specified, use settings from
+ C(/usr/local/etc/pkg.conf).
+ - For newer pkgng versions, specify the name of a repository
+ configured in C(/usr/local/etc/pkg/repos).
+ required: false
+ rootdir:
+ description:
+ - For pkgng versions 1.5 and later, pkg will install all packages
+ within the specified root directory.
+ - Can not be used together with I(chroot) option.
+ required: false
+ chroot:
+ version_added: "2.1"
+ description:
+ - Pkg will chroot in the specified environment.
+ - Can not be used together with I(rootdir) option.
+ required: false
+ autoremove:
+ version_added: "2.2"
+ description:
+ - Remove automatically installed packages which are no longer needed.
+ required: false
+ choices: [ "yes", "no" ]
+ default: no
+author: "bleader (@bleader)"
+notes:
+ - When using pkgsite, be aware that packages already present in the cache will not be downloaded again.
+'''
+
+EXAMPLES = '''
+# Install package foo
+- pkgng: name=foo state=present
+
+# Annotate package foo and bar
+- pkgng: name=foo,bar annotation=+test1=baz,-test2,:test3=foobar
+
+# Remove packages foo and bar
+- pkgng: name=foo,bar state=absent
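+
+# Remove package foo and clean up unneeded automatic dependencies
+- pkgng: name=foo state=absent autoremove=yes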
+'''
+
+
+import re
+from ansible.module_utils.basic import AnsibleModule
+
+def query_package(module, pkgng_path, name, dir_arg):
+
+ rc, out, err = module.run_command("%s %s info -g -e %s" % (pkgng_path, dir_arg, name))
+
+ if rc == 0:
+ return True
+
+ return False
+
+def pkgng_older_than(module, pkgng_path, compare_version):
+
+ rc, out, err = module.run_command("%s -v" % pkgng_path)
+ version = map(lambda x: int(x), re.split(r'[\._]', out))
+
+ i = 0
+ new_pkgng = True
+ while compare_version[i] == version[i]:
+ i += 1
+ if i == min(len(compare_version), len(version)):
+ break
+ else:
+ if compare_version[i] > version[i]:
+ new_pkgng = False
+ return not new_pkgng
+
+
+def remove_packages(module, pkgng_path, packages, dir_arg):
+
+ remove_c = 0
+ # Use a for loop so that, in case of an error, we can report the package that failed
+ for package in packages:
+ # Query the package first, to see if we even need to remove
+ if not query_package(module, pkgng_path, package, dir_arg):
+ continue
+
+ if not module.check_mode:
+ rc, out, err = module.run_command("%s %s delete -y %s" % (pkgng_path, dir_arg, package))
+
+ if not module.check_mode and query_package(module, pkgng_path, package, dir_arg):
+ module.fail_json(msg="failed to remove %s: %s" % (package, out))
+
+ remove_c += 1
+
+ if remove_c > 0:
+
+ return (True, "removed %s package(s)" % remove_c)
+
+ return (False, "package(s) already absent")
+
+
+def install_packages(module, pkgng_path, packages, cached, pkgsite, dir_arg):
+
+ install_c = 0
+
+ # as of pkg-1.1.4, PACKAGESITE is deprecated in favor of repository definitions
+ # in /usr/local/etc/pkg/repos
+ old_pkgng = pkgng_older_than(module, pkgng_path, [1, 1, 4])
+ if pkgsite != "":
+ if old_pkgng:
+ pkgsite = "PACKAGESITE=%s" % (pkgsite)
+ else:
+ pkgsite = "-r %s" % (pkgsite)
+
+ batch_var = 'env BATCH=yes' # This environment variable skips mid-install prompts,
+ # setting them to their default values.
+
+ if not module.check_mode and not cached:
+ if old_pkgng:
+ rc, out, err = module.run_command("%s %s update" % (pkgsite, pkgng_path))
+ else:
+ rc, out, err = module.run_command("%s %s update" % (pkgng_path, dir_arg))
+ if rc != 0:
+ module.fail_json(msg="Could not update catalogue")
+
+ for package in packages:
+ if query_package(module, pkgng_path, package, dir_arg):
+ continue
+
+ if not module.check_mode:
+ if old_pkgng:
+ rc, out, err = module.run_command("%s %s %s install -g -U -y %s" % (batch_var, pkgsite, pkgng_path, package))
+ else:
+ rc, out, err = module.run_command("%s %s %s install %s -g -U -y %s" % (batch_var, pkgng_path, dir_arg, pkgsite, package))
+
+ if not module.check_mode and not query_package(module, pkgng_path, package, dir_arg):
+ module.fail_json(msg="failed to install %s: %s" % (package, out), stderr=err)
+
+ install_c += 1
+
+ if install_c > 0:
+ return (True, "added %s package(s)" % (install_c))
+
+ return (False, "package(s) already present")
+
+def annotation_query(module, pkgng_path, package, tag, dir_arg):
+ rc, out, err = module.run_command("%s %s info -g -A %s" % (pkgng_path, dir_arg, package))
+ match = re.search(r'^\s*(?P<tag>%s)\s*:\s*(?P<value>\w+)' % tag, out, flags=re.MULTILINE)
+ if match:
+ return match.group('value')
+ return False
+
+
+def annotation_add(module, pkgng_path, package, tag, value, dir_arg):
+ _value = annotation_query(module, pkgng_path, package, tag, dir_arg)
+ if not _value:
+ # Annotation does not exist, add it.
+ rc, out, err = module.run_command('%s %s annotate -y -A %s %s "%s"'
+ % (pkgng_path, dir_arg, package, tag, value))
+ if rc != 0:
+ module.fail_json("could not annotate %s: %s"
+ % (package, out), stderr=err)
+ return True
+ elif _value != value:
+ # Annotation exists, but value differs
+ module.fail_json(
+ mgs="failed to annotate %s, because %s is already set to %s, but should be set to %s"
+ % (package, tag, _value, value))
+ return False
+ else:
+ # Annotation exists, nothing to do
+ return False
+
+def annotation_delete(module, pkgng_path, package, tag, value, dir_arg):
+ _value = annotation_query(module, pkgng_path, package, tag, dir_arg)
+ if _value:
+ rc, out, err = module.run_command('%s %s annotate -y -D %s %s'
+ % (pkgng_path, dir_arg, package, tag))
+ if rc != 0:
+ module.fail_json("could not delete annotation to %s: %s"
+ % (package, out), stderr=err)
+ return True
+ return False
+
+def annotation_modify(module, pkgng_path, package, tag, value, dir_arg):
+ _value = annotation_query(module, pkgng_path, package, tag, dir_arg)
+ if not _value:
+ # No such tag
+ module.fail_json(msg="could not change annotation to %s: tag %s does not exist"
+ % (package, tag))
+ elif _value == value:
+ # No change in value
+ return False
+ else:
+ rc, out, err = module.run_command('%s %s annotate -y -M %s %s "%s"'
+ % (pkgng_path, dir_arg, package, tag, value))
+ if rc != 0:
+ module.fail_json(msg="could not change annotation on %s: %s"
+ % (package, out), stderr=err)
+ return True
+
+
+def annotate_packages(module, pkgng_path, packages, annotation, dir_arg):
+ annotate_c = 0
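+ # The annotation parameter is a comma-separated list of operations, e.g.
+ # (hypothetical) "+deploy=prod,-test,:owner=ops" -> add, delete, modify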
+ annotations = map(lambda _annotation:
+ re.match(r'(?P<operation>[-+:])(?P<tag>\w+)(=(?P<value>\w+))?',
+ _annotation).groupdict(),
+ re.split(r',', annotation))
+
+ operation = {
+ '+': annotation_add,
+ '-': annotation_delete,
+ ':': annotation_modify
+ }
+
+ for package in packages:
+ for _annotation in annotations:
+ if operation[_annotation['operation']](module, pkgng_path, package, _annotation['tag'], _annotation['value'], dir_arg):
+ annotate_c += 1
+
+ if annotate_c > 0:
+ return (True, "added %s annotations." % annotate_c)
+ return (False, "changed no annotations")
+
+def autoremove_packages(module, pkgng_path, dir_arg):
+ rc, out, err = module.run_command("%s %s autoremove -n" % (pkgng_path, dir_arg))
+
+ autoremove_c = 0
+
+ match = re.search('^Deinstallation has been requested for the following ([0-9]+) packages', out, re.MULTILINE)
+ if match:
+ autoremove_c = int(match.group(1))
+
+ if autoremove_c == 0:
+ return False, "no package(s) to autoremove"
+
+ if not module.check_mode:
+ rc, out, err = module.run_command("%s %s autoremove -y" % (pkgng_path, dir_arg))
+
+ return True, "autoremoved %d package(s)" % (autoremove_c)
+
+def main():
+ module = AnsibleModule(
+ argument_spec = dict(
+ state = dict(default="present", choices=["present","absent"], required=False),
+ name = dict(aliases=["pkg"], required=True, type='list'),
+ cached = dict(default=False, type='bool'),
+ annotation = dict(default="", required=False),
+ pkgsite = dict(default="", required=False),
+ rootdir = dict(default="", required=False, type='path'),
+ chroot = dict(default="", required=False, type='path'),
+ autoremove = dict(default=False, type='bool')),
+ supports_check_mode = True,
+ mutually_exclusive =[["rootdir", "chroot"]])
+
+ pkgng_path = module.get_bin_path('pkg', True)
+
+ p = module.params
+
+ pkgs = p["name"]
+
+ changed = False
+ msgs = []
+ dir_arg = ""
+
+ if p["rootdir"] != "":
+ old_pkgng = pkgng_older_than(module, pkgng_path, [1, 5, 0])
+ if old_pkgng:
+ module.fail_json(msg="To use option 'rootdir' pkg version must be 1.5 or greater")
+ else:
+ dir_arg = "--rootdir %s" % (p["rootdir"])
+
+ if p["chroot"] != "":
+ dir_arg = '--chroot %s' % (p["chroot"])
+
+ if p["state"] == "present":
+ _changed, _msg = install_packages(module, pkgng_path, pkgs, p["cached"], p["pkgsite"], dir_arg)
+ changed = changed or _changed
+ msgs.append(_msg)
+
+ elif p["state"] == "absent":
+ _changed, _msg = remove_packages(module, pkgng_path, pkgs, dir_arg)
+ changed = changed or _changed
+ msgs.append(_msg)
+
+ if p["autoremove"]:
+ _changed, _msg = autoremove_packages(module, pkgng_path, dir_arg)
+ changed = changed or _changed
+ msgs.append(_msg)
+
+ if p["annotation"]:
+ _changed, _msg = annotate_packages(module, pkgng_path, pkgs, p["annotation"], dir_arg)
+ changed = changed or _changed
+ msgs.append(_msg)
+
+ module.exit_json(changed=changed, msg=", ".join(msgs))
+
+
+if __name__ == '__main__':
+ main()
diff --git a/lib/ansible/modules/extras/packaging/os/pkgutil.py b/lib/ansible/modules/extras/packaging/os/pkgutil.py
new file mode 100644
index 0000000000..35ccb4e190
--- /dev/null
+++ b/lib/ansible/modules/extras/packaging/os/pkgutil.py
@@ -0,0 +1,223 @@
+#!/usr/bin/python
+# -*- coding: utf-8 -*-
+
+# (c) 2013, Alexander Winkler <mail () winkler-alexander.de>
+# based on svr4pkg by
+# Boyd Adamson <boyd () boydadamson.com> (2012)
+#
+# This file is part of Ansible
+#
+# Ansible is free software: you can redistribute it and/or modify
+# it under the terms of the GNU General Public License as published by
+# the Free Software Foundation, either version 3 of the License, or
+# (at your option) any later version.
+#
+# Ansible is distributed in the hope that it will be useful,
+# but WITHOUT ANY WARRANTY; without even the implied warranty of
+# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+# GNU General Public License for more details.
+#
+# You should have received a copy of the GNU General Public License
+# along with Ansible. If not, see <http://www.gnu.org/licenses/>.
+#
+
+DOCUMENTATION = '''
+---
+module: pkgutil
+short_description: Manage CSW packages on Solaris
+description:
+ - Manages CSW packages (SVR4 format) on Solaris 10 and 11.
+ - These were the native packages on Solaris <= 10 and are available
+ as a legacy feature in Solaris 11.
+ - Pkgutil is an advanced packaging system which resolves dependencies on installation.
+ It is designed for CSW packages.
+version_added: "1.3"
+author: "Alexander Winkler (@dermute)"
+options:
+ name:
+ description:
+ - Package name, e.g. (C(CSWnrpe))
+ required: true
+ site:
+ description:
+ - Specifies the repository path to install the package from.
+ - Its global definition is done in C(/etc/opt/csw/pkgutil.conf).
+ required: false
+ state:
+ description:
+ - Whether to install (C(present)), or remove (C(absent)) a package.
+ - The upgrade (C(latest)) operation will update/install the package to the latest version available.
+ - "Note: The module has a limitation that (C(latest)) only works for one package, not lists of them."
+ required: true
+ choices: ["present", "absent", "latest"]
+ update_catalog:
+ description:
+ - If you want to refresh your catalog from the mirror, set this to (C(yes)).
+ required: false
+ default: False
+ version_added: "2.1"
+'''
+
+EXAMPLES = '''
+# Install a package
+pkgutil: name=CSWcommon state=present
+
+# Install a package from a specific repository
+pkgutil: name=CSWnrpe site=ftp://myinternal.repo/opencsw/kiel state=latest
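+
+# Remove a package (hypothetical package name)
+pkgutil: name=CSWtop state=absent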
+'''
+
+
+def package_installed(module, name):
+ cmd = ['pkginfo']
+ cmd.append('-q')
+ cmd.append(name)
+ rc, out, err = run_command(module, cmd)
+ if rc == 0:
+ return True
+ else:
+ return False
+
+def package_latest(module, name, site):
+ # Only supports one package
+ cmd = [ 'pkgutil', '-U', '--single', '-c' ]
+ if site is not None:
+ cmd += [ '-t', site]
+ cmd.append(name)
+ rc, out, err = run_command(module, cmd)
+ # replace | tail -1 |grep -v SAME
+ # use -2, because splitting on \n creates an empty line
+ # at the end of the list
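+ # The last data line ends with "SAME" when the installed package already
+ # matches the catalog, e.g. (hypothetical) "CSWnrpe 1.2,REV=... SAME"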
+ return 'SAME' in out.split('\n')[-2]
+
+def run_command(module, cmd, **kwargs):
+ progname = cmd[0]
+ cmd[0] = module.get_bin_path(progname, True, ['/opt/csw/bin'])
+ return module.run_command(cmd, **kwargs)
+
+def package_install(module, state, name, site, update_catalog):
+ cmd = [ 'pkgutil', '-iy' ]
+ if update_catalog:
+ cmd += [ '-U' ]
+ if site is not None:
+ cmd += [ '-t', site ]
+ if state == 'latest':
+ cmd += [ '-f' ]
+ cmd.append(name)
+ (rc, out, err) = run_command(module, cmd)
+ return (rc, out, err)
+
+def package_upgrade(module, name, site, update_catalog):
+ cmd = [ 'pkgutil', '-ufy' ]
+ if update_catalog:
+ cmd += [ '-U' ]
+ if site is not None:
+ cmd += [ '-t', site ]
+ cmd.append(name)
+ (rc, out, err) = run_command(module, cmd)
+ return (rc, out, err)
+
+def package_uninstall(module, name):
+ cmd = [ 'pkgutil', '-ry', name]
+ (rc, out, err) = run_command(module, cmd)
+ return (rc, out, err)
+
+def main():
+ module = AnsibleModule(
+ argument_spec = dict(
+ name = dict(required = True),
+ state = dict(required = True, choices=['present', 'absent','latest']),
+ site = dict(default = None),
+ update_catalog = dict(required = False, default = False, type='bool'),
+ ),
+ supports_check_mode=True
+ )
+ name = module.params['name']
+ state = module.params['state']
+ site = module.params['site']
+ update_catalog = module.params['update_catalog']
+ rc = None
+ out = ''
+ err = ''
+ result = {}
+ result['name'] = name
+ result['state'] = state
+
+ if state == 'present':
+ if not package_installed(module, name):
+ if module.check_mode:
+ module.exit_json(changed=True)
+ (rc, out, err) = package_install(module, state, name, site, update_catalog)
+ # Stdout is normally empty but for some packages can be
+ # very long and is not often useful
+ if len(out) > 75:
+ out = out[:75] + '...'
+ if rc != 0:
+ if err:
+ msg = err
+ else:
+ msg = out
+ module.fail_json(msg=msg)
+
+ elif state == 'latest':
+ if not package_installed(module, name):
+ if module.check_mode:
+ module.exit_json(changed=True)
+ (rc, out, err) = package_install(module, state, name, site, update_catalog)
+ if len(out) > 75:
+ out = out[:75] + '...'
+ if rc != 0:
+ if err:
+ msg = err
+ else:
+ msg = out
+ module.fail_json(msg=msg)
+
+ else:
+ if not package_latest(module, name, site):
+ if module.check_mode:
+ module.exit_json(changed=True)
+ (rc, out, err) = package_upgrade(module, name, site, update_catalog)
+ if len(out) > 75:
+ out = out[:75] + '...'
+ if rc != 0:
+ if err:
+ msg = err
+ else:
+ msg = out
+ module.fail_json(msg=msg)
+
+ elif state == 'absent':
+ if package_installed(module, name):
+ if module.check_mode:
+ module.exit_json(changed=True)
+ (rc, out, err) = package_uninstall(module, name)
+ if len(out) > 75:
+ out = out[:75] + '...'
+ if rc != 0:
+ if err:
+ msg = err
+ else:
+ msg = out
+ module.fail_json(msg=msg)
+
+ if rc is None:
+ # pkgutil was not executed because the package was already present/absent
+ result['changed'] = False
+ elif rc == 0:
+ result['changed'] = True
+ else:
+ result['changed'] = False
+ result['failed'] = True
+
+ if out:
+ result['stdout'] = out
+ if err:
+ result['stderr'] = err
+
+ module.exit_json(**result)
+
+# import module snippets
+from ansible.module_utils.basic import *
+if __name__ == '__main__':
+ main()
diff --git a/lib/ansible/modules/extras/packaging/os/portage.py b/lib/ansible/modules/extras/packaging/os/portage.py
new file mode 100644
index 0000000000..4e8507fedf
--- /dev/null
+++ b/lib/ansible/modules/extras/packaging/os/portage.py
@@ -0,0 +1,457 @@
+#!/usr/bin/python
+# -*- coding: utf-8 -*-
+
+# (c) 2013, Yap Sok Ann
+# Written by Yap Sok Ann <sokann@gmail.com>
+# Based on apt module written by Matthew Williams <matthew@flowroute.com>
+#
+# This module is free software: you can redistribute it and/or modify
+# it under the terms of the GNU General Public License as published by
+# the Free Software Foundation, either version 3 of the License, or
+# (at your option) any later version.
+#
+# This software is distributed in the hope that it will be useful,
+# but WITHOUT ANY WARRANTY; without even the implied warranty of
+# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+# GNU General Public License for more details.
+#
+# You should have received a copy of the GNU General Public License
+# along with this software. If not, see <http://www.gnu.org/licenses/>.
+
+
+DOCUMENTATION = '''
+---
+module: portage
+short_description: Package manager for Gentoo
+description:
+ - Manages Gentoo packages
+version_added: "1.6"
+
+options:
+ package:
+ description:
+ - Package atom or set, e.g. C(sys-apps/foo) or C(>foo-2.13) or C(@world)
+ required: false
+ default: null
+
+ state:
+ description:
+ - State of the package atom
+ required: false
+ default: "present"
+ choices: [ "present", "installed", "emerged", "absent", "removed", "unmerged" ]
+
+ update:
+ description:
+ - Update packages to the best version available (--update)
+ required: false
+ default: null
+ choices: [ "yes" ]
+
+ deep:
+ description:
+ - Consider the entire dependency tree of packages (--deep)
+ required: false
+ default: null
+ choices: [ "yes" ]
+
+ newuse:
+ description:
+ - Include installed packages where USE flags have changed (--newuse)
+ required: false
+ default: null
+ choices: [ "yes" ]
+
+ changed_use:
+ description:
+ - Include installed packages where USE flags have changed, except when
+ flags that the user has not enabled are added or removed (--changed-use)
+ required: false
+ default: null
+ choices: [ "yes" ]
+ version_added: 1.8
+
+ oneshot:
+ description:
+ - Do not add the packages to the world file (--oneshot)
+ required: false
+ default: False
+ choices: [ "yes", "no" ]
+
+ noreplace:
+ description:
+ - Do not re-emerge installed packages (--noreplace)
+ required: false
+ default: False
+ choices: [ "yes", "no" ]
+
+ nodeps:
+ description:
+ - Only merge packages but not their dependencies (--nodeps)
+ required: false
+ default: False
+ choices: [ "yes", "no" ]
+
+ onlydeps:
+ description:
+ - Only merge packages' dependencies but not the packages (--onlydeps)
+ required: false
+ default: False
+ choices: [ "yes", "no" ]
+
+ depclean:
+ description:
+ - Remove packages not needed by explicitly merged packages (--depclean)
+ - If no package is specified, clean up the world's dependencies
+ - Otherwise, --depclean serves as a dependency aware version of --unmerge
+ required: false
+ default: False
+ choices: [ "yes", "no" ]
+
+ quiet:
+ description:
+ - Run emerge in quiet mode (--quiet)
+ required: false
+ default: False
+ choices: [ "yes", "no" ]
+
+ verbose:
+ description:
+ - Run emerge in verbose mode (--verbose)
+ required: false
+ default: False
+ choices: [ "yes", "no" ]
+
+ sync:
+ description:
+ - Sync package repositories first
+ - If yes, perform "emerge --sync"
+ - If web, perform "emerge-webrsync"
+ required: false
+ default: null
+ choices: [ "yes", "web", "no" ]
+
+ getbinpkg:
+ description:
+ - Prefer packages specified at PORTAGE_BINHOST in make.conf
+ required: false
+ default: False
+ choices: [ "yes", "no" ]
+
+ usepkgonly:
+ description:
+ - Merge only binaries (no compiling). This sets getbinpkg=yes.
+ required: false
+ default: False
+ choices: [ "yes", "no" ]
+
+requirements: [ gentoolkit ]
+author:
+ - "Yap Sok Ann (@sayap)"
+ - "Andrew Udvare"
+notes: []
+'''
+
+EXAMPLES = '''
+# Make sure package foo is installed
+- portage: package=foo state=present
+
+# Make sure package foo is not installed
+- portage: package=foo state=absent
+
+# Update package foo to the "best" version
+- portage: package=foo update=yes
+
+# Install package foo using PORTAGE_BINHOST setup
+- portage: package=foo getbinpkg=yes
+
+# Re-install world from binary packages only and do not allow any compiling
+- portage: package=@world usepkgonly=yes
+
+# Sync repositories and update world
+- portage: package=@world update=yes deep=yes sync=yes
+
+# Remove unneeded packages
+- portage: depclean=yes
+
+# Remove package foo if it is not explicitly needed
+- portage: package=foo state=absent depclean=yes
+'''
+
+
+import os
+import pipes
+import re
+
+
+def query_package(module, package, action):
+ if package.startswith('@'):
+ return query_set(module, package, action)
+ return query_atom(module, package, action)
+
+
+def query_atom(module, atom, action):
+ cmd = '%s list %s' % (module.equery_path, atom)
+
+ rc, out, err = module.run_command(cmd)
+ return rc == 0
+
+
+def query_set(module, set, action):
+ system_sets = [
+ '@live-rebuild',
+ '@module-rebuild',
+ '@preserved-rebuild',
+ '@security',
+ '@selected',
+ '@system',
+ '@world',
+ '@x11-module-rebuild',
+ ]
+
+ if set in system_sets:
+ if action == 'unmerge':
+ module.fail_json(msg='set %s cannot be removed' % set)
+ return False
+
+ world_sets_path = '/var/lib/portage/world_sets'
+ if not os.path.exists(world_sets_path):
+ return False
+
+ cmd = 'grep %s %s' % (set, world_sets_path)
+
+ rc, out, err = module.run_command(cmd)
+ return rc == 0
+
+
+def sync_repositories(module, webrsync=False):
+ if module.check_mode:
+ module.exit_json(msg='check mode not supported by sync')
+
+ if webrsync:
+ webrsync_path = module.get_bin_path('emerge-webrsync', required=True)
+ cmd = '%s --quiet' % webrsync_path
+ else:
+ cmd = '%s --sync --quiet --ask=n' % module.emerge_path
+
+ rc, out, err = module.run_command(cmd)
+ if rc != 0:
+ module.fail_json(msg='could not sync package repositories')
+
+
+# Note: In the 3 functions below, equery is done one-by-one, but emerge is done
+# in one go. If that is not desirable, split the packages into multiple tasks
+# instead of joining them together with comma.
+
+
+def emerge_packages(module, packages):
+ p = module.params
+
+ if not (p['update'] or p['noreplace']):
+ for package in packages:
+ if not query_package(module, package, 'emerge'):
+ break
+ else:
+ module.exit_json(changed=False, msg='Packages already present.')
+ if module.check_mode:
+ module.exit_json(changed=True, msg='Packages would be installed.')
+
+ args = []
+ emerge_flags = {
+ 'update': '--update',
+ 'deep': '--deep',
+ 'newuse': '--newuse',
+ 'changed_use': '--changed-use',
+ 'oneshot': '--oneshot',
+ 'noreplace': '--noreplace',
+ 'nodeps': '--nodeps',
+ 'onlydeps': '--onlydeps',
+ 'quiet': '--quiet',
+ 'verbose': '--verbose',
+ 'getbinpkg': '--getbinpkg',
+ 'usepkgonly': '--usepkgonly',
+ 'usepkg': '--usepkg',
+ }
+ for flag, arg in emerge_flags.iteritems():
+ if p[flag]:
+ args.append(arg)
+
+ if p['usepkg'] and p['usepkgonly']:
+ module.fail_json(msg='Use only one of usepkg, usepkgonly')
+
+ cmd, (rc, out, err) = run_emerge(module, packages, *args)
+ if rc != 0:
+ module.fail_json(
+ cmd=cmd, rc=rc, stdout=out, stderr=err,
+ msg='Packages not installed.',
+ )
+
+ # Check for SSH error with PORTAGE_BINHOST, since rc is still 0 despite
+ # this error
+ if (p['usepkgonly'] or p['getbinpkg']) \
+ and 'Permission denied (publickey).' in err:
+ module.fail_json(
+ cmd=cmd, rc=rc, stdout=out, stderr=err,
+ msg='Please check your PORTAGE_BINHOST configuration in make.conf '
+ 'and your SSH authorized_keys file',
+ )
+
+ changed = True
+ for line in out.splitlines():
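+ # emerge prints lines like ">>> Emerging (1 of 3) sys-apps/foo-1.0"
+ # during a real install, or "[ebuild ...]" / "[binary ...]" lines
+ # under --pretend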
+ if re.match(r'(?:>+) Emerging (?:binary )?\(1 of', line):
+ msg = 'Packages installed.'
+ break
+ elif module.check_mode and re.match(r'\[(binary|ebuild)', line):
+ msg = 'Packages would be installed.'
+ break
+ else:
+ changed = False
+ msg = 'No packages installed.'
+
+ module.exit_json(
+ changed=changed, cmd=cmd, rc=rc, stdout=out, stderr=err,
+ msg=msg,
+ )
+
+
+def unmerge_packages(module, packages):
+ p = module.params
+
+ for package in packages:
+ if query_package(module, package, 'unmerge'):
+ break
+ else:
+ module.exit_json(changed=False, msg='Packages already absent.')
+
+ args = ['--unmerge']
+
+ for flag in ['quiet', 'verbose']:
+ if p[flag]:
+ args.append('--%s' % flag)
+
+ cmd, (rc, out, err) = run_emerge(module, packages, *args)
+
+ if rc != 0:
+ module.fail_json(
+ cmd=cmd, rc=rc, stdout=out, stderr=err,
+ msg='Packages not removed.',
+ )
+
+ module.exit_json(
+ changed=True, cmd=cmd, rc=rc, stdout=out, stderr=err,
+ msg='Packages removed.',
+ )
+
+
+def cleanup_packages(module, packages):
+ p = module.params
+
+ if packages:
+ for package in packages:
+ if query_package(module, package, 'unmerge'):
+ break
+ else:
+ module.exit_json(changed=False, msg='Packages already absent.')
+
+ args = ['--depclean']
+
+ for flag in ['quiet', 'verbose']:
+ if p[flag]:
+ args.append('--%s' % flag)
+
+ cmd, (rc, out, err) = run_emerge(module, packages, *args)
+ if rc != 0:
+ module.fail_json(cmd=cmd, rc=rc, stdout=out, stderr=err)
+
+ removed = 0
+ for line in out.splitlines():
+ if not line.startswith('Number removed:'):
+ continue
+ parts = line.split(':')
+ removed = int(parts[1].strip())
+ changed = removed > 0
+
+ module.exit_json(
+ changed=changed, cmd=cmd, rc=rc, stdout=out, stderr=err,
+ msg='Depclean completed.',
+ )
+
+
+def run_emerge(module, packages, *args):
+ args = list(args)
+
+ args.append('--ask=n')
+ if module.check_mode:
+ args.append('--pretend')
+
+ cmd = [module.emerge_path] + args + packages
+ return cmd, module.run_command(cmd)
+
+
+portage_present_states = ['present', 'emerged', 'installed']
+portage_absent_states = ['absent', 'unmerged', 'removed']
+
+
+def main():
+ module = AnsibleModule(
+ argument_spec=dict(
+ package=dict(default=None, aliases=['name']),
+ state=dict(
+ default=portage_present_states[0],
+ choices=portage_present_states + portage_absent_states,
+ ),
+ update=dict(default=False, type='bool'),
+ deep=dict(default=False, type='bool'),
+ newuse=dict(default=False, type='bool'),
+ changed_use=dict(default=False, type='bool'),
+ oneshot=dict(default=False, type='bool'),
+ noreplace=dict(default=False, type='bool'),
+ nodeps=dict(default=False, type='bool'),
+ onlydeps=dict(default=False, type='bool'),
+ depclean=dict(default=False, type='bool'),
+ quiet=dict(default=False, type='bool'),
+ verbose=dict(default=False, type='bool'),
+ sync=dict(default=None, choices=['yes', 'web']),
+ getbinpkg=dict(default=False, type='bool'),
+ usepkgonly=dict(default=False, type='bool'),
+ usepkg=dict(default=False, type='bool'),
+ ),
+ required_one_of=[['package', 'sync', 'depclean']],
+ mutually_exclusive=[['nodeps', 'onlydeps'], ['quiet', 'verbose']],
+ supports_check_mode=True,
+ )
+
+ module.emerge_path = module.get_bin_path('emerge', required=True)
+ module.equery_path = module.get_bin_path('equery', required=True)
+
+ p = module.params
+
+ if p['sync']:
+ sync_repositories(module, webrsync=(p['sync'] == 'web'))
+ if not p['package']:
+ module.exit_json(msg='Sync successfully finished.')
+
+ packages = []
+ if p['package']:
+ packages.extend(p['package'].split(','))
+
+ if p['depclean']:
+ if packages and p['state'] not in portage_absent_states:
+ module.fail_json(
+ msg='Depclean can only be used with package when the state is '
+ 'one of: %s' % portage_absent_states,
+ )
+
+ cleanup_packages(module, packages)
+
+ elif p['state'] in portage_present_states:
+ emerge_packages(module, packages)
+
+ elif p['state'] in portage_absent_states:
+ unmerge_packages(module, packages)
+
+# import module snippets
+from ansible.module_utils.basic import *
+
+if __name__ == '__main__':
+ main()
diff --git a/lib/ansible/modules/extras/packaging/os/portinstall.py b/lib/ansible/modules/extras/packaging/os/portinstall.py
new file mode 100644
index 0000000000..a5d0e51097
--- /dev/null
+++ b/lib/ansible/modules/extras/packaging/os/portinstall.py
@@ -0,0 +1,206 @@
+#!/usr/bin/python
+# -*- coding: utf-8 -*-
+
+# (c) 2013, berenddeboer
+# Written by berenddeboer <berend@pobox.com>
+# Based on pkgng module written by bleader <bleader at ratonland.org>
+#
+# This module is free software: you can redistribute it and/or modify
+# it under the terms of the GNU General Public License as published by
+# the Free Software Foundation, either version 3 of the License, or
+# (at your option) any later version.
+#
+# This software is distributed in the hope that it will be useful,
+# but WITHOUT ANY WARRANTY; without even the implied warranty of
+# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+# GNU General Public License for more details.
+#
+# You should have received a copy of the GNU General Public License
+# along with this software. If not, see <http://www.gnu.org/licenses/>.
+
+
+DOCUMENTATION = '''
+---
+module: portinstall
+short_description: Installing packages from FreeBSD's ports system
+description:
+ - Manage packages for FreeBSD using 'portinstall'.
+version_added: "1.3"
+options:
+ name:
+ description:
+ - name of package to install/remove
+ required: true
+ state:
+ description:
+ - state of the package
+ choices: [ 'present', 'absent' ]
+ required: false
+ default: present
+ use_packages:
+ description:
+ - use packages instead of ports whenever available
+ choices: [ 'yes', 'no' ]
+ required: false
+ default: yes
+author: "berenddeboer (@berenddeboer)"
+'''
+
+EXAMPLES = '''
+# Install package foo
+- portinstall: name=foo state=present
+
+# Install package security/cyrus-sasl2-saslauthd
+- portinstall: name=security/cyrus-sasl2-saslauthd state=present
+
+# Remove packages foo and bar
+- portinstall: name=foo,bar state=absent
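+
+# Build foo from ports even when a prebuilt package exists (illustrative;
+# exercises the use_packages option documented above)
+- portinstall: name=foo state=present use_packages=no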
+'''
+
+
+import pipes
+import re
+
+def query_package(module, name):
+
+ pkg_info_path = module.get_bin_path('pkg_info', False)
+
+ # Assume that if we have pkg_info, we haven't upgraded to pkgng
+ if pkg_info_path:
+ pkgng = False
+ pkg_glob_path = module.get_bin_path('pkg_glob', True)
+ rc, out, err = module.run_command("%s -e `pkg_glob %s`" % (pkg_info_path, pipes.quote(name)), use_unsafe_shell=True)
+ else:
+ pkgng = True
+ pkg_info_path = module.get_bin_path('pkg', True)
+ pkg_info_path = pkg_info_path + " info"
+ rc, out, err = module.run_command("%s %s" % (pkg_info_path, name))
+
+ found = rc == 0
+
+ if not found:
+ # databases/mysql55-client installs as mysql-client, so try solving
+ # that the ugly way. Pity FreeBSD doesn't have a foolproof way of
+ # checking whether some package is installed
+ name_without_digits = re.sub('[0-9]', '', name)
+ if name != name_without_digits:
+ # the same plain query works for both pkg_info and pkgng at this point
+ rc, out, err = module.run_command("%s %s" % (pkg_info_path, name_without_digits))
+
+ found = rc == 0
+
+ return found
+
+
+def matching_packages(module, name):
+
+ ports_glob_path = module.get_bin_path('ports_glob', True)
+ rc, out, err = module.run_command("%s %s" % (ports_glob_path, name))
+ # count the number of packages found
+ occurrences = out.count('\n')
+ if occurrences == 0:
+ name_without_digits = re.sub('[0-9]', '', name)
+ if name != name_without_digits:
+ rc, out, err = module.run_command("%s %s" % (ports_glob_path, name_without_digits))
+ occurrences = out.count('\n')
+ return occurrences
+
+
+def remove_packages(module, packages):
+
+ remove_c = 0
+ pkg_glob_path = module.get_bin_path('pkg_glob', True)
+
+ # If pkg_delete not found, we assume pkgng
+ pkg_delete_path = module.get_bin_path('pkg_delete', False)
+ if not pkg_delete_path:
+ pkg_delete_path = module.get_bin_path('pkg', True)
+ pkg_delete_path = pkg_delete_path + " delete -y"
+
+ # Use a for loop so that, in case of error, we can report which package failed
+ for package in packages:
+ # Query the package first, to see if we even need to remove
+ if not query_package(module, package):
+ continue
+
+ rc, out, err = module.run_command("%s `%s %s`" % (pkg_delete_path, pkg_glob_path, pipes.quote(package)), use_unsafe_shell=True)
+
+ if query_package(module, package):
+ name_without_digits = re.sub('[0-9]', '', package)
+ rc, out, err = module.run_command("%s `%s %s`" % (pkg_delete_path, pkg_glob_path, pipes.quote(name_without_digits)),use_unsafe_shell=True)
+ if query_package(module, package):
+ module.fail_json(msg="failed to remove %s: %s" % (package, out))
+
+ remove_c += 1
+
+ if remove_c > 0:
+
+ module.exit_json(changed=True, msg="removed %s package(s)" % remove_c)
+
+ module.exit_json(changed=False, msg="package(s) already absent")
+
+
+def install_packages(module, packages, use_packages):
+
+ install_c = 0
+
+ # If portinstall is not found, install it automatically via the portupgrade package
+ portinstall_path = module.get_bin_path('portinstall', False)
+ if not portinstall_path:
+ pkg_path = module.get_bin_path('pkg', False)
+ if pkg_path:
+ module.run_command("pkg install -y portupgrade")
+ portinstall_path = module.get_bin_path('portinstall', True)
+
+ if use_packages == "yes":
+ portinstall_params="--use-packages"
+ else:
+ portinstall_params=""
+
+ for package in packages:
+ if query_package(module, package):
+ continue
+
+ # portinstall needs an unambiguous name, so bail out unless exactly one port matches
+ matches = matching_packages(module, package)
+ if matches == 1:
+ rc, out, err = module.run_command("%s --batch %s %s" % (portinstall_path, portinstall_params, package))
+ if not query_package(module, package):
+ module.fail_json(msg="failed to install %s: %s" % (package, out))
+ elif matches == 0:
+ module.fail_json(msg="no matches for package %s" % (package))
+ else:
+ module.fail_json(msg="%s matches found for package name %s" % (matches, package))
+
+ install_c += 1
+
+ if install_c > 0:
+ module.exit_json(changed=True, msg="present %s package(s)" % (install_c))
+
+ module.exit_json(changed=False, msg="package(s) already present")
+
+
+def main():
+ module = AnsibleModule(
+ argument_spec = dict(
+ state = dict(default="present", choices=["present","absent"]),
+ name = dict(aliases=["pkg"], required=True),
+ use_packages = dict(type='bool', default=True)))
+
+ p = module.params
+
+ pkgs = p["name"].split(",")
+
+ if p["state"] == "present":
+ install_packages(module, pkgs, p["use_packages"])
+
+ elif p["state"] == "absent":
+ remove_packages(module, pkgs)
+
+# import module snippets
+from ansible.module_utils.basic import *
+
+main()
diff --git a/lib/ansible/modules/extras/packaging/os/slackpkg.py b/lib/ansible/modules/extras/packaging/os/slackpkg.py
new file mode 100644
index 0000000000..674de538ef
--- /dev/null
+++ b/lib/ansible/modules/extras/packaging/os/slackpkg.py
@@ -0,0 +1,199 @@
+#!/usr/bin/python
+# -*- coding: utf-8 -*-
+
+# (c) 2014, Kim Nørgaard
+# Written by Kim Nørgaard <jasen@jasen.dk>
+# Based on pkgng module written by bleader <bleader@ratonland.org>
+# that was based on pkgin module written by Shaun Zinck <shaun.zinck at gmail.com>
+# that was based on pacman module written by Afterburn <http://github.com/afterburn>
+# that was based on apt module written by Matthew Williams <matthew@flowroute.com>
+#
+# This module is free software: you can redistribute it and/or modify
+# it under the terms of the GNU General Public License as published by
+# the Free Software Foundation, either version 3 of the License, or
+# (at your option) any later version.
+#
+# This software is distributed in the hope that it will be useful,
+# but WITHOUT ANY WARRANTY; without even the implied warranty of
+# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+# GNU General Public License for more details.
+#
+# You should have received a copy of the GNU General Public License
+# along with this software. If not, see <http://www.gnu.org/licenses/>.
+
+
+DOCUMENTATION = '''
+---
+module: slackpkg
+short_description: Package manager for Slackware >= 12.2
+description:
+ - Manage binary packages for Slackware using 'slackpkg' which
+ is available in versions after 12.2.
+version_added: "2.0"
+options:
+ name:
+ description:
+ - name of package to install/remove
+ required: true
+
+ state:
+ description:
+ - state of the package. You can use C(installed) as an alias for C(present) and C(removed) as one for C(absent).
+ choices: [ 'present', 'absent', 'latest' ]
+ required: false
+ default: present
+
+ update_cache:
+ description:
+ - update the package database first
+ required: false
+ default: false
+ choices: [ true, false ]
+
+author: Kim Nørgaard (@KimNorgaard)
+requirements: [ "Slackware >= 12.2" ]
+'''
+
+EXAMPLES = '''
+# Install package foo
+- slackpkg: name=foo state=present
+
+# Remove packages foo and bar
+- slackpkg: name=foo,bar state=absent
+
+# Make sure that foo is at the latest version
+- slackpkg: name=foo state=latest
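+
+# Update the package database first, then install foo (illustrative;
+# exercises the update_cache option documented above)
+- slackpkg: name=foo state=present update_cache=true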
+
+'''
+
+
+def query_package(module, slackpkg_path, name):
+
+ import glob
+ import platform
+
+ machine = platform.machine()
+ # package database entries are named <name>-<version>-<arch>-<build>;
+ # a glob bracket expression matches only a single character, so check the
+ # native machine architecture and "noarch" with two separate globs
+ packages = glob.glob("/var/log/packages/%s-*-%s-*" % (name, machine))
+ packages += glob.glob("/var/log/packages/%s-*-noarch-*" % name)
+
+ return len(packages) > 0
+
+
+def remove_packages(module, slackpkg_path, packages):
+
+ remove_c = 0
+ # Use a for loop so that, in case of error, we can report which package failed
+ for package in packages:
+ # Query the package first, to see if we even need to remove
+ if not query_package(module, slackpkg_path, package):
+ continue
+
+ if not module.check_mode:
+ rc, out, err = module.run_command("%s -default_answer=y -batch=on \
+ remove %s" % (slackpkg_path,
+ package))
+
+ if not module.check_mode and query_package(module, slackpkg_path,
+ package):
+ module.fail_json(msg="failed to remove %s: %s" % (package, out))
+
+ remove_c += 1
+
+ if remove_c > 0:
+
+ module.exit_json(changed=True, msg="removed %s package(s)" % remove_c)
+
+ module.exit_json(changed=False, msg="package(s) already absent")
+
+
+def install_packages(module, slackpkg_path, packages):
+
+ install_c = 0
+
+ for package in packages:
+ if query_package(module, slackpkg_path, package):
+ continue
+
+ if not module.check_mode:
+ rc, out, err = module.run_command("%s -default_answer=y -batch=on \
+ install %s" % (slackpkg_path,
+ package))
+
+ if not module.check_mode and not query_package(module, slackpkg_path,
+ package):
+ module.fail_json(msg="failed to install %s: %s" % (package, out),
+ stderr=err)
+
+ install_c += 1
+
+ if install_c > 0:
+ module.exit_json(changed=True, msg="installed %s package(s)"
+ % (install_c))
+
+ module.exit_json(changed=False, msg="package(s) already present")
+
+
+def upgrade_packages(module, slackpkg_path, packages):
+ install_c = 0
+
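+ # Note: unlike install/remove there is no pre-check of the installed
+ # state here, so every listed package is handed to slackpkg and counted,
+ # and the task reports changed whenever the package list is non-empty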
+ for package in packages:
+ if not module.check_mode:
+ rc, out, err = module.run_command("%s -default_answer=y -batch=on \
+ upgrade %s" % (slackpkg_path,
+ package))
+
+ if not module.check_mode and not query_package(module, slackpkg_path,
+ package):
+ module.fail_json(msg="failed to upgrade %s: %s" % (package, out),
+ stderr=err)
+
+ install_c += 1
+
+ if install_c > 0:
+ module.exit_json(changed=True, msg="upgraded %s package(s)"
+ % (install_c))
+
+ module.exit_json(changed=False, msg="package(s) already present")
+
+
+def update_cache(module, slackpkg_path):
+ rc, out, err = module.run_command("%s -batch=on update" % (slackpkg_path))
+ if rc != 0:
+ module.fail_json(msg="Could not update package cache")
+
+
+def main():
+ module = AnsibleModule(
+ argument_spec=dict(
+ state=dict(default="installed", choices=['installed', 'removed', 'absent', 'present', 'latest']),
+ name=dict(aliases=["pkg"], required=True, type='list'),
+ update_cache=dict(default=False, aliases=["update-cache"],
+ type='bool'),
+ ),
+ supports_check_mode=True)
+
+ slackpkg_path = module.get_bin_path('slackpkg', True)
+
+ p = module.params
+
+ pkgs = p['name']
+
+ if p["update_cache"]:
+ update_cache(module, slackpkg_path)
+
+ if p['state'] == 'latest':
+ upgrade_packages(module, slackpkg_path, pkgs)
+
+ elif p['state'] in ['present', 'installed']:
+ install_packages(module, slackpkg_path, pkgs)
+
+ elif p["state"] in ['removed', 'absent']:
+ remove_packages(module, slackpkg_path, pkgs)
+
+# import module snippets
+from ansible.module_utils.basic import *
+
+main()
diff --git a/lib/ansible/modules/extras/packaging/os/svr4pkg.py b/lib/ansible/modules/extras/packaging/os/svr4pkg.py
new file mode 100644
index 0000000000..807e00f543
--- /dev/null
+++ b/lib/ansible/modules/extras/packaging/os/svr4pkg.py
@@ -0,0 +1,245 @@
+#!/usr/bin/python
+# -*- coding: utf-8 -*-
+
+# (c) 2012, Boyd Adamson <boyd () boydadamson.com>
+#
+# This file is part of Ansible
+#
+# Ansible is free software: you can redistribute it and/or modify
+# it under the terms of the GNU General Public License as published by
+# the Free Software Foundation, either version 3 of the License, or
+# (at your option) any later version.
+#
+# Ansible is distributed in the hope that it will be useful,
+# but WITHOUT ANY WARRANTY; without even the implied warranty of
+# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+# GNU General Public License for more details.
+#
+# You should have received a copy of the GNU General Public License
+# along with Ansible. If not, see <http://www.gnu.org/licenses/>.
+#
+
+DOCUMENTATION = '''
+---
+module: svr4pkg
+short_description: Manage Solaris SVR4 packages
+description:
+ - Manages SVR4 packages on Solaris 10 and 11.
+ - These were the native packages on Solaris <= 10 and are available
+ as a legacy feature in Solaris 11.
+ - Note that this is a very basic packaging system. It will not enforce
+ dependencies on install or remove.
+version_added: "0.9"
+author: "Boyd Adamson (@brontitall)"
+options:
+ name:
+ description:
+ - Package name, e.g. C(SUNWcsr)
+ required: true
+
+ state:
+ description:
+ - Whether to install (C(present)), or remove (C(absent)) a package.
+ - If the package is to be installed, then I(src) is required.
+ - The SVR4 package system doesn't provide an upgrade operation. You need to uninstall the old, then install the new package.
+ required: true
+ choices: ["present", "absent"]
+
+ src:
+ description:
+ - Specifies the location to install the package from. Required when C(state=present).
+ - "Can be any path acceptable to the C(pkgadd) command's C(-d) option. e.g.: C(somefile.pkg), C(/dir/with/pkgs), C(http:/server/mypkgs.pkg)."
+ - If using a file or directory, they must already be accessible by the host. See the M(copy) module for a way to get them there.
+ proxy:
+ description:
+ - HTTP[s] proxy to be used if C(src) is a URL.
+ response_file:
+ description:
+ - Specifies the location of a response file to be used if package expects input on install. (added in Ansible 1.4)
+ required: false
+ zone:
+ description:
+ - Whether to install the package only in the current zone, or install it into all zones.
+ - The installation into all zones works only if you are working with the global zone.
+ required: false
+ default: "all"
+ choices: ["current", "all"]
+ version_added: "1.6"
+ category:
+ description:
+ - Install/Remove category instead of a single package.
+ required: false
+ choices: ["true", "false"]
+ version_added: "1.6"
+'''
+
+EXAMPLES = '''
+# Install a package from an already copied file
+- svr4pkg: name=CSWcommon src=/tmp/cswpkgs.pkg state=present
+
+# Install a package directly from an http site
+- svr4pkg: name=CSWpkgutil src=http://get.opencsw.org/now state=present zone=current
+
+# Install a package with a response file
+- svr4pkg: name=CSWggrep src=/tmp/third-party.pkg response_file=/tmp/ggrep.response state=present
+
+# Ensure that a package is not installed.
+- svr4pkg: name=SUNWgnome-sound-recorder state=absent
+
+# Ensure that a category is not installed.
+- svr4pkg: name=FIREFOX state=absent category=true
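+
+# Install through an HTTP proxy (illustrative; proxy.example.com is a
+# hypothetical host, shown only to demonstrate the proxy option)
+- svr4pkg: name=CSWpkgutil src=http://get.opencsw.org/now proxy=http://proxy.example.com:8080 state=present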
+'''
+
+
+import os
+import tempfile
+
+def package_installed(module, name, category):
+ cmd = [module.get_bin_path('pkginfo', True)]
+ cmd.append('-q')
+ if category:
+ cmd.append('-c')
+ cmd.append(name)
+ rc, out, err = module.run_command(' '.join(cmd))
+ if rc == 0:
+ return True
+ else:
+ return False
+
+def create_admin_file():
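+ # Write a temporary pkgadd(1M)/pkgrm(1M) admin file that answers every
+ # prompt non-interactively so the command never blocks waiting for input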
+ (desc, filename) = tempfile.mkstemp(prefix='ansible_svr4pkg', text=True)
+ fullauto = '''
+mail=
+instance=unique
+partial=nocheck
+runlevel=quit
+idepend=nocheck
+rdepend=nocheck
+space=quit
+setuid=nocheck
+conflict=nocheck
+action=nocheck
+networktimeout=60
+networkretries=3
+authentication=quit
+keystore=/var/sadm/security
+proxy=
+basedir=default
+'''
+ os.write(desc, fullauto)
+ os.close(desc)
+ return filename
+
+def run_command(module, cmd):
+ progname = cmd[0]
+ cmd[0] = module.get_bin_path(progname, True)
+ return module.run_command(cmd)
+
+def package_install(module, name, src, proxy, response_file, zone, category):
+ adminfile = create_admin_file()
+ cmd = [ 'pkgadd', '-n']
+ if zone == 'current':
+ cmd += [ '-G' ]
+ cmd += [ '-a', adminfile, '-d', src ]
+ if proxy is not None:
+ cmd += [ '-x', proxy ]
+ if response_file is not None:
+ cmd += [ '-r', response_file ]
+ if category:
+ cmd += [ '-Y' ]
+ cmd.append(name)
+ (rc, out, err) = run_command(module, cmd)
+ os.unlink(adminfile)
+ return (rc, out, err)
+
+def package_uninstall(module, name, src, category):
+ adminfile = create_admin_file()
+ if category:
+ cmd = [ 'pkgrm', '-na', adminfile, '-Y', name ]
+ else:
+ cmd = [ 'pkgrm', '-na', adminfile, name]
+ (rc, out, err) = run_command(module, cmd)
+ os.unlink(adminfile)
+ return (rc, out, err)
+
+def main():
+ module = AnsibleModule(
+ argument_spec = dict(
+ name = dict(required = True),
+ state = dict(required = True, choices=['present', 'absent']),
+ src = dict(default = None),
+ proxy = dict(default = None),
+ response_file = dict(default = None),
+ zone = dict(required=False, default = 'all', choices=['current','all']),
+ category = dict(default=False, type='bool')
+ ),
+ supports_check_mode=True
+ )
+ state = module.params['state']
+ name = module.params['name']
+ src = module.params['src']
+ proxy = module.params['proxy']
+ response_file = module.params['response_file']
+ zone = module.params['zone']
+ category = module.params['category']
+ rc = None
+ out = ''
+ err = ''
+ result = {}
+ result['name'] = name
+ result['state'] = state
+
+ if state == 'present':
+ if src is None:
+ module.fail_json(name=name,
+ msg="src is required when state=present")
+ if not package_installed(module, name, category):
+ if module.check_mode:
+ module.exit_json(changed=True)
+ (rc, out, err) = package_install(module, name, src, proxy, response_file, zone, category)
+ # Stdout is normally empty but for some packages can be
+ # very long and is not often useful
+ if len(out) > 75:
+ out = out[:75] + '...'
+
+ elif state == 'absent':
+ if package_installed(module, name, category):
+ if module.check_mode:
+ module.exit_json(changed=True)
+ (rc, out, err) = package_uninstall(module, name, src, category)
+ out = out[:75]
+
+ # Returncodes as per pkgadd(1m)
+ # 0 Successful completion
+ # 1 Fatal error.
+ # 2 Warning.
+ # 3 Interruption.
+ # 4 Administration.
+ # 5 Administration. Interaction is required. Do not use pkgadd -n.
+ # 10 Reboot after installation of all packages.
+ # 20 Reboot after installation of this package.
+ # 99 (observed) pkgadd: ERROR: could not process datastream from </tmp/pkgutil.pkg>
+ if rc in (0, 2, 3, 10, 20):
+ result['changed'] = True
+ # no install nor uninstall, or failed
+ else:
+ result['changed'] = False
+
+ # rc will be none when the package already was installed and no action took place
+ # Only return failed=False when the returncode is known to be good as there may be more
+ # undocumented failure return codes
+ if rc not in (None, 0, 2, 10, 20):
+ result['failed'] = True
+ else:
+ result['failed'] = False
+
+ if out:
+ result['stdout'] = out
+ if err:
+ result['stderr'] = err
+
+ module.exit_json(**result)
+
+# import module snippets
+from ansible.module_utils.basic import *
+main()
diff --git a/lib/ansible/modules/extras/packaging/os/swdepot.py b/lib/ansible/modules/extras/packaging/os/swdepot.py
new file mode 100644
index 0000000000..b14af74205
--- /dev/null
+++ b/lib/ansible/modules/extras/packaging/os/swdepot.py
@@ -0,0 +1,196 @@
+#!/usr/bin/python -tt
+# -*- coding: utf-8 -*-
+
+# (c) 2013, Raul Melo
+# Written by Raul Melo <raulmelo@gmail.com>
+# Based on yum module written by Seth Vidal <skvidal at fedoraproject.org>
+#
+# This module is free software: you can redistribute it and/or modify
+# it under the terms of the GNU General Public License as published by
+# the Free Software Foundation, either version 3 of the License, or
+# (at your option) any later version.
+#
+# This software is distributed in the hope that it will be useful,
+# but WITHOUT ANY WARRANTY; without even the implied warranty of
+# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+# GNU General Public License for more details.
+#
+# You should have received a copy of the GNU General Public License
+# along with this software. If not, see <http://www.gnu.org/licenses/>.
+
+import re
+import pipes
+
+DOCUMENTATION = '''
+---
+module: swdepot
+short_description: Manage packages with swdepot package manager (HP-UX)
+description:
+ - Will install, upgrade and remove packages with swdepot package manager (HP-UX)
+version_added: "1.4"
+notes: []
+author: "Raul Melo (@melodous)"
+options:
+ name:
+ description:
+ - package name.
+ required: true
+ default: null
+ choices: []
+ aliases: []
+ version_added: 1.4
+ state:
+ description:
+ - whether to install (C(present), C(latest)), or remove (C(absent)) a package.
+ required: true
+ default: null
+ choices: [ 'present', 'latest', 'absent']
+ aliases: []
+ version_added: 1.4
+ depot:
+ description:
+ - The source repository from which install or upgrade a package.
+ required: false
+ default: null
+ choices: []
+ aliases: []
+ version_added: 1.4
+'''
+
+EXAMPLES = '''
+- swdepot: name=unzip-6.0 state=present depot=repository:/path
+- swdepot: name=unzip state=latest depot=repository:/path
+- swdepot: name=unzip state=absent
+'''
+
+def compare_package(version1, version2):
+ """ Compare version packages.
+ Return values:
+ -1 first minor
+ 0 equal
+ 1 fisrt greater """
+
+ def normalize(v):
+ return [int(x) for x in re.sub(r'(\.0+)*$', '', v).split(".")]
+ return cmp(normalize(version1), normalize(version2))
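+
+# Illustrative behavior of compare_package (trailing ".0" groups are
+# stripped before comparison): compare_package('1.0', '1.0.0') == 0,
+# compare_package('1.2', '1.10') == -1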
+
+def query_package(module, name, depot=None):
+ """ Returns whether a package is installed or not and version. """
+
+ cmd_list = '/usr/sbin/swlist -a revision -l product'
+ if depot:
+ rc, stdout, stderr = module.run_command("%s -s %s %s | grep %s" % (cmd_list, pipes.quote(depot), pipes.quote(name), pipes.quote(name)), use_unsafe_shell=True)
+ else:
+ rc, stdout, stderr = module.run_command("%s %s | grep %s" % (cmd_list, pipes.quote(name), pipes.quote(name)), use_unsafe_shell=True)
+ if rc == 0:
+ version = re.sub(r"\s\s+|\t", " ", stdout).strip().split()[1]
+ else:
+ version = None
+
+ return rc, version
+
+def remove_package(module, name):
+ """ Uninstall package if installed. """
+
+ cmd_remove = '/usr/sbin/swremove'
+ rc, stdout, stderr = module.run_command("%s %s" % (cmd_remove, name))
+
+ if rc == 0:
+ return rc, stdout
+ else:
+ return rc, stderr
+
+def install_package(module, depot, name):
+ """ Install package if not already installed """
+
+ cmd_install = '/usr/sbin/swinstall -x mount_all_filesystems=false'
+ rc, stdout, stderr = module.run_command("%s -s %s %s" % (cmd_install, depot, name))
+ if rc == 0:
+ return rc, stdout
+ else:
+ return rc, stderr
+
+def main():
+ module = AnsibleModule(
+ argument_spec = dict(
+ name = dict(aliases=['pkg'], required=True),
+ state = dict(choices=['present', 'absent', 'latest'], required=True),
+ depot = dict(default=None, required=False)
+ ),
+ supports_check_mode=True
+ )
+ name = module.params['name']
+ state = module.params['state']
+ depot = module.params['depot']
+
+ changed = False
+ msg = "No changed"
+ rc = 0
+ if state in ('present', 'latest') and depot is None:
+ output = "depot parameter is mandatory in present or latest task"
+ module.fail_json(name=name, msg=output, rc=rc)
+
+
+ #Check local version
+ rc, version_installed = query_package(module, name)
+ if not rc:
+ installed = True
+ msg = "Already installed"
+
+ else:
+ installed = False
+
+ if state in ('present', 'latest') and not installed:
+ if module.check_mode:
+ module.exit_json(changed=True)
+ rc, output = install_package(module, depot, name)
+
+ if not rc:
+ changed = True
+ msg = "Package installed"
+
+ else:
+ module.fail_json(name=name, msg=output, rc=rc)
+
+ elif state == 'latest' and installed:
+ #Check depot version
+ rc, version_depot = query_package(module, name, depot)
+
+ if not rc:
+ if compare_package(version_installed,version_depot) == -1:
+ if module.check_mode:
+ module.exit_json(changed=True)
+ #Install new version
+ rc, output = install_package(module, depot, name)
+
+ if not rc:
+ msg = "Packge upgraded, Before " + version_installed + " Now " + version_depot
+ changed = True
+
+ else:
+ module.fail_json(name=name, msg=output, rc=rc)
+
+ else:
+ output = "Software package not in repository " + depot
+ module.fail_json(name=name, msg=output, rc=rc)
+
+ elif state == 'absent' and installed:
+ if module.check_mode:
+ module.exit_json(changed=True)
+ rc, output = remove_package(module, name)
+ if not rc:
+ changed = True
+ msg = "Package removed"
+ else:
+ module.fail_json(name=name, msg=output, rc=rc)
+
+ if module.check_mode:
+ module.exit_json(changed=False)
+
+ module.exit_json(changed=changed, name=name, state=state, msg=msg)
+
+# import module snippets
+from ansible.module_utils.basic import *
+
+main()
+
diff --git a/lib/ansible/modules/extras/packaging/os/urpmi.py b/lib/ansible/modules/extras/packaging/os/urpmi.py
new file mode 100644
index 0000000000..0b9ec92931
--- /dev/null
+++ b/lib/ansible/modules/extras/packaging/os/urpmi.py
@@ -0,0 +1,200 @@
+#!/usr/bin/python -tt
+# -*- coding: utf-8 -*-
+
+# (c) 2013, Philippe Makowski
+# Written by Philippe Makowski <philippem@mageia.org>
+# Based on apt module written by Matthew Williams <matthew@flowroute.com>
+#
+# This module is free software: you can redistribute it and/or modify
+# it under the terms of the GNU General Public License as published by
+# the Free Software Foundation, either version 3 of the License, or
+# (at your option) any later version.
+#
+# This software is distributed in the hope that it will be useful,
+# but WITHOUT ANY WARRANTY; without even the implied warranty of
+# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+# GNU General Public License for more details.
+#
+# You should have received a copy of the GNU General Public License
+# along with this software. If not, see <http://www.gnu.org/licenses/>.
+
+
+DOCUMENTATION = '''
+---
+module: urpmi
+short_description: Urpmi manager
+description:
+ - Manages packages with I(urpmi) (such as for Mageia or Mandriva)
+version_added: "1.3.4"
+options:
+ pkg:
+ description:
+ - name of package to install, upgrade or remove.
+ required: true
+ default: null
+ state:
+ description:
+ - Indicates the desired package state
+ required: false
+ default: present
+ choices: [ "absent", "present" ]
+ update_cache:
+ description:
+ - Update the package database first, i.e. run C(urpmi.update -a).
+ required: false
+ default: no
+ choices: [ "yes", "no" ]
+ no_recommends:
+ description:
+ - Corresponds to the C(--no-recommends) option for I(urpmi).
+ required: false
+ default: yes
+ choices: [ "yes", "no" ]
+ force:
+ description:
+ - Assume "yes" is the answer to any question urpmi has to ask.
+ Corresponds to the C(--force) option for I(urpmi).
+ required: false
+ default: yes
+ choices: [ "yes", "no" ]
+author: "Philippe Makowski (@pmakowski)"
+notes: []
+'''
+
+EXAMPLES = '''
+# install package foo
+- urpmi: pkg=foo state=present
+# remove package foo
+- urpmi: pkg=foo state=absent
+# remove packages foo and bar
+- urpmi: pkg=foo,bar state=absent
+# update the package database (urpmi.update -a -q) and install bar (bar will be upgraded if a newer version exists)
+- urpmi: pkg=bar state=present update_cache=yes
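+# install foo together with its recommended packages (illustrative;
+# exercises the no_recommends option documented above)
+- urpmi: pkg=foo state=present no_recommends=no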
+'''
+
+
+import os
+
+URPMI_PATH = '/usr/sbin/urpmi'
+URPME_PATH = '/usr/sbin/urpme'
+
+def query_package(module, name):
+ # rpm -q returns 0 if the package is installed,
+ # 1 if it is not installed
+ cmd = "rpm -q %s" % (name)
+ rc, stdout, stderr = module.run_command(cmd, check_rc=False)
+ if rc == 0:
+ return True
+ else:
+ return False
+
+def query_package_provides(module, name):
+ # rpm -q returns 0 if the package is installed,
+ # 1 if it is not installed
+ cmd = "rpm -q --provides %s" % (name)
+ rc, stdout, stderr = module.run_command(cmd, check_rc=False)
+ return rc == 0
+
+
+def update_package_db(module):
+ cmd = "urpmi.update -a -q"
+ rc, stdout, stderr = module.run_command(cmd, check_rc=False)
+ if rc != 0:
+ module.fail_json(msg="could not update package db")
+
+
+def remove_packages(module, packages):
+
+ remove_c = 0
+ # Use a for loop so that, in case of error, we can report which package failed
+ for package in packages:
+ # Query the package first, to see if we even need to remove
+ if not query_package(module, package):
+ continue
+
+ cmd = "%s --auto %s" % (URPME_PATH, package)
+ rc, stdout, stderr = module.run_command(cmd, check_rc=False)
+
+ if rc != 0:
+ module.fail_json(msg="failed to remove %s" % (package))
+
+ remove_c += 1
+
+ if remove_c > 0:
+
+ module.exit_json(changed=True, msg="removed %s package(s)" % remove_c)
+
+ module.exit_json(changed=False, msg="package(s) already absent")
+
+
+def install_packages(module, pkgspec, force=True, no_recommends=True):
+
+ packages = ""
+ for package in pkgspec:
+ if not query_package_provides(module, package):
+ packages += "'%s' " % package
+
+ if len(packages) != 0:
+ if no_recommends:
+ no_recommends_yes = '--no-recommends'
+ else:
+ no_recommends_yes = ''
+
+ if force:
+ force_yes = '--force'
+ else:
+ force_yes = ''
+
+ cmd = ("%s --auto %s --quiet %s %s" % (URPMI_PATH, force_yes, no_recommends_yes, packages))
+
+ rc, out, err = module.run_command(cmd)
+
+ installed = True
+ for package in pkgspec:
+ if not query_package_provides(module, package):
+ installed = False
+
+ # urpmi always exits with code 0 when --force is used
+ if rc or not installed:
+ module.fail_json(msg="'urpmi %s' failed: %s" % (packages, err))
+ else:
+ module.exit_json(changed=True, msg="installed %s" % packages)
+ else:
+ module.exit_json(changed=False)
+
+
+def main():
+ module = AnsibleModule(
+ argument_spec = dict(
+ state = dict(default='installed', choices=['installed', 'removed', 'absent', 'present']),
+ update_cache = dict(default=False, aliases=['update-cache'], type='bool'),
+ force = dict(default=True, type='bool'),
+ no_recommends = dict(default=True, aliases=['no-recommends'], type='bool'),
+ package = dict(aliases=['pkg', 'name'], required=True)))
+
+
+ if not os.path.exists(URPMI_PATH):
+ module.fail_json(msg="cannot find urpmi, looking for %s" % (URPMI_PATH))
+
+ p = module.params
+
+ force_yes = p['force']
+ no_recommends_yes = p['no_recommends']
+
+ if p['update_cache']:
+ update_package_db(module)
+
+ packages = p['package'].split(',')
+
+ if p['state'] in [ 'installed', 'present' ]:
+ install_packages(module, packages, force_yes, no_recommends_yes)
+
+ elif p['state'] in [ 'removed', 'absent' ]:
+ remove_packages(module, packages)
+
+# import module snippets
+from ansible.module_utils.basic import *
+
+main()
diff --git a/lib/ansible/modules/extras/packaging/os/yum_repository.py b/lib/ansible/modules/extras/packaging/os/yum_repository.py
new file mode 100644
index 0000000000..dfdd665ed2
--- /dev/null
+++ b/lib/ansible/modules/extras/packaging/os/yum_repository.py
@@ -0,0 +1,754 @@
+#!/usr/bin/python
+# encoding: utf-8
+
+# (c) 2015-2016, Jiri Tyr <jiri.tyr@gmail.com>
+#
+# This file is part of Ansible
+#
+# Ansible is free software: you can redistribute it and/or modify
+# it under the terms of the GNU General Public License as published by
+# the Free Software Foundation, either version 3 of the License, or
+# (at your option) any later version.
+#
+# Ansible is distributed in the hope that it will be useful,
+# but WITHOUT ANY WARRANTY; without even the implied warranty of
+# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+# GNU General Public License for more details.
+#
+# You should have received a copy of the GNU General Public License
+# along with Ansible. If not, see <http://www.gnu.org/licenses/>.
+
+
+import ConfigParser
+import os
+from ansible.module_utils.pycompat24 import get_exception
+
+
+DOCUMENTATION = '''
+---
+module: yum_repository
+author: Jiri Tyr (@jtyr)
+version_added: '2.1'
+short_description: Add and remove YUM repositories
+description:
+ - Add or remove YUM repositories in RPM-based Linux distributions.
+
+options:
+ async:
+ required: false
+ choices: ['yes', 'no']
+ default: 'yes'
+ description:
+ - If set to C(yes) Yum will download packages and metadata from this
+ repo in parallel, if possible.
+ bandwidth:
+ required: false
+ default: 0
+ description:
+ - Maximum available network bandwidth in bytes/second. Used with the
+ I(throttle) option.
+ - If I(throttle) is a percentage and bandwidth is C(0) then bandwidth
+ throttling will be disabled. If I(throttle) is expressed as a data rate
+ (bytes/sec) then this option is ignored. Default is C(0) (no bandwidth
+ throttling).
+ baseurl:
+ required: false
+ default: null
+ description:
+ - URL to the directory where the yum repository's 'repodata' directory
+ lives.
+ - This or the I(mirrorlist) parameter is required if I(state) is set to
+ C(present).
+ cost:
+ required: false
+ default: 1000
+ description:
+ - Relative cost of accessing this repository. Useful for weighing one
+ repo's packages as greater/less than any other.
+ deltarpm_metadata_percentage:
+ required: false
+ default: 100
+ description:
+ - When the relative size of deltarpm metadata vs pkgs is larger than
+ this, deltarpm metadata is not downloaded from the repo. Note that you
+ can give values over C(100), so C(200) means that the metadata is
+ required to be half the size of the packages. Use C(0) to turn off
+ this check, and always download metadata.
+ deltarpm_percentage:
+ required: false
+ default: 75
+ description:
+ - When the relative size of delta vs pkg is larger than this, delta is
+ not used. Use C(0) to turn off delta rpm processing. Local repositories
+ (with file:// I(baseurl)) have delta rpms turned off by default.
+ description:
+ required: false
+ default: null
+ description:
+ - A human readable string describing the repository.
+ - This parameter is only required if I(state) is set to C(present).
+ enabled:
+ required: false
+ choices: ['yes', 'no']
+ default: 'yes'
+ description:
+ - This tells yum whether or not to use this repository.
+ enablegroups:
+ required: false
+ choices: ['yes', 'no']
+ default: 'yes'
+ description:
+ - Determines whether yum will allow the use of package groups for this
+ repository.
+ exclude:
+ required: false
+ default: null
+ description:
+ - List of packages to exclude from updates or installs. This should be a
+ space separated list. Shell globs using wildcards (eg. C(*) and C(?))
+ are allowed.
+ - The list can also be a regular YAML array.
+ failovermethod:
+ required: false
+ choices: [roundrobin, priority]
+ default: roundrobin
+ description:
+ - C(roundrobin) randomly selects a URL out of the list of URLs to start
+ with and proceeds through each of them as it encounters a failure
+ contacting the host.
+ - C(priority) starts from the first I(baseurl) listed and reads through
+ them sequentially.
+ file:
+ required: false
+ default: null
+ description:
+ - File to use to save the repo in. Defaults to the value of I(name).
+ gpgcakey:
+ required: false
+ default: null
+ description:
+ - A URL pointing to the ASCII-armored CA key file for the repository.
+ gpgcheck:
+ required: false
+ choices: ['yes', 'no']
+ default: 'no'
+ description:
+ - Tells yum whether or not it should perform a GPG signature check on
+ packages.
+ gpgkey:
+ required: false
+ default: null
+ description:
+ - A URL pointing to the ASCII-armored GPG key file for the repository.
+ http_caching:
+ required: false
+ choices: [all, packages, none]
+ default: all
+ description:
+ - Determines how upstream HTTP caches are instructed to handle any HTTP
+ downloads that Yum does.
+ - C(all) means that all HTTP downloads should be cached.
+ - C(packages) means that only RPM package downloads should be cached (but
+ not repository metadata downloads).
+ - C(none) means that no HTTP downloads should be cached.
+ include:
+ required: false
+ default: null
+ description:
+ - Include an external configuration file. Both a local path and a URL are
+ supported. The configuration file will be inserted at the position of the
+ I(include=) line. Included files may contain further include lines.
+ Yum will abort with an error if an inclusion loop is detected.
+ includepkgs:
+ required: false
+ default: null
+ description:
+ - List of packages you want to only use from a repository. This should be
+ a space separated list. Shell globs using wildcards (eg. C(*) and C(?))
+ are allowed. Substitution variables (e.g. C($releasever)) are honored
+ here.
+ - The list can also be a regular YAML array.
+ ip_resolve:
+ required: false
+ choices: [4, 6, IPv4, IPv6, whatever]
+ default: whatever
+ description:
+ - Determines how yum resolves host names.
+ - C(4) or C(IPv4) - resolve to IPv4 addresses only.
+ - C(6) or C(IPv6) - resolve to IPv6 addresses only.
+ keepalive:
+ required: false
+ choices: ['yes', 'no']
+ default: 'no'
+ description:
+ - This tells yum whether or not HTTP/1.1 keepalive should be used with
+ this repository. This can improve transfer speeds by using one
+ connection when downloading multiple files from a repository.
+ keepcache:
+ required: false
+ choices: ['0', '1']
+ default: '1'
+ description:
+ - Either C(1) or C(0). Determines whether or not yum keeps the cache of
+ headers and packages after successful installation.
+ metadata_expire:
+ required: false
+ default: 21600
+ description:
+ - Time (in seconds) after which the metadata will expire.
+ - Default value is 6 hours.
+ metadata_expire_filter:
+ required: false
+ choices: [never, 'read-only:past', 'read-only:present', 'read-only:future']
+ default: 'read-only:present'
+ description:
+ - Filter the I(metadata_expire) time, allowing a trade of speed for
+ accuracy if a command doesn't require it. Each yum command can specify
+ that it requires a certain level of timeliness quality from the remote
+ repos. from "I'm about to install/upgrade, so this better be current"
+ to "Anything that's available is good enough".
+ - C(never) - Nothing is filtered, always obey I(metadata_expire).
+ - C(read-only:past) - Commands that only care about past information are
+ filtered from metadata expiring. Eg. I(yum history) info (if history
+ needs to lookup anything about a previous transaction, then by
+ definition the remote package was available in the past).
+ - C(read-only:present) - Commands that are balanced between past and
+ future. Eg. I(yum list yum).
+ - C(read-only:future) - Commands that are likely to result in running
+ other commands which will require the latest metadata. Eg.
+ I(yum check-update).
+ - Note that this option does not override "yum clean expire-cache".
+ metalink:
+ required: false
+ default: null
+ description:
+ - Specifies a URL to a metalink file for the repomd.xml, a list of
+ mirrors for the entire repository is generated by converting the
+ mirrors for the repomd.xml file to a I(baseurl).
+ mirrorlist:
+ required: false
+ default: null
+ description:
+ - Specifies a URL to a file containing a list of baseurls.
+ - This or the I(baseurl) parameter is required if I(state) is set to
+ C(present).
+ mirrorlist_expire:
+ required: false
+ default: 21600
+ description:
+ - Time (in seconds) after which the mirrorlist locally cached will
+ expire.
+ - Default value is 6 hours.
+ name:
+ required: true
+ description:
+ - Unique repository ID.
+ - This parameter is only required if I(state) is set to C(present) or
+ C(absent).
+ params:
+ required: false
+ default: null
+ description:
+ - Option used to allow the user to overwrite any of the other options.
+ To remove an option, set the value of the option to C(null).
+ password:
+ required: false
+ default: null
+ description:
+ - Password to use with the username for basic authentication.
+ priority:
+ required: false
+ default: 99
+ description:
+ - Enforce ordered protection of repositories. The value is an integer
+ from 1 to 99.
+ - This option only works if the YUM Priorities plugin is installed.
+ protect:
+ required: false
+ choices: ['yes', 'no']
+ default: 'no'
+ description:
+ - Protect packages from updates from other repositories.
+ proxy:
+ required: false
+ default: null
+ description:
+ - URL to the proxy server that yum should use. Set to C(_none_) to disable
+ the global proxy setting.
+ proxy_password:
+ required: false
+ default: null
+ description:
+ - Password to use for the proxy.
+ proxy_username:
+ required: false
+ default: null
+ description:
+ - Username to use for the proxy.
+ repo_gpgcheck:
+ required: false
+ choices: ['yes', 'no']
+ default: 'no'
+ description:
+ - This tells yum whether or not it should perform a GPG signature check
+ on the repodata from this repository.
+ reposdir:
+ required: false
+ default: /etc/yum.repos.d
+ description:
+ - Directory where the C(.repo) files will be stored.
+ retries:
+ required: false
+ default: 10
+ description:
+ - Set the number of times any attempt to retrieve a file should retry
+ before returning an error. Setting this to C(0) makes yum try forever.
+ s3_enabled:
+ required: false
+ choices: ['yes', 'no']
+ default: 'no'
+ description:
+ - Enables support for S3 repositories.
+ - This option only works if the YUM S3 plugin is installed.
+ skip_if_unavailable:
+ required: false
+ choices: ['yes', 'no']
+ default: 'no'
+ description:
+ - If set to C(yes) yum will continue running if this repository cannot be
+ contacted for any reason. This should be set carefully as all repos are
+ consulted for any given command.
+ ssl_check_cert_permissions:
+ required: false
+ choices: ['yes', 'no']
+ default: 'no'
+ description:
+ - Whether yum should check the permissions on the paths for the
+ certificates on the repository (both remote and local).
+ - If we can't read any of the files then yum will force
+ I(skip_if_unavailable) to be C(yes). This is most useful for non-root
+ processes which use yum on repos that have client cert files which are
+ readable only by root.
+ sslcacert:
+ required: false
+ default: null
+ description:
+ - Path to the directory containing the databases of the certificate
+ authorities yum should use to verify SSL certificates.
+ sslclientcert:
+ required: false
+ default: null
+ description:
+ - Path to the SSL client certificate yum should use to connect to
+ repos/remote sites.
+ sslclientkey:
+ required: false
+ default: null
+ description:
+ - Path to the SSL client key yum should use to connect to repos/remote
+ sites.
+ sslverify:
+ required: false
+ choices: ['yes', 'no']
+ default: 'yes'
+ description:
+ - Defines whether yum should verify SSL certificates/hosts at all.
+ state:
+ required: false
+ choices: [absent, present]
+ default: present
+ description:
+ - State of the repo file.
+ throttle:
+ required: false
+ default: null
+ description:
+ - Enable bandwidth throttling for downloads.
+ - This option can be expressed as an absolute data rate in bytes/sec. An
+ SI prefix (k, M or G) may be appended to the bandwidth value.
+ timeout:
+ required: false
+ default: 30
+ description:
+ - Number of seconds to wait for a connection before timing out.
+ ui_repoid_vars:
+ required: false
+ default: releasever basearch
+ description:
+ - When a repository id is displayed, append these yum variables to the
+ string if they are used in the I(baseurl)/etc. Variables are appended
+ in the order listed (and found).
+ username:
+ required: false
+ default: null
+ description:
+ - Username to use for basic authentication to a repo or really any url.
+
+extends_documentation_fragment:
+ - files
+
+notes:
+ - All comments will be removed if modifying an existing repo file.
+ - Section order is preserved in an existing repo file.
+ - Parameters in a section are ordered alphabetically in an existing repo
+ file.
+ - The repo file will be automatically deleted if it contains no repository.
+'''
+
+EXAMPLES = '''
+- name: Add repository
+ yum_repository:
+ name: epel
+ description: EPEL YUM repo
+ baseurl: http://download.fedoraproject.org/pub/epel/$releasever/$basearch/
+
+- name: Add multiple repositories into the same file (1/2)
+ yum_repository:
+ name: epel
+ description: EPEL YUM repo
+ file: external_repos
+ baseurl: http://download.fedoraproject.org/pub/epel/$releasever/$basearch/
+ gpgcheck: no
+- name: Add multiple repositories into the same file (2/2)
+ yum_repository:
+ name: rpmforge
+ description: RPMforge YUM repo
+ file: external_repos
+ baseurl: http://apt.sw.be/redhat/el7/en/$basearch/rpmforge
+ mirrorlist: http://mirrorlist.repoforge.org/el7/mirrors-rpmforge
+ enabled: no
+
+- name: Remove repository
+ yum_repository:
+ name: epel
+ state: absent
+
+- name: Remove repository from a specific repo file
+ yum_repository:
+ name: epel
+ file: external_repos
+ state: absent
+
+#
+# Allow to overwrite the yum_repository parameters by defining the parameters
+# as a variable in the defaults or vars file:
+#
+# my_role_somerepo_params:
+# # Disable GPG checking
+# gpgcheck: no
+# # Remove the gpgkey option
+# gpgkey: null
+#
+- name: Add Some repo
+ yum_repository:
+ name: somerepo
+ description: Some YUM repo
+ baseurl: http://server.com/path/to/the/repo
+ gpgkey: http://server.com/keys/somerepo.pub
+ gpgcheck: yes
+ params: "{{ my_role_somerepo_params }}"
+'''
+
+RETURN = '''
+repo:
+ description: repository name
+ returned: success
+ type: string
+ sample: "epel"
+state:
+ description: state of the target, after execution
+ returned: success
+ type: string
+ sample: "present"
+'''
+
+
+class YumRepo(object):
+ # Class global variables
+ module = None
+ params = None
+ section = None
+ repofile = ConfigParser.RawConfigParser()
+
+ # List of parameters which will be allowed in the repo file output
+ allowed_params = [
+ 'async',
+ 'bandwidth',
+ 'baseurl',
+ 'cost',
+ 'deltarpm_metadata_percentage',
+ 'deltarpm_percentage',
+ 'enabled',
+ 'enablegroups',
+ 'exclude',
+ 'failovermethod',
+ 'gpgcakey',
+ 'gpgcheck',
+ 'gpgkey',
+ 'http_caching',
+ 'include',
+ 'includepkgs',
+ 'ip_resolve',
+ 'keepalive',
+ 'keepcache',
+ 'metadata_expire',
+ 'metadata_expire_filter',
+ 'metalink',
+ 'mirrorlist',
+ 'mirrorlist_expire',
+ 'name',
+ 'password',
+ 'priority',
+ 'protect',
+ 'proxy',
+ 'proxy_password',
+ 'proxy_username',
+ 'repo_gpgcheck',
+ 'retries',
+ 's3_enabled',
+ 'skip_if_unavailable',
+ 'sslcacert',
+ 'ssl_check_cert_permissions',
+ 'sslclientcert',
+ 'sslclientkey',
+ 'sslverify',
+ 'throttle',
+ 'timeout',
+ 'ui_repoid_vars',
+ 'username']
+
+ # List of parameters which can be a list
+ list_params = ['exclude', 'includepkgs']
+
+ def __init__(self, module):
+ # To be able to use fail_json
+ self.module = module
+ # Shortcut for the params
+ self.params = self.module.params
+ # Section is always the repoid
+ self.section = self.params['repoid']
+
+ # Check if repo directory exists
+ repos_dir = self.params['reposdir']
+ if not os.path.isdir(repos_dir):
+ self.module.fail_json(
+ msg="Repo directory '%s' does not exist." % repos_dir)
+
+ # Set dest; also used to set dest parameter for the FS attributes
+ self.params['dest'] = os.path.join(
+ repos_dir, "%s.repo" % self.params['file'])
+
+ # Read the repo file if it exists
+ if os.path.isfile(self.params['dest']):
+ self.repofile.read(self.params['dest'])
+
+ def add(self):
+ # Remove already existing repo and create a new one
+ if self.repofile.has_section(self.section):
+ self.repofile.remove_section(self.section)
+
+ # Add section
+ self.repofile.add_section(self.section)
+
+ # Baseurl/mirrorlist is not required because for removal we need only
+ # the repo name. This is why we check if the baseurl/mirrorlist is
+ # defined.
+ if (self.params['baseurl'], self.params['mirrorlist']) == (None, None):
+ self.module.fail_json(
+ msg='Paramater "baseurl" or "mirrorlist" is required for '
+ 'adding a new repo.')
+
+ # Set options
+ for key, value in sorted(self.params.items()):
+ if key in self.list_params and isinstance(value, list):
+ # Join items into one string for specific parameters
+ value = ' '.join(value)
+ elif isinstance(value, bool):
+ # Convert boolean value to integer
+ value = int(value)
+
+ # Set the value only if it was defined (default is None)
+ if value is not None and key in self.allowed_params:
+ self.repofile.set(self.section, key, value)
+
+ def save(self):
+ if len(self.repofile.sections()):
+ # Write data into the file
+ try:
+ fd = open(self.params['dest'], 'wb')
+ except IOError:
+ e = get_exception()
+ self.module.fail_json(
+ msg="Cannot open repo file %s." % self.params['dest'],
+ details=str(e))
+
+ self.repofile.write(fd)
+
+ try:
+ fd.close()
+ except IOError:
+ e = get_exception()
+ self.module.fail_json(
+ msg="Cannot write repo file %s." % self.params['dest'],
+ details=str(e))
+ else:
+ # Remove the file if it no longer contains any repos
+ try:
+ os.remove(self.params['dest'])
+ except OSError:
+ e = get_exception()
+ self.module.fail_json(
+ msg=(
+ "Cannot remove empty repo file %s." %
+ self.params['dest']),
+ details=str(e))
+
+ def remove(self):
+ # Remove section if exists
+ if self.repofile.has_section(self.section):
+ self.repofile.remove_section(self.section)
+
+ def dump(self):
+ repo_string = ""
+
+ # Compose the repo file
+ for section in sorted(self.repofile.sections()):
+ repo_string += "[%s]\n" % section
+
+ for key, value in sorted(self.repofile.items(section)):
+ repo_string += "%s = %s\n" % (key, value)
+
+ repo_string += "\n"
+
+ return repo_string
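+
+ # Illustrative dump() output for a single repo (keys are written
+ # alphabetically, as noted in the module documentation):
+ #
+ # [epel]
+ # baseurl = http://download.fedoraproject.org/pub/epel/$releasever/$basearch/
+ # name = EPEL YUM repo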
+
+
+def main():
+ # Module settings
+ module = AnsibleModule(
+ argument_spec=dict(
+ async=dict(type='bool'),
+ bandwidth=dict(),
+ baseurl=dict(),
+ cost=dict(),
+ deltarpm_metadata_percentage=dict(),
+ deltarpm_percentage=dict(),
+ description=dict(),
+ enabled=dict(type='bool'),
+ enablegroups=dict(type='bool'),
+ exclude=dict(),
+ failovermethod=dict(choices=['roundrobin', 'priority']),
+ file=dict(),
+ gpgcakey=dict(),
+ gpgcheck=dict(type='bool'),
+ gpgkey=dict(),
+ http_caching=dict(choices=['all', 'packages', 'none']),
+ include=dict(),
+ includepkgs=dict(),
+ ip_resolve=dict(choices=['4', '6', 'IPv4', 'IPv6', 'whatever']),
+ keepalive=dict(type='bool'),
+ keepcache=dict(choices=['0', '1']),
+ metadata_expire=dict(),
+ metadata_expire_filter=dict(
+ choices=[
+ 'never',
+ 'read-only:past',
+ 'read-only:present',
+ 'read-only:future']),
+ metalink=dict(),
+ mirrorlist=dict(),
+ mirrorlist_expire=dict(),
+ name=dict(required=True),
+ params=dict(type='dict'),
+ password=dict(no_log=True),
+ priority=dict(),
+ protect=dict(type='bool'),
+ proxy=dict(),
+ proxy_password=dict(no_log=True),
+ proxy_username=dict(),
+ repo_gpgcheck=dict(type='bool'),
+ reposdir=dict(default='/etc/yum.repos.d', type='path'),
+ retries=dict(),
+ s3_enabled=dict(type='bool'),
+ skip_if_unavailable=dict(type='bool'),
+ sslcacert=dict(),
+ ssl_check_cert_permissions=dict(type='bool'),
+ sslclientcert=dict(),
+ sslclientkey=dict(),
+ sslverify=dict(type='bool'),
+ state=dict(choices=['present', 'absent'], default='present'),
+ throttle=dict(),
+ timeout=dict(),
+ ui_repoid_vars=dict(),
+ username=dict(),
+ ),
+ add_file_common_args=True,
+ supports_check_mode=True,
+ )
+
+ # Update module parameters by user's parameters if defined
+ if 'params' in module.params and isinstance(module.params['params'], dict):
+ module.params.update(module.params['params'])
+ # Remove the params
+ module.params.pop('params', None)
+
+ name = module.params['name']
+ state = module.params['state']
+
+ # Check if required parameters are present
+ if state == 'present':
+ if (
+ module.params['baseurl'] is None and
+ module.params['mirrorlist'] is None):
+ module.fail_json(
+ msg="Parameter 'baseurl' or 'mirrorlist' is required.")
+ if module.params['description'] is None:
+ module.fail_json(
+ msg="Parameter 'description' is required.")
+
+ # Rename "name" and "description" to ensure correct key sorting
+ module.params['repoid'] = module.params['name']
+ module.params['name'] = module.params['description']
+ del module.params['description']
+
+ # Define repo file name if it doesn't exist
+ if module.params['file'] is None:
+ module.params['file'] = module.params['repoid']
+
+ # Instantiate the YumRepo object
+ yumrepo = YumRepo(module)
+
+ # Get repo status before change
+ yumrepo_before = yumrepo.dump()
+
+ # Perform action depending on the state
+ if state == 'present':
+ yumrepo.add()
+ elif state == 'absent':
+ yumrepo.remove()
+
+ # Get repo status after change
+ yumrepo_after = yumrepo.dump()
+
+ # Compare repo states
+ changed = yumrepo_before != yumrepo_after
+
+ # Save the file only if not in check mode and if there was a change
+ if not module.check_mode and changed:
+ yumrepo.save()
+
+ # Change file attributes if needed
+ if os.path.isfile(module.params['dest']):
+ file_args = module.load_file_common_arguments(module.params)
+ changed = module.set_fs_attributes_if_different(file_args, changed)
+
+ # Print status of the change
+ module.exit_json(changed=changed, repo=name, state=state)
+
+
+# Import module snippets
+from ansible.module_utils.basic import *
+
+
+if __name__ == '__main__':
+ main()
diff --git a/lib/ansible/modules/extras/packaging/os/zypper.py b/lib/ansible/modules/extras/packaging/os/zypper.py
new file mode 100644
index 0000000000..c956feac1a
--- /dev/null
+++ b/lib/ansible/modules/extras/packaging/os/zypper.py
@@ -0,0 +1,448 @@
+#!/usr/bin/python -tt
+# -*- coding: utf-8 -*-
+
+# (c) 2013, Patrick Callahan <pmc@patrickcallahan.com>
+# based on
+# openbsd_pkg
+# (c) 2013
+# Patrik Lundin <patrik.lundin.swe@gmail.com>
+#
+# yum
+# (c) 2012, Red Hat, Inc
+# Written by Seth Vidal <skvidal at fedoraproject.org>
+#
+# This file is part of Ansible
+#
+# Ansible is free software: you can redistribute it and/or modify
+# it under the terms of the GNU General Public License as published by
+# the Free Software Foundation, either version 3 of the License, or
+# (at your option) any later version.
+#
+# Ansible is distributed in the hope that it will be useful,
+# but WITHOUT ANY WARRANTY; without even the implied warranty of
+# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+# GNU General Public License for more details.
+#
+# You should have received a copy of the GNU General Public License
+# along with Ansible. If not, see <http://www.gnu.org/licenses/>.
+
+from xml.dom.minidom import parseString as parseXML
+import re
+
+DOCUMENTATION = '''
+---
+module: zypper
+author:
+ - "Patrick Callahan (@dirtyharrycallahan)"
+ - "Alexander Gubin (@alxgu)"
+ - "Thomas O'Donnell (@andytom)"
+ - "Robin Roth (@robinro)"
+ - "Andrii Radyk (@AnderEnder)"
+version_added: "1.2"
+short_description: Manage packages on SUSE and openSUSE
+description:
+ - Manage packages on SUSE and openSUSE using the zypper and rpm tools.
+options:
+ name:
+ description:
+ - Package name C(name) or package specifier.
+ - Can include a version like C(name=1.0), C(name>3.4) or C(name<=2.7). If a version is given, C(oldpackage) is implied and zypper is allowed to update the package within the version range given.
+ - You can also pass a url or a local path to a rpm file.
+ - When using state=latest, this can be '*', which updates all installed packages.
+ required: true
+ aliases: [ 'pkg' ]
+ state:
+ description:
+ - C(present) will make sure the package is installed.
+ C(latest) will make sure the latest version of the package is installed.
+ C(absent) will make sure the specified package is not installed.
+ required: false
+ choices: [ present, latest, absent ]
+ default: "present"
+ type:
+ description:
+ - The type of package to be operated on.
+ required: false
+ choices: [ package, patch, pattern, product, srcpackage, application ]
+ default: "package"
+ version_added: "2.0"
+ disable_gpg_check:
+ description:
+ - Whether to disable the GPG signature checking of the package
+ signature being installed. Has an effect only if state is
+ I(present) or I(latest).
+ required: false
+ default: "no"
+ choices: [ "yes", "no" ]
+ disable_recommends:
+ version_added: "1.8"
+ description:
+ - Corresponds to the C(--no-recommends) option for I(zypper). The default (C(yes)) modifies zypper's default behavior by not installing recommended packages; set to C(no) to install them as well.
+ required: false
+ default: "yes"
+ choices: [ "yes", "no" ]
+ force:
+ version_added: "2.2"
+ description:
+ - Adds C(--force) option to I(zypper). Allows downgrading packages and changing the vendor or architecture.
+ required: false
+ default: "no"
+ choices: [ "yes", "no" ]
+ update_cache:
+ version_added: "2.2"
+ description:
+ - Run the equivalent of C(zypper refresh) before the operation.
+ required: false
+ default: "no"
+ choices: [ "yes", "no" ]
+ aliases: [ "refresh" ]
+ oldpackage:
+ version_added: "2.2"
+ description:
+ - Adds C(--oldpackage) option to I(zypper). Allows downgrading packages with fewer side effects than force. This is implied as soon as a version is specified as part of the package name.
+ required: false
+ default: "no"
+ choices: [ "yes", "no" ]
+
+# informational: requirements for nodes
+requirements:
+ - "zypper >= 1.0 # included in openSuSE >= 11.1 or SuSE Linux Enterprise Server/Desktop >= 11.0"
+ - python-xml
+ - rpm
+'''
+
+EXAMPLES = '''
+# Install "nmap"
+- zypper: name=nmap state=present
+
+# Install apache2 with recommended packages
+- zypper: name=apache2 state=present disable_recommends=no
+
+# Apply a given patch
+- zypper: name=openSUSE-2016-128 state=present type=patch
+
+# Remove the "nmap" package
+- zypper: name=nmap state=absent
+
+# Install the nginx rpm from a remote repo
+- zypper: name=http://nginx.org/packages/sles/12/x86_64/RPMS/nginx-1.8.0-1.sles12.ngx.x86_64.rpm state=present
+
+# Install local rpm file
+- zypper: name=/tmp/fancy-software.rpm state=present
+
+# Update all packages
+- zypper: name=* state=latest
+
+# Apply all available patches
+- zypper: name=* state=latest type=patch
+
+# Refresh repositories and update package "openssl"
+- zypper: name=openssl state=present update_cache=yes
+
+# Install specific version (possible comparisons: <, >, <=, >=, =)
+- zypper: name=docker>=1.10 state=installed
+'''
+
+
+def split_name_version(name):
+ """splits of the package name and desired version
+
+ example formats:
+ - docker>=1.10
+ - apache=2.4
+
+ Allowed version specifiers: <, >, <=, >=, =
+ Allowed version format: [0-9.-]*
+
+ Also allows a prefix indicating remove "-", "~" or install "+"
+ """
+
+ prefix = ''
+ if name[0] in ['-', '~', '+']:
+ prefix = name[0]
+ name = name[1:]
+
+ version_check = re.compile('^(.*?)((?:<|>|<=|>=|=)[0-9.-]*)?$')
+ try:
+ reres = version_check.match(name)
+ name, version = reres.groups()
+ return prefix, name, version
+ except AttributeError:
+ return prefix, name, None
+
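+# A minimal sketch (editor's illustration, not part of the module) of what
+# split_name_version() above returns for typical inputs:
+#
+#   split_name_version('docker>=1.10')  # -> ('', 'docker', '>=1.10')
+#   split_name_version('apache=2.4')    # -> ('', 'apache', '=2.4')
+#   split_name_version('-exim')         # -> ('-', 'exim', None)
+#   split_name_version('nmap')          # -> ('', 'nmap', None)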
+
+def get_want_state(m, names, remove=False):
+ packages_install = {}
+ packages_remove = {}
+ urls = []
+ for name in names:
+ if '://' in name or name.endswith('.rpm'):
+ urls.append(name)
+ else:
+ prefix, pname, version = split_name_version(name)
+ if prefix in ['-', '~']:
+ packages_remove[pname] = version
+ elif prefix == '+':
+ packages_install[pname] = version
+ else:
+ if remove:
+ packages_remove[pname] = version
+ else:
+ packages_install[pname] = version
+ return packages_install, packages_remove, urls
+
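+# Sketch of get_want_state() above on a mixed name list; the '+'/'-' prefixes
+# override the remove flag, and URLs/rpm paths are split out separately:
+#
+#   get_want_state(m, ['nginx', '-exim', 'docker>=1.10'])
+#   # -> ({'nginx': None, 'docker': '>=1.10'}, {'exim': None}, [])
+#   get_want_state(m, ['http://example.com/pkg.rpm'])
+#   # -> ({}, {}, ['http://example.com/pkg.rpm'])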
+
+def get_installed_state(m, packages):
+ "get installed state of packages"
+
+ cmd = get_cmd(m, 'search')
+ cmd.extend(['--match-exact', '--details', '--installed-only'])
+ cmd.extend(packages)
+ return parse_zypper_xml(m, cmd, fail_not_found=False)[0]
+
+
+def parse_zypper_xml(m, cmd, fail_not_found=True, packages=None):
+ rc, stdout, stderr = m.run_command(cmd, check_rc=False)
+
+ dom = parseXML(stdout)
+ if rc == 104:
+ # exit code 104 is ZYPPER_EXIT_INF_CAP_NOT_FOUND (no packages found)
+ if fail_not_found:
+ errmsg = dom.getElementsByTagName('message')[-1].childNodes[0].data
+ m.fail_json(msg=errmsg, rc=rc, stdout=stdout, stderr=stderr, cmd=cmd)
+ else:
+ return {}, rc, stdout, stderr
+ elif rc in [0, 106, 103]:
+ # zypper exit codes
+ # 0: success
+ # 106: signature verification failed
+ # 103: zypper was upgraded, run same command again
+ # remember whether this is the first run so the rc=103 retry below
+ # does not hit an undefined variable on the recursive call
+ firstrun = packages is None
+ if firstrun:
+ packages = {}
+ solvable_list = dom.getElementsByTagName('solvable')
+ for solvable in solvable_list:
+ name = solvable.getAttribute('name')
+ packages[name] = {}
+ packages[name]['version'] = solvable.getAttribute('edition')
+ packages[name]['oldversion'] = solvable.getAttribute('edition-old')
+ status = solvable.getAttribute('status')
+ packages[name]['installed'] = status == "installed"
+ packages[name]['group'] = solvable.parentNode.nodeName
+ if rc == 103 and firstrun:
+ # if this was the first run and it failed with 103
+ # run zypper again with the same command to complete update
+ return parse_zypper_xml(m, cmd, fail_not_found=fail_not_found, packages=packages)
+
+ return packages, rc, stdout, stderr
+ m.fail_json(msg='Zypper run command failed with return code %s.' % rc, rc=rc, stdout=stdout, stderr=stderr, cmd=cmd)
+
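+# For reference, a sketch of the --xmlout fragment parse_zypper_xml() above
+# consumes (structure inferred from the attribute names used; actual output
+# varies by zypper version):
+#
+#   <stream>
+#     <install-summary>
+#       <to-upgrade>
+#         <solvable type="package" name="openssl"
+#                   edition="1.0.2j-1.1" edition-old="1.0.2i-1.1"/>
+#       </to-upgrade>
+#     </install-summary>
+#   </stream>
+#
+# which parses to:
+#   {'openssl': {'version': '1.0.2j-1.1', 'oldversion': '1.0.2i-1.1',
+#                'installed': False, 'group': 'to-upgrade'}}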
+
+def get_cmd(m, subcommand):
+ "puts together the basic zypper command arguments with those passed to the module"
+ is_install = subcommand in ['install', 'update', 'patch']
+ is_refresh = subcommand == 'refresh'
+ cmd = ['/usr/bin/zypper', '--quiet', '--non-interactive', '--xmlout']
+
+ # add global options before zypper command
+ if (is_install or is_refresh) and m.params['disable_gpg_check']:
+ cmd.append('--no-gpg-checks')
+
+ cmd.append(subcommand)
+ if subcommand != 'patch' and not is_refresh:
+ cmd.extend(['--type', m.params['type']])
+ if m.check_mode and subcommand != 'search':
+ cmd.append('--dry-run')
+ if is_install:
+ cmd.append('--auto-agree-with-licenses')
+ if m.params['disable_recommends']:
+ cmd.append('--no-recommends')
+ if m.params['force']:
+ cmd.append('--force')
+ if m.params['oldpackage']:
+ cmd.append('--oldpackage')
+ return cmd
+
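+# Example command line assembled by get_cmd() above for subcommand 'install'
+# with disable_gpg_check and disable_recommends enabled and check mode off
+# (a sketch; the exact flags depend on the module parameters):
+#
+#   /usr/bin/zypper --quiet --non-interactive --xmlout --no-gpg-checks \
+#       install --type package --auto-agree-with-licenses --no-recommends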
+
+def set_diff(m, retvals, result):
+ # TODO: if there is only one package, set before/after to version numbers
+ packages = {'installed': [], 'removed': [], 'upgraded': []}
+ for p in result:
+ group = result[p]['group']
+ if group == 'to-upgrade':
+ versions = ' (' + result[p]['oldversion'] + ' => ' + result[p]['version'] + ')'
+ packages['upgraded'].append(p + versions)
+ elif group == 'to-install':
+ packages['installed'].append(p)
+ elif group == 'to-remove':
+ packages['removed'].append(p)
+
+ output = ''
+ for state in packages:
+ if packages[state]:
+ output += state + ': ' + ', '.join(packages[state]) + '\n'
+ if 'diff' not in retvals:
+ retvals['diff'] = {}
+ if 'prepared' not in retvals['diff']:
+ retvals['diff']['prepared'] = output
+ else:
+ retvals['diff']['prepared'] += '\n' + output
+
+
+def package_present(m, name, want_latest):
+ "install and update (if want_latest) the packages in name_install, while removing the packages in name_remove"
+ retvals = {'rc': 0, 'stdout': '', 'stderr': ''}
+ name_install, name_remove, urls = get_want_state(m, name)
+
+ # if a version string is given, pass it to zypper
+ install_version = [p+name_install[p] for p in name_install if name_install[p]]
+ remove_version = [p+name_remove[p] for p in name_remove if name_remove[p]]
+
+ # add oldpackage flag when a version is given to allow downgrades
+ if install_version or remove_version:
+ m.params['oldpackage'] = True
+
+ if not want_latest:
+ # for state=present: filter out already installed packages
+ install_and_remove = name_install.copy()
+ install_and_remove.update(name_remove)
+ prerun_state = get_installed_state(m, install_and_remove)
+ # generate lists of packages to install or remove
+ name_install = [p for p in name_install if p not in prerun_state]
+ name_remove = [p for p in name_remove if p in prerun_state]
+ if not any((name_install, name_remove, urls, install_version, remove_version)):
+ # nothing to install/remove and nothing to update
+ return None, retvals
+
+ # zypper install also updates packages
+ cmd = get_cmd(m, 'install')
+ cmd.append('--')
+ cmd.extend(urls)
+
+ # pass packages with version information
+ cmd.extend(install_version)
+ cmd.extend(['-%s' % p for p in remove_version])
+
+ # allow for + or - prefixes in install/remove lists
+ # do this in one zypper run to allow for dependency-resolution
+ # for example "-exim postfix" runs without removing packages depending on mailserver
+ cmd.extend(name_install)
+ cmd.extend(['-%s' % p for p in name_remove])
+
+ retvals['cmd'] = cmd
+ result, retvals['rc'], retvals['stdout'], retvals['stderr'] = parse_zypper_xml(m, cmd)
+
+ return result, retvals
+
+
+def package_update_all(m):
+ "run update or patch on all available packages"
+
+ retvals = {'rc': 0, 'stdout': '', 'stderr': ''}
+ if m.params['type'] == 'patch':
+ cmdname = 'patch'
+ else:
+ cmdname = 'update'
+
+ cmd = get_cmd(m, cmdname)
+ retvals['cmd'] = cmd
+ result, retvals['rc'], retvals['stdout'], retvals['stderr'] = parse_zypper_xml(m, cmd)
+ return result, retvals
+
+
+def package_absent(m, name):
+ "remove the packages in name"
+ retvals = {'rc': 0, 'stdout': '', 'stderr': ''}
+ # Get package state
+ name_install, name_remove, urls = get_want_state(m, name, remove=True)
+ if name_install:
+ m.fail_json(msg="Cannot combine '+' prefix with state=remove/absent.")
+ if urls:
+ m.fail_json(msg="Cannot remove via URL.")
+ if m.params['type'] == 'patch':
+ m.fail_json(msg="Cannot remove patches.")
+ prerun_state = get_installed_state(m, name_remove)
+ remove_version = [p+name_remove[p] for p in name_remove if name_remove[p]]
+ name_remove = [p for p in name_remove if p in prerun_state]
+ if not name_remove and not remove_version:
+ return None, retvals
+
+ cmd = get_cmd(m, 'remove')
+ cmd.extend(name_remove)
+ cmd.extend(remove_version)
+
+ retvals['cmd'] = cmd
+ result, retvals['rc'], retvals['stdout'], retvals['stderr'] = parse_zypper_xml(m, cmd)
+ return result, retvals
+
+
+def repo_refresh(m):
+ "update the repositories"
+ retvals = {'rc': 0, 'stdout': '', 'stderr': ''}
+
+ cmd = get_cmd(m, 'refresh')
+
+ retvals['cmd'] = cmd
+ result, retvals['rc'], retvals['stdout'], retvals['stderr'] = parse_zypper_xml(m, cmd)
+
+ return retvals
+
+# ===========================================
+# Main control flow
+
+def main():
+ module = AnsibleModule(
+ argument_spec = dict(
+ name = dict(required=True, aliases=['pkg'], type='list'),
+ state = dict(required=False, default='present', choices=['absent', 'installed', 'latest', 'present', 'removed']),
+ type = dict(required=False, default='package', choices=['package', 'patch', 'pattern', 'product', 'srcpackage', 'application']),
+ disable_gpg_check = dict(required=False, default='no', type='bool'),
+ disable_recommends = dict(required=False, default='yes', type='bool'),
+ force = dict(required=False, default='no', type='bool'),
+ update_cache = dict(required=False, aliases=['refresh'], default='no', type='bool'),
+ oldpackage = dict(required=False, default='no', type='bool'),
+ ),
+ supports_check_mode = True
+ )
+
+ name = module.params['name']
+ state = module.params['state']
+ update_cache = module.params['update_cache']
+
+ # remove empty strings from package list
+ name = [n for n in name if n]
+
+ # Refresh repositories
+ if update_cache:
+ retvals = repo_refresh(module)
+
+ if retvals['rc'] != 0:
+ module.fail_json(msg="Zypper refresh run failed.", **retvals)
+
+ # Perform requested action
+ if name == ['*'] and state == 'latest':
+ packages_changed, retvals = package_update_all(module)
+ else:
+ if state in ['absent', 'removed']:
+ packages_changed, retvals = package_absent(module, name)
+ elif state in ['installed', 'present', 'latest']:
+ packages_changed, retvals = package_present(module, name, state == 'latest')
+
+ retvals['changed'] = retvals['rc'] == 0 and bool(packages_changed)
+
+ if module._diff and packages_changed:
+ set_diff(module, retvals, packages_changed)
+
+ if retvals['rc'] != 0:
+ module.fail_json(msg="Zypper run failed.", **retvals)
+
+ if not retvals['changed']:
+ del retvals['stdout']
+ del retvals['stderr']
+
+ module.exit_json(name=name, state=state, update_cache=update_cache, **retvals)
+
+# import module snippets
+from ansible.module_utils.basic import AnsibleModule
+if __name__ == "__main__":
+ main()
diff --git a/lib/ansible/modules/extras/packaging/os/zypper_repository.py b/lib/ansible/modules/extras/packaging/os/zypper_repository.py
new file mode 100644
index 0000000000..5a06e6f9de
--- /dev/null
+++ b/lib/ansible/modules/extras/packaging/os/zypper_repository.py
@@ -0,0 +1,387 @@
+#!/usr/bin/python
+# encoding: utf-8
+
+# (c) 2013, Matthias Vogelgesang <matthias.vogelgesang@gmail.com>
+# (c) 2014, Justin Lecher <jlec@gentoo.org>
+#
+# This file is part of Ansible
+#
+# Ansible is free software: you can redistribute it and/or modify
+# it under the terms of the GNU General Public License as published by
+# the Free Software Foundation, either version 3 of the License, or
+# (at your option) any later version.
+#
+# Ansible is distributed in the hope that it will be useful,
+# but WITHOUT ANY WARRANTY; without even the implied warranty of
+# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+# GNU General Public License for more details.
+#
+# You should have received a copy of the GNU General Public License
+# along with Ansible. If not, see <http://www.gnu.org/licenses/>.
+
+
+DOCUMENTATION = '''
+---
+module: zypper_repository
+author: "Matthias Vogelgesang (@matze)"
+version_added: "1.4"
+short_description: Add and remove Zypper repositories
+description:
+ - Add or remove Zypper repositories on SUSE and openSUSE
+options:
+ name:
+ required: false
+ default: none
+ description:
+ - A name for the repository. Not required when adding repofiles.
+ repo:
+ required: false
+ default: none
+ description:
+ - URI of the repository or .repo file. Required when state=present.
+ state:
+ required: false
+ choices: [ "absent", "present" ]
+ default: "present"
+ description:
+ - Whether the repository should be present or absent.
+ description:
+ required: false
+ default: none
+ description:
+ - A description of the repository
+ disable_gpg_check:
+ description:
+ - Whether to disable GPG signature checking of
+ all packages. Has an effect only if state is
+ I(present).
+ - Needs zypper version >= 1.6.2.
+ required: false
+ default: "no"
+ choices: [ "yes", "no" ]
+ autorefresh:
+ description:
+ - Enable autorefresh of the repository.
+ required: false
+ default: "yes"
+ choices: [ "yes", "no" ]
+ aliases: [ "refresh" ]
+ priority:
+ description:
+ - Set priority of repository. Packages will always be installed
+ from the repository with the smallest priority number.
+ - Needs zypper version >= 1.12.25.
+ required: false
+ version_added: "2.1"
+ overwrite_multiple:
+ description:
+ - Overwrite multiple repository entries, if repositories with both name and
+ URL already exist.
+ required: false
+ default: "no"
+ choices: [ "yes", "no" ]
+ version_added: "2.1"
+ auto_import_keys:
+ description:
+ - Automatically import the gpg signing key of the new or changed repository.
+ - Has an effect only if state is I(present). Has no effect on existing (unchanged) repositories or in combination with I(absent).
+ - Implies runrefresh.
+ required: false
+ default: "no"
+ choices: ["yes", "no"]
+ version_added: "2.2"
+ runrefresh:
+ description:
+ - Refresh the package list of the given repository.
+ - Can be used with repo=* to refresh all repositories.
+ required: false
+ default: "no"
+ choices: ["yes", "no"]
+ version_added: "2.2"
+
+
+
+requirements:
+ - "zypper >= 1.0 # included in openSuSE >= 11.1 or SuSE Linux Enterprise Server/Desktop >= 11.0"
+ - python-xml
+'''
+
+EXAMPLES = '''
+# Add NVIDIA repository for graphics drivers
+- zypper_repository: name=nvidia-repo repo='ftp://download.nvidia.com/opensuse/12.2' state=present
+
+# Remove NVIDIA repository
+- zypper_repository: name=nvidia-repo repo='ftp://download.nvidia.com/opensuse/12.2' state=absent
+
+# Add python development repository
+- zypper_repository: repo=http://download.opensuse.org/repositories/devel:/languages:/python/SLE_11_SP3/devel:languages:python.repo
+
+# Refresh all repos
+- zypper_repository: repo=* runrefresh=yes
+
+# Add a repo and import its GPG key
+- zypper_repository: repo=http://download.opensuse.org/repositories/systemsmanagement/openSUSE_Leap_42.1/ auto_import_keys=yes
+
+# Force refresh of a repository
+- zypper_repository: repo=http://my_internal_ci_repo/repo name=my_ci_repo state=present runrefresh=yes
+'''
+
+REPO_OPTS = ['alias', 'name', 'priority', 'enabled', 'autorefresh', 'gpgcheck']
+
+from distutils.version import LooseVersion
+
+def _get_cmd(*args):
+ """Combines the non-interactive zypper command with arguments/subcommands"""
+ cmd = ['/usr/bin/zypper', '--quiet', '--non-interactive']
+ cmd.extend(args)
+
+ return cmd
+
+
+def _parse_repos(module):
+ """parses the output of zypper --xmlout repos and return a parse repo dictionary"""
+ cmd = _get_cmd('--xmlout', 'repos')
+
+ from xml.dom.minidom import parseString as parseXML
+ rc, stdout, stderr = module.run_command(cmd, check_rc=False)
+ if rc == 0:
+ repos = []
+ dom = parseXML(stdout)
+ repo_list = dom.getElementsByTagName('repo')
+ for repo in repo_list:
+ opts = {}
+ for o in REPO_OPTS:
+ opts[o] = repo.getAttribute(o)
+ opts['url'] = repo.getElementsByTagName('url')[0].firstChild.data
+ # A repo can be uniquely identified by an alias + url
+ repos.append(opts)
+ return repos
+ # exit code 6 is ZYPPER_EXIT_NO_REPOS (no repositories defined)
+ elif rc == 6:
+ return []
+ else:
+ module.fail_json(msg='Failed to execute "%s"' % " ".join(cmd), rc=rc, stdout=stdout, stderr=stderr)
+
+def _repo_changes(realrepo, repocmp):
+ "Check whether the 2 given repos have different settings."
+ for k in repocmp:
+ if repocmp[k] and k not in realrepo:
+ return True
+
+ for k, v in realrepo.items():
+ if k in repocmp and repocmp[k]:
+ valold = str(repocmp[k] or "")
+ valnew = v or ""
+ if k == "url":
+ valold, valnew = valold.rstrip("/"), valnew.rstrip("/")
+ if valold != valnew:
+ return True
+ return False
+
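+# Worked example for _repo_changes() above (a sketch): unset desired values
+# are ignored and trailing URL slashes are normalized, so this pair compares
+# as unchanged:
+#
+#   real = {'alias': 'nvidia-repo', 'name': 'nvidia-repo', 'enabled': '1',
+#           'autorefresh': '1', 'gpgcheck': '1',
+#           'url': 'ftp://download.nvidia.com/opensuse/12.2/'}
+#   want = dict(real, url='ftp://download.nvidia.com/opensuse/12.2',
+#               priority=None)
+#   _repo_changes(real, want)  # -> False (only the trailing '/' differs)
+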
+def repo_exists(module, repodata, overwrite_multiple):
+ """Check whether the repository already exists.
+
+ returns (exists, mod, old_repos)
+ exists: whether a matching (name, URL) repo exists
+ mod: whether there are changes compared to the existing repo
+ old_repos: list of matching repos
+ """
+ existing_repos = _parse_repos(module)
+
+ # look for repos that have matching alias or url to the one searched
+ repos = []
+ for kw in ['alias', 'url']:
+ name = repodata[kw]
+ for oldr in existing_repos:
+ if repodata[kw] == oldr[kw] and oldr not in repos:
+ repos.append(oldr)
+
+ if len(repos) == 0:
+ # Repo does not exist yet
+ return (False, False, None)
+ elif len(repos) == 1:
+ # Found an existing repo, look for changes
+ has_changes = _repo_changes(repos[0], repodata)
+ return (True, has_changes, repos)
+ elif len(repos) >= 2:
+ if overwrite_multiple:
+ # Found two repos and want to overwrite_multiple
+ return (True, True, repos)
+ else:
+ errmsg = 'More than one repo matched "%s": "%s".' % (name, repos)
+ errmsg += ' Use overwrite_multiple to allow more than one repo to be overwritten'
+ module.fail_json(msg=errmsg)
+
+
+def addmodify_repo(module, repodata, old_repos, zypper_version, warnings):
+ "Adds the repo, removes old repos before, that would conflict."
+ repo = repodata['url']
+ cmd = _get_cmd('addrepo', '--check')
+ if repodata['name']:
+ cmd.extend(['--name', repodata['name']])
+
+ # priority on addrepo available since 1.12.25
+ # https://github.com/openSUSE/zypper/blob/b9b3cb6db76c47dc4c47e26f6a4d2d4a0d12b06d/package/zypper.changes#L327-L336
+ if repodata['priority']:
+ if zypper_version >= LooseVersion('1.12.25'):
+ cmd.extend(['--priority', str(repodata['priority'])])
+ else:
+ warnings.append("Setting priority only available for zypper >= 1.12.25. Ignoring priority argument.")
+
+ if repodata['enabled'] == '0':
+ cmd.append('--disable')
+
+ # gpgcheck available since 1.6.2
+ # https://github.com/openSUSE/zypper/blob/b9b3cb6db76c47dc4c47e26f6a4d2d4a0d12b06d/package/zypper.changes#L2446-L2449
+ # the default changed in the past, so don't assume a default here and show warning for old zypper versions
+ if zypper_version >= LooseVersion('1.6.2'):
+ if repodata['gpgcheck'] == '1':
+ cmd.append('--gpgcheck')
+ else:
+ cmd.append('--no-gpgcheck')
+ else:
+ warnings.append("Enabling/disabling gpgcheck only available for zypper >= 1.6.2. Using zypper default value.")
+
+ if repodata['autorefresh'] == '1':
+ cmd.append('--refresh')
+
+ cmd.append(repo)
+
+ if not repo.endswith('.repo'):
+ cmd.append(repodata['alias'])
+
+ if old_repos is not None:
+ for oldrepo in old_repos:
+ remove_repo(module, oldrepo['url'])
+
+ rc, stdout, stderr = module.run_command(cmd, check_rc=False)
+ return rc, stdout, stderr
+
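+# Example addrepo invocation assembled above (a sketch; assumes a zypper with
+# priority support, gpgcheck and autorefresh desired, a hypothetical priority
+# of 50, and the repo/alias from the EXAMPLES section):
+#
+#   /usr/bin/zypper --quiet --non-interactive addrepo --check \
+#       --priority 50 --gpgcheck --refresh \
+#       http://my_internal_ci_repo/repo my_ci_repo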
+
+def remove_repo(module, repo):
+ "Removes the repo."
+ cmd = _get_cmd('removerepo', repo)
+
+ rc, stdout, stderr = module.run_command(cmd, check_rc=True)
+ return rc, stdout, stderr
+
+
+def get_zypper_version(module):
+ rc, stdout, stderr = module.run_command(['/usr/bin/zypper', '--version'])
+ if rc != 0 or not stdout.startswith('zypper '):
+ return LooseVersion('1.0')
+ return LooseVersion(stdout.split()[1])
+
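+# LooseVersion compares numeric components, which is what the version guards
+# above rely on (sketch):
+#
+#   LooseVersion('1.6.2') >= LooseVersion('1.12.25')   # False
+#   LooseVersion('1.13.0') >= LooseVersion('1.12.25')  # True
+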
+def runrefreshrepo(module, auto_import_keys=False, shortname=None):
+ "Forces zypper to refresh repo metadata."
+ cmd = _get_cmd('refresh', '--force')
+ if auto_import_keys:
+ cmd.append('--gpg-auto-import-keys')
+ if shortname is not None:
+ cmd.extend(['-r', shortname])
+
+ rc, stdout, stderr = module.run_command(cmd, check_rc=True)
+ return rc, stdout, stderr
+
+
+def main():
+ module = AnsibleModule(
+ argument_spec=dict(
+ name=dict(required=False),
+ repo=dict(required=False),
+ state=dict(choices=['present', 'absent'], default='present'),
+ runrefresh=dict(required=False, default='no', type='bool'),
+ description=dict(required=False),
+ disable_gpg_check = dict(required=False, default=False, type='bool'),
+ autorefresh = dict(required=False, default=True, type='bool', aliases=['refresh']),
+ priority = dict(required=False, type='int'),
+ enabled = dict(required=False, default=True, type='bool'),
+ overwrite_multiple = dict(required=False, default=False, type='bool'),
+ auto_import_keys = dict(required=False, default=False, type='bool'),
+ ),
+ supports_check_mode=False,
+ required_one_of = [['state','runrefresh']],
+ )
+
+ repo = module.params['repo']
+ alias = module.params['name']
+ state = module.params['state']
+ overwrite_multiple = module.params['overwrite_multiple']
+ auto_import_keys = module.params['auto_import_keys']
+ runrefresh = module.params['runrefresh']
+
+ zypper_version = get_zypper_version(module)
+ warnings = [] # collect warning messages for final output
+
+ repodata = {
+ 'url': repo,
+ 'alias': alias,
+ 'name': module.params['description'],
+ 'priority': module.params['priority'],
+ }
+ # rewrite bools in the language that zypper lr -x provides for easier comparison
+ if module.params['enabled']:
+ repodata['enabled'] = '1'
+ else:
+ repodata['enabled'] = '0'
+ if module.params['disable_gpg_check']:
+ repodata['gpgcheck'] = '0'
+ else:
+ repodata['gpgcheck'] = '1'
+ if module.params['autorefresh']:
+ repodata['autorefresh'] = '1'
+ else:
+ repodata['autorefresh'] = '0'
+
+ def exit_unchanged():
+ module.exit_json(changed=False, repodata=repodata, state=state)
+
+ # Check run-time module parameters
+ if repo == '*' or alias == '*':
+ if runrefresh:
+ runrefreshrepo(module, auto_import_keys)
+ module.exit_json(changed=False, runrefresh=True)
+ else:
+ module.fail_json(msg='repo=* can only be used with the runrefresh option.')
+
+ if state == 'present' and not repo:
+ module.fail_json(msg='Module option state=present requires repo')
+ if state == 'absent' and not repo and not alias:
+ module.fail_json(msg='Alias or repo parameter required when state=absent')
+
+ if repo and repo.endswith('.repo'):
+ if alias:
+ module.fail_json(msg='Incompatible option: \'name\'. Do not use name when adding .repo files')
+ else:
+ if not alias and state == "present":
+ module.fail_json(msg='Name required when adding non-repo files.')
+
+ exists, mod, old_repos = repo_exists(module, repodata, overwrite_multiple)
+
+ if repo:
+ shortname = repo
+ else:
+ shortname = alias
+
+ if state == 'present':
+ if exists and not mod:
+ if runrefresh:
+ runrefreshrepo(module, auto_import_keys, shortname)
+ exit_unchanged()
+ rc, stdout, stderr = addmodify_repo(module, repodata, old_repos, zypper_version, warnings)
+ if rc == 0 and (runrefresh or auto_import_keys):
+ runrefreshrepo(module, auto_import_keys, shortname)
+ elif state == 'absent':
+ if not exists:
+ exit_unchanged()
+ rc, stdout, stderr = remove_repo(module, shortname)
+
+ if rc == 0:
+ module.exit_json(changed=True, repodata=repodata, state=state, warnings=warnings)
+ else:
+ module.fail_json(msg="Zypper failed with rc %s" % rc, rc=rc, stdout=stdout, stderr=stderr, repodata=repodata, state=state, warnings=warnings)
+
+# import module snippets
+from ansible.module_utils.basic import *
+
+main()
diff --git a/lib/ansible/modules/extras/remote_management/__init__.py b/lib/ansible/modules/extras/remote_management/__init__.py
new file mode 100644
index 0000000000..e69de29bb2
--- /dev/null
+++ b/lib/ansible/modules/extras/remote_management/__init__.py
diff --git a/lib/ansible/modules/extras/remote_management/ipmi/__init__.py b/lib/ansible/modules/extras/remote_management/ipmi/__init__.py
new file mode 100644
index 0000000000..e69de29bb2
--- /dev/null
+++ b/lib/ansible/modules/extras/remote_management/ipmi/__init__.py
diff --git a/lib/ansible/modules/extras/remote_management/ipmi/ipmi_boot.py b/lib/ansible/modules/extras/remote_management/ipmi/ipmi_boot.py
new file mode 100644
index 0000000000..e8f13d8bd7
--- /dev/null
+++ b/lib/ansible/modules/extras/remote_management/ipmi/ipmi_boot.py
@@ -0,0 +1,186 @@
+#!/usr/bin/python
+# -*- coding: utf-8 -*-
+#
+# This file is part of Ansible
+#
+# Ansible is free software: you can redistribute it and/or modify
+# it under the terms of the GNU General Public License as published by
+# the Free Software Foundation, either version 3 of the License, or
+# (at your option) any later version.
+#
+# Ansible is distributed in the hope that it will be useful,
+# but WITHOUT ANY WARRANTY; without even the implied warranty of
+# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+# GNU General Public License for more details.
+#
+# You should have received a copy of the GNU General Public License
+# along with Ansible. If not, see <http://www.gnu.org/licenses/>.
+
+try:
+ from pyghmi.ipmi import command
+except ImportError:
+ command = None
+
+from ansible.module_utils.basic import *
+
+
+DOCUMENTATION = '''
+---
+module: ipmi_boot
+short_description: Management of order of boot devices
+description:
+ - Use this module to manage the boot device to be used on next boot.
+version_added: "2.2"
+options:
+ name:
+ description:
+ - Hostname or ip address of the BMC.
+ required: true
+ port:
+ description:
+ - Remote RMCP port.
+ required: false
+ type: int
+ default: 623
+ user:
+ description:
+ - Username to use to connect to the BMC.
+ required: true
+ password:
+ description:
+ - Password to connect to the BMC.
+ required: true
+ default: null
+ bootdev:
+ description:
+ - Set boot device to use on next reboot
+ required: true
+ choices:
+ - network -- Request network boot
+ - hd -- Boot from hard drive
+ - safe -- Boot from hard drive, requesting 'safe mode'
+ - optical -- boot from CD/DVD/BD drive
+ - setup -- Boot into setup utility
+ - default -- remove any IPMI directed boot device request
+ state:
+ description:
+ - Whether to ensure the given boot device is or is not the one requested for next boot.
+ default: present
+ choices:
+ - present -- Ensure the given boot device is requested for next boot
+ - absent -- Ensure the given boot device is not the requested one
+ persistent:
+ description:
+ - If set, ask that system firmware uses this device beyond next boot.
+ Be aware many systems do not honor this.
+ required: false
+ type: boolean
+ default: false
+ uefiboot:
+ description:
+ - If set, request UEFI boot explicitly.
+ Strictly speaking, the spec suggests that if not set, the system should BIOS boot and offers no "don't care" option.
+ In practice, this flag not being set does not preclude UEFI boot on any system I've encountered.
+ required: false
+ type: boolean
+ default: false
+requirements:
+ - "python >= 2.6"
+ - pyghmi
+author: "Bulat Gaifullin (gaifullinbf@gmail.com)"
+'''
+
+RETURN = '''
+bootdev:
+ description: The boot device name which will be used beyond next boot.
+ returned: success
+ type: string
+ sample: default
+persistent:
+ description: If True, system firmware will use this device beyond next boot.
+ returned: success
+ type: bool
+ sample: false
+uefimode:
+ description: If True, system firmware will use UEFI boot explicitly beyond next boot.
+ returned: success
+ type: bool
+ sample: false
+'''
+
+EXAMPLES = '''
+# Ensure bootdevice is HD.
+- ipmi_boot: name="test.testdomain.com" user="admin" password="password" bootdev="hd"
+# Ensure bootdevice is not Network
+- ipmi_boot: name="test.testdomain.com" user="admin" password="password" bootdev="network" state=absent
+'''
+
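+# Sketch of the pyghmi state main() below compares against (key names taken
+# from the code; the exact payload depends on the BMC):
+#
+#   ipmi_cmd.get_bootdev()
+#   # -> {'bootdev': 'default', 'persistent': False, 'uefimode': False}
+#
+# state=present is a no-op when this already equals the requested settings.
+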
+# ==================================================
+
+
+def main():
+ module = AnsibleModule(
+ argument_spec=dict(
+ name=dict(required=True),
+ port=dict(default=623, type='int'),
+ user=dict(required=True, no_log=True),
+ password=dict(required=True, no_log=True),
+ state=dict(default='present', choices=['present', 'absent']),
+ bootdev=dict(required=True, choices=['network', 'hd', 'safe', 'optical', 'setup', 'default']),
+ persistent=dict(default=False, type='bool'),
+ uefiboot=dict(default=False, type='bool')
+ ),
+ supports_check_mode=True,
+ )
+
+ if command is None:
+ module.fail_json(msg='the python pyghmi module is required')
+
+ name = module.params['name']
+ port = module.params['port']
+ user = module.params['user']
+ password = module.params['password']
+ state = module.params['state']
+ bootdev = module.params['bootdev']
+ persistent = module.params['persistent']
+ uefiboot = module.params['uefiboot']
+ request = dict()
+
+ if state == 'absent' and bootdev == 'default':
+ module.fail_json(msg="The bootdev 'default' cannot be used with state 'absent'.")
+
+ # --- run command ---
+ try:
+ ipmi_cmd = command.Command(
+ bmc=name, userid=user, password=password, port=port
+ )
+ module.debug('ipmi instantiated - name: "%s"' % name)
+ current = ipmi_cmd.get_bootdev()
+ # uefimode may not be supported by the BMC, so use the desired value as default
+ current.setdefault('uefimode', uefiboot)
+ if state == 'present' and current != dict(bootdev=bootdev, persistent=persistent, uefimode=uefiboot):
+ request = dict(bootdev=bootdev, uefiboot=uefiboot, persist=persistent)
+ elif state == 'absent' and current['bootdev'] == bootdev:
+ request = dict(bootdev='default')
+ else:
+ module.exit_json(changed=False, **current)
+
+ if module.check_mode:
+ response = dict(bootdev=request['bootdev'])
+ else:
+ response = ipmi_cmd.set_bootdev(**request)
+
+ if 'error' in response:
+ module.fail_json(msg=response['error'])
+
+ if 'persist' in request:
+ response['persistent'] = request['persist']
+ if 'uefiboot' in request:
+ response['uefimode'] = request['uefiboot']
+
+ module.exit_json(changed=True, **response)
+ except Exception as e:
+ module.fail_json(msg=str(e))
+
+if __name__ == '__main__':
+ main()
diff --git a/lib/ansible/modules/extras/remote_management/ipmi/ipmi_power.py b/lib/ansible/modules/extras/remote_management/ipmi/ipmi_power.py
new file mode 100644
index 0000000000..c6cc8df030
--- /dev/null
+++ b/lib/ansible/modules/extras/remote_management/ipmi/ipmi_power.py
@@ -0,0 +1,138 @@
+#!/usr/bin/python
+# -*- coding: utf-8 -*-
+#
+# This file is part of Ansible
+#
+# Ansible is free software: you can redistribute it and/or modify
+# it under the terms of the GNU General Public License as published by
+# the Free Software Foundation, either version 3 of the License, or
+# (at your option) any later version.
+#
+# Ansible is distributed in the hope that it will be useful,
+# but WITHOUT ANY WARRANTY; without even the implied warranty of
+# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+# GNU General Public License for more details.
+#
+# You should have received a copy of the GNU General Public License
+# along with Ansible. If not, see <http://www.gnu.org/licenses/>.
+
+try:
+ from pyghmi.ipmi import command
+except ImportError:
+ command = None
+
+from ansible.module_utils.basic import *
+
+
+DOCUMENTATION = '''
+---
+module: ipmi_power
+short_description: Power management for machine
+description:
+ - Use this module for power management
+version_added: "2.2"
+options:
+ name:
+ description:
+ - Hostname or ip address of the BMC.
+ required: true
+ port:
+ description:
+ - Remote RMCP port.
+ required: false
+ type: int
+ default: 623
+ user:
+ description:
+ - Username to use to connect to the BMC.
+ required: true
+ password:
+ description:
+ - Password to connect to the BMC.
+ required: true
+ default: null
+ state:
+ description:
+ - Whether to ensure that the machine is in the desired state.
+ required: true
+ choices:
+ - on -- Request system turn on
+ - off -- Request system turn off without waiting for OS to shutdown
+ - shutdown -- Have system request OS proper shutdown
+ - reset -- Request system reset without waiting for OS
+ - boot -- If system is off, then 'on', else 'reset'
+ timeout:
+ description:
+ - Maximum number of seconds before interrupt request.
+ required: false
+ type: int
+ default: 300
+requirements:
+ - "python >= 2.6"
+ - pyghmi
+author: "Bulat Gaifullin (gaifullinbf@gmail.com)"
+'''
+
+RETURN = '''
+powerstate:
+ description: The current power state of the machine.
+ returned: success
+ type: string
+ sample: on
+'''
+
+EXAMPLES = '''
+# Ensure machine is powered on.
+- ipmi_power: name="test.testdomain.com" user="admin" password="password" state="on"
+'''
+
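+# Sketch of the pyghmi calls used below (set_power waits up to 'timeout'
+# seconds for the transition; the payload shape is assumed):
+#
+#   ipmi_cmd.get_power()                # -> {'powerstate': 'off'}
+#   ipmi_cmd.set_power('on', wait=300)  # -> {'powerstate': 'on'}
+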
+# ==================================================
+
+
+def main():
+ module = AnsibleModule(
+ argument_spec=dict(
+ name=dict(required=True),
+ port=dict(default=623, type='int'),
+ state=dict(required=True, choices=['on', 'off', 'shutdown', 'reset', 'boot']),
+ user=dict(required=True, no_log=True),
+ password=dict(required=True, no_log=True),
+ timeout=dict(default=300, type='int'),
+ ),
+ supports_check_mode=True,
+ )
+
+ if command is None:
+ module.fail_json(msg='the python pyghmi module is required')
+
+ name = module.params['name']
+ port = module.params['port']
+ user = module.params['user']
+ password = module.params['password']
+ state = module.params['state']
+ timeout = module.params['timeout']
+
+ # --- run command ---
+ try:
+ ipmi_cmd = command.Command(
+ bmc=name, userid=user, password=password, port=port
+ )
+ module.debug('ipmi instantiated - name: "%s"' % name)
+
+ current = ipmi_cmd.get_power()
+ if current['powerstate'] != state:
+ response = {'powerstate': state} if module.check_mode else ipmi_cmd.set_power(state, wait=timeout)
+ changed = True
+ else:
+ response = current
+ changed = False
+
+ if 'error' in response:
+ module.fail_json(msg=response['error'])
+
+ module.exit_json(changed=changed, **response)
+ except Exception as e:
+ module.fail_json(msg=str(e))
+
+if __name__ == '__main__':
+ main()
diff --git a/lib/ansible/modules/extras/shippable.yml b/lib/ansible/modules/extras/shippable.yml
new file mode 100644
index 0000000000..23d731058e
--- /dev/null
+++ b/lib/ansible/modules/extras/shippable.yml
@@ -0,0 +1,65 @@
+language: python
+
+env:
+ matrix:
+ - TEST=none
+
+matrix:
+ exclude:
+ - env: TEST=none
+ include:
+ - env: TEST=integration IMAGE=ansible/ansible:centos6
+ - env: TEST=integration IMAGE=ansible/ansible:centos7 PRIVILEGED=true
+ - env: TEST=integration IMAGE=ansible/ansible:fedora-rawhide PRIVILEGED=true
+ - env: TEST=integration IMAGE=ansible/ansible:fedora23 PRIVILEGED=true
+ - env: TEST=integration IMAGE=ansible/ansible:opensuseleap PRIVILEGED=true
+ - env: TEST=integration IMAGE=ansible/ansible:ubuntu1204 PRIVILEGED=true
+ - env: TEST=integration IMAGE=ansible/ansible:ubuntu1404 PRIVILEGED=true
+ - env: TEST=integration IMAGE=ansible/ansible:ubuntu1604 PRIVILEGED=true
+
+ - env: TEST=integration IMAGE=ansible/ansible:ubuntu1604py3 PYTHON3=1 PRIVILEGED=true
+
+ - env: TEST=integration IMAGE=ansible/ansible:ubuntu1604py3 PYTHON3=1
+
+ - env: TEST=integration PLATFORM=windows VERSION=2008-SP2
+ - env: TEST=integration PLATFORM=windows VERSION=2008-R2_SP1
+ - env: TEST=integration PLATFORM=windows VERSION=2012-RTM
+ - env: TEST=integration PLATFORM=windows VERSION=2012-R2_RTM
+
+ - env: TEST=integration PLATFORM=freebsd VERSION=10.3-STABLE PRIVILEGED=true
+
+ - env: TEST=integration PLATFORM=osx VERSION=10.11
+
+ - env: TEST=sanity INSTALL_DEPS=1
+
+ - env: TEST=docs
+build:
+ pre_ci_boot:
+ options: "--privileged=false --net=bridge"
+ ci:
+ - test/utils/shippable/ci.sh
+
+integrations:
+ notifications:
+ - integrationName: email
+ type: email
+ on_success: never
+ on_failure: never
+ on_start: never
+ on_pull_request: never
+ - integrationName: irc
+ type: irc
+ recipients:
+ - "chat.freenode.net#ansible-notices"
+ on_success: change
+ on_failure: always
+ on_start: never
+ on_pull_request: always
+ - integrationName: slack
+ type: slack
+ recipients:
+ - "#shippable"
+ on_success: change
+ on_failure: always
+ on_start: never
+ on_pull_request: never
diff --git a/lib/ansible/modules/extras/source_control/__init__.py b/lib/ansible/modules/extras/source_control/__init__.py
new file mode 100644
index 0000000000..e69de29bb2
--- /dev/null
+++ b/lib/ansible/modules/extras/source_control/__init__.py
diff --git a/lib/ansible/modules/extras/source_control/bzr.py b/lib/ansible/modules/extras/source_control/bzr.py
new file mode 100644
index 0000000000..e6cfe9f1ea
--- /dev/null
+++ b/lib/ansible/modules/extras/source_control/bzr.py
@@ -0,0 +1,199 @@
+#!/usr/bin/python
+# -*- coding: utf-8 -*-
+
+# (c) 2013, André Paramés <git@andreparames.com>
+# Based on the Git module by Michael DeHaan <michael.dehaan@gmail.com>
+#
+# This file is part of Ansible
+#
+# Ansible is free software: you can redistribute it and/or modify
+# it under the terms of the GNU General Public License as published by
+# the Free Software Foundation, either version 3 of the License, or
+# (at your option) any later version.
+#
+# Ansible is distributed in the hope that it will be useful,
+# but WITHOUT ANY WARRANTY; without even the implied warranty of
+# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+# GNU General Public License for more details.
+#
+# You should have received a copy of the GNU General Public License
+# along with Ansible. If not, see <http://www.gnu.org/licenses/>.
+
+DOCUMENTATION = u'''
+---
+module: bzr
+author: "André Paramés (@andreparames)"
+version_added: "1.1"
+short_description: Deploy software (or files) from bzr branches
+description:
+ - Manage I(bzr) branches to deploy files or software.
+options:
+ name:
+ required: true
+ aliases: [ 'parent' ]
+ description:
+ - SSH or HTTP protocol address of the parent branch.
+ dest:
+ required: true
+ description:
+ - Absolute path of where the branch should be cloned to.
+ version:
+ required: false
+ default: "head"
+ description:
+ - What version of the branch to clone. This can be the
+ bzr revno or revid.
+ force:
+ required: false
+ default: "no"
+ choices: [ 'yes', 'no' ]
+ description:
+ - If C(yes), any modified files in the working
+ tree will be discarded. Before 1.9 the default
+ value was "yes".
+ executable:
+ required: false
+ default: null
+ version_added: "1.4"
+ description:
+ - Path to bzr executable to use. If not supplied,
+ the normal mechanism for resolving binary paths will be used.
+'''
+
+EXAMPLES = '''
+# Example bzr checkout from Ansible Playbooks
+- bzr: name=bzr+ssh://foosball.example.org/path/to/branch dest=/srv/checkout version=22
+'''
+
+import re
+
+
+class Bzr(object):
+ def __init__(self, module, parent, dest, version, bzr_path):
+ self.module = module
+ self.parent = parent
+ self.dest = dest
+ self.version = version
+ self.bzr_path = bzr_path
+
+ def _command(self, args_list, cwd=None, **kwargs):
+ (rc, out, err) = self.module.run_command([self.bzr_path] + args_list, cwd=cwd, **kwargs)
+ return (rc, out, err)
+
+ def get_version(self):
+ '''returns the current revno of the bzr branch'''
+
+ cmd = "%s revno" % self.bzr_path
+ rc, stdout, stderr = self.module.run_command(cmd, cwd=self.dest)
+ revno = stdout.strip()
+ return revno
+
+ def clone(self):
+ '''makes a new bzr branch if it does not already exist'''
+ dest_dirname = os.path.dirname(self.dest)
+ try:
+ os.makedirs(dest_dirname)
+ except OSError:
+ # the destination's parent directory already exists
+ pass
+ if self.version.lower() != 'head':
+ args_list = ["branch", "-r", self.version, self.parent, self.dest]
+ else:
+ args_list = ["branch", self.parent, self.dest]
+ return self._command(args_list, check_rc=True, cwd=dest_dirname)
+
+ def has_local_mods(self):
+
+ cmd = "%s status -S" % self.bzr_path
+ rc, stdout, stderr = self.module.run_command(cmd, cwd=self.dest)
+ lines = stdout.splitlines()
+
+ lines = [c for c in lines if not re.search('^\\?\\?.*$', c)]
+ return len(lines) > 0
+
+ def reset(self, force):
+ '''
+ Resets the index and working tree to head.
+ Discards any changes to tracked files in the working
+ tree since that commit.
+ '''
+ if not force and self.has_local_mods():
+ self.module.fail_json(msg="Local modifications exist in branch (force=no).")
+ return self._command(["revert"], check_rc=True, cwd=self.dest)
+
+ def fetch(self):
+ '''updates branch from remote sources'''
+ if self.version.lower() != 'head':
+ (rc, out, err) = self._command(["pull", "-r", self.version], cwd=self.dest)
+ else:
+ (rc, out, err) = self._command(["pull"], cwd=self.dest)
+ if rc != 0:
+ self.module.fail_json(msg="Failed to pull")
+ return (rc, out, err)
+
+ def switch_version(self):
+ '''once pulled, switch to a particular revno or revid'''
+ if self.version.lower() != 'head':
+ args_list = ["revert", "-r", self.version]
+ else:
+ args_list = ["revert"]
+ return self._command(args_list, check_rc=True, cwd=self.dest)
+
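+# Commands issued by the helpers above for a pinned revision (a sketch,
+# reusing the hypothetical values from the EXAMPLES section):
+#
+#   fresh clone:       bzr branch -r 22 bzr+ssh://foosball.example.org/path/to/branch /srv/checkout
+#   existing checkout: bzr revert; bzr pull -r 22; bzr revert -r 22
+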
+# ===========================================
+
+def main():
+ module = AnsibleModule(
+ argument_spec = dict(
+ dest=dict(required=True, type='path'),
+ name=dict(required=True, aliases=['parent']),
+ version=dict(default='head'),
+ force=dict(default='no', type='bool'),
+ executable=dict(default=None),
+ )
+ )
+
+ dest = module.params['dest']
+ parent = module.params['name']
+ version = module.params['version']
+ force = module.params['force']
+ bzr_path = module.params['executable'] or module.get_bin_path('bzr', True)
+
+ bzrconfig = os.path.join(dest, '.bzr', 'branch', 'branch.conf')
+
+ rc, out, err, status = (0, None, None, None)
+
+ bzr = Bzr(module, parent, dest, version, bzr_path)
+
+ # if there is no bzr configuration, do a branch operation
+ # else pull and switch the version
+ before = None
+ local_mods = False
+ if not os.path.exists(bzrconfig):
+ (rc, out, err) = bzr.clone()
+
+ else:
+ # else do a pull
+ local_mods = bzr.has_local_mods()
+ before = bzr.get_version()
+ (rc, out, err) = bzr.reset(force)
+ if rc != 0:
+ module.fail_json(msg=err)
+ (rc, out, err) = bzr.fetch()
+ if rc != 0:
+ module.fail_json(msg=err)
+
+ # switch to version specified regardless of whether
+ # we cloned or pulled
+ (rc, out, err) = bzr.switch_version()
+
+ # determine if we changed anything
+ after = bzr.get_version()
+ changed = False
+
+ if before != after or local_mods:
+ changed = True
+
+ module.exit_json(changed=changed, before=before, after=after)
+
+# import module snippets
+from ansible.module_utils.basic import *
+main()
diff --git a/lib/ansible/modules/extras/source_control/git_config.py b/lib/ansible/modules/extras/source_control/git_config.py
new file mode 100644
index 0000000000..1e229b7bc1
--- /dev/null
+++ b/lib/ansible/modules/extras/source_control/git_config.py
@@ -0,0 +1,219 @@
+#!/usr/bin/python
+# -*- coding: utf-8 -*-
+
+# (c) 2015, Marius Gedminas <marius@pov.lt>
+# (c) 2016, Matthew Gamble <git@matthewgamble.net>
+#
+# This file is part of Ansible
+#
+# Ansible is free software: you can redistribute it and/or modify
+# it under the terms of the GNU General Public License as published by
+# the Free Software Foundation, either version 3 of the License, or
+# (at your option) any later version.
+#
+# Ansible is distributed in the hope that it will be useful,
+# but WITHOUT ANY WARRANTY; without even the implied warranty of
+# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+# GNU General Public License for more details.
+#
+# You should have received a copy of the GNU General Public License
+# along with Ansible. If not, see <http://www.gnu.org/licenses/>.
+
+DOCUMENTATION = '''
+---
+module: git_config
+author:
+ - "Matthew Gamble"
+ - "Marius Gedminas"
+version_added: 2.1
+requirements: ['git']
+short_description: Read and write git configuration
+description:
+ - The M(git_config) module changes git configuration by invoking 'git config'.
+ This is needed if you don't want to use M(template) for the entire git
+ config file (e.g. because you need to change just C(user.email) in
+ /etc/.git/config). Solutions involving M(command) are cumbersome or
+ don't work correctly in check mode.
+options:
+ list_all:
+ description:
+ - List all settings (optionally limited to a given I(scope))
+ required: false
+ choices: [ "yes", "no" ]
+ default: no
+ name:
+ description:
+ - The name of the setting. If no value is supplied, the value will
+ be read from the config if it has been set.
+ required: false
+ default: null
+ repo:
+ description:
+ - Path to a git repository for reading and writing values from a
+ specific repo.
+ required: false
+ default: null
+ scope:
+ description:
+ - Specify which scope to read/set values from. This is required
+ when setting config values. If this is set to local, you must
+ also specify the repo parameter. It defaults to system only when
+ not using I(list_all)=yes.
+ required: false
+ choices: [ "local", "global", "system" ]
+ default: null
+ value:
+ description:
+ - When specifying the name of a single setting, supply a value to
+ set that setting to the given value.
+ required: false
+ default: null
+'''
+
+EXAMPLES = '''
+# Set some settings in ~/.gitconfig
+- git_config: name=alias.ci scope=global value=commit
+- git_config: name=alias.st scope=global value=status
+
+# Or system-wide:
+- git_config: name=alias.remotev scope=system value="remote -v"
+- git_config: name=core.editor scope=global value=vim
+# scope=system is the default
+- git_config: name=alias.diffc value="diff --cached"
+- git_config: name=color.ui value=auto
+
+# Make etckeeper not complain when invoked by cron
+- git_config: name=user.email repo=/etc scope=local value="root@{{ ansible_fqdn }}"
+
+# Read individual values from git config
+- git_config: name=alias.ci scope=global
+# scope=system is also assumed when reading values, unless list_all=yes
+- git_config: name=alias.diffc
+
+# Read all values from git config
+- git_config: list_all=yes scope=global
+# When list_all=yes and no scope is specified, you get configuration from all scopes
+- git_config: list_all=yes
+# Specify a repository to include local settings
+- git_config: list_all=yes repo=/path/to/repo.git
+'''
+
+RETURN = '''
+---
+config_value:
+ description: When list_all=no and value is not set, a string containing the value of the setting in name
+ returned: success
+ type: string
+ sample: "vim"
+
+config_values:
+ description: When list_all=yes, a dict containing key/value pairs of multiple configuration settings
+ returned: success
+ type: dictionary
+ sample:
+ core.editor: "vim"
+ color.ui: "auto"
+ alias.diffc: "diff --cached"
+ alias.remotev: "remote -v"
+'''
+
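+# Command lines built by main() below (a sketch): reading one global value
+# runs
+#
+#   git config --global alias.ci
+#
+# while list_all=yes with a repo runs 'git config -l' from that repo's
+# directory so local-scope settings are included.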
+
+def main():
+ module = AnsibleModule(
+ argument_spec=dict(
+ list_all=dict(required=False, type='bool', default=False),
+ name=dict(type='str'),
+ repo=dict(type='path'),
+ scope=dict(required=False, type='str', choices=['local', 'global', 'system']),
+ value=dict(required=False)
+ ),
+ mutually_exclusive=[['list_all', 'name'], ['list_all', 'value']],
+ required_if=[('scope', 'local', ['repo'])],
+ required_one_of=[['list_all', 'name']],
+ supports_check_mode=True,
+ )
+ git_path = module.get_bin_path('git')
+ if not git_path:
+ module.fail_json(msg="Could not find git. Please ensure it is installed.")
+
+ params = module.params
+ # We check error message for a pattern, so we need to make sure the messages appear in the form we're expecting.
+ # Set the locale to C to ensure consistent messages.
+ module.run_command_environ_update = dict(LANG='C', LC_ALL='C', LC_MESSAGES='C', LC_CTYPE='C')
+
+ if params['name']:
+ name = params['name']
+ else:
+ name = None
+
+ if params['scope']:
+ scope = params['scope']
+ elif params['list_all']:
+ scope = None
+ else:
+ scope = 'system'
+
+ if params['value']:
+ new_value = params['value']
+ else:
+ new_value = None
+
+ args = [git_path, "config"]
+ if params['list_all']:
+ args.append('-l')
+ if scope:
+ args.append("--" + scope)
+ if name:
+ args.append(name)
+
+ if scope == 'local':
+ dir = params['repo']
+ elif params['list_all'] and params['repo']:
+ # Include local settings from a specific repo when listing all available settings
+ dir = params['repo']
+ else:
+ # Run from root directory to avoid accidentally picking up any local config settings
+ dir = "/"
+
+ (rc, out, err) = module.run_command(' '.join(args), cwd=dir)
+ if params['list_all'] and scope and rc == 128 and 'unable to read config file' in err:
+ # This just means nothing has been set at the given scope
+ module.exit_json(changed=False, msg='', config_values={})
+ elif rc >= 2:
+ # If the return code is 1, it just means the option hasn't been set yet, which is fine.
+ module.fail_json(rc=rc, msg=err, cmd=' '.join(args))
+
+ if params['list_all']:
+ values = out.rstrip().splitlines()
+ config_values = {}
+ for value in values:
+ k, v = value.split('=', 1)
+ config_values[k] = v
+ module.exit_json(changed=False, msg='', config_values=config_values)
+ elif not new_value:
+ module.exit_json(changed=False, msg='', config_value=out.rstrip())
+ else:
+ old_value = out.rstrip()
+ if old_value == new_value:
+ module.exit_json(changed=False, msg="")
+
+ if not module.check_mode:
+ new_value_quoted = "'" + new_value + "'"
+ (rc, out, err) = module.run_command(' '.join(args + [new_value_quoted]), cwd=dir)
+ if err:
+ module.fail_json(rc=rc, msg=err, cmd=' '.join(args + [new_value_quoted]))
+ module.exit_json(
+ msg='setting changed',
+ diff=dict(
+ before_header=' '.join(args),
+ before=old_value + "\n",
+ after_header=' '.join(args),
+ after=new_value + "\n"
+ ),
+ changed=True
+ )
+
+from ansible.module_utils.basic import *
+
+if __name__ == '__main__':
+ main()
diff --git a/lib/ansible/modules/extras/source_control/github_hooks.py b/lib/ansible/modules/extras/source_control/github_hooks.py
new file mode 100644
index 0000000000..8d3c120a78
--- /dev/null
+++ b/lib/ansible/modules/extras/source_control/github_hooks.py
@@ -0,0 +1,194 @@
+#!/usr/bin/python
+# -*- coding: utf-8 -*-
+
+# (c) 2013, Phillip Gentry <phillip@cx.com>
+#
+# This file is part of Ansible
+#
+# Ansible is free software: you can redistribute it and/or modify
+# it under the terms of the GNU General Public License as published by
+# the Free Software Foundation, either version 3 of the License, or
+# (at your option) any later version.
+
+# Ansible is distributed in the hope that it will be useful,
+# but WITHOUT ANY WARRANTY; without even the implied warranty of
+# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+# GNU General Public License for more details.
+#
+# You should have received a copy of the GNU General Public License
+# along with Ansible. If not, see <http://www.gnu.org/licenses/>.
+
+try:
+ import json
+except ImportError:
+ try:
+ import simplejson as json
+ except ImportError:
+ # Let snippet from module_utils/basic.py return a proper error in this case
+ pass
+
+import base64
+
+DOCUMENTATION = '''
+---
+module: github_hooks
+short_description: Manages github service hooks.
+description:
+ - Adds service hooks and removes service hooks that have an error status.
+version_added: "1.4"
+options:
+ user:
+ description:
+ - Github username.
+ required: true
+ oauthkey:
+ description:
+ - The oauth key provided by github. It can be found/generated on github under "Edit Your Profile" >> "Applications" >> "Personal Access Tokens"
+ required: true
+ repo:
+ description:
+ - "This is the API url for the repository you want to manage hooks for. It should be in the form of: https://api.github.com/repos/user:/repo:. Note this is different than the normal repo url."
+ required: true
+ hookurl:
+ description:
+ - When creating a new hook, this is the url that you want github to post to. It is only required when creating a new hook.
+ required: false
+ action:
+ description:
+ - This tells the githooks module what you want it to do.
+ required: true
+ choices: [ "create", "cleanall", "list", "clean504" ]
+ validate_certs:
+ description:
+ - If C(no), SSL certificates for the target repo will not be validated. This should only be used
+ on personally controlled sites using self-signed certificates.
+ required: false
+ default: 'yes'
+ choices: ['yes', 'no']
+ content_type:
+ description:
+ - Content type to use for requests made to the webhook
+ required: false
+ default: 'json'
+ choices: ['json', 'form']
+
+author: "Phillip Gentry, CX Inc (@pcgentry)"
+'''
+
+EXAMPLES = '''
+# Example creating a new service hook. It ignores duplicates.
+- github_hooks: action=create hookurl=http://11.111.111.111:2222 user={{ gituser }} oauthkey={{ oauthkey }} repo=https://api.github.com/repos/pcgentry/Github-Auto-Deploy
+
+# Cleaning all hooks for this repo that had an error on the last update. Since this works for all hooks in a repo it is probably best that this would be called from a handler.
+- local_action: github_hooks action=cleanall user={{ gituser }} oauthkey={{ oauthkey }} repo={{ repo }}
+'''
+
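+# The helpers below drive the GitHub v3 hooks API over HTTP basic auth
+# (endpoint sketch):
+#
+#   GET    {repo}/hooks           # list
+#   POST   {repo}/hooks           # create
+#   DELETE {repo}/hooks/{hookid}  # used by the clean504/cleanall actions
+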
+def _list(module, hookurl, oauthkey, repo, user):
+ url = "%s/hooks" % repo
+ auth = base64.encodestring('%s:%s' % (user, oauthkey)).replace('\n', '')
+ headers = {
+ 'Authorization': 'Basic %s' % auth,
+ }
+ response, info = fetch_url(module, url, headers=headers)
+ if info['status'] != 200:
+ return 1, ''
+ else:
+ return 0, response.read()
+
+def _clean504(module, hookurl, oauthkey, repo, user):
+ current_hooks = _list(module, hookurl, oauthkey, repo, user)[1]
+ decoded = json.loads(current_hooks)
+
+ for hook in decoded:
+ if hook['last_response']['code'] == 504:
+ # print "Last response was an ERROR for hook:"
+ # print hook['id']
+ _delete(module, hookurl, oauthkey, repo, user, hook['id'])
+
+ return 0, current_hooks
+
+def _cleanall(module, hookurl, oauthkey, repo, user):
+ current_hooks = _list(module, hookurl, oauthkey, repo, user)[1]
+ decoded = json.loads(current_hooks)
+
+ for hook in decoded:
+ if hook['last_response']['code'] != 200:
+ # print "Last response was an ERROR for hook:"
+ # print hook['id']
+ _delete(module, hookurl, oauthkey, repo, user, hook['id'])
+
+ return 0, current_hooks
+
+def _create(module, hookurl, oauthkey, repo, user, content_type):
+ url = "%s/hooks" % repo
+ values = {
+ "active": True,
+ "name": "web",
+ "config": {
+ "url": "%s" % hookurl,
+ "content_type": "%s" % content_type
+ }
+ }
+ data = json.dumps(values)
+ auth = base64.encodestring('%s:%s' % (user, oauthkey)).replace('\n', '')
+ headers = {
+ 'Authorization': 'Basic %s' % auth,
+ }
+ response, info = fetch_url(module, url, data=data, headers=headers)
+ if info['status'] != 200:
+ return 0, '[]'
+ else:
+ return 0, response.read()
+
+def _delete(module, hookurl, oauthkey, repo, user, hookid):
+ url = "%s/hooks/%s" % (repo, hookid)
+ auth = base64.encodestring('%s:%s' % (user, oauthkey)).replace('\n', '')
+ headers = {
+ 'Authorization': 'Basic %s' % auth,
+ }
+ response, info = fetch_url(module, url, headers=headers, method='DELETE')
+ return response.read()
+
+def main():
+ module = AnsibleModule(
+ argument_spec=dict(
+ action=dict(required=True, choices=['list','clean504','cleanall','create']),
+ hookurl=dict(required=False),
+ oauthkey=dict(required=True, no_log=True),
+ repo=dict(required=True),
+ user=dict(required=True),
+ validate_certs=dict(default='yes', type='bool'),
+ content_type=dict(default='json', choices=['json', 'form']),
+ )
+ )
+
+ action = module.params['action']
+ hookurl = module.params['hookurl']
+ oauthkey = module.params['oauthkey']
+ repo = module.params['repo']
+ user = module.params['user']
+ content_type = module.params['content_type']
+
+ if action == "list":
+ (rc, out) = _list(module, hookurl, oauthkey, repo, user)
+
+ if action == "clean504":
+ (rc, out) = _clean504(module, hookurl, oauthkey, repo, user)
+
+ if action == "cleanall":
+ (rc, out) = _cleanall(module, hookurl, oauthkey, repo, user)
+
+ if action == "create":
+ (rc, out) = _create(module, hookurl, oauthkey, repo, user, content_type)
+
+ if rc != 0:
+ module.fail_json(msg="failed", result=out)
+
+ module.exit_json(msg="success", result=out)
+
+
+# import module snippets
+from ansible.module_utils.basic import *
+from ansible.module_utils.urls import *
+
+main()
diff --git a/lib/ansible/modules/extras/source_control/github_key.py b/lib/ansible/modules/extras/source_control/github_key.py
new file mode 100644
index 0000000000..815be9dc94
--- /dev/null
+++ b/lib/ansible/modules/extras/source_control/github_key.py
@@ -0,0 +1,243 @@
+#!/usr/bin/python
+# This file is part of Ansible
+#
+# Ansible is free software: you can redistribute it and/or modify
+# it under the terms of the GNU General Public License as published by
+# the Free Software Foundation, either version 3 of the License, or
+# (at your option) any later version.
+#
+# Ansible is distributed in the hope that it will be useful,
+# but WITHOUT ANY WARRANTY; without even the implied warranty of
+# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+# GNU General Public License for more details.
+#
+# You should have received a copy of the GNU General Public License
+# along with Ansible. If not, see <http://www.gnu.org/licenses/>.
+
+DOCUMENTATION = '''
+module: github_key
+short_description: Manage GitHub access keys.
+description:
+ - Creates, removes, or updates GitHub access keys.
+version_added: "2.2"
+options:
+ token:
+ description:
+ - GitHub Access Token with permission to list and create public keys.
+ required: true
+ name:
+ description:
+ - SSH key name
+ required: true
+ pubkey:
+ description:
+ - SSH public key value. Required when C(state=present).
+ required: false
+ default: none
+ state:
+ description:
+ - Whether to remove a key, ensure that it exists, or update its value.
+ choices: ['present', 'absent']
+ default: 'present'
+ required: false
+ force:
+ description:
+ - The default is C(yes), which will replace the existing remote key
+ if it's different from C(pubkey). If C(no), the key will only be
+ set if no key with the given C(name) exists.
+ required: false
+ choices: ['yes', 'no']
+ default: 'yes'
+
+author: Robert Estelle (@erydo)
+'''
+
+RETURN = '''
+deleted_keys:
+ description: An array of key objects that were deleted.
+ type: list
+ returned: When state=absent
+ sample: [{'id': 0, 'key': 'BASE64 encoded key', 'url': 'http://example.com/github key', 'created_at': 'YYYY-MM-DDTHH:MM:SSZ', 'read_only': False}]
+matching_keys:
+ description: An array of keys matching the specified name.
+ type: list
+ returned: When state=present
+ sample: [{'id': 0, 'key': 'BASE64 encoded key', 'url': 'http://example.com/github key', 'created_at': 'YYYY-MM-DDTHH:MM:SSZ', 'read_only': False}]
+key:
+ description: Metadata about the key just created.
+ type: dict
+ returned: When state=present
+ sample: {'id': 0, 'key': 'BASE64 encoded key', 'url': 'http://example.com/github key', 'created_at': 'YYYY-MM-DDTHH:MM:SSZ', 'read_only': False}
+'''
+
+EXAMPLES = '''
+- name: Read SSH public key to authorize
+ shell: cat /home/foo/.ssh/id_rsa.pub
+ register: ssh_pub_key
+
+- name: Authorize key with GitHub
+ local_action:
+ module: github_key
+ name: 'Access Key for Some Machine'
+ token: '{{github_access_token}}'
+ pubkey: '{{ssh_pub_key.stdout}}'
+'''
+
+
+import sys # noqa
+import json
+import re
+
+
+API_BASE = 'https://api.github.com'
+
+
+class GitHubResponse(object):
+ def __init__(self, response, info):
+ self.content = response.read()
+ self.info = info
+
+ def json(self):
+ return json.loads(self.content)
+
+ def links(self):
+ links = {}
+ if 'link' in self.info:
+ link_header = self.info['link']
+ matches = re.findall('<([^>]+)>; rel="([^"]+)"', link_header)
+ for url, rel in matches:
+ links[rel] = url
+ return links
+
+
+class GitHubSession(object):
+ def __init__(self, module, token):
+ self.module = module
+ self.token = token
+
+ def request(self, method, url, data=None):
+ headers = {
+ 'Authorization': 'token %s' % self.token,
+ 'Content-Type': 'application/json',
+ 'Accept': 'application/vnd.github.v3+json',
+ }
+ response, info = fetch_url(
+ self.module, url, method=method, data=data, headers=headers)
+ if not (200 <= info['status'] < 400):
+ self.module.fail_json(
+ msg=(" failed to send request %s to %s: %s"
+ % (method, url, info['msg'])))
+ return GitHubResponse(response, info)
+
+
+def get_all_keys(session):
+ url = API_BASE + '/user/keys'
+ while url:
+ r = session.request('GET', url)
+ for key in r.json():
+ yield key
+
+ url = r.links().get('next')
+
+
+def create_key(session, name, pubkey, check_mode):
+ if check_mode:
+ from datetime import datetime
+ now = datetime.utcnow()
+ return {
+ 'id': 0,
+ 'key': pubkey,
+ 'title': name,
+ 'url': 'http://example.com/CHECK_MODE_GITHUB_KEY',
+ 'created_at': datetime.strftime(now, '%Y-%m-%dT%H:%M:%SZ'),
+ 'read_only': False,
+ 'verified': False
+ }
+ else:
+ return session.request(
+ 'POST',
+ API_BASE + '/user/keys',
+ data=json.dumps({'title': name, 'key': pubkey})).json()
+
+
+def delete_keys(session, to_delete, check_mode):
+ if check_mode:
+ return
+
+ for key in to_delete:
+ session.request('DELETE', API_BASE + '/user/keys/%s' % key['id'])
+
+
+def ensure_key_absent(session, name, check_mode):
+ to_delete = [key for key in get_all_keys(session) if key['title'] == name]
+ delete_keys(session, to_delete, check_mode=check_mode)
+
+ return {'changed': bool(to_delete),
+ 'deleted_keys': to_delete}
+
+
+def ensure_key_present(session, name, pubkey, force, check_mode):
+ matching_keys = [k for k in get_all_keys(session) if k['title'] == name]
+ deleted_keys = []
+
+ if matching_keys and force and matching_keys[0]['key'] != pubkey:
+ delete_keys(session, matching_keys, check_mode=check_mode)
+ (deleted_keys, matching_keys) = (matching_keys, [])
+
+ if not matching_keys:
+ key = create_key(session, name, pubkey, check_mode=check_mode)
+ else:
+ key = matching_keys[0]
+
+ return {
+ 'changed': bool(deleted_keys or not matching_keys),
+ 'deleted_keys': deleted_keys,
+ 'matching_keys': matching_keys,
+ 'key': key
+ }
+
+
+def main():
+ argument_spec = {
+ 'token': {'required': True},
+ 'name': {'required': True},
+ 'pubkey': {},
+ 'state': {'choices': ['present', 'absent'], 'default': 'present'},
+ 'force': {'default': True, 'type': 'bool'},
+ }
+ module = AnsibleModule(
+ argument_spec=argument_spec,
+ supports_check_mode=True,
+ )
+
+ token = module.params['token']
+ name = module.params['name']
+ state = module.params['state']
+ force = module.params['force']
+ pubkey = module.params.get('pubkey')
+
+ if pubkey:
+ pubkey_parts = pubkey.split(' ')
+ # Keys consist of a protocol, the key data, and an optional comment.
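+ # e.g. "ssh-rsa AAAAB3NzaC1yc2E... user@host" has all three parts.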
+ if len(pubkey_parts) < 2:
+ module.fail_json(msg='"pubkey" parameter has an invalid format')
+
+ # Strip out comment so we can compare to the keys GitHub returns.
+ pubkey = ' '.join(pubkey_parts[:2])
+ elif state == 'present':
+ module.fail_json(msg='"pubkey" is required when state=present')
+
+ session = GitHubSession(module, token)
+ if state == 'present':
+ result = ensure_key_present(session, name, pubkey, force=force,
+ check_mode=module.check_mode)
+ elif state == 'absent':
+ result = ensure_key_absent(session, name, check_mode=module.check_mode)
+
+ module.exit_json(**result)
+
+from ansible.module_utils.basic import * # noqa
+from ansible.module_utils.urls import * # noqa
+
+if __name__ == '__main__':
+ main()
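+
+# A rough sketch (illustrative only) of the Link-header pagination that
+# GitHubResponse.links() and get_all_keys() implement above; the sample
+# header value is made up:
+#
+#   import re
+#   link_header = '<https://api.github.com/user/keys?page=2>; rel="next"'
+#   links = dict((rel, url) for url, rel
+#                in re.findall('<([^>]+)>; rel="([^"]+)"', link_header))
+#   assert links['next'] == 'https://api.github.com/user/keys?page=2'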
diff --git a/lib/ansible/modules/extras/source_control/github_release.py b/lib/ansible/modules/extras/source_control/github_release.py
new file mode 100644
index 0000000000..daeb005e87
--- /dev/null
+++ b/lib/ansible/modules/extras/source_control/github_release.py
@@ -0,0 +1,121 @@
+#!/usr/bin/python
+# -*- coding: utf-8 -*-
+#
+# This file is part of Ansible
+#
+# Ansible is free software: you can redistribute it and/or modify
+# it under the terms of the GNU General Public License as published by
+# the Free Software Foundation, either version 3 of the License, or
+# (at your option) any later version.
+#
+# Ansible is distributed in the hope that it will be useful,
+# but WITHOUT ANY WARRANTY; without even the implied warranty of
+# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+# GNU General Public License for more details.
+#
+# You should have received a copy of the GNU General Public License
+# along with Ansible. If not, see <http://www.gnu.org/licenses/>.
+
+
+DOCUMENTATION = '''
+---
+module: github_release
+short_description: Interact with GitHub Releases
+description:
+ - Fetch metadata about Github Releases
+version_added: 2.2
+options:
+ token:
+ required: true
+ description:
+ - Github Personal Access Token for authenticating
+ user:
+ required: true
+ description:
+ - The GitHub account that owns the repository
+ repo:
+ required: true
+ description:
+ - Repository name
+ action:
+ required: true
+ description:
+ - Action to perform
+ choices: [ 'latest_release' ]
+
+author:
+ - "Adrian Moisey (@adrianmoisey)"
+requirements:
+ - "github3.py >= 1.0.0a3"
+'''
+
+EXAMPLES = '''
+- name: Get latest release of test/test
+ github_release:
+ token: tokenabc1234567890
+ user: testuser
+ repo: testrepo
+ action: latest_release
+'''
+
+RETURN = '''
+latest_release:
+ description: Version of the latest release
+ type: string
+ returned: success
+ sample: 1.1.0
+'''
+
+try:
+ import github3
+
+ HAS_GITHUB_API = True
+except ImportError:
+ HAS_GITHUB_API = False
+
+
+def main():
+ module = AnsibleModule(
+ argument_spec=dict(
+ repo=dict(required=True),
+ user=dict(required=True),
+ token=dict(required=True, no_log=True),
+ action=dict(required=True, choices=['latest_release']),
+ ),
+ supports_check_mode=True
+ )
+
+ if not HAS_GITHUB_API:
+ module.fail_json(msg='Missing required github3 module (check docs or install with: pip install github3)')
+
+ repo = module.params['repo']
+ user = module.params['user']
+ login_token = module.params['token']
+ action = module.params['action']
+
+ # login to github
+ try:
+ gh = github3.login(token=str(login_token))
+ # test if we're actually logged in
+ gh.me()
+ except github3.AuthenticationFailed:
+ e = get_exception()
+ module.fail_json(msg='Failed to connect to Github: %s' % e)
+
+ repository = gh.repository(str(user), str(repo))
+
+ if not repository:
+ module.fail_json(msg="Repository %s/%s doesn't exist" % (user, repo))
+
+ if action == 'latest_release':
+ release = repository.latest_release()
+ if release:
+ module.exit_json(tag=release.tag_name)
+ else:
+ module.exit_json(tag=None)
+
+
+from ansible.module_utils.basic import *
+
+if __name__ == '__main__':
+ main()
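+
+# For reference, a sketch of the github3.py calls this module wraps; the
+# token and repository names are placeholders:
+#
+#   import github3
+#   gh = github3.login(token='tokenabc1234567890')
+#   repository = gh.repository('testuser', 'testrepo')
+#   release = repository.latest_release()
+#   print(release.tag_name)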
diff --git a/lib/ansible/modules/extras/source_control/gitlab_group.py b/lib/ansible/modules/extras/source_control/gitlab_group.py
new file mode 100644
index 0000000000..a5fa98d13f
--- /dev/null
+++ b/lib/ansible/modules/extras/source_control/gitlab_group.py
@@ -0,0 +1,218 @@
+#!/usr/bin/python
+# (c) 2015, Werner Dijkerman (ikben@werner-dijkerman.nl)
+#
+# This file is part of Ansible
+#
+# Ansible is free software: you can redistribute it and/or modify
+# it under the terms of the GNU General Public License as published by
+# the Free Software Foundation, either version 3 of the License, or
+# (at your option) any later version.
+#
+# Ansible is distributed in the hope that it will be useful,
+# but WITHOUT ANY WARRANTY; without even the implied warranty of
+# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+# GNU General Public License for more details.
+#
+# You should have received a copy of the GNU General Public License
+# along with Ansible. If not, see <http://www.gnu.org/licenses/>.
+
+DOCUMENTATION = '''
+---
+module: gitlab_group
+short_description: Creates/updates/deletes Gitlab Groups
+description:
+ - When the group does not exist in Gitlab, it will be created.
+ - When the group exists and state=absent, the group will be deleted.
+version_added: "2.1"
+author: "Werner Dijkerman (@dj-wasabi)"
+requirements:
+ - pyapi-gitlab python module
+options:
+ server_url:
+ description:
+ - Url of Gitlab server, with protocol (http or https).
+ required: true
+ validate_certs:
+ description:
+ - When using https if SSL certificate needs to be verified.
+ required: false
+ default: true
+ aliases:
+ - verify_ssl
+ login_user:
+ description:
+ - Gitlab user name.
+ required: false
+ default: null
+ login_password:
+ description:
+ - Gitlab password for login_user
+ required: false
+ default: null
+ login_token:
+ description:
+ - Gitlab token for logging in.
+ required: false
+ default: null
+ name:
+ description:
+ - Name of the group you want to create.
+ required: true
+ path:
+ description:
+ - The path of the group you want to create; this will be server_url/group_path.
+ - If not supplied, the group_name will be used.
+ required: false
+ default: null
+ state:
+ description:
+ - create or delete group.
+ - Possible values are present and absent.
+ required: false
+ default: "present"
+ choices: ["present", "absent"]
+'''
+
+EXAMPLES = '''
+- name: "Delete Gitlab Group"
+ local_action: gitlab_group
+ server_url="http://gitlab.dj-wasabi.local"
+ validate_certs=false
+ login_token="WnUzDsxjy8230-Dy_k"
+ name=my_first_group
+ state=absent
+
+- name: "Create Gitlab Group"
+ local_action: gitlab_group
+ server_url="https://gitlab.dj-wasabi.local"
+ validate_certs=true
+ login_user=dj-wasabi
+ login_password="MySecretPassword"
+ name=my_first_group
+ path=my_first_group
+ state=present
+'''
+
+RETURN = '''# '''
+
+try:
+ import gitlab
+ HAS_GITLAB_PACKAGE = True
+except ImportError:
+ HAS_GITLAB_PACKAGE = False
+
+from ansible.module_utils.basic import *
+from ansible.module_utils.pycompat24 import get_exception
+
+class GitLabGroup(object):
+ def __init__(self, module, git):
+ self._module = module
+ self._gitlab = git
+
+ def createGroup(self, group_name, group_path):
+ if self._module.check_mode:
+ self._module.exit_json(changed=True)
+ return self._gitlab.creategroup(group_name, group_path)
+
+ def deleteGroup(self, group_name):
+ is_group_empty = True
+ group_id = self.idGroup(group_name)
+
+ for project in self._gitlab.getall(self._gitlab.getprojects):
+ owner = project['namespace']['name']
+ if owner == group_name:
+ is_group_empty = False
+
+ if is_group_empty:
+ if self._module.check_mode:
+ self._module.exit_json(changed=True)
+ return self._gitlab.deletegroup(group_id)
+ else:
+ self._module.fail_json(msg="There are still projects in this group. These needs to be moved or deleted before this group can be removed.")
+
+ def existsGroup(self, group_name):
+ for group in self._gitlab.getall(self._gitlab.getgroups):
+ if group['name'] == group_name:
+ return True
+ return False
+
+ def idGroup(self, group_name):
+ for group in self._gitlab.getall(self._gitlab.getgroups):
+ if group['name'] == group_name:
+ return group['id']
+
+
+def main():
+ module = AnsibleModule(
+ argument_spec=dict(
+ server_url=dict(required=True),
+ validate_certs=dict(required=False, default=True, type='bool', aliases=['verify_ssl']),
+ login_user=dict(required=False, no_log=True),
+ login_password=dict(required=False, no_log=True),
+ login_token=dict(required=False, no_log=True),
+ name=dict(required=True),
+ path=dict(required=False),
+ state=dict(default="present", choices=["present", "absent"]),
+ ),
+ supports_check_mode=True
+ )
+
+ if not HAS_GITLAB_PACKAGE:
+ module.fail_json(msg="Missing requried gitlab module (check docs or install with: pip install pyapi-gitlab")
+
+ server_url = module.params['server_url']
+ verify_ssl = module.params['validate_certs']
+ login_user = module.params['login_user']
+ login_password = module.params['login_password']
+ login_token = module.params['login_token']
+ group_name = module.params['name']
+ group_path = module.params['path']
+ state = module.params['state']
+
+ # We need both login_user and login_password or login_token, otherwise we fail.
+ if login_user is not None and login_password is not None:
+ use_credentials = True
+ elif login_token is not None:
+ use_credentials = False
+ else:
+ module.fail_json(msg="No login credentials are given. Use login_user with login_password, or login_token")
+
+ # Set group_path to group_name if it is empty.
+ if group_path is None:
+ group_path = group_name.replace(" ", "_")
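+ # e.g. a name of "my first group" yields the path "my_first_group"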
+
+ # Let's make a connection to the Gitlab server_url, with either login_user and login_password
+ # or with login_token
+ try:
+ if use_credentials:
+ git = gitlab.Gitlab(host=server_url, verify_ssl=verify_ssl)
+ git.login(user=login_user, password=login_password)
+ else:
+ git = gitlab.Gitlab(server_url, token=login_token, verify_ssl=verify_ssl)
+ except Exception:
+ e = get_exception()
+ module.fail_json(msg="Failed to connect to Gitlab server: %s " % e)
+
+ # Validate if group exists and take action based on "state"
+ group = GitLabGroup(module, git)
+ group_name = group_name.lower()
+ group_exists = group.existsGroup(group_name)
+
+ if group_exists and state == "absent":
+ group.deleteGroup(group_name)
+ module.exit_json(changed=True, result="Successfully deleted group %s" % group_name)
+ else:
+ if state == "absent":
+ module.exit_json(changed=False, result="Group deleted or does not exists")
+ else:
+ if group_exists:
+ module.exit_json(changed=False)
+ else:
+ if group.createGroup(group_name, group_path):
+ module.exit_json(changed=True, result="Successfully created or updated the group %s" % group_name)
+
+
+
+
+if __name__ == '__main__':
+ main()
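+
+# A minimal sketch (illustrative only) of the pyapi-gitlab calls this module
+# wraps; the URL and token are placeholders:
+#
+#   import gitlab
+#   git = gitlab.Gitlab('https://gitlab.example.local', token='some_token', verify_ssl=True)
+#   git.creategroup('my_first_group', 'my_first_group')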
diff --git a/lib/ansible/modules/extras/source_control/gitlab_project.py b/lib/ansible/modules/extras/source_control/gitlab_project.py
new file mode 100644
index 0000000000..da21589186
--- /dev/null
+++ b/lib/ansible/modules/extras/source_control/gitlab_project.py
@@ -0,0 +1,401 @@
+#!/usr/bin/python
+# (c) 2015, Werner Dijkerman (ikben@werner-dijkerman.nl)
+#
+# This file is part of Ansible
+#
+# Ansible is free software: you can redistribute it and/or modify
+# it under the terms of the GNU General Public License as published by
+# the Free Software Foundation, either version 3 of the License, or
+# (at your option) any later version.
+#
+# Ansible is distributed in the hope that it will be useful,
+# but WITHOUT ANY WARRANTY; without even the implied warranty of
+# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+# GNU General Public License for more details.
+#
+# You should have received a copy of the GNU General Public License
+# along with Ansible. If not, see <http://www.gnu.org/licenses/>.
+
+DOCUMENTATION = '''
+---
+module: gitlab_project
+short_description: Creates/updates/deletes Gitlab Projects
+description:
+ - When the project does not exist in Gitlab, it will be created.
+ - When the project exists and state=absent, the project will be deleted.
+ - When changes are made to the project, the project will be updated.
+version_added: "2.1"
+author: "Werner Dijkerman (@dj-wasabi)"
+requirements:
+ - pyapi-gitlab python module
+options:
+ server_url:
+ description:
+ - Url of Gitlab server, with protocol (http or https).
+ required: true
+ validate_certs:
+ description:
+ - When using https if SSL certificate needs to be verified.
+ required: false
+ default: true
+ aliases:
+ - verify_ssl
+ login_user:
+ description:
+ - Gitlab user name.
+ required: false
+ default: null
+ login_password:
+ description:
+ - Gitlab password for login_user
+ required: false
+ default: null
+ login_token:
+ description:
+ - Gitlab token for logging in.
+ required: false
+ default: null
+ group:
+ description:
+ - The name of the group this project belongs to.
+ - When not provided, the project will belong to the user configured via 'login_user' or 'login_token'.
+ - When provided with a username, the project will be created for that user; 'login_user' or 'login_token' needs admin rights.
+ required: false
+ default: null
+ name:
+ description:
+ - The name of the project
+ required: true
+ path:
+ description:
+ - The path of the project you want to create; this will be server_url/<group>/path.
+ - If not supplied, name will be used.
+ required: false
+ default: null
+ description:
+ description:
+ - A description for the project.
+ required: false
+ default: null
+ issues_enabled:
+ description:
+ - Whether you want to create issues or not.
+ - Possible values are true and false.
+ required: false
+ default: true
+ merge_requests_enabled:
+ description:
+ - If merge requests can be made or not.
+ - Possible values are true and false.
+ required: false
+ default: true
+ wiki_enabled:
+ description:
+ - If a wiki for this project should be available or not.
+ - Possible values are true and false.
+ required: false
+ default: true
+ snippets_enabled:
+ description:
+ - If creating snippets should be available or not.
+ - Possible values are true and false.
+ required: false
+ default: true
+ public:
+ description:
+ - If the project is publicly available or not.
+ - Setting this to true is the same as setting visibility_level to 20.
+ - Possible values are true and false.
+ required: false
+ default: false
+ visibility_level:
+ description:
+ - Private. visibility_level is 0. Project access must be granted explicitly for each user.
+ - Internal. visibility_level is 10. The project can be cloned by any logged in user.
+ - Public. visibility_level is 20. The project can be cloned without any authentication.
+ - Possible values are 0, 10 and 20.
+ required: false
+ default: 0
+ import_url:
+ description:
+ - Git repository which will be imported into gitlab.
+ - Gitlab server needs read access to this git repository.
+ required: false
+ default: false
+ state:
+ description:
+ - create or delete project.
+ - Possible values are present and absent.
+ required: false
+ default: "present"
+ choices: ["present", "absent"]
+'''
+
+EXAMPLES = '''
+- name: "Delete Gitlab Project"
+ local_action: gitlab_project
+ server_url="http://gitlab.dj-wasabi.local"
+ validate_certs=false
+ login_token="WnUzDsxjy8230-Dy_k"
+ name=my_first_project
+ state=absent
+
+- name: "Create Gitlab Project in group Ansible"
+ local_action: gitlab_project
+ server_url="https://gitlab.dj-wasabi.local"
+ validate_certs=true
+ login_user=dj-wasabi
+ login_password="MySecretPassword"
+ name=my_first_project
+ group=ansible
+ issues_enabled=false
+ wiki_enabled=true
+ snippets_enabled=true
+ import_url="http://git.example.com/example/lab.git"
+ state=present
+'''
+
+RETURN = '''# '''
+
+try:
+ import gitlab
+ HAS_GITLAB_PACKAGE = True
+except ImportError:
+ HAS_GITLAB_PACKAGE = False
+
+from ansible.module_utils.basic import *
+from ansible.module_utils.pycompat24 import get_exception
+
+
+class GitLabProject(object):
+ def __init__(self, module, git):
+ self._module = module
+ self._gitlab = git
+
+ def createOrUpdateProject(self, project_exists, group_name, import_url, arguments):
+ is_user = False
+ group_id = self.getGroupId(group_name)
+ if not group_id:
+ group_id = self.getUserId(group_name)
+ is_user = True
+
+ if project_exists:
+ # Edit project
+ return self.updateProject(group_name, arguments)
+ else:
+ # Create project
+ if self._module.check_mode:
+ self._module.exit_json(changed=True)
+ return self.createProject(is_user, group_id, import_url, arguments)
+
+ def createProject(self, is_user, user_id, import_url, arguments):
+ if is_user:
+ return self._gitlab.createprojectuser(user_id=user_id, import_url=import_url, **arguments)
+ else:
+ group_id = user_id
+ return self._gitlab.createproject(namespace_id=group_id, import_url=import_url, **arguments)
+
+ def deleteProject(self, group_name, project_name):
+ if self.existsGroup(group_name):
+ project_owner = group_name
+ else:
+ project_owner = self._gitlab.currentuser()['username']
+
+ search_results = self._gitlab.searchproject(search=project_name)
+ for result in search_results:
+ owner = result['namespace']['name']
+ if owner == project_owner:
+ return self._gitlab.deleteproject(result['id'])
+
+ def existsProject(self, group_name, project_name):
+ if self.existsGroup(group_name):
+ project_owner = group_name
+ else:
+ project_owner = self._gitlab.currentuser()['username']
+
+ search_results = self._gitlab.searchproject(search=project_name)
+ for result in search_results:
+ owner = result['namespace']['name']
+ if owner == project_owner:
+ return True
+ return False
+
+ def existsGroup(self, group_name):
+ if group_name is not None:
+ # Find the group, if group not exists we try for user
+ for group in self._gitlab.getall(self._gitlab.getgroups):
+ if group['name'] == group_name:
+ return True
+
+ user_name = group_name
+ user_data = self._gitlab.getusers(search=user_name)
+ for data in user_data:
+ if 'id' in data:
+ return True
+ return False
+
+ def getGroupId(self, group_name):
+ if group_name is not None:
+ # Find the group, if group not exists we try for user
+ for group in self._gitlab.getall(self._gitlab.getgroups):
+ if group['name'] == group_name:
+ return group['id']
+
+ def getProjectId(self, group_name, project_name):
+ if self.existsGroup(group_name):
+ project_owner = group_name
+ else:
+ project_owner = self._gitlab.currentuser()['username']
+
+ search_results = self._gitlab.searchproject(search=project_name)
+ for result in search_results:
+ owner = result['namespace']['name']
+ if owner == project_owner:
+ return result['id']
+
+ def getUserId(self, user_name):
+ user_data = self._gitlab.getusers(search=user_name)
+
+ for data in user_data:
+ if 'id' in data:
+ return data['id']
+ return self._gitlab.currentuser()['id']
+
+ def to_bool(self, value):
+ if value:
+ return 1
+ else:
+ return 0
+
+ def updateProject(self, group_name, arguments):
+ project_changed = False
+ project_name = arguments['name']
+ project_id = self.getProjectId(group_name, project_name)
+ project_data = self._gitlab.getproject(project_id=project_id)
+
+ for arg_key, arg_value in arguments.items():
+ project_data_value = project_data[arg_key]
+
+ if isinstance(project_data_value, bool) or project_data_value is None:
+ to_bool = self.to_bool(project_data_value)
+ if to_bool != arg_value:
+ project_changed = True
+ continue
+ else:
+ if project_data_value != arg_value:
+ project_changed = True
+
+ if project_changed:
+ if self._module.check_mode:
+ self._module.exit_json(changed=True)
+ return self._gitlab.editproject(project_id=project_id, **arguments)
+ else:
+ return False
+
+
+def main():
+ module = AnsibleModule(
+ argument_spec=dict(
+ server_url=dict(required=True),
+ validate_certs=dict(required=False, default=True, type='bool', aliases=['verify_ssl']),
+ login_user=dict(required=False, no_log=True),
+ login_password=dict(required=False, no_log=True),
+ login_token=dict(required=False, no_log=True),
+ group=dict(required=False),
+ name=dict(required=True),
+ path=dict(required=False),
+ description=dict(required=False),
+ issues_enabled=dict(default=True, type='bool'),
+ merge_requests_enabled=dict(default=True, type='bool'),
+ wiki_enabled=dict(default=True, type='bool'),
+ snippets_enabled=dict(default=True, type='bool'),
+ public=dict(default=False, type='bool'),
+ visibility_level=dict(default="0", choices=["0", "10", "20"]),
+ import_url=dict(required=False),
+ state=dict(default="present", choices=["present", 'absent']),
+ ),
+ supports_check_mode=True
+ )
+
+ if not HAS_GITLAB_PACKAGE:
+ module.fail_json(msg="Missing required gitlab module (check docs or install with: pip install pyapi-gitlab")
+
+ server_url = module.params['server_url']
+ verify_ssl = module.params['validate_certs']
+ login_user = module.params['login_user']
+ login_password = module.params['login_password']
+ login_token = module.params['login_token']
+ group_name = module.params['group']
+ project_name = module.params['name']
+ project_path = module.params['path']
+ description = module.params['description']
+ issues_enabled = module.params['issues_enabled']
+ merge_requests_enabled = module.params['merge_requests_enabled']
+ wiki_enabled = module.params['wiki_enabled']
+ snippets_enabled = module.params['snippets_enabled']
+ public = module.params['public']
+ visibility_level = module.params['visibility_level']
+ import_url = module.params['import_url']
+ state = module.params['state']
+
+ # We need both login_user and login_password or login_token, otherwise we fail.
+ if login_user is not None and login_password is not None:
+ use_credentials = True
+ elif login_token is not None:
+ use_credentials = False
+ else:
+ module.fail_json(msg="No login credentials are given. Use login_user with login_password, or login_token")
+
+ # Set project_path to project_name if it is empty.
+ if project_path is None:
+ project_path = project_name.replace(" ", "_")
+
+ # Gitlab API makes no difference between upper and lower cases, so we lower them.
+ project_name = project_name.lower()
+ project_path = project_path.lower()
+ if group_name is not None:
+ group_name = group_name.lower()
+
+ # Let's make a connection to the Gitlab server_url, with either login_user and login_password
+ # or with login_token
+ try:
+ if use_credentials:
+ git = gitlab.Gitlab(host=server_url, verify_ssl=verify_ssl)
+ git.login(user=login_user, password=login_password)
+ else:
+ git = gitlab.Gitlab(server_url, token=login_token, verify_ssl=verify_ssl)
+ except Exception:
+ e = get_exception()
+ module.fail_json(msg="Failed to connect to Gitlab server: %s " % e)
+
+ # Validate if project exists and take action based on "state"
+ project = GitLabProject(module, git)
+ project_exists = project.existsProject(group_name, project_name)
+
+ # Creating the project dict
+ arguments = {"name": project_name,
+ "path": project_path,
+ "description": description,
+ "issues_enabled": project.to_bool(issues_enabled),
+ "merge_requests_enabled": project.to_bool(merge_requests_enabled),
+ "wiki_enabled": project.to_bool(wiki_enabled),
+ "snippets_enabled": project.to_bool(snippets_enabled),
+ "public": project.to_bool(public),
+ "visibility_level": int(visibility_level)}
+
+ if project_exists and state == "absent":
+ project.deleteProject(group_name, project_name)
+ module.exit_json(changed=True, result="Successfully deleted project %s" % project_name)
+ else:
+ if state == "absent":
+ module.exit_json(changed=False, result="Project deleted or does not exists")
+ else:
+ if project.createOrUpdateProject(project_exists, group_name, import_url, arguments):
+ module.exit_json(changed=True, result="Successfully created or updated the project %s" % project_name)
+ else:
+ module.exit_json(changed=False)
+
+
+
+if __name__ == '__main__':
+ main()
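+
+# A small sketch (illustrative only) of the visibility_level mapping the
+# documentation above describes; the dict name is made up:
+#
+#   VISIBILITY_LEVELS = {'private': 0, 'internal': 10, 'public': 20}
+#   assert VISIBILITY_LEVELS['public'] == 20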
diff --git a/lib/ansible/modules/extras/source_control/gitlab_user.py b/lib/ansible/modules/extras/source_control/gitlab_user.py
new file mode 100644
index 0000000000..d9b40401b0
--- /dev/null
+++ b/lib/ansible/modules/extras/source_control/gitlab_user.py
@@ -0,0 +1,351 @@
+#!/usr/bin/python
+# (c) 2015, Werner Dijkerman (ikben@werner-dijkerman.nl)
+#
+# This file is part of Ansible
+#
+# Ansible is free software: you can redistribute it and/or modify
+# it under the terms of the GNU General Public License as published by
+# the Free Software Foundation, either version 3 of the License, or
+# (at your option) any later version.
+#
+# Ansible is distributed in the hope that it will be useful,
+# but WITHOUT ANY WARRANTY; without even the implied warranty of
+# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+# GNU General Public License for more details.
+#
+# You should have received a copy of the GNU General Public License
+# along with Ansible. If not, see <http://www.gnu.org/licenses/>.
+
+DOCUMENTATION = '''
+---
+module: gitlab_user
+short_description: Creates/updates/deletes Gitlab Users
+description:
+ - When the user does not exist in Gitlab, it will be created.
+ - When the user exists and state=absent, the user will be deleted.
+ - When changes are made to the user, the user will be updated.
+version_added: "2.1"
+author: "Werner Dijkerman (@dj-wasabi)"
+requirements:
+ - pyapi-gitlab python module
+options:
+ server_url:
+ description:
+ - Url of Gitlab server, with protocol (http or https).
+ required: true
+ validate_certs:
+ description:
+ - When using https if SSL certificate needs to be verified.
+ required: false
+ default: true
+ aliases:
+ - verify_ssl
+ login_user:
+ description:
+ - Gitlab user name.
+ required: false
+ default: null
+ login_password:
+ description:
+ - Gitlab password for login_user
+ required: false
+ default: null
+ login_token:
+ description:
+ - Gitlab token for logging in.
+ required: false
+ default: null
+ name:
+ description:
+ - Name of the user you want to create
+ required: true
+ username:
+ description:
+ - The username of the user.
+ required: true
+ password:
+ description:
+ - The password of the user.
+ required: true
+ email:
+ description:
+ - The email that belongs to the user.
+ required: true
+ sshkey_name:
+ description:
+ - The name of the sshkey
+ required: false
+ default: null
+ sshkey_file:
+ description:
+ - The ssh key itself.
+ required: false
+ default: null
+ group:
+ description:
+ - Add the user as a member of this group.
+ required: false
+ default: null
+ access_level:
+ description:
+ - The access level to the group. One of the following can be used.
+ - guest
+ - reporter
+ - developer
+ - master
+ - owner
+ required: false
+ default: null
+ state:
+ description:
+ - create or delete user.
+ - Possible values are present and absent.
+ required: false
+ default: present
+ choices: ["present", "absent"]
+'''
+
+EXAMPLES = '''
+- name: "Delete Gitlab User"
+ local_action: gitlab_user
+ server_url="http://gitlab.dj-wasabi.local"
+ validate_certs=false
+ login_token="WnUzDsxjy8230-Dy_k"
+ username=myusername
+ state=absent
+
+- name: "Create Gitlab User"
+ local_action: gitlab_user
+ server_url="https://gitlab.dj-wasabi.local"
+ validate_certs=true
+ login_user=dj-wasabi
+ login_password="MySecretPassword"
+ name="My Name"
+ username=myusername
+ password=mysecretpassword
+ email=me@home.com
+ sshkey_name=MySSH
+ sshkey_file="ssh-rsa AAAAB3NzaC1yc..."
+ state=present
+'''
+
+RETURN = '''# '''
+
+try:
+ import gitlab
+ HAS_GITLAB_PACKAGE = True
+except ImportError:
+ HAS_GITLAB_PACKAGE = False
+
+from ansible.module_utils.pycompat24 import get_exception
+from ansible.module_utils.basic import *
+
+
+class GitLabUser(object):
+ def __init__(self, module, git):
+ self._module = module
+ self._gitlab = git
+
+ def addToGroup(self, group_id, user_id, access_level):
+ if access_level == "guest":
+ level = 10
+ elif access_level == "reporter":
+ level = 20
+ elif access_level == "developer":
+ level = 30
+ elif access_level == "master":
+ level = 40
+ elif access_level == "owner":
+ level = 50
+ return self._gitlab.addgroupmember(group_id, user_id, level)
+
+ def createOrUpdateUser(self, user_name, user_username, user_password, user_email, user_sshkey_name, user_sshkey_file, group_name, access_level):
+ group_id = ''
+ arguments = {"name": user_name,
+ "username": user_username,
+ "email": user_email}
+
+ if group_name is not None:
+ if self.existsGroup(group_name):
+ group_id = self.getGroupId(group_name)
+
+ if self.existsUser(user_username):
+ self.updateUser(group_id, user_sshkey_name, user_sshkey_file, access_level, arguments)
+ else:
+ if self._module.check_mode:
+ self._module.exit_json(changed=True)
+ self.createUser(group_id, user_password, user_sshkey_name, user_sshkey_file, access_level, arguments)
+
+ def createUser(self, group_id, user_password, user_sshkey_name, user_sshkey_file, access_level, arguments):
+ user_changed = False
+
+ # Create the user
+ user_username = arguments['username']
+ user_name = arguments['name']
+ user_email = arguments['email']
+ if self._gitlab.createuser(password=user_password, **arguments):
+ user_id = self.getUserId(user_username)
+ if self._gitlab.addsshkeyuser(user_id=user_id, title=user_sshkey_name, key=user_sshkey_file):
+ user_changed = True
+ # Add the user to the group if group_id is not empty
+ if group_id != '':
+ if self.addToGroup(group_id, user_id, access_level):
+ user_changed = True
+ user_changed = True
+
+ # Exit with change to true or false
+ if user_changed:
+ self._module.exit_json(changed=True, result="Created the user")
+ else:
+ self._module.exit_json(changed=False)
+
+ def deleteUser(self, user_username):
+ user_id = self.getUserId(user_username)
+
+ if self._gitlab.deleteuser(user_id):
+ self._module.exit_json(changed=True, result="Successfully deleted user %s" % user_username)
+ else:
+ self._module.exit_json(changed=False, result="User %s already deleted or something went wrong" % user_username)
+
+ def existsGroup(self, group_name):
+ for group in self._gitlab.getall(self._gitlab.getgroups):
+ if group['name'] == group_name:
+ return True
+ return False
+
+ def existsUser(self, username):
+ found_user = self._gitlab.getusers(search=username)
+ for user in found_user:
+ if user['id'] != '':
+ return True
+ return False
+
+ def getGroupId(self, group_name):
+ for group in self._gitlab.getall(self._gitlab.getgroups):
+ if group['name'] == group_name:
+ return group['id']
+
+ def getUserId(self, username):
+ found_user = self._gitlab.getusers(search=username)
+ for user in found_user:
+ if user['id'] != '':
+ return user['id']
+
+ def updateUser(self, group_id, user_sshkey_name, user_sshkey_file, access_level, arguments):
+ user_changed = False
+ user_username = arguments['username']
+ user_id = self.getUserId(user_username)
+ user_data = self._gitlab.getuser(user_id=user_id)
+
+ # Lets check if we need to update the user
+ for arg_key, arg_value in arguments.items():
+ if user_data[arg_key] != arg_value:
+ user_changed = True
+
+ if user_changed:
+ if self._module.check_mode:
+ self._module.exit_json(changed=True)
+ self._gitlab.edituser(user_id=user_id, **arguments)
+ user_changed = True
+ if self._module.check_mode or self._gitlab.addsshkeyuser(user_id=user_id, title=user_sshkey_name, key=user_sshkey_file):
+ user_changed = True
+ if group_id != '':
+ if self._module.check_mode or self.addToGroup(group_id, user_id, access_level):
+ user_changed = True
+ if user_changed:
+ self._module.exit_json(changed=True, result="The user %s is updated" % user_username)
+ else:
+ self._module.exit_json(changed=False, result="The user %s is already up2date" % user_username)
+
+
+def main():
+ module = AnsibleModule(
+ argument_spec=dict(
+ server_url=dict(required=True),
+ validate_certs=dict(required=False, default=True, type='bool', aliases=['verify_ssl']),
+ login_user=dict(required=False, no_log=True),
+ login_password=dict(required=False, no_log=True),
+ login_token=dict(required=False, no_log=True),
+ name=dict(required=True),
+ username=dict(required=True),
+ password=dict(required=True),
+ email=dict(required=True),
+ sshkey_name=dict(required=False),
+ sshkey_file=dict(required=False),
+ group=dict(required=False),
+ access_level=dict(required=False, choices=["guest", "reporter", "developer", "master", "owner"]),
+ state=dict(default="present", choices=["present", "absent"]),
+ ),
+ supports_check_mode=True
+ )
+
+ if not HAS_GITLAB_PACKAGE:
+ module.fail_json(msg="Missing required gitlab module (check docs or install with: pip install pyapi-gitlab")
+
+ server_url = module.params['server_url']
+ verify_ssl = module.params['validate_certs']
+ login_user = module.params['login_user']
+ login_password = module.params['login_password']
+ login_token = module.params['login_token']
+ user_name = module.params['name']
+ user_username = module.params['username']
+ user_password = module.params['password']
+ user_email = module.params['email']
+ user_sshkey_name = module.params['sshkey_name']
+ user_sshkey_file = module.params['sshkey_file']
+ group_name = module.params['group']
+ access_level = module.params['access_level']
+ state = module.params['state']
+
+ # We need both login_user and login_password or login_token, otherwise we fail.
+ if login_user is not None and login_password is not None:
+ use_credentials = True
+ elif login_token is not None:
+ use_credentials = False
+ else:
+ module.fail_json(msg="No login credentials are given. Use login_user with login_password, or login_token")
+
+ # Check if vars are none
+ if user_sshkey_file is not None and user_sshkey_name is not None:
+ use_sshkey = True
+ else:
+ use_sshkey = False
+
+ if group_name is not None and access_level is not None:
+ add_to_group = True
+ group_name = group_name.lower()
+ else:
+ add_to_group = False
+
+ user_username = user_username.lower()
+
+ # Let's make a connection to the Gitlab server_url, with either login_user and login_password
+ # or with login_token
+ try:
+ if use_credentials:
+ git = gitlab.Gitlab(host=server_url, verify_ssl=verify_ssl)
+ git.login(user=login_user, password=login_password)
+ else:
+ git = gitlab.Gitlab(server_url, token=login_token, verify_ssl=verify_ssl)
+ except Exception:
+ e = get_exception()
+ module.fail_json(msg="Failed to connect to Gitlab server: %s " % e)
+
+ # Validate if group exists and take action based on "state"
+ user = GitLabUser(module, git)
+
+ # Check if user exists, if not exists and state = absent, we exit nicely.
+ if not user.existsUser(user_username) and state == "absent":
+ module.exit_json(changed=False, result="User already deleted or does not exists")
+ else:
+ # User exists,
+ if state == "absent":
+ user.deleteUser(user_username)
+ else:
+ user.createOrUpdateUser(user_name, user_username, user_password, user_email, user_sshkey_name, user_sshkey_file, group_name, access_level)
+
+
+
+if __name__ == '__main__':
+ main()
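+
+# A behaviour-equivalent sketch (illustrative only) of the access_level
+# mapping in GitLabUser.addToGroup above; the dict name is made up:
+#
+#   ACCESS_LEVELS = {'guest': 10, 'reporter': 20, 'developer': 30,
+#                    'master': 40, 'owner': 50}
+#   assert ACCESS_LEVELS['developer'] == 30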
diff --git a/lib/ansible/modules/extras/storage/__init__.py b/lib/ansible/modules/extras/storage/__init__.py
new file mode 100644
index 0000000000..e69de29bb2
--- /dev/null
+++ b/lib/ansible/modules/extras/storage/__init__.py
diff --git a/lib/ansible/modules/extras/storage/netapp/README.md b/lib/ansible/modules/extras/storage/netapp/README.md
new file mode 100644
index 0000000000..8d5ab2fd4c
--- /dev/null
+++ b/lib/ansible/modules/extras/storage/netapp/README.md
@@ -0,0 +1,454 @@
+# NetApp Storage Modules
+This directory contains modules that support the storage platforms in the NetApp portfolio.
+
+## SANtricity Modules
+The modules prefixed with *netapp\_e* are built to support the SANtricity storage platform. They require the SANtricity
+WebServices Proxy. The WebServices Proxy is free software available at the [NetApp Software Download site](http://mysupport.netapp.com/NOW/download/software/eseries_webservices/1.40.X000.0009/).
+Starting with the E2800 platform (11.30 OS), the modules work directly with the storage array, which handles REST API
+requests on the box itself. Such arrays can still be managed by proxy for large-scale deployments.
+The modules provide idempotent provisioning for volume groups, disk pools, standard volumes, thin volumes, LUN mapping,
+hosts, host groups (clusters), volume snapshots, consistency groups, and asynchronous mirroring.
+### Prerequisites
+| Software | Version |
+| -------- |:-------:|
+| SANtricity Web Services Proxy* | 1.4 or 2.0 |
+| Ansible | 2.2** |
+
+\* Not required for *E2800 with 11.30 OS*<br/>
+\*\* The modules were developed with this version. Ansible forward and backward compatibility applies.
+
+### Questions and Contribution
+Please feel free to submit pull requests with improvements. Issues for these modules should be routed to @hulquest, but
+we also try to keep an eye on the list for anything specific to these modules. General questions can be sent to our [development team](mailto:ng-hsg-engcustomer-esolutions-support@netapp.com).
+
+### Examples
+These examples are not comprehensive but are intended to help you get started when integrating storage provisioning into
+your playbooks.
+```yml
+- name: NetApp Test All Modules
+ hosts: proxy20
+ gather_facts: yes
+ connection: local
+ vars:
+ storage_systems:
+ ansible1:
+ address1: "10.251.230.41"
+ address2: "10.251.230.42"
+ ansible2:
+ address1: "10.251.230.43"
+ address2: "10.251.230.44"
+ ansible3:
+ address1: "10.251.230.45"
+ address2: "10.251.230.46"
+ ansible4:
+ address1: "10.251.230.47"
+ address2: "10.251.230.48"
+ storage_pools:
+ Disk_Pool_1:
+ raid_level: raidDiskPool
+ criteria_drive_count: 11
+ Disk_Pool_2:
+ raid_level: raidDiskPool
+ criteria_drive_count: 11
+ Disk_Pool_3:
+ raid_level: raid0
+ criteria_drive_count: 2
+ volumes:
+ vol_1:
+ storage_pool_name: Disk_Pool_1
+ size: 10
+ thin_provision: false
+ thin_volume_repo_size: 7
+ vol_2:
+ storage_pool_name: Disk_Pool_2
+ size: 10
+ thin_provision: false
+ thin_volume_repo_size: 7
+ vol_3:
+ storage_pool_name: Disk_Pool_3
+ size: 10
+ thin_provision: false
+ thin_volume_repo_size: 7
+ thin_vol_1:
+ storage_pool_name: Disk_Pool_1
+ size: 10
+ thin_provision: true
+ thin_volume_repo_size: 7
+ hosts:
+ ANSIBLE-1:
+ host_type: 1
+ index: 1
+ ports:
+ - type: 'fc'
+ label: 'fpPort1'
+ port: '2100000E1E191B01'
+
+ netapp_api_host: 10.251.230.29
+ netapp_api_url: http://{{ netapp_api_host }}/devmgr/v2
+ netapp_api_username: rw
+ netapp_api_password: rw
+ ssid: ansible1
+ auth: no
+ lun_mapping: no
+ netapp_api_validate_certs: False
+ snapshot: no
+ gather_facts: no
+ amg_create: no
+ remove_volume: no
+ make_volume: no
+ check_thins: no
+ remove_storage_pool: yes
+ check_storage_pool: yes
+ remove_storage_system: no
+ check_storage_system: yes
+ change_role: no
+ flash_cache: False
+ configure_hostgroup: no
+ configure_async_mirror: False
+ configure_snapshot: no
+ copy_volume: False
+ volume_copy_source_volume_id:
+ volume_destination_source_volume_id:
+ snapshot_volume_storage_pool_name: Disk_Pool_3
+ snapshot_volume_image_id: 3400000060080E5000299B640063074057BC5C5E
+ snapshot_volume: no
+ snapshot_volume_name: vol_1_snap_vol
+ host_type_index: 1
+ host_name: ANSIBLE-1
+ set_host: no
+ remove_host: no
+ amg_member_target_array:
+ amg_member_primary_pool:
+ amg_member_secondary_pool:
+ amg_member_primary_volume:
+ amg_member_secondary_volume:
+ set_amg_member: False
+ amg_array_name: foo
+ amg_name: amg_made_by_ansible
+ amg_secondaryArrayId: ansible2
+ amg_sync_name: foo
+ amg_sync: no
+
+ tasks:
+
+ - name: Get array facts
+ netapp_e_facts:
+ ssid: "{{ item.key }}"
+ api_url: "{{ netapp_api_url }}"
+ api_username: "{{ netapp_api_username }}"
+ api_password: "{{ netapp_api_password }}"
+ validate_certs: "{{ netapp_api_validate_certs }}"
+ with_dict: "{{ storage_systems }}"
+ when: gather_facts
+
+ - name: Presence of storage system
+ netapp_e_storage_system:
+ ssid: "{{ item.key }}"
+ state: present
+ api_url: "{{ netapp_api_url }}"
+ api_username: "{{ netapp_api_username }}"
+ api_password: "{{ netapp_api_password }}"
+ validate_certs: "{{ netapp_api_validate_certs }}"
+ controller_addresses:
+ - "{{ item.value.address1 }}"
+ - "{{ item.value.address2 }}"
+ with_dict: "{{ storage_systems }}"
+ when: check_storage_system
+
+ - name: Create Snapshot
+ netapp_e_snapshot_images:
+ ssid: "{{ ssid }}"
+ api_url: "{{ netapp_api_url }}"
+ api_username: "{{ netapp_api_username }}"
+ api_password: "{{ netapp_api_password }}"
+ validate_certs: "{{ netapp_api_validate_certs }}"
+ snapshot_group: "ansible_snapshot_group"
+ state: 'create'
+ when: snapshot
+
+ - name: Auth Module Example
+ netapp_e_auth:
+ ssid: "{{ ssid }}"
+ current_password: 'Infinit2'
+ new_password: 'Infinit1'
+ set_admin: yes
+ api_url: "{{ netapp_api_url }}"
+ api_username: "{{ netapp_api_username }}"
+ api_password: "{{ netapp_api_password }}"
+ when: auth
+
+ - name: No disk groups
+ netapp_e_storagepool:
+ ssid: "{{ ssid }}"
+ name: "{{ item }}"
+ state: absent
+ api_url: "{{ netapp_api_url }}"
+ api_username: "{{ netapp_api_username }}"
+ api_password: "{{ netapp_api_password }}"
+ validate_certs: "{{ netapp_api_validate_certs }}"
+ remove_volumes: yes
+ with_items:
+ - Disk_Pool_1
+ - Disk_Pool_2
+ - Disk_Pool_3
+ when: remove_storage_pool
+
+ - name: Make disk groups
+ netapp_e_storagepool:
+ ssid: "{{ ssid }}"
+ name: "{{ item.key }}"
+ state: present
+ api_url: "{{ netapp_api_url }}"
+ api_username: "{{ netapp_api_username }}"
+ api_password: "{{ netapp_api_password }}"
+ validate_certs: "{{ netapp_api_validate_certs }}"
+ raid_level: "{{ item.value.raid_level }}"
+ criteria_drive_count: "{{ item.value.criteria_drive_count }}"
+ with_dict: " {{ storage_pools }}"
+ when: check_storage_pool
+
+ - name: No thin volume
+ netapp_e_volume:
+ ssid: "{{ ssid }}"
+ name: NewThinVolumeByAnsible
+ state: absent
+ thin_provision: yes
+ log_path: /tmp/volume.log
+ api_url: "{{ netapp_api_url }}"
+ api_username: "{{ netapp_api_username }}"
+ api_password: "{{ netapp_api_password }}"
+ validate_certs: "{{ netapp_api_validate_certs }}"
+ when: check_thins
+
+ - name: Make a thin volume
+ netapp_e_volume:
+ ssid: "{{ ssid }}"
+ name: NewThinVolumeByAnsible
+ state: present
+ thin_provision: yes
+ thin_volume_repo_size: 7
+ size: 10
+ log_path: /tmp/volume.log
+ api_url: "{{ netapp_api_url }}"
+ api_username: "{{ netapp_api_username }}"
+ api_password: "{{ netapp_api_password }}"
+ validate_certs: "{{ netapp_api_validate_certs }}"
+ storage_pool_name: Disk_Pool_1
+ when: check_thins
+
+ - name: Remove standard/thick volumes
+ netapp_e_volume:
+ ssid: "{{ ssid }}"
+ name: "{{ item.key }}"
+ state: absent
+ log_path: /tmp/volume.log
+ api_url: "{{ netapp_api_url }}"
+ api_username: "{{ netapp_api_username }}"
+ api_password: "{{ netapp_api_password }}"
+ validate_certs: "{{ netapp_api_validate_certs }}"
+ with_dict: "{{ volumes }}"
+ when: remove_volume
+
+ - name: Make a volume
+ netapp_e_volume:
+ ssid: "{{ ssid }}"
+ name: "{{ item.key }}"
+ state: present
+ storage_pool_name: "{{ item.value.storage_pool_name }}"
+ size: "{{ item.value.size }}"
+ thin_provision: "{{ item.value.thin_provision }}"
+ thin_volume_repo_size: "{{ item.value.thin_volume_repo_size }}"
+ log_path: /tmp/volume.log
+ api_url: "{{ netapp_api_url }}"
+ api_username: "{{ netapp_api_username }}"
+ api_password: "{{ netapp_api_password }}"
+ validate_certs: "{{ netapp_api_validate_certs }}"
+ with_dict: "{{ volumes }}"
+ when: make_volume
+
+ - name: No storage system
+ netapp_e_storage_system:
+ ssid: "{{ item.key }}"
+ state: absent
+ api_url: "{{ netapp_api_url }}"
+ api_username: "{{ netapp_api_username }}"
+ api_password: "{{ netapp_api_password }}"
+ validate_certs: "{{ netapp_api_validate_certs }}"
+ with_dict: "{{ storage_systems }}"
+ when: remove_storage_system
+
+ - name: Update the role of a storage array
+ netapp_e_amg_role:
+ name: "{{ amg_name }}"
+ role: primary
+ force: true
+ noSync: true
+ ssid: "{{ ssid }}"
+ api_url: "{{ netapp_api_url }}"
+ api_username: "{{ netapp_api_username }}"
+ api_password: "{{ netapp_api_password }}"
+ validate_certs: "{{ netapp_api_validate_certs }}"
+ when: change_role
+
+ - name: Flash Cache
+ netapp_e_flashcache:
+ ssid: "{{ ssid }}"
+ api_url: "{{ netapp_api_url }}"
+ api_username: "{{ netapp_api_username }}"
+ api_password: "{{ netapp_api_password }}"
+ validate_certs: "{{ netapp_api_validate_certs }}"
+ name: SSDCacheBuiltByAnsible
+ when: flash_cache
+
+ - name: Configure Hostgroup
+ netapp_e_hostgroup:
+ ssid: "{{ ssid }}"
+ api_url: "{{ netapp_api_url }}"
+ api_username: "{{ netapp_api_username }}"
+ api_password: "{{ netapp_api_password }}"
+ validate_certs: "{{ netapp_api_validate_certs }}"
+ state: absent
+ name: "ansible-host-group"
+ when: configure_hostgroup
+
+ - name: Configure Snapshot group
+ netapp_e_snapshot_group:
+ ssid: "{{ ssid }}"
+ state: present
+ api_url: "{{ netapp_api_url }}"
+ api_username: "{{ netapp_api_username }}"
+ api_password: "{{ netapp_api_password }}"
+ validate_certs: "{{ netapp_api_validate_certs }}"
+ base_volume_name: vol_3
+ name: ansible_snapshot_group
+ repo_pct: 20
+ warning_threshold: 85
+ delete_limit: 30
+ full_policy: purgepit
+ storage_pool_name: Disk_Pool_3
+ rollback_priority: medium
+ when: configure_snapshot
+
+ - name: Copy volume
+ netapp_e_volume_copy:
+ ssid: "{{ ssid }}"
+ api_url: "{{ netapp_api_url }}"
+ api_username: "{{ netapp_api_username }}"
+ api_password: "{{ netapp_api_password }}"
+ status: present
+ source_volume_id: "{{ volume_copy_source_volume_id }}"
+ destination_volume_id: "{{ volume_destination_source_volume_id }}"
+ when: copy_volume
+
+ - name: Snapshot volume
+ netapp_e_snapshot_volume:
+ ssid: "{{ ssid }}"
+ api_url: "{{ netapp_api_url }}"
+ api_username: "{{ netapp_api_username }}"
+ api_password: "{{ netapp_api_password }}"
+ state: present
+ storage_pool_name: "{{ snapshot_volume_storage_pool_name }}"
+ snapshot_image_id: "{{ snapshot_volume_image_id }}"
+ name: "{{ snapshot_volume_name }}"
+ when: snapshot_volume
+
+ - name: Remove hosts
+ netapp_e_host:
+ ssid: "{{ ssid }}"
+ state: absent
+ name: "{{ item.key }}"
+ api_url: "{{ netapp_api_url }}"
+ api_username: "{{ netapp_api_username }}"
+ api_password: "{{ netapp_api_password }}"
+ host_type_index: "{{ host_type_index }}"
+ with_dict: "{{hosts}}"
+ when: remove_host
+
+ - name: Ensure/add hosts
+ netapp_e_host:
+ ssid: "{{ ssid }}"
+ state: present
+ api_url: "{{ netapp_api_url }}"
+ api_username: "{{ netapp_api_username }}"
+ api_password: "{{ netapp_api_password }}"
+ name: "{{ item.key }}"
+ host_type_index: "{{ item.value.index }}"
+ ports:
+ - type: 'fc'
+ label: 'fpPort1'
+ port: '2100000E1E191B01'
+ with_dict: "{{hosts}}"
+ when: set_host
+
+ - name: Unmap a volume
+ netapp_e_lun_mapping:
+ state: absent
+ ssid: "{{ ssid }}"
+ lun: 2
+ target: "{{ host_name }}"
+ volume_name: "thin_vol_1"
+ target_type: host
+ api_url: "{{ netapp_api_url }}"
+ api_username: "{{ netapp_api_username }}"
+ api_password: "{{ netapp_api_password }}"
+ when: lun_mapping
+
+ - name: Map a volume
+ netapp_e_lun_mapping:
+ state: present
+ ssid: "{{ ssid }}"
+ lun: 16
+ target: "{{ host_name }}"
+ volume_name: "thin_vol_1"
+ target_type: host
+ api_url: "{{ netapp_api_url }}"
+ api_username: "{{ netapp_api_username }}"
+ api_password: "{{ netapp_api_password }}"
+ when: lun_mapping
+
+ - name: Update LUN Id
+ netapp_e_lun_mapping:
+ state: present
+ ssid: "{{ ssid }}"
+ lun: 2
+ target: "{{ host_name }}"
+ volume_name: "thin_vol_1"
+ target_type: host
+ api_url: "{{ netapp_api_url }}"
+ api_username: "{{ netapp_api_username }}"
+ api_password: "{{ netapp_api_password }}"
+ when: lun_mapping
+
+ - name: AMG removal
+ netapp_e_amg:
+ state: absent
+ ssid: "{{ ssid }}"
+ secondaryArrayId: "{{amg_secondaryArrayId}}"
+ api_url: "{{ netapp_api_url }}"
+ api_username: "{{ netapp_api_username }}"
+ api_password: "{{ netapp_api_password }}"
+ new_name: "{{amg_array_name}}"
+ name: "{{amg_name}}"
+ when: amg_create
+
+ - name: AMG create
+ netapp_e_amg:
+ state: present
+ ssid: "{{ ssid }}"
+ secondaryArrayId: "{{amg_secondaryArrayId}}"
+ api_url: "{{ netapp_api_url }}"
+ api_username: "{{ netapp_api_username }}"
+ api_password: "{{ netapp_api_password }}"
+ new_name: "{{amg_array_name}}"
+ name: "{{amg_name}}"
+ when: amg_create
+
+ - name: start AMG async
+ netapp_e_amg_sync:
+ name: "{{ amg_name }}"
+ state: running
+ ssid: "{{ ssid }}"
+ api_url: "{{ netapp_api_url }}"
+ api_username: "{{ netapp_api_username }}"
+ api_password: "{{ netapp_api_password }}"
+ when: amg_sync
+```
diff --git a/lib/ansible/modules/extras/storage/netapp/__init__.py b/lib/ansible/modules/extras/storage/netapp/__init__.py
new file mode 100644
index 0000000000..e69de29bb2
--- /dev/null
+++ b/lib/ansible/modules/extras/storage/netapp/__init__.py
diff --git a/lib/ansible/modules/extras/storage/netapp/netapp_e_amg.py b/lib/ansible/modules/extras/storage/netapp/netapp_e_amg.py
new file mode 100644
index 0000000000..44189988be
--- /dev/null
+++ b/lib/ansible/modules/extras/storage/netapp/netapp_e_amg.py
@@ -0,0 +1,328 @@
+#!/usr/bin/python
+# (c) 2016, NetApp, Inc
+#
+# This file is part of Ansible
+#
+# Ansible is free software: you can redistribute it and/or modify
+# it under the terms of the GNU General Public License as published by
+# the Free Software Foundation, either version 3 of the License, or
+# (at your option) any later version.
+#
+# Ansible is distributed in the hope that it will be useful,
+# but WITHOUT ANY WARRANTY; without even the implied warranty of
+# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+# GNU General Public License for more details.
+#
+# You should have received a copy of the GNU General Public License
+# along with Ansible. If not, see <http://www.gnu.org/licenses/>.
+#
+DOCUMENTATION = """
+---
+module: netapp_e_amg
+short_description: Create, Remove, and Update Asynchronous Mirror Groups
+description:
+ - Allows for the creation, removal and updating of Asynchronous Mirror Groups for NetApp E-series storage arrays
+version_added: '2.2'
+author: Kevin Hulquest (@hulquest)
+options:
+ api_username:
+ required: true
+ description:
+ - The username to authenticate with the SANtricity WebServices Proxy or embedded REST API.
+ api_password:
+ required: true
+ description:
+ - The password to authenticate with the SANtricity WebServices Proxy or embedded REST API.
+ api_url:
+ required: true
+ description:
+ - The url to the SANtricity WebServices Proxy or embedded REST API.
+ example:
+ - https://prod-1.wahoo.acme.com/devmgr/v2
+ validate_certs:
+ required: false
+ default: true
+ description:
+ - Should https certificates be validated?
+ name:
+ description:
+ - The name of the async array you wish to target, or create.
+ - If C(state) is present and the name isn't found, it will attempt to create it.
+ required: yes
+ secondaryArrayId:
+ description:
+ - The ID of the secondary array to be used in the mirroring process
+ required: yes
+ syncIntervalMinutes:
+ description:
+ - The synchronization interval in minutes
+ required: no
+ default: 10
+ manualSync:
+ description:
+ - Setting this to true will cause other synchronization values to be ignored
+ required: no
+ default: no
+ recoveryWarnThresholdMinutes:
+ description:
+ - Recovery point warning threshold (minutes). The user will be warned when the age of the last good recovery point exceeds this value
+ required: no
+ default: 20
+ repoUtilizationWarnThreshold:
+ description:
+ - Repository utilization warning threshold (percentage)
+ required: no
+ default: 80
+ interfaceType:
+ description:
+ - The intended protocol to use if both Fibre and iSCSI are available.
+ choices:
+ - iscsi
+ - fibre
+ required: no
+ default: null
+ syncWarnThresholdMinutes:
+ description:
+ - The threshold (in minutes) for notifying the user that periodic synchronization has taken too long to complete.
+ required: no
+ default: 10
+ ssid:
+ description:
+ - The ID of the primary storage array for the async mirror action
+ required: yes
+ state:
+ description:
+ - A C(state) of present will either create or update the async mirror group.
+ - A C(state) of absent will remove the async mirror group.
+ required: yes
+"""
+
+EXAMPLES = """
+ - name: AMG removal
+ netapp_e_amg:
+ state: absent
+ ssid: "{{ ssid }}"
+ secondaryArrayId: "{{amg_secondaryArrayId}}"
+ api_url: "{{ netapp_api_url }}"
+ api_username: "{{ netapp_api_username }}"
+ api_password: "{{ netapp_api_password }}"
+ name: "{{amg_name}}"
+ when: amg_remove
+
+ - name: AMG create
+ netapp_e_amg:
+ state: present
+ ssid: "{{ ssid }}"
+ secondaryArrayId: "{{amg_secondaryArrayId}}"
+ api_url: "{{ netapp_api_url }}"
+ api_username: "{{ netapp_api_username }}"
+ api_password: "{{ netapp_api_password }}"
+ new_name: "{{amg_array_name}}"
+ name: "{{amg_name}}"
+ when: amg_create
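+
+ # Illustrative variation (placeholder values, a sketch): with state present
+ # and a matching name, differing settings are applied as an in-place update
+ - name: AMG update
+ netapp_e_amg:
+ state: present
+ ssid: "{{ ssid }}"
+ secondaryArrayId: "{{amg_secondaryArrayId}}"
+ api_url: "{{ netapp_api_url }}"
+ api_username: "{{ netapp_api_username }}"
+ api_password: "{{ netapp_api_password }}"
+ syncIntervalMinutes: 30
+ name: "{{amg_name}}"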
+"""
+
+RETURN = """
+msg:
+ description: Successful removal
+ returned: success
+ type: string
+ sample: "Async mirror group removed."
+
+msg:
+ description: Successful creation
+ returned: success
+ type: string
+ sample: '{"changed": true, "connectionType": "fc", "groupRef": "3700000060080E5000299C24000006E857AC7EEC", "groupState": "optimal", "id": "3700000060080E5000299C24000006E857AC7EEC", "label": "amg_made_by_ansible", "localRole": "primary", "mirrorChannelRemoteTarget": "9000000060080E5000299C24005B06E557AC7EEC", "orphanGroup": false, "recoveryPointAgeAlertThresholdMinutes": 20, "remoteRole": "secondary", "remoteTarget": {"nodeName": {"ioInterfaceType": "fc", "iscsiNodeName": null, "remoteNodeWWN": "20040080E5299F1C"}, "remoteRef": "9000000060080E5000299C24005B06E557AC7EEC", "scsiinitiatorTargetBaseProperties": {"ioInterfaceType": "fc", "iscsiinitiatorTargetBaseParameters": null}}, "remoteTargetId": "ansible2", "remoteTargetName": "Ansible2", "remoteTargetWwn": "60080E5000299F880000000056A25D56", "repositoryUtilizationWarnThreshold": 80, "roleChangeProgress": "none", "syncActivity": "idle", "syncCompletionTimeAlertThresholdMinutes": 10, "syncIntervalMinutes": 10, "worldWideName": "60080E5000299C24000006E857AC7EEC"}'
+"""
+
+import json
+
+from ansible.module_utils.api import basic_auth_argument_spec
+from ansible.module_utils.basic import AnsibleModule, get_exception
+from ansible.module_utils.urls import open_url
+from ansible.module_utils.six.moves.urllib.error import HTTPError
+
+HEADERS = {
+ "Content-Type": "application/json",
+ "Accept": "application/json",
+}
+
+
+def request(url, data=None, headers=None, method='GET', use_proxy=True,
+ force=False, last_mod_time=None, timeout=10, validate_certs=True,
+ url_username=None, url_password=None, http_agent=None, force_basic_auth=False, ignore_errors=False):
+ try:
+ r = open_url(url=url, data=data, headers=headers, method=method, use_proxy=use_proxy,
+ force=force, last_mod_time=last_mod_time, timeout=timeout, validate_certs=validate_certs,
+ url_username=url_username, url_password=url_password, http_agent=http_agent,
+ force_basic_auth=force_basic_auth)
+ except HTTPError:
+ err = get_exception()
+ r = err.fp
+
+ try:
+ raw_data = r.read()
+ if raw_data:
+ data = json.loads(raw_data)
+ else:
+ data = None
+ except:
+ if ignore_errors:
+ pass
+ else:
+ raise Exception(raw_data)
+
+ resp_code = r.getcode()
+
+ if resp_code >= 400 and not ignore_errors:
+ raise Exception(resp_code, data)
+ else:
+ return resp_code, data
+
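+# Illustrative usage of the helper above (a sketch, not part of the module
+# flow; the proxy URL and credentials are hypothetical placeholders):
+#
+# rc, amgs = request('https://proxy.example.com/devmgr/v2/storage-systems/1/async-mirrors',
+# url_username='admin', url_password='admin', headers=HEADERS)
+# if rc == 200:
+# print([amg['label'] for amg in amgs])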
+
+def has_match(module, ssid, api_url, api_pwd, api_usr, body):
+ compare_keys = ['syncIntervalMinutes', 'syncWarnThresholdMinutes',
+ 'recoveryWarnThresholdMinutes', 'repoUtilizationWarnThreshold']
+ desired_state = dict((x, (body.get(x))) for x in compare_keys)
+ label_exists = False
+ matches_spec = False
+ current_state = None
+ async_id = None
+ api_data = None
+ desired_name = body.get('name')
+ endpoint = 'storage-systems/%s/async-mirrors' % ssid
+ url = api_url + endpoint
+ try:
+ rc, data = request(url, url_username=api_usr, url_password=api_pwd, headers=HEADERS)
+ except Exception:
+ error = get_exception()
+ module.exit_json(exception="Error finding a match. Message: %s" % str(error))
+
+ for async_group in data:
+ if async_group['label'] == desired_name:
+ label_exists = True
+ api_data = async_group
+ async_id = async_group['groupRef']
+ current_state = dict(
+ syncIntervalMinutes=async_group['syncIntervalMinutes'],
+ syncWarnThresholdMinutes=async_group['syncCompletionTimeAlertThresholdMinutes'],
+ recoveryWarnThresholdMinutes=async_group['recoveryPointAgeAlertThresholdMinutes'],
+ repoUtilizationWarnThreshold=async_group['repositoryUtilizationWarnThreshold'],
+ )
+
+ if current_state == desired_state:
+ matches_spec = True
+
+ return label_exists, matches_spec, api_data, async_id
+
+
+def create_async(module, ssid, api_url, api_pwd, api_usr, body):
+ endpoint = 'storage-systems/%s/async-mirrors' % ssid
+ url = api_url + endpoint
+ post_data = json.dumps(body)
+ try:
+ rc, data = request(url, data=post_data, method='POST', url_username=api_usr, url_password=api_pwd,
+ headers=HEADERS)
+ except Exception:
+ error = get_exception()
+ module.exit_json(exception="Exception while creating aysnc mirror group. Message: %s" % str(error))
+ return data
+
+
+def update_async(module, ssid, api_url, pwd, user, body, new_name, async_id):
+ endpoint = 'storage-systems/%s/async-mirrors/%s' % (ssid, async_id)
+ url = api_url + endpoint
+ compare_keys = ['syncIntervalMinutes', 'syncWarnThresholdMinutes',
+ 'recoveryWarnThresholdMinutes', 'repoUtilizationWarnThreshold']
+ desired_state = dict((x, (body.get(x))) for x in compare_keys)
+
+ if new_name:
+ desired_state['new_name'] = new_name
+
+ post_data = json.dumps(desired_state)
+
+ try:
+ rc, data = request(url, data=post_data, method='POST', headers=HEADERS,
+ url_username=user, url_password=pwd)
+ except Exception:
+ error = get_exception()
+ module.exit_json(exception="Exception while updating async mirror group. Message: %s" % str(error))
+
+ return data
+
+
+def remove_amg(module, ssid, api_url, pwd, user, async_id):
+ endpoint = 'storage-systems/%s/async-mirrors/%s' % (ssid, async_id)
+ url = api_url + endpoint
+ try:
+ rc, data = request(url, method='DELETE', url_username=user, url_password=pwd,
+ headers=HEADERS)
+ except Exception:
+ error = get_exception()
+ module.exit_json(exception="Exception while removing async mirror group. Message: %s" % str(error))
+
+ return
+
+
+def main():
+ argument_spec = basic_auth_argument_spec()
+ argument_spec.update(dict(
+ api_username=dict(type='str', required=True),
+ api_password=dict(type='str', required=True, no_log=True),
+ api_url=dict(type='str', required=True),
+ name=dict(required=True, type='str'),
+ new_name=dict(required=False, type='str'),
+ secondaryArrayId=dict(required=True, type='str'),
+ syncIntervalMinutes=dict(required=False, default=10, type='int'),
+ manualSync=dict(required=False, default=False, type='bool'),
+ recoveryWarnThresholdMinutes=dict(required=False, default=20, type='int'),
+ repoUtilizationWarnThreshold=dict(required=False, default=80, type='int'),
+ interfaceType=dict(required=False, choices=['fibre', 'iscsi'], type='str'),
+ ssid=dict(required=True, type='str'),
+ state=dict(required=True, choices=['present', 'absent']),
+ syncWarnThresholdMinutes=dict(required=False, default=10, type='int')
+ ))
+
+ module = AnsibleModule(argument_spec=argument_spec)
+
+ p = module.params
+
+ ssid = p.pop('ssid')
+ api_url = p.pop('api_url')
+ user = p.pop('api_username')
+ pwd = p.pop('api_password')
+ new_name = p.pop('new_name')
+ state = p.pop('state')
+
+ if not api_url.endswith('/'):
+ api_url += '/'
+
+ name_exists, spec_matches, api_data, async_id = has_match(module, ssid, api_url, pwd, user, p)
+
+ if state == 'present':
+ if name_exists and spec_matches:
+ module.exit_json(changed=False, msg="Desired state met", **api_data)
+ elif name_exists and not spec_matches:
+ results = update_async(module, ssid, api_url, pwd, user,
+ p, new_name, async_id)
+ module.exit_json(changed=True,
+ msg="Async mirror group updated", async_id=async_id,
+ **results)
+ elif not name_exists:
+ results = create_async(module, ssid, api_url, pwd, user, p)
+ module.exit_json(changed=True, **results)
+
+ elif state == 'absent':
+ if name_exists:
+ remove_amg(module, ssid, api_url, pwd, user, async_id)
+ module.exit_json(changed=True, msg="Async mirror group removed.",
+ async_id=async_id)
+ else:
+ module.exit_json(changed=False,
+ msg="Async Mirror group: %s already absent" % p['name'])
+
+
+if __name__ == '__main__':
+ main()
diff --git a/lib/ansible/modules/extras/storage/netapp/netapp_e_amg_role.py b/lib/ansible/modules/extras/storage/netapp/netapp_e_amg_role.py
new file mode 100644
index 0000000000..7a2f1bdf18
--- /dev/null
+++ b/lib/ansible/modules/extras/storage/netapp/netapp_e_amg_role.py
@@ -0,0 +1,239 @@
+#!/usr/bin/python
+
+# (c) 2016, NetApp, Inc
+#
+# This file is part of Ansible
+#
+# Ansible is free software: you can redistribute it and/or modify
+# it under the terms of the GNU General Public License as published by
+# the Free Software Foundation, either version 3 of the License, or
+# (at your option) any later version.
+#
+# Ansible is distributed in the hope that it will be useful,
+# but WITHOUT ANY WARRANTY; without even the implied warranty of
+# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+# GNU General Public License for more details.
+#
+# You should have received a copy of the GNU General Public License
+# along with Ansible. If not, see <http://www.gnu.org/licenses/>.
+#
+DOCUMENTATION = """
+---
+module: netapp_e_amg_role
+short_description: Update the role of a storage array within an Asynchronous Mirror Group (AMG).
+description:
+ - Update a storage array to become the primary or secondary instance in an asynchronous mirror group
+version_added: '2.2'
+author: Kevin Hulquest (@hulquest)
+options:
+ api_username:
+ required: true
+ description:
+ - The username to authenticate with the SANtricity WebServices Proxy or embedded REST API.
+ api_password:
+ required: true
+ description:
+ - The password to authenticate with the SANtricity WebServices Proxy or embedded REST API.
+ api_url:
+ required: true
+ description:
+ - The url to the SANtricity WebServices Proxy or embedded REST API.
+ example:
+ - https://prod-1.wahoo.acme.com/devmgr/v2
+ validate_certs:
+ required: false
+ default: true
+ description:
+ - Should https certificates be validated?
+ ssid:
+ description:
+ - The ID of the primary storage array for the async mirror action
+ required: yes
+ role:
+ description:
+ - Whether the array should be the primary or secondary array for the AMG
+ required: yes
+ choices: ['primary', 'secondary']
+ noSync:
+ description:
+ - Whether to avoid synchronization prior to role reversal
+ required: no
+ default: no
+ choices: [yes, no]
+ force:
+ description:
+ - Whether to force the role reversal regardless of the online-state of the primary
+ required: no
+ default: no
+"""
+
+EXAMPLES = """
+ - name: Update the role of a storage array
+ netapp_e_amg_role:
+ name: updating amg role
+ role: primary
+ ssid: "{{ ssid }}"
+ api_url: "{{ netapp_api_url }}"
+ api_username: "{{ netapp_api_username }}"
+ api_password: "{{ netapp_api_password }}"
+ validate_certs: "{{ netapp_api_validate_certs }}"
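+
+ # Illustrative variation (placeholder values, a sketch): demote the array
+ # without a final synchronization, forcing the change if needed
+ - name: Force role reversal to secondary
+ netapp_e_amg_role:
+ name: updating amg role
+ role: secondary
+ noSync: yes
+ force: yes
+ ssid: "{{ ssid }}"
+ api_url: "{{ netapp_api_url }}"
+ api_username: "{{ netapp_api_username }}"
+ api_password: "{{ netapp_api_password }}"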
+"""
+
+RETURN = """
+msg:
+ description: Failure message
+ returned: failure
+ type: string
+ sample: "No Async Mirror Group with the name."
+"""
+import json
+
+from ansible.module_utils.api import basic_auth_argument_spec
+from ansible.module_utils.basic import AnsibleModule
+
+from ansible.module_utils.pycompat24 import get_exception
+from ansible.module_utils.urls import open_url
+from ansible.module_utils.six.moves.urllib.error import HTTPError
+
+HEADERS = {
+ "Content-Type": "application/json",
+ "Accept": "application/json",
+}
+
+
+def request(url, data=None, headers=None, method='GET', use_proxy=True,
+ force=False, last_mod_time=None, timeout=10, validate_certs=True,
+ url_username=None, url_password=None, http_agent=None, force_basic_auth=True, ignore_errors=False):
+ try:
+ r = open_url(url=url, data=data, headers=headers, method=method, use_proxy=use_proxy,
+ force=force, last_mod_time=last_mod_time, timeout=timeout, validate_certs=validate_certs,
+ url_username=url_username, url_password=url_password, http_agent=http_agent,
+ force_basic_auth=force_basic_auth)
+ except HTTPError:
+ err = get_exception()
+ r = err.fp
+
+ try:
+ raw_data = r.read()
+ if raw_data:
+ data = json.loads(raw_data)
+ else:
+ data = None
+ except:
+ if ignore_errors:
+ pass
+ else:
+ raise Exception(raw_data)
+
+ resp_code = r.getcode()
+
+ if resp_code >= 400 and not ignore_errors:
+ raise Exception(resp_code, data)
+ else:
+ return resp_code, data
+
+
+def has_match(module, ssid, api_url, api_pwd, api_usr, body, name):
+ amg_exists = False
+ has_desired_role = False
+ amg_id = None
+ amg_data = None
+ get_amgs = 'storage-systems/%s/async-mirrors' % ssid
+ url = api_url + get_amgs
+ try:
+ amg_rc, amgs = request(url, url_username=api_usr, url_password=api_pwd,
+ headers=HEADERS)
+ except:
+ module.fail_json(msg="Failed to find AMGs on storage array. Id [%s]" % (ssid))
+
+ for amg in amgs:
+ if amg['label'] == name:
+ amg_exists = True
+ amg_id = amg['id']
+ amg_data = amg
+ if amg['localRole'] == body.get('role'):
+ has_desired_role = True
+
+ return amg_exists, has_desired_role, amg_id, amg_data
+
+
+def update_amg(module, ssid, api_url, api_usr, api_pwd, body, amg_id):
+ endpoint = 'storage-systems/%s/async-mirrors/%s/role' % (ssid, amg_id)
+ url = api_url + endpoint
+ post_data = json.dumps(body)
+ try:
+ request(url, data=post_data, method='POST', url_username=api_usr,
+ url_password=api_pwd, headers=HEADERS)
+ except:
+ err = get_exception()
+ module.fail_json(
+ msg="Failed to change role of AMG. Id [%s]. AMG Id [%s]. Error [%s]" % (ssid, amg_id, str(err)))
+
+ status_endpoint = 'storage-systems/%s/async-mirrors/%s' % (ssid, amg_id)
+ status_url = api_url + status_endpoint
+ try:
+ rc, status = request(status_url, method='GET', url_username=api_usr,
+ url_password=api_pwd, headers=HEADERS)
+ except:
+ err = get_exception()
+ module.fail_json(
+ msg="Failed to check status of AMG after role reversal. " +
+ "Id [%s]. AMG Id [%s]. Error [%s]" % (ssid, amg_id, str(err)))
+
+ # Here we wait for the role reversal to complete
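+ # NOTE: this polls the status endpoint in a tight loop with no delay between
+ # requests; a long role reversal will generate many back-to-back GETs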
+ if 'roleChangeProgress' in status:
+ while status['roleChangeProgress'] != "none":
+ try:
+ rc, status = request(status_url, method='GET',
+ url_username=api_usr, url_password=api_pwd, headers=HEADERS)
+ except:
+ err = get_exception()
+ module.fail_json(
+ msg="Failed to check status of AMG after role reversal. " +
+ "Id [%s]. AMG Id [%s]. Error [%s]" % (ssid, amg_id, str(err)))
+ return status
+
+
+def main():
+ argument_spec = basic_auth_argument_spec()
+ argument_spec.update(dict(
+ name=dict(required=True, type='str'),
+ role=dict(required=True, choices=['primary', 'secondary']),
+ noSync=dict(required=False, type='bool', default=False),
+ force=dict(required=False, type='bool', default=False),
+ ssid=dict(required=True, type='str'),
+ api_url=dict(required=True),
+ api_username=dict(required=False),
+ api_password=dict(required=False, no_log=True),
+ ))
+
+ module = AnsibleModule(argument_spec=argument_spec)
+
+ p = module.params
+
+ ssid = p.pop('ssid')
+ api_url = p.pop('api_url')
+ user = p.pop('api_username')
+ pwd = p.pop('api_password')
+ name = p.pop('name')
+
+ if not api_url.endswith('/'):
+ api_url += '/'
+
+ amg_exists, has_desired_role, async_id, amg_data = has_match(module, ssid, api_url, pwd, user, p, name)
+
+ if not amg_exists:
+ module.fail_json(msg="No Async Mirror Group with the name: '%s' was found" % name)
+ elif has_desired_role:
+ module.exit_json(changed=False, **amg_data)
+
+ else:
+ amg_data = update_amg(module, ssid, api_url, user, pwd, p, async_id)
+ if amg_data:
+ module.exit_json(changed=True, **amg_data)
+ else:
+ module.exit_json(changed=True, msg="AMG role changed.")
+
+
+if __name__ == '__main__':
+ main()
diff --git a/lib/ansible/modules/extras/storage/netapp/netapp_e_amg_sync.py b/lib/ansible/modules/extras/storage/netapp/netapp_e_amg_sync.py
new file mode 100644
index 0000000000..a86b594f3b
--- /dev/null
+++ b/lib/ansible/modules/extras/storage/netapp/netapp_e_amg_sync.py
@@ -0,0 +1,269 @@
+#!/usr/bin/python
+
+# (c) 2016, NetApp, Inc
+#
+# This file is part of Ansible
+#
+# Ansible is free software: you can redistribute it and/or modify
+# it under the terms of the GNU General Public License as published by
+# the Free Software Foundation, either version 3 of the License, or
+# (at your option) any later version.
+#
+# Ansible is distributed in the hope that it will be useful,
+# but WITHOUT ANY WARRANTY; without even the implied warranty of
+# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+# GNU General Public License for more details.
+#
+# You should have received a copy of the GNU General Public License
+# along with Ansible. If not, see <http://www.gnu.org/licenses/>.
+#
+DOCUMENTATION = """
+---
+module: netapp_e_amg_sync
+short_description: Conduct synchronization actions on asynchronous mirror groups.
+description:
+ - Allows for the initialization, suspension and resumption of an asynchronous mirror group's synchronization for NetApp E-series storage arrays.
+version_added: '2.2'
+author: Kevin Hulquest (@hulquest)
+options:
+ api_username:
+ required: true
+ description:
+ - The username to authenticate with the SANtricity WebServices Proxy or embedded REST API.
+ api_password:
+ required: true
+ description:
+ - The password to authenticate with the SANtricity WebServices Proxy or embedded REST API.
+ api_url:
+ required: true
+ description:
+ - The url to the SANtricity WebServices Proxy or embedded REST API.
+ example:
+ - https://prod-1.wahoo.acme.com/devmgr/v2
+ validate_certs:
+ required: false
+ default: true
+ description:
+ - Should https certificates be validated?
+ ssid:
+ description:
+ - The ID of the storage array containing the AMG you wish to target
+ name:
+ description:
+ - The name of the async mirror group you wish to target
+ required: yes
+ state:
+ description:
+ - The synchronization action you'd like to take.
+ - If C(running), the module will begin syncing if there is no active sync, or will resume a suspended sync. If a sync is already in progress, it will return with an OK status.
+ - If C(suspended), it will suspend any ongoing sync action, but return OK if there is no active sync or if the sync is already suspended.
+ choices:
+ - running
+ - suspended
+ required: yes
+ delete_recovery_point:
+ description:
+ - Indicates whether the recovery point can be deleted on the secondary if necessary to achieve the synchronization.
+ - If true, and if the amount of unsynchronized data exceeds the CoW repository capacity on the secondary for any member volume, the last recovery point will be deleted and synchronization will continue.
+ - If false, the synchronization will be suspended if the amount of unsynchronized data exceeds the CoW repository capacity on the secondary, and the recovery point will be preserved.
+ - "NOTE: This only has impact for newly launched syncs."
+ choices:
+ - yes
+ - no
+ default: no
+"""
+EXAMPLES = """
+ - name: start AMG async
+ netapp_e_amg_sync:
+ name: "{{ amg_sync_name }}"
+ state: running
+ ssid: "{{ ssid }}"
+ api_url: "{{ netapp_api_url }}"
+ api_username: "{{ netapp_api_username }}"
+ api_password: "{{ netapp_api_password }}"
+"""
+RETURN = """
+json:
+ description: The object attributes of the AMG.
+ returned: success
+ type: string
+ sample:
+ {
+ "changed": false,
+ "connectionType": "fc",
+ "groupRef": "3700000060080E5000299C24000006EF57ACAC70",
+ "groupState": "optimal",
+ "id": "3700000060080E5000299C24000006EF57ACAC70",
+ "label": "made_with_ansible",
+ "localRole": "primary",
+ "mirrorChannelRemoteTarget": "9000000060080E5000299C24005B06E557AC7EEC",
+ "orphanGroup": false,
+ "recoveryPointAgeAlertThresholdMinutes": 20,
+ "remoteRole": "secondary",
+ "remoteTarget": {
+ "nodeName": {
+ "ioInterfaceType": "fc",
+ "iscsiNodeName": null,
+ "remoteNodeWWN": "20040080E5299F1C"
+ },
+ "remoteRef": "9000000060080E5000299C24005B06E557AC7EEC",
+ "scsiinitiatorTargetBaseProperties": {
+ "ioInterfaceType": "fc",
+ "iscsiinitiatorTargetBaseParameters": null
+ }
+ },
+ "remoteTargetId": "ansible2",
+ "remoteTargetName": "Ansible2",
+ "remoteTargetWwn": "60080E5000299F880000000056A25D56",
+ "repositoryUtilizationWarnThreshold": 80,
+ "roleChangeProgress": "none",
+ "syncActivity": "idle",
+ "syncCompletionTimeAlertThresholdMinutes": 10,
+ "syncIntervalMinutes": 10,
+ "worldWideName": "60080E5000299C24000006EF57ACAC70"
+ }
+"""
+import json
+
+from ansible.module_utils.api import basic_auth_argument_spec
+from ansible.module_utils.basic import AnsibleModule
+
+from ansible.module_utils.pycompat24 import get_exception
+from ansible.module_utils.urls import open_url
+from ansible.module_utils.six.moves.urllib.error import HTTPError
+
+
+def request(url, data=None, headers=None, method='GET', use_proxy=True,
+ force=False, last_mod_time=None, timeout=10, validate_certs=True,
+ url_username=None, url_password=None, http_agent=None, force_basic_auth=True, ignore_errors=False):
+ try:
+ r = open_url(url=url, data=data, headers=headers, method=method, use_proxy=use_proxy,
+ force=force, last_mod_time=last_mod_time, timeout=timeout, validate_certs=validate_certs,
+ url_username=url_username, url_password=url_password, http_agent=http_agent,
+ force_basic_auth=force_basic_auth)
+ except HTTPError:
+ err = get_exception()
+ r = err.fp
+
+ try:
+ raw_data = r.read()
+ if raw_data:
+ data = json.loads(raw_data)
+ else:
+ data = None
+ except:
+ if ignore_errors:
+ pass
+ else:
+ raise Exception(raw_data)
+
+ resp_code = r.getcode()
+
+ if resp_code >= 400 and not ignore_errors:
+ raise Exception(resp_code, data)
+ else:
+ return resp_code, data
+
+
+class AMGsync(object):
+ def __init__(self):
+ argument_spec = basic_auth_argument_spec()
+ argument_spec.update(dict(
+ api_username=dict(type='str', required=True),
+ api_password=dict(type='str', required=True, no_log=True),
+ api_url=dict(type='str', required=True),
+ name=dict(required=True, type='str'),
+ ssid=dict(required=True, type='str'),
+ state=dict(required=True, type='str', choices=['running', 'suspended']),
+ delete_recovery_point=dict(required=False, type='bool', default=False)
+ ))
+ self.module = AnsibleModule(argument_spec=argument_spec)
+ args = self.module.params
+ self.name = args['name']
+ self.ssid = args['ssid']
+ self.state = args['state']
+ self.delete_recovery_point = args['delete_recovery_point']
+ try:
+ self.user = args['api_username']
+ self.pwd = args['api_password']
+ self.url = args['api_url']
+ except KeyError:
+ self.module.fail_json(msg="You must pass in api_username"
+ "and api_password and api_url to the module.")
+ self.certs = args['validate_certs']
+
+ self.post_headers = {
+ "Accept": "application/json",
+ "Content-Type": "application/json"
+ }
+ self.amg_id, self.amg_obj = self.get_amg()
+
+ def get_amg(self):
+ endpoint = self.url + '/storage-systems/%s/async-mirrors' % self.ssid
+ (rc, amg_objs) = request(endpoint, url_username=self.user, url_password=self.pwd, validate_certs=self.certs,
+ headers=self.post_headers)
+ try:
+ # filter() returns an iterator on Python 3, so take the first match from a list instead
+ amg_obj = [d for d in amg_objs if d['label'] == self.name][0]
+ amg_id = amg_obj['id']
+ except IndexError:
+ self.module.fail_json(
+ msg="There is no async mirror group %s associated with storage array %s" % (self.name, self.ssid))
+ return amg_id, amg_obj
+
+ @property
+ def current_state(self):
+ amg_id, amg_obj = self.get_amg()
+ return amg_obj['syncActivity']
+
+ def run_sync_action(self):
+ # If we get to this point we know that the states differ, and there is no 'err' state,
+ # so no need to revalidate
+
+ post_body = dict()
+ if self.state == 'running':
+ if self.current_state == 'idle':
+ if self.delete_recovery_point:
+ post_body.update(dict(deleteRecoveryPointIfNecessary=self.delete_recovery_point))
+ suffix = 'sync'
+ else:
+ # In a suspended state
+ suffix = 'resume'
+ else:
+ suffix = 'suspend'
+
+ endpoint = self.url + "/storage-systems/%s/async-mirrors/%s/%s" % (self.ssid, self.amg_id, suffix)
+
+ (rc, resp) = request(endpoint, method='POST', url_username=self.user, url_password=self.pwd,
+ validate_certs=self.certs, data=json.dumps(post_body), headers=self.post_headers,
+ ignore_errors=True)
+
+ if not str(rc).startswith('2'):
+ self.module.fail_json(msg=str(resp['errorMessage']))
+
+ return resp
+
+ def apply(self):
+ state_map = dict(
+ running=['active'],
+ suspended=['userSuspended', 'internallySuspended', 'paused'],
+ err=['unknown', '_UNDEFINED'])
+
+ if self.current_state not in state_map[self.state]:
+ if self.current_state in state_map['err']:
+ self.module.fail_json(
+ msg="The sync is a state of '%s', this requires manual intervention. " +
+ "Please investigate and try again" % self.current_state)
+ else:
+ self.amg_obj = self.run_sync_action()
+
+ (ret, amg) = self.get_amg()
+ self.module.exit_json(changed=False, **amg)
+
+
+def main():
+ sync = AMGsync()
+ sync.apply()
+
+
+if __name__ == '__main__':
+ main()
diff --git a/lib/ansible/modules/extras/storage/netapp/netapp_e_auth.py b/lib/ansible/modules/extras/storage/netapp/netapp_e_auth.py
new file mode 100644
index 0000000000..36fd7919dc
--- /dev/null
+++ b/lib/ansible/modules/extras/storage/netapp/netapp_e_auth.py
@@ -0,0 +1,269 @@
+#!/usr/bin/python
+
+# (c) 2016, NetApp, Inc
+#
+# This file is part of Ansible
+#
+# Ansible is free software: you can redistribute it and/or modify
+# it under the terms of the GNU General Public License as published by
+# the Free Software Foundation, either version 3 of the License, or
+# (at your option) any later version.
+#
+# Ansible is distributed in the hope that it will be useful,
+# but WITHOUT ANY WARRANTY; without even the implied warranty of
+# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+# GNU General Public License for more details.
+#
+# You should have received a copy of the GNU General Public License
+# along with Ansible. If not, see <http://www.gnu.org/licenses/>.
+#
+DOCUMENTATION = '''
+---
+module: netapp_e_auth
+short_description: Sets or updates the password for a storage array.
+description:
+ - Sets or updates the password for a storage array. When the password is updated on the storage array, it must also be updated on the SANtricity Web Services proxy. Note that not all storage arrays have a Monitor or RO role.
+version_added: "2.2"
+author: Kevin Hulquest (@hulquest)
+options:
+ validate_certs:
+ required: false
+ default: true
+ description:
+ - Should https certificates be validated?
+ name:
+ description:
+ - The name of the storage array. Note that if more than one storage array with this name is detected, the task will fail and you'll have to use the ID instead.
+ required: False
+ ssid:
+ description:
+ - The identifier of the storage array in the Web Services Proxy.
+ required: False
+ set_admin:
+ description:
+ - Boolean value on whether to update the admin password. If set to false then the RO account is updated.
+ required: True
+ current_password:
+ description:
+ - The current admin password. This is not required if the password hasn't been set before.
+ required: False
+ new_password:
+ description:
+ - The password you would like to set. Cannot be more than 30 characters.
+ required: True
+ api_url:
+ description:
+ - The full API url.
+ - "Example: http://ENDPOINT:8080/devmgr/v2"
+ - This can optionally be set via an environment variable, API_URL
+ required: False
+ api_username:
+ description:
+ - The username used to authenticate against the API
+ - This can optionally be set via an environment variable, API_USERNAME
+ required: False
+ api_password:
+ description:
+ - The password used to authenticate against the API
+ - This can optionally be set via an environment variable, API_PASSWORD
+ required: False
+'''
+
+EXAMPLES = '''
+- name: Test module
+ netapp_e_auth:
+ name: trex
+ current_password: 'B4Dpwd'
+ new_password: 'W0rs3P4sswd'
+ set_admin: yes
+ api_url: "{{ netapp_api_url }}"
+ api_username: "{{ netapp_api_username }}"
+ api_password: "{{ netapp_api_password }}"
+'''
+
+RETURN = '''
+msg:
+ description: Success message
+ returned: success
+ type: string
+ sample: "Password Updated Successfully"
+'''
+import json
+
+from ansible.module_utils.api import basic_auth_argument_spec
+from ansible.module_utils.basic import AnsibleModule
+
+from ansible.module_utils.pycompat24 import get_exception
+from ansible.module_utils.urls import open_url
+from ansible.module_utils.six.moves.urllib.error import HTTPError
+
+
+HEADERS = {
+ "Content-Type": "application/json",
+ "Accept": "application/json"
+}
+
+
+def request(url, data=None, headers=None, method='GET', use_proxy=True,
+ force=False, last_mod_time=None, timeout=10, validate_certs=True,
+ url_username=None, url_password=None, http_agent=None, force_basic_auth=True, ignore_errors=False):
+ try:
+ r = open_url(url=url, data=data, headers=headers, method=method, use_proxy=use_proxy,
+ force=force, last_mod_time=last_mod_time, timeout=timeout, validate_certs=validate_certs,
+ url_username=url_username, url_password=url_password, http_agent=http_agent,
+ force_basic_auth=force_basic_auth)
+ except HTTPError:
+ err = get_exception()
+ r = err.fp
+
+ try:
+ raw_data = r.read()
+ if raw_data:
+ data = json.loads(raw_data)
+ else:
+ data = None
+ except:
+ if ignore_errors:
+ pass
+ else:
+ raise Exception(raw_data)
+
+ resp_code = r.getcode()
+
+ if resp_code >= 400 and not ignore_errors:
+ raise Exception(resp_code, data)
+ else:
+ return resp_code, data
+
+
+def get_ssid(module, name, api_url, user, pwd):
+ count = 0
+ all_systems = 'storage-systems'
+ systems_url = api_url + all_systems
+ rc, data = request(systems_url, headers=HEADERS, url_username=user, url_password=pwd)
+ for system in data:
+ if system['name'] == name:
+ count += 1
+ if count > 1:
+ module.fail_json(
+ msg="You supplied a name for the Storage Array but more than 1 array was found with that name. " +
+ "Use the id instead")
+ else:
+ ssid = system['id']
+ else:
+ continue
+
+ if count == 0:
+ module.fail_json(msg="No storage array with the name %s was found" % name)
+
+ else:
+ return ssid
+
+
+def get_pwd_status(module, ssid, api_url, user, pwd):
+ pwd_status = "storage-systems/%s/passwords" % ssid
+ url = api_url + pwd_status
+ try:
+ rc, data = request(url, headers=HEADERS, url_username=user, url_password=pwd)
+ return data['readOnlyPasswordSet'], data['adminPasswordSet']
+ except HTTPError:
+ error = get_exception()
+ module.fail_json(msg="There was an issue with connecting, please check that your "
+ "endpoint is properly defined and your credentials are correct: %s" % str(error))
+
+
+def update_storage_system_pwd(module, ssid, pwd, api_url, api_usr, api_pwd):
+ update_pwd = 'storage-systems/%s' % ssid
+ url = api_url + update_pwd
+ post_body = json.dumps(dict(storedPassword=pwd))
+ try:
+ rc, data = request(url, data=post_body, method='POST', headers=HEADERS, url_username=api_usr,
+ url_password=api_pwd)
+ except:
+ err = get_exception()
+ module.fail_json(msg="Failed to update system password. Id [%s]. Error [%s]" % (ssid, str(err)))
+ return data
+
+
+def set_password(module, ssid, api_url, user, pwd, current_password=None, new_password=None, set_admin=False):
+ set_pass = "storage-systems/%s/passwords" % ssid
+ url = api_url + set_pass
+
+ if not current_password:
+ current_password = ""
+
+ post_body = json.dumps(
+ dict(currentAdminPassword=current_password, adminPassword=set_admin, newPassword=new_password))
+
+ try:
+ rc, data = request(url, method='POST', data=post_body, headers=HEADERS, url_username=user, url_password=pwd,
+ ignore_errors=True)
+ except:
+ err = get_exception()
+ module.fail_json(msg="Failed to set system password. Id [%s]. Error [%s]" % (ssid, str(err)))
+
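+ # A 422 here is treated as a rejected currentAdminPassword; retry once with
+ # an empty current password to cover arrays where none has been set yet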
+ if rc == 422:
+ post_body = json.dumps(dict(currentAdminPassword='', adminPassword=set_admin, newPassword=new_password))
+ try:
+ rc, data = request(url, method='POST', data=post_body, headers=HEADERS, url_username=user, url_password=pwd)
+ except Exception:
+ module.fail_json(msg="Wrong or no admin password supplied. Please update your playbook and try again")
+
+ update_data = update_storage_system_pwd(module, ssid, new_password, api_url, user, pwd)
+
+ if int(rc) == 204:
+ return update_data
+ else:
+ module.fail_json(msg="%s:%s" % (rc, data))
+
+
+def main():
+ argument_spec = basic_auth_argument_spec()
+ argument_spec.update(dict(
+ name=dict(required=False, type='str'),
+ ssid=dict(required=False, type='str'),
+ current_password=dict(required=False, no_log=True),
+ new_password=dict(required=True, no_log=True),
+ set_admin=dict(required=True, type='bool'),
+ api_url=dict(required=True),
+ api_username=dict(required=False),
+ api_password=dict(required=False, no_log=True)
+ )
+ )
+ module = AnsibleModule(argument_spec=argument_spec, mutually_exclusive=[['name', 'ssid']],
+ required_one_of=[['name', 'ssid']])
+
+ name = module.params['name']
+ ssid = module.params['ssid']
+ current_password = module.params['current_password']
+ new_password = module.params['new_password']
+ set_admin = module.params['set_admin']
+ user = module.params['api_username']
+ pwd = module.params['api_password']
+ api_url = module.params['api_url']
+
+ if not api_url.endswith('/'):
+ api_url += '/'
+
+ if name:
+ ssid = get_ssid(module, name, api_url, user, pwd)
+
+ ro_pwd, admin_pwd = get_pwd_status(module, ssid, api_url, user, pwd)
+
+ if admin_pwd and not current_password:
+ module.fail_json(
+ msg="Admin account has a password set. " +
+ "You must supply current_password in order to update the RO or Admin passwords")
+
+ if len(new_password) > 30:
+ module.fail_json(msg="Passwords must not be greater than 30 characters in length")
+
+ success = set_password(module, ssid, api_url, user, pwd, current_password=current_password,
+ new_password=new_password,
+ set_admin=set_admin)
+
+ module.exit_json(changed=True, msg="Password Updated Successfully", **success)
+
+
+if __name__ == '__main__':
+ main()
diff --git a/lib/ansible/modules/extras/storage/netapp/netapp_e_facts.py b/lib/ansible/modules/extras/storage/netapp/netapp_e_facts.py
new file mode 100644
index 0000000000..37e3f82762
--- /dev/null
+++ b/lib/ansible/modules/extras/storage/netapp/netapp_e_facts.py
@@ -0,0 +1,201 @@
+#!/usr/bin/python
+
+# (c) 2016, NetApp, Inc
+#
+# This file is part of Ansible
+#
+# Ansible is free software: you can redistribute it and/or modify
+# it under the terms of the GNU General Public License as published by
+# the Free Software Foundation, either version 3 of the License, or
+# (at your option) any later version.
+#
+# Ansible is distributed in the hope that it will be useful,
+# but WITHOUT ANY WARRANTY; without even the implied warranty of
+# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+# GNU General Public License for more details.
+#
+# You should have received a copy of the GNU General Public License
+# along with Ansible. If not, see <http://www.gnu.org/licenses/>.
+#
+DOCUMENTATION = '''
+module: netapp_e_facts
+version_added: '2.2'
+short_description: Get facts about NetApp E-Series arrays
+options:
+ api_username:
+ required: true
+ description:
+ - The username to authenticate with the SANtricity WebServices Proxy or embedded REST API.
+ api_password:
+ required: true
+ description:
+ - The password to authenticate with the SANtricity WebServices Proxy or embedded REST API.
+ api_url:
+ required: true
+ description:
+ - The url to the SANtricity WebServices Proxy or embedded REST API.
+ example:
+ - https://prod-1.wahoo.acme.com/devmgr/v2
+ validate_certs:
+ required: false
+ default: true
+ description:
+ - Should https certificates be validated?
+ ssid:
+ required: true
+ description:
+ - The ID of the array to manage. This value must be unique for each array.
+
+description:
+ - Return various information about NetApp E-Series storage arrays (eg, configuration, disks)
+
+author: Kevin Hulquest (@hulquest)
+'''
+
+EXAMPLES = """
+---
+ - name: Get array facts
+ netapp_e_facts:
+ ssid: "{{ netapp_array_id }}"
+ api_url: "{{ netapp_api_url }}"
+ api_username: "{{ netapp_api_username }}"
+ api_password: "{{ netapp_api_password }}"
+ validate_certs: "{{ netapp_api_validate_certs }}"
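+
+ # Illustrative follow-up (a sketch): facts are returned via ansible_facts,
+ # so keys such as netapp_disks are directly addressable afterwards
+ - name: Show the disks reported by the array
+ debug:
+ var: netapp_disks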
+"""
+
+RETURN = """
+msg:
+ description: Success message
+ returned: success
+ type: string
+ sample: "Gathered facts for <StorageArrayId>."
+"""
+import json
+
+from ansible.module_utils.api import basic_auth_argument_spec
+from ansible.module_utils.basic import AnsibleModule, get_exception
+from ansible.module_utils.urls import open_url
+from ansible.module_utils.six.moves.urllib.error import HTTPError
+
+
+def request(url, data=None, headers=None, method='GET', use_proxy=True,
+ force=False, last_mod_time=None, timeout=10, validate_certs=True,
+ url_username=None, url_password=None, http_agent=None, force_basic_auth=True, ignore_errors=False):
+ try:
+ r = open_url(url=url, data=data, headers=headers, method=method, use_proxy=use_proxy,
+ force=force, last_mod_time=last_mod_time, timeout=timeout, validate_certs=validate_certs,
+ url_username=url_username, url_password=url_password, http_agent=http_agent,
+ force_basic_auth=force_basic_auth)
+ except HTTPError:
+ err = get_exception()
+ r = err.fp
+
+ try:
+ raw_data = r.read()
+ if raw_data:
+ data = json.loads(raw_data)
+ else:
+ data = None
+ except:
+ if ignore_errors:
+ pass
+ else:
+ raise
+
+ resp_code = r.getcode()
+
+ if resp_code >= 400 and not ignore_errors:
+ raise Exception(resp_code, data)
+ else:
+ return resp_code, data
+
+
+def main():
+ argument_spec = basic_auth_argument_spec()
+ argument_spec.update(
+ api_username=dict(type='str', required=True),
+ api_password=dict(type='str', required=True, no_log=True),
+ api_url=dict(type='str', required=True),
+ ssid=dict(required=True))
+
+ module = AnsibleModule(
+ argument_spec=argument_spec,
+ supports_check_mode=True
+ )
+
+ p = module.params
+
+ ssid = p['ssid']
+ validate_certs = p['validate_certs']
+
+ api_usr = p['api_username']
+ api_pwd = p['api_password']
+ api_url = p['api_url']
+
+ facts = dict(ssid=ssid)
+
+ # fetch the list of storage-pool objects and look for one with a matching name
+ try:
+ (rc, resp) = request(api_url + "/storage-systems/%s/graph" % ssid,
+ headers=dict(Accept="application/json"),
+ url_username=api_usr, url_password=api_pwd, validate_certs=validate_certs)
+ except:
+ error = get_exception()
+ module.fail_json(
+ msg="Failed to obtain facts from storage array with id [%s]. Error [%s]" % (ssid, str(error)))
+
+ facts['snapshot_images'] = [
+ dict(
+ id=d['id'],
+ status=d['status'],
+ pit_capacity=d['pitCapacity'],
+ creation_method=d['creationMethod'],
+ repository_cap_utilization=d['repositoryCapacityUtilization'],
+ active_cow=d['activeCOW'],
+ rollback_source=d['isRollbackSource']
+ ) for d in resp['highLevelVolBundle']['pit']]
+
+ facts['netapp_disks'] = [
+ dict(
+ id=d['id'],
+ available=d['available'],
+ media_type=d['driveMediaType'],
+ status=d['status'],
+ usable_bytes=d['usableCapacity'],
+ tray_ref=d['physicalLocation']['trayRef'],
+ product_id=d['productID'],
+ firmware_version=d['firmwareVersion'],
+ serial_number=d['serialNumber'].lstrip()
+ ) for d in resp['drive']]
+
+ facts['netapp_storage_pools'] = [
+ dict(
+ id=sp['id'],
+ name=sp['name'],
+ available_capacity=sp['freeSpace'],
+ total_capacity=sp['totalRaidedSpace'],
+ used_capacity=sp['usedSpace']
+ ) for sp in resp['volumeGroup']]
+
+ all_volumes = list(resp['volume'])
+ # all_volumes.extend(resp['thinVolume'])
+
+ # TODO: exclude thin-volume repo volumes (how to ID?)
+ facts['netapp_volumes'] = [
+ dict(
+ id=v['id'],
+ name=v['name'],
+ parent_storage_pool_id=v['volumeGroupRef'],
+ capacity=v['capacity'],
+ is_thin_provisioned=v['thinProvisioned']
+ ) for v in all_volumes]
+
+ features = [f for f in resp['sa']['capabilities']]
+ features.extend([f['capability'] for f in resp['sa']['premiumFeatures'] if f['isEnabled']])
+ features = list(set(features)) # ensure unique
+ features.sort()
+ facts['netapp_enabled_features'] = features
+
+ # TODO: include other details about the storage pool (size, type, id, etc)
+ result = dict(ansible_facts=facts, changed=False)
+ module.exit_json(msg="Gathered facts for %s." % ssid, **result)
+
+
+if __name__ == "__main__":
+ main()
diff --git a/lib/ansible/modules/extras/storage/netapp/netapp_e_flashcache.py b/lib/ansible/modules/extras/storage/netapp/netapp_e_flashcache.py
new file mode 100644
index 0000000000..5fa4a66974
--- /dev/null
+++ b/lib/ansible/modules/extras/storage/netapp/netapp_e_flashcache.py
@@ -0,0 +1,420 @@
+#!/usr/bin/python
+
+# (c) 2016, NetApp, Inc
+#
+# This file is part of Ansible
+#
+# Ansible is free software: you can redistribute it and/or modify
+# it under the terms of the GNU General Public License as published by
+# the Free Software Foundation, either version 3 of the License, or
+# (at your option) any later version.
+#
+# Ansible is distributed in the hope that it will be useful,
+# but WITHOUT ANY WARRANTY; without even the implied warranty of
+# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+# GNU General Public License for more details.
+#
+# You should have received a copy of the GNU General Public License
+# along with Ansible. If not, see <http://www.gnu.org/licenses/>.
+#
+DOCUMENTATION = '''
+module: netapp_e_flashcache
+author: Kevin Hulquest (@hulquest)
+version_added: '2.2'
+short_description: Manage NetApp SSD caches
+description:
+- Create or remove SSD caches on a NetApp E-Series storage array.
+options:
+ api_username:
+ required: true
+ description:
+ - The username to authenticate with the SANtricity WebServices Proxy or embedded REST API.
+ api_password:
+ required: true
+ description:
+ - The password to authenticate with the SANtricity WebServices Proxy or embedded REST API.
+ api_url:
+ required: true
+ description:
+ - The url to the SANtricity WebServices Proxy or embedded REST API.
+ example:
+ - https://prod-1.wahoo.acme.com/devmgr/v2
+ validate_certs:
+ required: false
+ default: true
+ description:
+ - Should https certificates be validated?
+ ssid:
+ required: true
+ description:
+ - The ID of the array to manage (as configured on the web services proxy).
+ state:
+ required: true
+ description:
+ - Whether the specified SSD cache should exist or not.
+ choices: ['present', 'absent']
+ default: present
+ name:
+ required: true
+ description:
+ - The name of the SSD cache to manage
+ io_type:
+ description:
+ - The type of workload to optimize the cache for.
+ choices: ['filesystem','database','media']
+ default: filesystem
+ disk_count:
+ description:
+ - The minimum number of disks to use for building the cache. The cache will be expanded if this number exceeds the number of disks already in place
+ size_unit:
+ description:
+ - The unit to be applied to size arguments
+ choices: ['bytes', 'b', 'kb', 'mb', 'gb', 'tb', 'pb', 'eb', 'zb', 'yb']
+ default: gb
+ cache_size_min:
+ description:
+ - The minimum size (in size_units) of the ssd cache. The cache will be expanded if this exceeds the current size of the cache.
+'''
+
+EXAMPLES = """
+ - name: Flash Cache
+ netapp_e_flashcache:
+ ssid: "{{ ssid }}"
+ api_url: "{{ netapp_api_url }}"
+ api_username: "{{ netapp_api_username }}"
+ api_password: "{{ netapp_api_password }}"
+ validate_certs: "{{ netapp_api_validate_certs }}"
+ name: SSDCacheBuiltByAnsible
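+
+ # Illustrative companion task (placeholder values, a sketch): remove the
+ # same cache again
+ - name: Remove Flash Cache
+ netapp_e_flashcache:
+ ssid: "{{ ssid }}"
+ api_url: "{{ netapp_api_url }}"
+ api_username: "{{ netapp_api_username }}"
+ api_password: "{{ netapp_api_password }}"
+ validate_certs: "{{ netapp_api_validate_certs }}"
+ name: SSDCacheBuiltByAnsible
+ state: absent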
+"""
+
+RETURN = """
+msg:
+ description: Success message
+ returned: success
+ type: string
+ sample: json for newly created flash cache
+"""
+import json
+import logging
+import sys
+# reduce() is not a builtin on Python 3 (used below in get_candidate_disks)
+from functools import reduce
+
+from ansible.module_utils.api import basic_auth_argument_spec
+from ansible.module_utils.basic import AnsibleModule
+from ansible.module_utils.pycompat24 import get_exception
+from ansible.module_utils.urls import open_url
+
+from ansible.module_utils.six.moves.urllib.error import HTTPError
+
+
+def request(url, data=None, headers=None, method='GET', use_proxy=True,
+ force=False, last_mod_time=None, timeout=10, validate_certs=True,
+ url_username=None, url_password=None, http_agent=None, force_basic_auth=True, ignore_errors=False):
+ try:
+ r = open_url(url=url, data=data, headers=headers, method=method, use_proxy=use_proxy,
+ force=force, last_mod_time=last_mod_time, timeout=timeout, validate_certs=validate_certs,
+ url_username=url_username, url_password=url_password, http_agent=http_agent,
+ force_basic_auth=force_basic_auth)
+ except HTTPError:
+ err = get_exception()
+ r = err.fp
+
+ try:
+ raw_data = r.read()
+ if raw_data:
+ data = json.loads(raw_data)
+ else:
+ data = None
+ except:
+ if ignore_errors:
+ pass
+ else:
+ raise Exception(raw_data)
+
+ resp_code = r.getcode()
+
+ if resp_code >= 400 and not ignore_errors:
+ raise Exception(resp_code, data)
+ else:
+ return resp_code, data
+
+
+class NetAppESeriesFlashCache(object):
+ def __init__(self):
+ self.name = None
+ self.log_mode = None
+ self.log_path = None
+ self.api_url = None
+ self.api_username = None
+ self.api_password = None
+ self.ssid = None
+ self.validate_certs = None
+ self.disk_count = None
+ self.size_unit = None
+ self.cache_size_min = None
+ self.io_type = None
+ self.driveRefs = None
+ self.state = None
+ self._size_unit_map = dict(
+ bytes=1,
+ b=1,
+ kb=1024,
+ mb=1024 ** 2,
+ gb=1024 ** 3,
+ tb=1024 ** 4,
+ pb=1024 ** 5,
+ eb=1024 ** 6,
+ zb=1024 ** 7,
+ yb=1024 ** 8
+ )
+
+ argument_spec = basic_auth_argument_spec()
+ argument_spec.update(dict(
+ api_username=dict(type='str', required=True),
+ api_password=dict(type='str', required=True, no_log=True),
+ api_url=dict(type='str', required=True),
+ state=dict(default='present', choices=['present', 'absent'], type='str'),
+ ssid=dict(required=True, type='str'),
+ name=dict(required=True, type='str'),
+ disk_count=dict(type='int'),
+ disk_refs=dict(type='list'),
+ cache_size_min=dict(type='int'),
+ io_type=dict(default='filesystem', choices=['filesystem', 'database', 'media']),
+ size_unit=dict(default='gb', choices=['bytes', 'b', 'kb', 'mb', 'gb', 'tb', 'pb', 'eb', 'zb', 'yb'],
+ type='str'),
+ criteria_disk_phy_type=dict(choices=['sas', 'sas4k', 'fibre', 'fibre520b', 'scsi', 'sata', 'pata'],
+ type='str'),
+ log_mode=dict(type='str'),
+ log_path=dict(type='str'),
+ ))
+ self.module = AnsibleModule(
+ argument_spec=argument_spec,
+ required_if=[
+
+ ],
+ mutually_exclusive=[
+
+ ],
+ # TODO: update validation for various selection criteria
+ supports_check_mode=True
+ )
+
+ self.__dict__.update(self.module.params)
+
+ # logging setup
+ self._logger = logging.getLogger(self.__class__.__name__)
+ self.debug = self._logger.debug
+
+ if self.log_mode == 'file' and self.log_path:
+ logging.basicConfig(level=logging.DEBUG, filename=self.log_path)
+ elif self.log_mode == 'stderr':
+ logging.basicConfig(level=logging.DEBUG, stream=sys.stderr)
+
+ self.post_headers = dict(Accept="application/json")
+ self.post_headers['Content-Type'] = 'application/json'
+
+ def get_candidate_disks(self, disk_count, size_unit='gb', capacity=None):
+ self.debug("getting candidate disks...")
+
+ drives_req = dict(
+ driveCount=disk_count,
+ sizeUnit=size_unit,
+ driveType='ssd',
+ )
+
+ if capacity:
+ drives_req['targetUsableCapacity'] = capacity
+
+ (rc, drives_resp) = request(self.api_url + "/storage-systems/%s/drives" % (self.ssid),
+ data=json.dumps(drives_req), headers=self.post_headers, method='POST',
+ url_username=self.api_username, url_password=self.api_password,
+ validate_certs=self.validate_certs)
+
+ if rc == 204:
+ self.module.fail_json(msg='Cannot find disks to match requested criteria for ssd cache')
+
+ disk_ids = [d['id'] for d in drives_resp]
+ bytes = reduce(lambda s, d: s + int(d['usableCapacity']), drives_resp, 0)
+
+ return (disk_ids, bytes)
+
+ def create_cache(self):
+ (disk_ids, bytes) = self.get_candidate_disks(disk_count=self.disk_count, size_unit=self.size_unit,
+ capacity=self.cache_size_min)
+
+ self.debug("creating ssd cache...")
+
+ create_fc_req = dict(
+ driveRefs=disk_ids,
+ name=self.name
+ )
+
+ (rc, self.resp) = request(self.api_url + "/storage-systems/%s/flash-cache" % (self.ssid),
+ data=json.dumps(create_fc_req), headers=self.post_headers, method='POST',
+ url_username=self.api_username, url_password=self.api_password,
+ validate_certs=self.validate_certs)
+
+ def update_cache(self):
+ self.debug('updating flash cache config...')
+ update_fc_req = dict(
+ name=self.name,
+ configType=self.io_type
+ )
+
+ (rc, self.resp) = request(self.api_url + "/storage-systems/%s/flash-cache/configure" % (self.ssid),
+ data=json.dumps(update_fc_req), headers=self.post_headers, method='POST',
+ url_username=self.api_username, url_password=self.api_password,
+ validate_certs=self.validate_certs)
+
+ def delete_cache(self):
+ self.debug('deleting flash cache...')
+ (rc, self.resp) = request(self.api_url + "/storage-systems/%s/flash-cache" % (self.ssid), method='DELETE',
+ url_username=self.api_username, url_password=self.api_password,
+ validate_certs=self.validate_certs, ignore_errors=True)
+
+ @property
+ def needs_more_disks(self):
+ if len(self.cache_detail['driveRefs']) < self.disk_count:
+ self.debug("needs resize: current disk count %s < requested requested count %s" % (
+ len(self.cache_detail['driveRefs']), self.disk_count))
+ return True
+
+ @property
+ def needs_less_disks(self):
+ if len(self.cache_detail['driveRefs']) > self.disk_count:
+ self.debug("needs resize: current disk count %s < requested requested count %s" % (
+ len(self.cache_detail['driveRefs']), self.disk_count))
+ return True
+
+ @property
+ def current_size_bytes(self):
+ return int(self.cache_detail['fcDriveInfo']['fcWithDrives']['usedCapacity'])
+
+ @property
+ def requested_size_bytes(self):
+ if self.cache_size_min:
+ return self.cache_size_min * self._size_unit_map[self.size_unit]
+ else:
+ return 0
+
+ @property
+ def needs_more_capacity(self):
+ if self.current_size_bytes < self.requested_size_bytes:
+ self.debug("needs resize: current capacity %sb is less than requested minimum %sb" % (
+ self.current_size_bytes, self.requested_size_bytes))
+ return True
+
+ @property
+ def needs_resize(self):
+ return self.needs_more_disks or self.needs_more_capacity or self.needs_less_disks
+
+ def resize_cache(self):
+ # increase up to disk count first, then iteratively add disks until we meet requested capacity
+
+ # TODO: perform this calculation in check mode
+ current_disk_count = len(self.cache_detail['driveRefs'])
+ proposed_new_disks = 0
+
+ proposed_additional_bytes = 0
+ proposed_disk_ids = []
+
+ if self.needs_more_disks:
+ proposed_disk_count = self.disk_count - current_disk_count
+
+ (disk_ids, bytes) = self.get_candidate_disks(disk_count=proposed_disk_count)
+ proposed_additional_bytes = bytes
+ proposed_disk_ids = disk_ids
+
+ while self.current_size_bytes + proposed_additional_bytes < self.requested_size_bytes:
+ proposed_new_disks += 1
+ (disk_ids, bytes) = self.get_candidate_disks(disk_count=proposed_new_disks)
+ proposed_disk_ids = disk_ids
+ proposed_additional_bytes = bytes
+
+ add_drives_req = dict(
+ driveRef=proposed_disk_ids
+ )
+
+ self.debug("adding drives to flash-cache...")
+ (rc, self.resp) = request(self.api_url + "/storage-systems/%s/flash-cache/addDrives" % (self.ssid),
+ data=json.dumps(add_drives_req), headers=self.post_headers, method='POST',
+ url_username=self.api_username, url_password=self.api_password,
+ validate_certs=self.validate_certs)
+
+ elif self.needs_less_disks and self.driveRefs:
+ rm_drives = dict(driveRef=self.driveRefs)
+ (rc, self.resp) = request(self.api_url + "/storage-systems/%s/flash-cache/removeDrives" % (self.ssid),
+ data=json.dumps(rm_drives), headers=self.post_headers, method='POST',
+ url_username=self.api_username, url_password=self.api_password,
+ validate_certs=self.validate_certs)
+
+ def apply(self):
+ result = dict(changed=False)
+ (rc, cache_resp) = request(self.api_url + "/storage-systems/%s/flash-cache" % (self.ssid),
+ url_username=self.api_username, url_password=self.api_password,
+ validate_certs=self.validate_certs, ignore_errors=True)
+
+ if rc == 200:
+ self.cache_detail = cache_resp
+ else:
+ self.cache_detail = None
+
+ if rc not in [200, 404]:
+ raise Exception(
+ "Unexpected error code %s fetching flash cache detail. Response data was %s" % (rc, cache_resp))
+
+ if self.state == 'present':
+ if self.cache_detail:
+ # TODO: verify parameters against detail for changes
+ if self.cache_detail['name'] != self.name:
+ self.debug("CHANGED: name differs")
+ result['changed'] = True
+ if self.cache_detail['flashCacheBase']['configType'] != self.io_type:
+ self.debug("CHANGED: io_type differs")
+ result['changed'] = True
+ if self.needs_resize:
+ self.debug("CHANGED: resize required")
+ result['changed'] = True
+ else:
+ self.debug("CHANGED: requested state is 'present' but cache does not exist")
+ result['changed'] = True
+ else: # requested state is absent
+ if self.cache_detail:
+ self.debug("CHANGED: requested state is 'absent' but cache exists")
+ result['changed'] = True
+
+ if not result['changed']:
+ self.debug("no changes, exiting...")
+ self.module.exit_json(**result)
+
+ if self.module.check_mode:
+ self.debug("changes pending in check mode, exiting early...")
+ self.module.exit_json(**result)
+
+ if self.state == 'present':
+ if not self.cache_detail:
+ self.create_cache()
+ else:
+ if self.needs_resize:
+ self.resize_cache()
+
+ # run update here as well, since io_type can't be set on creation
+ self.update_cache()
+
+ elif self.state == 'absent':
+ self.delete_cache()
+
+ # TODO: include other details about the storage pool (size, type, id, etc)
+ self.module.exit_json(changed=result['changed'], **self.resp)
+
+
+def main():
+ sp = NetAppESeriesFlashCache()
+ try:
+ sp.apply()
+ except Exception:
+ e = get_exception()
+ sp.debug("Exception in apply(): \n%s" % str(e))
+ sp.module.fail_json(msg="Failed to create flash cache. Error[%s]" % str(e))
+
+if __name__ == '__main__':
+ main()
diff --git a/lib/ansible/modules/extras/storage/netapp/netapp_e_host.py b/lib/ansible/modules/extras/storage/netapp/netapp_e_host.py
new file mode 100644
index 0000000000..2261d8264d
--- /dev/null
+++ b/lib/ansible/modules/extras/storage/netapp/netapp_e_host.py
@@ -0,0 +1,425 @@
+#!/usr/bin/python
+
+# (c) 2016, NetApp, Inc
+#
+# This file is part of Ansible
+#
+# Ansible is free software: you can redistribute it and/or modify
+# it under the terms of the GNU General Public License as published by
+# the Free Software Foundation, either version 3 of the License, or
+# (at your option) any later version.
+#
+# Ansible is distributed in the hope that it will be useful,
+# but WITHOUT ANY WARRANTY; without even the implied warranty of
+# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+# GNU General Public License for more details.
+#
+# You should have received a copy of the GNU General Public License
+# along with Ansible. If not, see <http://www.gnu.org/licenses/>.
+#
+DOCUMENTATION = """
+---
+module: netapp_e_host
+short_description: Manage NetApp E-Series hosts
+description:
+ - Create, update, remove hosts on NetApp E-series storage arrays
+version_added: '2.2'
+author: Kevin Hulquest (@hulquest)
+options:
+ api_username:
+ required: true
+ description:
+ - The username to authenticate with the SANtricity WebServices Proxy or embedded REST API.
+ api_password:
+ required: true
+ description:
+ - The password to authenticate with the SANtricity WebServices Proxy or embedded REST API.
+ api_url:
+ required: true
+ description:
+ - The url to the SANtricity WebServices Proxy or embedded REST API.
+ example:
+ - https://prod-1.wahoo.acme.com/devmgr/v2
+ validate_certs:
+ required: false
+ default: true
+ description:
+ - Should https certificates be validated?
+ ssid:
+ description:
+ - The ID of the storage array you wish to act against.
+ required: True
+ name:
+ description:
+ - If the host doesn't yet exist, the label to assign at creation time.
+ - If the host already exists, this is what is used to identify the host and apply any desired changes.
+ required: True
+ host_type_index:
+ description:
+ - The index that maps to the host type you wish to create. It is recommended to use the M(netapp_e_facts) module to gather this information. Alternatively you can use the WSP portal to retrieve the information.
+ required: True
+ ports:
+ description:
+ - A list of dictionaries of host ports you wish to associate with the newly created host.
+ required: False
+ group:
+ description:
+ - The group you want the host to be a member of.
+ required: False
+
+"""
+
+EXAMPLES = """
+    - name: Set Host Info
+      netapp_e_host:
+        ssid: "{{ ssid }}"
+        api_url: "{{ netapp_api_url }}"
+        api_username: "{{ netapp_api_username }}"
+        api_password: "{{ netapp_api_password }}"
+        name: "{{ host_name }}"
+        state: present
+        host_type_index: "{{ host_type_index }}"
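+
+    # A fuller sketch with illustrative values: the port entry below is
+    # hypothetical; 'label' and 'type' are the keys this module inspects,
+    # and force_port allows ports already claimed by another host to be
+    # reassigned.
+    - name: Set Host Info with ports
+      netapp_e_host:
+        ssid: "{{ ssid }}"
+        api_url: "{{ netapp_api_url }}"
+        api_username: "{{ netapp_api_username }}"
+        api_password: "{{ netapp_api_password }}"
+        name: "{{ host_name }}"
+        state: present
+        host_type_index: "{{ host_type_index }}"
+        group: "{{ host_group_name }}"
+        force_port: yes
+        ports:
+          - label: iscsi_port_1
+            type: iscsi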
+"""
+
+RETURN = """
+msg:
+ description: Success message
+ returned: success
+ type: string
+ sample: The host has been created.
+"""
+import json
+
+from ansible.module_utils.api import basic_auth_argument_spec
+from ansible.module_utils.basic import AnsibleModule
+from ansible.module_utils.pycompat24 import get_exception
+from ansible.module_utils.urls import open_url
+from ansible.module_utils.six.moves.urllib.error import HTTPError
+
+HEADERS = {
+ "Content-Type": "application/json",
+ "Accept": "application/json",
+}
+
+
+def request(url, data=None, headers=None, method='GET', use_proxy=True,
+ force=False, last_mod_time=None, timeout=10, validate_certs=True,
+ url_username=None, url_password=None, http_agent=None, force_basic_auth=True, ignore_errors=False):
+ try:
+ r = open_url(url=url, data=data, headers=headers, method=method, use_proxy=use_proxy,
+ force=force, last_mod_time=last_mod_time, timeout=timeout, validate_certs=validate_certs,
+ url_username=url_username, url_password=url_password, http_agent=http_agent,
+ force_basic_auth=force_basic_auth)
+ except HTTPError:
+ err = get_exception()
+ r = err.fp
+
+ try:
+ raw_data = r.read()
+ if raw_data:
+ data = json.loads(raw_data)
+ else:
+            raw_data = None
+ except:
+ if ignore_errors:
+ pass
+ else:
+ raise Exception(raw_data)
+
+ resp_code = r.getcode()
+
+ if resp_code >= 400 and not ignore_errors:
+ raise Exception(resp_code, data)
+ else:
+ return resp_code, data
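+
+# Illustrative usage: request() returns a (status_code, parsed_json) tuple
+# and raises on response codes >= 400 unless ignore_errors=True, e.g.:
+#   rc, hosts = request(url + 'storage-systems/%s/hosts' % ssid,
+#                       url_username=user, url_password=pwd, headers=HEADERS)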
+
+
+class Host(object):
+ def __init__(self):
+ argument_spec = basic_auth_argument_spec()
+ argument_spec.update(dict(
+ api_username=dict(type='str', required=True),
+ api_password=dict(type='str', required=True, no_log=True),
+ api_url=dict(type='str', required=True),
+ ssid=dict(type='str', required=True),
+ state=dict(type='str', required=True, choices=['absent', 'present']),
+ group=dict(type='str', required=False),
+ ports=dict(type='list', required=False),
+ force_port=dict(type='bool', default=False),
+ name=dict(type='str', required=True),
+ host_type_index=dict(type='int', required=True)
+ ))
+
+ self.module = AnsibleModule(argument_spec=argument_spec)
+ args = self.module.params
+ self.group = args['group']
+ self.ports = args['ports']
+ self.force_port = args['force_port']
+ self.name = args['name']
+ self.host_type_index = args['host_type_index']
+ self.state = args['state']
+ self.ssid = args['ssid']
+ self.url = args['api_url']
+ self.user = args['api_username']
+ self.pwd = args['api_password']
+ self.certs = args['validate_certs']
+ self.post_body = dict()
+
+ if not self.url.endswith('/'):
+ self.url += '/'
+
+ @property
+ def valid_host_type(self):
+ try:
+ (rc, host_types) = request(self.url + 'storage-systems/%s/host-types' % self.ssid, url_password=self.pwd,
+ url_username=self.user, validate_certs=self.certs, headers=HEADERS)
+ except Exception:
+ err = get_exception()
+ self.module.fail_json(
+ msg="Failed to get host types. Array Id [%s]. Error [%s]." % (self.ssid, str(err)))
+
+ try:
+ match = filter(lambda host_type: host_type['index'] == self.host_type_index, host_types)[0]
+ return True
+ except IndexError:
+ self.module.fail_json(msg="There is no host type with index %s" % self.host_type_index)
+
+ @property
+ def hostports_available(self):
+ used_ids = list()
+ try:
+ (rc, self.available_ports) = request(self.url + 'storage-systems/%s/unassociated-host-ports' % self.ssid,
+ url_password=self.pwd, url_username=self.user,
+ validate_certs=self.certs,
+ headers=HEADERS)
+ except:
+ err = get_exception()
+ self.module.fail_json(
+ msg="Failed to get unassociated host ports. Array Id [%s]. Error [%s]." % (self.ssid, str(err)))
+
+ if len(self.available_ports) > 0 and len(self.ports) <= len(self.available_ports):
+ for port in self.ports:
+ for free_port in self.available_ports:
+                    # Make sure we haven't already claimed this free port's ID
+                    if free_port['id'] not in used_ids:
+                        # Claim the ID so it isn't counted again for another requested port
+                        used_ids.append(free_port['id'])
+                        break
+
+ if len(used_ids) != len(self.ports) and not self.force_port:
+ self.module.fail_json(
+ msg="There are not enough free host ports with the specified port types to proceed")
+ else:
+ return True
+
+ else:
+ self.module.fail_json(msg="There are no host ports available OR there are not enough unassigned host ports")
+
+ @property
+ def group_id(self):
+ if self.group:
+ try:
+ (rc, all_groups) = request(self.url + 'storage-systems/%s/host-groups' % self.ssid,
+ url_password=self.pwd,
+ url_username=self.user, validate_certs=self.certs, headers=HEADERS)
+ except:
+ err = get_exception()
+ self.module.fail_json(
+ msg="Failed to get host groups. Array Id [%s]. Error [%s]." % (self.ssid, str(err)))
+
+ try:
+ group_obj = filter(lambda group: group['name'] == self.group, all_groups)[0]
+ return group_obj['id']
+ except IndexError:
+ self.module.fail_json(msg="No group with the name: %s exists" % self.group)
+ else:
+ # Return the value equivalent of no group
+ return "0000000000000000000000000000000000000000"
+
+ @property
+ def host_exists(self):
+ try:
+ (rc, all_hosts) = request(self.url + 'storage-systems/%s/hosts' % self.ssid, url_password=self.pwd,
+ url_username=self.user, validate_certs=self.certs, headers=HEADERS)
+ except:
+ err = get_exception()
+ self.module.fail_json(
+ msg="Failed to determine host existence. Array Id [%s]. Error [%s]." % (self.ssid, str(err)))
+
+ self.all_hosts = all_hosts
+ try: # Try to grab the host object
+ self.host_obj = filter(lambda host: host['label'] == self.name, all_hosts)[0]
+ return True
+ except IndexError:
+ # Host with the name passed in does not exist
+ return False
+
+ @property
+ def needs_update(self):
+ needs_update = False
+ self.force_port_update = False
+
+ if self.host_obj['clusterRef'] != self.group_id or \
+ self.host_obj['hostTypeIndex'] != self.host_type_index:
+ needs_update = True
+
+ if self.ports:
+ if not self.host_obj['ports']:
+ needs_update = True
+ for arg_port in self.ports:
+ # First a quick check to see if the port is mapped to a different host
+ if not self.port_on_diff_host(arg_port):
+ for obj_port in self.host_obj['ports']:
+ if arg_port['label'] == obj_port['label']:
+ # Confirmed that port arg passed in exists on the host
+ # port_id = self.get_port_id(obj_port['label'])
+ if arg_port['type'] != obj_port['portId']['ioInterfaceType']:
+ needs_update = True
+ if 'iscsiChapSecret' in arg_port:
+ # No way to know the current secret attr, so always return True just in case
+ needs_update = True
+ else:
+ # If the user wants the ports to be reassigned, do it
+ if self.force_port:
+ self.force_port_update = True
+ needs_update = True
+ else:
+ self.module.fail_json(
+ msg="The port you specified:\n%s\n is associated with a different host. Specify force_port as True or try a different port spec" % arg_port)
+
+ return needs_update
+
+ def port_on_diff_host(self, arg_port):
+ """ Checks to see if a passed in port arg is present on a different host """
+ for host in self.all_hosts:
+            # Only check 'other' hosts, skipping the one being managed
+            if host['label'] != self.name:
+ for port in host['ports']:
+ # Check if the port label is found in the port dict list of each host
+ if arg_port['label'] == port['label']:
+ self.other_host = host
+ return True
+ return False
+
+ def reassign_ports(self, apply=True):
+ if not self.post_body:
+ self.post_body = dict(
+ portsToUpdate=dict()
+ )
+
+ for port in self.ports:
+ if self.port_on_diff_host(port):
+ self.post_body['portsToUpdate'].update(dict(
+ portRef=self.other_host['hostPortRef'],
+ hostRef=self.host_obj['id'],
+                    # Doesn't yet address port identifier or chap secret
+ ))
+
+ if apply:
+ try:
+ (rc, self.host_obj) = request(
+ self.url + 'storage-systems/%s/hosts/%s' % (self.ssid, self.host_obj['id']),
+ url_username=self.user, url_password=self.pwd, headers=HEADERS,
+ validate_certs=self.certs, method='POST', data=json.dumps(self.post_body))
+ except:
+ err = get_exception()
+ self.module.fail_json(
+ msg="Failed to reassign host port. Host Id [%s]. Array Id [%s]. Error [%s]." % (
+ self.host_obj['id'], self.ssid, str(err)))
+
+ def update_host(self):
+ if self.ports:
+ if self.hostports_available:
+ if self.force_port_update is True:
+ self.reassign_ports(apply=False)
+                    # Make sure that only ports that aren't being reassigned are passed into the ports attr
+ self.ports = [port for port in self.ports if not self.port_on_diff_host(port)]
+
+ self.post_body['ports'] = self.ports
+
+ if self.group:
+ self.post_body['groupId'] = self.group_id
+
+ self.post_body['hostType'] = dict(index=self.host_type_index)
+
+ try:
+ (rc, self.host_obj) = request(self.url + 'storage-systems/%s/hosts/%s' % (self.ssid, self.host_obj['id']),
+ url_username=self.user, url_password=self.pwd, headers=HEADERS,
+ validate_certs=self.certs, method='POST', data=json.dumps(self.post_body))
+ except:
+ err = get_exception()
+ self.module.fail_json(msg="Failed to update host. Array Id [%s]. Error [%s]." % (self.ssid, str(err)))
+
+ self.module.exit_json(changed=True, **self.host_obj)
+
+ def create_host(self):
+ post_body = dict(
+ name=self.name,
+            # hostType matches the key used by the update path in update_host()
+            hostType=dict(index=self.host_type_index),
+ groupId=self.group_id,
+ ports=self.ports
+ )
+ if self.ports:
+ # Check that all supplied port args are valid
+ if self.hostports_available:
+ post_body.update(ports=self.ports)
+ elif not self.force_port:
+ self.module.fail_json(
+ msg="You supplied ports that are already in use. Supply force_port to True if you wish to reassign the ports")
+
+ if not self.host_exists:
+ try:
+ (rc, create_resp) = request(self.url + "storage-systems/%s/hosts" % self.ssid, method='POST',
+ url_username=self.user, url_password=self.pwd, validate_certs=self.certs,
+ data=json.dumps(post_body), headers=HEADERS)
+ except:
+ err = get_exception()
+ self.module.fail_json(
+ msg="Failed to create host. Array Id [%s]. Error [%s]." % (self.ssid, str(err)))
+ else:
+ self.module.exit_json(changed=False,
+ msg="Host already exists. Id [%s]. Host [%s]." % (self.ssid, self.name))
+
+ self.host_obj = create_resp
+
+ if self.ports and self.force_port:
+ self.reassign_ports()
+
+ self.module.exit_json(changed=True, **self.host_obj)
+
+ def remove_host(self):
+ try:
+ (rc, resp) = request(self.url + "storage-systems/%s/hosts/%s" % (self.ssid, self.host_obj['id']),
+ method='DELETE',
+ url_username=self.user, url_password=self.pwd, validate_certs=self.certs)
+ except:
+ err = get_exception()
+ self.module.fail_json(
+                msg="Failed to remove host. Host[%s]. Array Id [%s]. Error [%s]." % (self.host_obj['id'],
+ self.ssid,
+ str(err)))
+
+ def apply(self):
+ if self.state == 'present':
+ if self.host_exists:
+ if self.needs_update and self.valid_host_type:
+ self.update_host()
+ else:
+ self.module.exit_json(changed=False, msg="Host already present.", id=self.ssid, label=self.name)
+ elif self.valid_host_type:
+ self.create_host()
+ else:
+ if self.host_exists:
+ self.remove_host()
+ self.module.exit_json(changed=True, msg="Host removed.")
+ else:
+ self.module.exit_json(changed=False, msg="Host already absent.", id=self.ssid, label=self.name)
+
+
+def main():
+ host = Host()
+ host.apply()
+
+
+if __name__ == '__main__':
+ main()
diff --git a/lib/ansible/modules/extras/storage/netapp/netapp_e_hostgroup.py b/lib/ansible/modules/extras/storage/netapp/netapp_e_hostgroup.py
new file mode 100644
index 0000000000..5248c1d953
--- /dev/null
+++ b/lib/ansible/modules/extras/storage/netapp/netapp_e_hostgroup.py
@@ -0,0 +1,413 @@
+#!/usr/bin/python
+# -*- coding: utf-8 -*-
+
+# (c) 2016, NetApp, Inc
+#
+# This file is part of Ansible
+#
+# Ansible is free software: you can redistribute it and/or modify
+# it under the terms of the GNU General Public License as published by
+# the Free Software Foundation, either version 3 of the License, or
+# (at your option) any later version.
+#
+# Ansible is distributed in the hope that it will be useful,
+# but WITHOUT ANY WARRANTY; without even the implied warranty of
+# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+# GNU General Public License for more details.
+#
+# You should have received a copy of the GNU General Public License
+# along with Ansible. If not, see <http://www.gnu.org/licenses/>.
+#
+#
+DOCUMENTATION = '''
+---
+module: netapp_e_hostgroup
+version_added: "2.2"
+short_description: Manage NetApp Storage Array Host Groups
+author: Kevin Hulquest (@hulquest)
+description:
+- Create, update or destroy host groups on a NetApp E-Series storage array.
+options:
+ api_username:
+ required: true
+ description:
+ - The username to authenticate with the SANtricity WebServices Proxy or embedded REST API.
+ api_password:
+ required: true
+ description:
+ - The password to authenticate with the SANtricity WebServices Proxy or embedded REST API.
+ api_url:
+ required: true
+ description:
+ - The url to the SANtricity WebServices Proxy or embedded REST API.
+ example:
+ - https://prod-1.wahoo.acme.com/devmgr/v2
+ validate_certs:
+ required: false
+ default: true
+ description:
+ - Should https certificates be validated?
+ ssid:
+ required: true
+ description:
+ - The ID of the array to manage (as configured on the web services proxy).
+ state:
+ required: true
+ description:
+ - Whether the specified host group should exist or not.
+ choices: ['present', 'absent']
+ name:
+ required: false
+ description:
+      - The name of the host group to manage. Either this or C(id) must be supplied.
+ new_name:
+ required: false
+ description:
+ - specify this when you need to update the name of a host group
+ id:
+ required: false
+ description:
+ - The id number of the host group to manage. Either this or C(name) must be supplied.
+  hosts:
+ required: false
+ description:
+ - a list of host names/labels to add to the group
+'''
+EXAMPLES = '''
+  - name: Configure Hostgroup
+    netapp_e_hostgroup:
+      ssid: "{{ ssid }}"
+      api_url: "{{ netapp_api_url }}"
+      api_username: "{{ netapp_api_username }}"
+      api_password: "{{ netapp_api_password }}"
+      validate_certs: "{{ netapp_api_validate_certs }}"
+      state: present
+      name: "{{ hostgroup_name }}"
+      hosts:
+        - HostA
+        - HostB
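+
+  # A minimal sketch: a rename is applied as part of a group update, so a
+  # host list is supplied alongside new_name.
+  - name: Rename Hostgroup
+    netapp_e_hostgroup:
+      ssid: "{{ ssid }}"
+      api_url: "{{ netapp_api_url }}"
+      api_username: "{{ netapp_api_username }}"
+      api_password: "{{ netapp_api_password }}"
+      validate_certs: "{{ netapp_api_validate_certs }}"
+      state: present
+      name: "{{ hostgroup_name }}"
+      new_name: "{{ new_hostgroup_name }}"
+      hosts:
+        - HostA
+        - HostB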
+'''
+RETURN = '''
+clusterRef:
+ description: The unique identification value for this object. Other objects may use this reference value to refer to the cluster.
+ returned: always except when state is absent
+ type: string
+ sample: "3233343536373839303132333100000000000000"
+confirmLUNMappingCreation:
+ description: If true, indicates that creation of LUN-to-volume mappings should require careful confirmation from the end-user, since such a mapping will alter the volume access rights of other clusters, in addition to this one.
+ returned: always
+ type: boolean
+ sample: false
+hosts:
+ description: A list of the hosts that are part of the host group after all operations.
+ returned: always except when state is absent
+ type: list
+ sample: ["HostA","HostB"]
+id:
+ description: The id number of the hostgroup
+ returned: always except when state is absent
+ type: string
+ sample: "3233343536373839303132333100000000000000"
+isSAControlled:
+ description: If true, indicates that I/O accesses from this cluster are subject to the storage array's default LUN-to-volume mappings. If false, indicates that I/O accesses from the cluster are subject to cluster-specific LUN-to-volume mappings.
+ returned: always except when state is absent
+ type: boolean
+ sample: false
+label:
+ description: The user-assigned, descriptive label string for the cluster.
+ returned: always
+ type: string
+ sample: "MyHostGroup"
+name:
+ description: same as label
+ returned: always except when state is absent
+ type: string
+ sample: "MyHostGroup"
+protectionInformationCapableAccessMethod:
+ description: This field is true if the host has a PI capable access method.
+ returned: always except when state is absent
+ type: boolean
+ sample: true
+'''
+
+HEADERS = {
+ "Content-Type": "application/json",
+ "Accept": "application/json"
+}
+
+import json
+
+from ansible.module_utils.basic import AnsibleModule
+from ansible.module_utils.pycompat24 import get_exception
+
+from ansible.module_utils.urls import open_url
+from ansible.module_utils.six.moves.urllib.error import HTTPError
+
+
+def request(url, data=None, headers=None, method='GET', use_proxy=True,
+ force=False, last_mod_time=None, timeout=10, validate_certs=True,
+ url_username=None, url_password=None, http_agent=None, force_basic_auth=True, ignore_errors=False):
+ try:
+ r = open_url(url=url, data=data, headers=headers, method=method, use_proxy=use_proxy,
+ force=force, last_mod_time=last_mod_time, timeout=timeout, validate_certs=validate_certs,
+ url_username=url_username, url_password=url_password, http_agent=http_agent,
+ force_basic_auth=force_basic_auth)
+ except HTTPError:
+ err = get_exception()
+ r = err.fp
+
+ try:
+ raw_data = r.read()
+ if raw_data:
+ data = json.loads(raw_data)
+ else:
+ raw_data = None
+ except:
+ if ignore_errors:
+ pass
+ else:
+ raise Exception(raw_data)
+
+ resp_code = r.getcode()
+
+ if resp_code >= 400 and not ignore_errors:
+ raise Exception(resp_code, data)
+ else:
+ return resp_code, data
+
+
+def group_exists(module, id_type, ident, ssid, api_url, user, pwd):
+ rc, data = get_hostgroups(module, ssid, api_url, user, pwd)
+ for group in data:
+ if group[id_type] == ident:
+ return True, data
+ else:
+ continue
+
+ return False, data
+
+
+def get_hostgroups(module, ssid, api_url, user, pwd):
+ groups = "storage-systems/%s/host-groups" % ssid
+ url = api_url + groups
+ try:
+ rc, data = request(url, headers=HEADERS, url_username=user, url_password=pwd)
+ return rc, data
+ except HTTPError:
+ err = get_exception()
+ module.fail_json(msg="Failed to get host groups. Id [%s]. Error [%s]." % (ssid, str(err)))
+
+
+def get_hostref(module, ssid, name, api_url, user, pwd):
+ all_hosts = 'storage-systems/%s/hosts' % ssid
+ url = api_url + all_hosts
+ try:
+ rc, data = request(url, method='GET', headers=HEADERS, url_username=user, url_password=pwd)
+ except Exception:
+ err = get_exception()
+ module.fail_json(msg="Failed to get hosts. Id [%s]. Error [%s]." % (ssid, str(err)))
+
+ for host in data:
+ if host['name'] == name:
+ return host['hostRef']
+ else:
+ continue
+
+ module.fail_json(msg="No host with the name %s could be found" % name)
+
+
+def create_hostgroup(module, ssid, name, api_url, user, pwd, hosts=None):
+ groups = "storage-systems/%s/host-groups" % ssid
+ url = api_url + groups
+ hostrefs = []
+
+ if hosts:
+ for host in hosts:
+ href = get_hostref(module, ssid, host, api_url, user, pwd)
+ hostrefs.append(href)
+
+ post_data = json.dumps(dict(name=name, hosts=hostrefs))
+ try:
+ rc, data = request(url, method='POST', data=post_data, headers=HEADERS, url_username=user, url_password=pwd)
+ except Exception:
+ err = get_exception()
+ module.fail_json(msg="Failed to create host group. Id [%s]. Error [%s]." % (ssid, str(err)))
+
+ return rc, data
+
+
+def update_hostgroup(module, ssid, name, api_url, user, pwd, hosts=None, new_name=None):
+ gid = get_hostgroup_id(module, ssid, name, api_url, user, pwd)
+ groups = "storage-systems/%s/host-groups/%s" % (ssid, gid)
+ url = api_url + groups
+ hostrefs = []
+
+ if hosts:
+ for host in hosts:
+ href = get_hostref(module, ssid, host, api_url, user, pwd)
+ hostrefs.append(href)
+
+ if new_name:
+ post_data = json.dumps(dict(name=new_name, hosts=hostrefs))
+ else:
+ post_data = json.dumps(dict(hosts=hostrefs))
+
+ try:
+ rc, data = request(url, method='POST', data=post_data, headers=HEADERS, url_username=user, url_password=pwd)
+ except Exception:
+ err = get_exception()
+ module.fail_json(msg="Failed to update host group. Group [%s]. Id [%s]. Error [%s]." % (gid, ssid,
+ str(err)))
+
+ return rc, data
+
+
+def delete_hostgroup(module, ssid, group_id, api_url, user, pwd):
+ groups = "storage-systems/%s/host-groups/%s" % (ssid, group_id)
+ url = api_url + groups
+ # TODO: Loop through hosts, do mapping to href, make new list to pass to data
+ try:
+ rc, data = request(url, method='DELETE', headers=HEADERS, url_username=user, url_password=pwd)
+ except Exception:
+ err = get_exception()
+ module.fail_json(msg="Failed to delete host group. Group [%s]. Id [%s]. Error [%s]." % (group_id, ssid, str(err)))
+
+ return rc, data
+
+
+def get_hostgroup_id(module, ssid, name, api_url, user, pwd):
+ all_groups = 'storage-systems/%s/host-groups' % ssid
+ url = api_url + all_groups
+ rc, data = request(url, method='GET', headers=HEADERS, url_username=user, url_password=pwd)
+ for hg in data:
+ if hg['name'] == name:
+ return hg['id']
+ else:
+ continue
+
+ module.fail_json(msg="A hostgroup with the name %s could not be found" % name)
+
+
+def get_hosts_in_group(module, ssid, group_name, api_url, user, pwd):
+ all_groups = 'storage-systems/%s/host-groups' % ssid
+ g_url = api_url + all_groups
+ try:
+ g_rc, g_data = request(g_url, method='GET', headers=HEADERS, url_username=user, url_password=pwd)
+ except Exception:
+ err = get_exception()
+ module.fail_json(
+ msg="Failed in first step getting hosts from group. Group: [%s]. Id [%s]. Error [%s]." % (group_name,
+ ssid,
+ str(err)))
+
+ all_hosts = 'storage-systems/%s/hosts' % ssid
+ h_url = api_url + all_hosts
+ try:
+ h_rc, h_data = request(h_url, method='GET', headers=HEADERS, url_username=user, url_password=pwd)
+ except Exception:
+ err = get_exception()
+ module.fail_json(
+ msg="Failed in second step getting hosts from group. Group: [%s]. Id [%s]. Error [%s]." % (
+ group_name,
+ ssid,
+ str(err)))
+
+    hosts_in_group = []
+    clusterRef = None
+
+    for hg in g_data:
+        if hg['name'] == group_name:
+            clusterRef = hg['clusterRef']
+
+    # Guard against a group name that was not found, which would otherwise
+    # raise a NameError below
+    for host in h_data:
+        if clusterRef is not None and host['clusterRef'] == clusterRef:
+            hosts_in_group.append(host['name'])
+
+    return hosts_in_group
+
+
+def main():
+ module = AnsibleModule(
+ argument_spec=dict(
+ name=dict(required=False),
+ new_name=dict(required=False),
+ ssid=dict(required=True),
+ id=dict(required=False),
+ state=dict(required=True, choices=['present', 'absent']),
+ hosts=dict(required=False, type='list'),
+ api_url=dict(required=True),
+ api_username=dict(required=True),
+            validate_certs=dict(required=False, type='bool', default=True),
+ api_password=dict(required=True, no_log=True)
+ ),
+ supports_check_mode=False,
+ mutually_exclusive=[['name', 'id']],
+ required_one_of=[['name', 'id']]
+ )
+
+ name = module.params['name']
+ new_name = module.params['new_name']
+ ssid = module.params['ssid']
+ id_num = module.params['id']
+ state = module.params['state']
+ hosts = module.params['hosts']
+ user = module.params['api_username']
+ pwd = module.params['api_password']
+ api_url = module.params['api_url']
+
+ if not api_url.endswith('/'):
+ api_url += '/'
+
+ if name:
+ id_type = 'name'
+ id_key = name
+ elif id_num:
+ id_type = 'id'
+ id_key = id_num
+
+ exists, group_data = group_exists(module, id_type, id_key, ssid, api_url, user, pwd)
+
+ if state == 'present':
+ if not exists:
+ try:
+ rc, data = create_hostgroup(module, ssid, name, api_url, user, pwd, hosts)
+ except Exception:
+ err = get_exception()
+ module.fail_json(msg="Failed to create a host group. Id [%s]. Error [%s]." % (ssid, str(err)))
+
+ hosts = get_hosts_in_group(module, ssid, name, api_url, user, pwd)
+ module.exit_json(changed=True, hosts=hosts, **data)
+ else:
+ current_hosts = get_hosts_in_group(module, ssid, name, api_url, user, pwd)
+
+ if not current_hosts:
+ current_hosts = []
+
+ if not hosts:
+ hosts = []
+
+ if set(current_hosts) != set(hosts):
+ try:
+ rc, data = update_hostgroup(module, ssid, name, api_url, user, pwd, hosts, new_name)
+ except Exception:
+ err = get_exception()
+ module.fail_json(
+ msg="Failed to update host group. Group: [%s]. Id [%s]. Error [%s]." % (name, ssid, str(err)))
+ module.exit_json(changed=True, hosts=hosts, **data)
+ else:
+ for group in group_data:
+ if group['name'] == name:
+ module.exit_json(changed=False, hosts=current_hosts, **group)
+
+ elif state == 'absent':
+ if exists:
+ hg_id = get_hostgroup_id(module, ssid, name, api_url, user, pwd)
+ try:
+ rc, data = delete_hostgroup(module, ssid, hg_id, api_url, user, pwd)
+ except Exception:
+ err = get_exception()
+ module.fail_json(
+ msg="Failed to delete host group. Group: [%s]. Id [%s]. Error [%s]." % (name, ssid, str(err)))
+
+ module.exit_json(changed=True, msg="Host Group deleted")
+ else:
+ module.exit_json(changed=False, msg="Host Group is already absent")
+
+
+if __name__ == '__main__':
+ main()
diff --git a/lib/ansible/modules/extras/storage/netapp/netapp_e_lun_mapping.py b/lib/ansible/modules/extras/storage/netapp/netapp_e_lun_mapping.py
new file mode 100644
index 0000000000..be3c27515e
--- /dev/null
+++ b/lib/ansible/modules/extras/storage/netapp/netapp_e_lun_mapping.py
@@ -0,0 +1,350 @@
+#!/usr/bin/python
+
+# (c) 2016, NetApp, Inc
+#
+# This file is part of Ansible
+#
+# Ansible is free software: you can redistribute it and/or modify
+# it under the terms of the GNU General Public License as published by
+# the Free Software Foundation, either version 3 of the License, or
+# (at your option) any later version.
+#
+# Ansible is distributed in the hope that it will be useful,
+# but WITHOUT ANY WARRANTY; without even the implied warranty of
+# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+# GNU General Public License for more details.
+#
+# You should have received a copy of the GNU General Public License
+# along with Ansible. If not, see <http://www.gnu.org/licenses/>.
+#
+DOCUMENTATION = '''
+---
+module: netapp_e_lun_mapping
+author: Kevin Hulquest (@hulquest)
+short_description: Create or Remove LUN Mappings
+description:
+ - Allows for the creation and removal of volume to host mappings for NetApp E-series storage arrays.
+version_added: "2.2"
+options:
+ validate_certs:
+ required: false
+ default: true
+ description:
+ - Should https certificates be validated?
+ ssid:
+ description:
+ - "The storage system array identifier."
+ required: False
+ lun:
+ description:
+ - The LUN number you wish to give the mapping
+ - If the supplied I(volume_name) is associated with a different LUN, it will be updated to what is supplied here.
+ required: False
+ default: 0
+ target:
+ description:
+ - The name of host or hostgroup you wish to assign to the mapping
+ - If omitted, the default hostgroup is used.
+ - If the supplied I(volume_name) is associated with a different target, it will be updated to what is supplied here.
+ required: False
+ volume_name:
+ description:
+ - The name of the volume you wish to include in the mapping.
+ required: True
+ target_type:
+ description:
+ - Whether the target is a host or group.
+ - Required if supplying an explicit target.
+ required: False
+ choices: ["host", "group"]
+ state:
+ description:
+ - Present will ensure the mapping exists, absent will remove the mapping.
+ - All parameters I(lun), I(target), I(target_type) and I(volume_name) must still be supplied.
+ required: True
+ choices: ["present", "absent"]
+ api_url:
+ description:
+ - "The full API url. Example: http://ENDPOINT:8080/devmgr/v2"
+ - This can optionally be set via an environment variable, API_URL
+ required: False
+ api_username:
+ description:
+ - The username used to authenticate against the API. This can optionally be set via an environment variable, API_USERNAME
+ required: False
+ api_password:
+ description:
+ - The password used to authenticate against the API. This can optionally be set via an environment variable, API_PASSWORD
+ required: False
+'''
+
+EXAMPLES = '''
+---
+ - name: Lun Mapping Example
+ netapp_e_lun_mapping:
+ state: present
+ ssid: 1
+ lun: 12
+ target: Wilson
+ volume_name: Colby1
+ target_type: group
+ api_url: "{{ netapp_api_url }}"
+ api_username: "{{ netapp_api_username }}"
+ api_password: "{{ netapp_api_password }}"
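+
+  # Illustrative: removing the mapping again; per the option docs, lun,
+  # target, target_type and volume_name must all be re-supplied for
+  # state: absent.
+  - name: Remove Lun Mapping Example
+    netapp_e_lun_mapping:
+      state: absent
+      ssid: 1
+      lun: 12
+      target: Wilson
+      volume_name: Colby1
+      target_type: group
+      api_url: "{{ netapp_api_url }}"
+      api_username: "{{ netapp_api_username }}"
+      api_password: "{{ netapp_api_password }}"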
+'''
+RETURN = '''
+msg:
+    description: Status message indicating whether the mapping exists or was removed
+    returned: always
+    type: string
+    sample: "Mapping exists."
+'''
+import json
+
+from ansible.module_utils.api import basic_auth_argument_spec
+from ansible.module_utils.pycompat24 import get_exception
+from ansible.module_utils.urls import open_url
+
+from ansible.module_utils.basic import AnsibleModule
+from ansible.module_utils.six.moves.urllib.error import HTTPError
+
+HEADERS = {
+ "Content-Type": "application/json",
+ "Accept": "application/json"
+}
+
+
+def request(url, data=None, headers=None, method='GET', use_proxy=True,
+ force=False, last_mod_time=None, timeout=10, validate_certs=True,
+ url_username=None, url_password=None, http_agent=None, force_basic_auth=True, ignore_errors=False):
+ try:
+ r = open_url(url=url, data=data, headers=headers, method=method, use_proxy=use_proxy,
+ force=force, last_mod_time=last_mod_time, timeout=timeout, validate_certs=validate_certs,
+ url_username=url_username, url_password=url_password, http_agent=http_agent,
+ force_basic_auth=force_basic_auth)
+ except HTTPError:
+ err = get_exception()
+ r = err.fp
+
+ try:
+ raw_data = r.read()
+ if raw_data:
+ data = json.loads(raw_data)
+ else:
+ raw_data = None
+ except:
+ if ignore_errors:
+ pass
+ else:
+ raise Exception(raw_data)
+
+ resp_code = r.getcode()
+
+ if resp_code >= 400 and not ignore_errors:
+ raise Exception(resp_code, data)
+ else:
+ return resp_code, data
+
+
+def get_host_and_group_map(module, ssid, api_url, user, pwd):
+ mapping = dict(host=dict(), group=dict())
+
+ hostgroups = 'storage-systems/%s/host-groups' % ssid
+ groups_url = api_url + hostgroups
+ try:
+ hg_rc, hg_data = request(groups_url, headers=HEADERS, url_username=user, url_password=pwd)
+ except:
+ err = get_exception()
+ module.fail_json(msg="Failed to get host groups. Id [%s]. Error [%s]" % (ssid, str(err)))
+
+ for group in hg_data:
+ mapping['group'][group['name']] = group['id']
+
+ hosts = 'storage-systems/%s/hosts' % ssid
+ hosts_url = api_url + hosts
+ try:
+ h_rc, h_data = request(hosts_url, headers=HEADERS, url_username=user, url_password=pwd)
+ except:
+ err = get_exception()
+ module.fail_json(msg="Failed to get hosts. Id [%s]. Error [%s]" % (ssid, str(err)))
+
+ for host in h_data:
+ mapping['host'][host['name']] = host['id']
+
+ return mapping
+
+
+def get_volume_id(module, data, ssid, name, api_url, user, pwd):
+    qty = 0
+    wwn = None
+    for volume in data:
+        if volume['name'] == name:
+            qty += 1
+            wwn = volume['wwn']
+
+    if qty > 1:
+        module.fail_json(msg="More than one volume with the name: %s was found, "
+                             "please use the volume WWN instead" % name)
+    elif wwn is None:
+        module.fail_json(msg="No volume with the name: %s, was found" % name)
+
+    return wwn
+
+
+def get_hostgroups(module, ssid, api_url, user, pwd):
+ groups = "storage-systems/%s/host-groups" % ssid
+ url = api_url + groups
+ try:
+ rc, data = request(url, headers=HEADERS, url_username=user, url_password=pwd)
+ return data
+ except Exception:
+        module.fail_json(msg="There was an issue with connecting, please check that your "
+                             "endpoint is properly defined and your credentials are correct")
+
+
+def get_volumes(module, ssid, api_url, user, pwd, mappable):
+ volumes = 'storage-systems/%s/%s' % (ssid, mappable)
+ url = api_url + volumes
+ try:
+ rc, data = request(url, url_username=user, url_password=pwd)
+ except Exception:
+ err = get_exception()
+        module.fail_json(
+            msg="Failed to fetch mappable objects. Type [%s]. Id [%s]. Error [%s]." % (mappable, ssid, str(err)))
+ return data
+
+
+def get_lun_mappings(ssid, api_url, user, pwd, get_all=None):
+ mappings = 'storage-systems/%s/volume-mappings' % ssid
+ url = api_url + mappings
+ rc, data = request(url, url_username=user, url_password=pwd)
+
+ if not get_all:
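+        # Strip the server-side bookkeeping keys so each mapping reduces to
+        # the (lun, mapRef, volumeRef) shape that main() compares against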
+ remove_keys = ('ssid', 'perms', 'lunMappingRef', 'type', 'id')
+
+ for key in remove_keys:
+ for mapping in data:
+ del mapping[key]
+
+ return data
+
+
+def create_mapping(module, ssid, lun_map, vol_name, api_url, user, pwd):
+ mappings = 'storage-systems/%s/volume-mappings' % ssid
+ url = api_url + mappings
+ post_body = json.dumps(dict(
+ mappableObjectId=lun_map['volumeRef'],
+ targetId=lun_map['mapRef'],
+ lun=lun_map['lun']
+ ))
+
+ rc, data = request(url, data=post_body, method='POST', url_username=user, url_password=pwd, headers=HEADERS,
+ ignore_errors=True)
+
+    if rc == 422:
+        # A 422 response means the volume is already part of another LUN
+        # mapping; move the existing mapping to the requested target instead
+        # of failing
+        data = move_lun(module, ssid, lun_map, vol_name, api_url, user, pwd)
+    return data
+
+
+def move_lun(module, ssid, lun_map, vol_name, api_url, user, pwd):
+ lun_id = get_lun_id(module, ssid, lun_map, api_url, user, pwd)
+ move_lun = "storage-systems/%s/volume-mappings/%s/move" % (ssid, lun_id)
+ url = api_url + move_lun
+ post_body = json.dumps(dict(targetId=lun_map['mapRef'], lun=lun_map['lun']))
+ rc, data = request(url, data=post_body, method='POST', url_username=user, url_password=pwd, headers=HEADERS)
+ return data
+
+
+def get_lun_id(module, ssid, lun_mapping, api_url, user, pwd):
+ data = get_lun_mappings(ssid, api_url, user, pwd, get_all=True)
+
+ for lun_map in data:
+ if lun_map['volumeRef'] == lun_mapping['volumeRef']:
+ return lun_map['id']
+ # This shouldn't ever get called
+ module.fail_json(msg="No LUN map found.")
+
+
+def remove_mapping(module, ssid, lun_mapping, api_url, user, pwd):
+ lun_id = get_lun_id(module, ssid, lun_mapping, api_url, user, pwd)
+ lun_del = "storage-systems/%s/volume-mappings/%s" % (ssid, lun_id)
+ url = api_url + lun_del
+ rc, data = request(url, method='DELETE', url_username=user, url_password=pwd, headers=HEADERS)
+ return data
+
+
+def main():
+ argument_spec = basic_auth_argument_spec()
+ argument_spec.update(dict(
+ api_username=dict(type='str', required=True),
+ api_password=dict(type='str', required=True, no_log=True),
+ api_url=dict(type='str', required=True),
+ state=dict(required=True, choices=['present', 'absent']),
+ target=dict(required=False, default=None),
+ target_type=dict(required=False, choices=['host', 'group']),
+ lun=dict(required=False, type='int', default=0),
+ ssid=dict(required=False),
+ volume_name=dict(required=True),
+ ))
+
+ module = AnsibleModule(argument_spec=argument_spec)
+
+ state = module.params['state']
+ target = module.params['target']
+ target_type = module.params['target_type']
+ lun = module.params['lun']
+ ssid = module.params['ssid']
+ vol_name = module.params['volume_name']
+ user = module.params['api_username']
+ pwd = module.params['api_password']
+ api_url = module.params['api_url']
+
+ if not api_url.endswith('/'):
+ api_url += '/'
+
+ volume_map = get_volumes(module, ssid, api_url, user, pwd, "volumes")
+ thin_volume_map = get_volumes(module, ssid, api_url, user, pwd, "thin-volumes")
+ volref = None
+
+ for vol in volume_map:
+ if vol['label'] == vol_name:
+ volref = vol['volumeRef']
+
+ if not volref:
+ for vol in thin_volume_map:
+ if vol['label'] == vol_name:
+ volref = vol['volumeRef']
+
+ if not volref:
+ module.fail_json(changed=False, msg="No volume with the name %s was found" % vol_name)
+
+ host_and_group_mapping = get_host_and_group_map(module, ssid, api_url, user, pwd)
+
+ desired_lun_mapping = dict(
+ mapRef=host_and_group_mapping[target_type][target],
+ lun=lun,
+ volumeRef=volref
+ )
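+
+    # desired_lun_mapping mirrors the reduced shape returned by
+    # get_lun_mappings(), so plain membership tests below decide whether
+    # anything needs to change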
+
+ lun_mappings = get_lun_mappings(ssid, api_url, user, pwd)
+
+ if state == 'present':
+ if desired_lun_mapping in lun_mappings:
+ module.exit_json(changed=False, msg="Mapping exists")
+ else:
+ result = create_mapping(module, ssid, desired_lun_mapping, vol_name, api_url, user, pwd)
+ module.exit_json(changed=True, **result)
+
+ elif state == 'absent':
+ if desired_lun_mapping in lun_mappings:
+ result = remove_mapping(module, ssid, desired_lun_mapping, api_url, user, pwd)
+ module.exit_json(changed=True, msg="Mapping removed")
+ else:
+ module.exit_json(changed=False, msg="Mapping absent")
+
+
+if __name__ == '__main__':
+ main()
diff --git a/lib/ansible/modules/extras/storage/netapp/netapp_e_snapshot_group.py b/lib/ansible/modules/extras/storage/netapp/netapp_e_snapshot_group.py
new file mode 100644
index 0000000000..90c6e8471b
--- /dev/null
+++ b/lib/ansible/modules/extras/storage/netapp/netapp_e_snapshot_group.py
@@ -0,0 +1,382 @@
+#!/usr/bin/python
+
+# (c) 2016, NetApp, Inc
+#
+# This file is part of Ansible
+#
+# Ansible is free software: you can redistribute it and/or modify
+# it under the terms of the GNU General Public License as published by
+# the Free Software Foundation, either version 3 of the License, or
+# (at your option) any later version.
+#
+# Ansible is distributed in the hope that it will be useful,
+# but WITHOUT ANY WARRANTY; without even the implied warranty of
+# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+# GNU General Public License for more details.
+#
+# You should have received a copy of the GNU General Public License
+# along with Ansible. If not, see <http://www.gnu.org/licenses/>.
+#
+DOCUMENTATION = """
+---
+module: netapp_e_snapshot_group
+short_description: Manage snapshot groups
+description:
+ - Create, update, delete snapshot groups for NetApp E-series storage arrays
+version_added: '2.2'
+author: Kevin Hulquest (@hulquest)
+options:
+ api_username:
+ required: true
+ description:
+ - The username to authenticate with the SANtricity WebServices Proxy or embedded REST API.
+ api_password:
+ required: true
+ description:
+ - The password to authenticate with the SANtricity WebServices Proxy or embedded REST API.
+ api_url:
+ required: true
+ description:
+ - The url to the SANtricity WebServices Proxy or embedded REST API.
+ example:
+ - https://prod-1.wahoo.acme.com/devmgr/v2
+ validate_certs:
+ required: false
+ default: true
+ description:
+ - Should https certificates be validated?
+ state:
+ description:
+ - Whether to ensure the group is present or absent.
+ required: True
+ choices:
+ - present
+ - absent
+ name:
+ description:
+ - The name to give the snapshot group
+ required: True
+ base_volume_name:
+ description:
+ - The name of the base volume or thin volume to use as the base for the new snapshot group.
+ - If a snapshot group with an identical C(name) already exists but with a different base volume
+ an error will be returned.
+ required: True
+ repo_pct:
+ description:
+ - The size of the repository in relation to the size of the base volume
+ required: False
+ default: 20
+ warning_threshold:
+ description:
+ - The repository utilization warning threshold, as a percentage of the repository volume capacity.
+ required: False
+ default: 80
+ delete_limit:
+ description:
+ - The automatic deletion indicator.
+ - If non-zero, the oldest snapshot image will be automatically deleted when creating a new snapshot image to keep the total number of snapshot images limited to the number specified.
+ - This value is overridden by the consistency group setting if this snapshot group is associated with a consistency group.
+ required: False
+ default: 30
+ full_policy:
+ description:
+ - The behavior on when the data repository becomes full.
+ - This value is overridden by consistency group setting if this snapshot group is associated with a consistency group
+ required: False
+ default: purgepit
+ choices:
+ - purgepit
+ - unknown
+ - failbasewrites
+ - __UNDEFINED
+ storage_pool_name:
+ required: True
+ description:
+ - The name of the storage pool on which to allocate the repository volume.
+ rollback_priority:
+ required: False
+ description:
+ - The importance of the rollback operation.
+ - This value is overridden by consistency group setting if this snapshot group is associated with a consistency group
+ choices:
+ - highest
+ - high
+ - medium
+ - low
+ - lowest
+ - __UNDEFINED
+ default: medium
+"""
+
+EXAMPLES = """
+    - name: Configure Snapshot group
+      netapp_e_snapshot_group:
+        ssid: "{{ ssid }}"
+        api_url: "{{ netapp_api_url }}"
+        api_username: "{{ netapp_api_username }}"
+        api_password: "{{ netapp_api_password }}"
+        validate_certs: "{{ netapp_api_validate_certs }}"
+        state: present
+        base_volume_name: SSGroup_test
+        name: OOSS_Group
+        repo_pct: 20
+        warning_threshold: 85
+        delete_limit: 30
+        full_policy: purgepit
+        storage_pool_name: Disk_Pool_1
+        rollback_priority: medium
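+
+    # A minimal sketch: base_volume_name and name are still required by the
+    # module when removing the group.
+    - name: Remove Snapshot group
+      netapp_e_snapshot_group:
+        ssid: "{{ ssid }}"
+        api_url: "{{ netapp_api_url }}"
+        api_username: "{{ netapp_api_username }}"
+        api_password: "{{ netapp_api_password }}"
+        validate_certs: "{{ netapp_api_validate_certs }}"
+        state: absent
+        base_volume_name: SSGroup_test
+        name: OOSS_Group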
+"""
+RETURN = """
+msg:
+ description: Success message
+ returned: success
+ type: string
+ sample: json facts for newly created snapshot group.
+"""
+HEADERS = {
+ "Content-Type": "application/json",
+ "Accept": "application/json",
+}
+import json
+
+from ansible.module_utils.api import basic_auth_argument_spec
+from ansible.module_utils.basic import AnsibleModule
+
+from ansible.module_utils.pycompat24 import get_exception
+from ansible.module_utils.urls import open_url
+from ansible.module_utils.six.moves.urllib.error import HTTPError
+
+
+def request(url, data=None, headers=None, method='GET', use_proxy=True,
+ force=False, last_mod_time=None, timeout=10, validate_certs=True,
+ url_username=None, url_password=None, http_agent=None, force_basic_auth=True, ignore_errors=False):
+ try:
+ r = open_url(url=url, data=data, headers=headers, method=method, use_proxy=use_proxy,
+ force=force, last_mod_time=last_mod_time, timeout=timeout, validate_certs=validate_certs,
+ url_username=url_username, url_password=url_password, http_agent=http_agent,
+ force_basic_auth=force_basic_auth)
+ except HTTPError:
+ err = get_exception()
+ r = err.fp
+
+ try:
+ raw_data = r.read()
+ if raw_data:
+ data = json.loads(raw_data)
+ else:
+ raw_data = None
+ except:
+ if ignore_errors:
+ pass
+ else:
+ raise Exception(raw_data)
+
+ resp_code = r.getcode()
+
+ if resp_code >= 400 and not ignore_errors:
+ raise Exception(resp_code, data)
+ else:
+ return resp_code, data
+
+
+class SnapshotGroup(object):
+ def __init__(self):
+
+ argument_spec = basic_auth_argument_spec()
+ argument_spec.update(
+ api_username=dict(type='str', required=True),
+ api_password=dict(type='str', required=True, no_log=True),
+ api_url=dict(type='str', required=True),
+ state=dict(required=True, choices=['present', 'absent']),
+ base_volume_name=dict(required=True),
+ name=dict(required=True),
+ repo_pct=dict(default=20, type='int'),
+ warning_threshold=dict(default=80, type='int'),
+ delete_limit=dict(default=30, type='int'),
+ full_policy=dict(default='purgepit', choices=['unknown', 'failbasewrites', 'purgepit']),
+ rollback_priority=dict(default='medium', choices=['highest', 'high', 'medium', 'low', 'lowest']),
+ storage_pool_name=dict(type='str'),
+ ssid=dict(required=True),
+ )
+
+ self.module = AnsibleModule(argument_spec=argument_spec)
+
+ self.post_data = dict()
+ self.warning_threshold = self.module.params['warning_threshold']
+ self.base_volume_name = self.module.params['base_volume_name']
+ self.name = self.module.params['name']
+ self.repo_pct = self.module.params['repo_pct']
+ self.delete_limit = self.module.params['delete_limit']
+ self.full_policy = self.module.params['full_policy']
+ self.rollback_priority = self.module.params['rollback_priority']
+ self.storage_pool_name = self.module.params['storage_pool_name']
+ self.state = self.module.params['state']
+
+ self.url = self.module.params['api_url']
+ self.user = self.module.params['api_username']
+ self.pwd = self.module.params['api_password']
+ self.certs = self.module.params['validate_certs']
+ self.ssid = self.module.params['ssid']
+
+ if not self.url.endswith('/'):
+ self.url += '/'
+
+ self.changed = False
+
+ @property
+ def pool_id(self):
+ pools = 'storage-systems/%s/storage-pools' % self.ssid
+ url = self.url + pools
+ try:
+ (rc, data) = request(url, headers=HEADERS, url_username=self.user, url_password=self.pwd)
+ except:
+ err = get_exception()
+ self.module.fail_json(msg="Snapshot group module - Failed to fetch storage pools. " +
+ "Id [%s]. Error [%s]." % (self.ssid, str(err)))
+
+ for pool in data:
+ if pool['name'] == self.storage_pool_name:
+ self.pool_data = pool
+ return pool['id']
+
+        self.module.fail_json(msg="No storage pool with the name: '%s' was found" % self.storage_pool_name)
+
+ @property
+ def volume_id(self):
+ volumes = 'storage-systems/%s/volumes' % self.ssid
+ url = self.url + volumes
+ try:
+ rc, data = request(url, headers=HEADERS, url_username=self.user, url_password=self.pwd,
+ validate_certs=self.certs)
+ except:
+ err = get_exception()
+ self.module.fail_json(msg="Snapshot group module - Failed to fetch volumes. " +
+ "Id [%s]. Error [%s]." % (self.ssid, str(err)))
+        qty = 0
+        volume_id = None
+        for volume in data:
+            if volume['name'] == self.base_volume_name:
+                qty += 1
+                volume_id = volume['id']
+                self.volume = volume
+
+        if qty > 1:
+            self.module.fail_json(msg="More than one volume with the name: %s was found, "
+                                      "please ensure your volume has a unique name" % self.base_volume_name)
+        elif volume_id is None:
+            self.module.fail_json(msg="No volume with the name: %s, was found" % self.base_volume_name)
+
+        return volume_id
+
+ @property
+ def snapshot_group_id(self):
+ url = self.url + 'storage-systems/%s/snapshot-groups' % self.ssid
+ try:
+ rc, data = request(url, headers=HEADERS, url_username=self.user, url_password=self.pwd,
+ validate_certs=self.certs)
+ except:
+ err = get_exception()
+ self.module.fail_json(msg="Failed to fetch snapshot groups. " +
+ "Id [%s]. Error [%s]." % (self.ssid, str(err)))
+ for ssg in data:
+ if ssg['name'] == self.name:
+ self.ssg_data = ssg
+ return ssg['id']
+
+ return None
+
+ @property
+ def ssg_needs_update(self):
+ if self.ssg_data['fullWarnThreshold'] != self.warning_threshold or \
+ self.ssg_data['autoDeleteLimit'] != self.delete_limit or \
+ self.ssg_data['repFullPolicy'] != self.full_policy or \
+ self.ssg_data['rollbackPriority'] != self.rollback_priority:
+ return True
+ else:
+ return False
+
+ def create_snapshot_group(self):
+ self.post_data = dict(
+ baseMappableObjectId=self.volume_id,
+ name=self.name,
+ repositoryPercentage=self.repo_pct,
+ warningThreshold=self.warning_threshold,
+ autoDeleteLimit=self.delete_limit,
+ fullPolicy=self.full_policy,
+ storagePoolId=self.pool_id,
+ )
+ snapshot = 'storage-systems/%s/snapshot-groups' % self.ssid
+ url = self.url + snapshot
+ try:
+ rc, self.ssg_data = request(url, data=json.dumps(self.post_data), method='POST', headers=HEADERS,
+ url_username=self.user, url_password=self.pwd, validate_certs=self.certs)
+ except:
+ err = get_exception()
+ self.module.fail_json(msg="Failed to create snapshot group. " +
+ "Snapshot group [%s]. Id [%s]. Error [%s]." % (self.name,
+ self.ssid,
+ str(err)))
+
+        # rollback_priority is not part of the creation request body, so push
+        # a follow-up update if the new group still differs from the
+        # requested settings
+        if self.ssg_needs_update:
+            self.update_ssg()
+
+        self.module.exit_json(changed=True, **self.ssg_data)
+
+ def update_ssg(self):
+ self.post_data = dict(
+ warningThreshold=self.warning_threshold,
+ autoDeleteLimit=self.delete_limit,
+ fullPolicy=self.full_policy,
+ rollbackPriority=self.rollback_priority
+ )
+
+ url = self.url + "storage-systems/%s/snapshot-groups/%s" % (self.ssid, self.snapshot_group_id)
+ try:
+ rc, self.ssg_data = request(url, data=json.dumps(self.post_data), method='POST', headers=HEADERS,
+ url_username=self.user, url_password=self.pwd, validate_certs=self.certs)
+ except:
+ err = get_exception()
+ self.module.fail_json(msg="Failed to update snapshot group. " +
+ "Snapshot group [%s]. Id [%s]. Error [%s]." % (self.name,
+ self.ssid,
+ str(err)))
+
+ def apply(self):
+ if self.state == 'absent':
+ if self.snapshot_group_id:
+ try:
+ rc, resp = request(
+ self.url + 'storage-systems/%s/snapshot-groups/%s' % (self.ssid, self.snapshot_group_id),
+ method='DELETE', headers=HEADERS, url_password=self.pwd, url_username=self.user,
+ validate_certs=self.certs)
+ except:
+ err = get_exception()
+ self.module.fail_json(msg="Failed to delete snapshot group. " +
+ "Snapshot group [%s]. Id [%s]. Error [%s]." % (self.name,
+ self.ssid,
+ str(err)))
+ self.module.exit_json(changed=True, msg="Snapshot group removed", **self.ssg_data)
+ else:
+ self.module.exit_json(changed=False, msg="Snapshot group absent")
+
+ elif self.snapshot_group_id:
+ if self.ssg_needs_update:
+ self.update_ssg()
+ self.module.exit_json(changed=True, **self.ssg_data)
+ else:
+ self.module.exit_json(changed=False, **self.ssg_data)
+ else:
+ self.create_snapshot_group()
+
+
+def main():
+ vg = SnapshotGroup()
+ vg.apply()
+
+
+if __name__ == '__main__':
+ main()
diff --git a/lib/ansible/modules/extras/storage/netapp/netapp_e_snapshot_images.py b/lib/ansible/modules/extras/storage/netapp/netapp_e_snapshot_images.py
new file mode 100644
index 0000000000..8c81af8453
--- /dev/null
+++ b/lib/ansible/modules/extras/storage/netapp/netapp_e_snapshot_images.py
@@ -0,0 +1,250 @@
+#!/usr/bin/python
+
+# (c) 2016, NetApp, Inc
+#
+# This file is part of Ansible
+#
+# Ansible is free software: you can redistribute it and/or modify
+# it under the terms of the GNU General Public License as published by
+# the Free Software Foundation, either version 3 of the License, or
+# (at your option) any later version.
+#
+# Ansible is distributed in the hope that it will be useful,
+# but WITHOUT ANY WARRANTY; without even the implied warranty of
+# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+# GNU General Public License for more details.
+#
+# You should have received a copy of the GNU General Public License
+# along with Ansible. If not, see <http://www.gnu.org/licenses/>.
+#
+DOCUMENTATION = """
+---
+module: netapp_e_snapshot_images
+short_description: Create and delete snapshot images
+description:
+ - Create and delete snapshots images on snapshot groups for NetApp E-series storage arrays.
+ - Only the oldest snapshot image can be deleted so consistency is preserved.
+ - "Related: Snapshot volumes are created from snapshot images."
+version_added: '2.2'
+author: Kevin Hulquest (@hulquest)
+options:
+ api_username:
+ required: true
+ description:
+ - The username to authenticate with the SANtricity WebServices Proxy or embedded REST API.
+ api_password:
+ required: true
+ description:
+ - The password to authenticate with the SANtricity WebServices Proxy or embedded REST API.
+ api_url:
+ required: true
+ description:
+ - The url to the SANtricity WebServices Proxy or embedded REST API.
+ example:
+ - https://prod-1.wahoo.acme.com/devmgr/v2
+ validate_certs:
+ required: false
+ default: true
+ description:
+ - Should https certificates be validated?
+ snapshot_group:
+ description:
+ - The name of the snapshot group in which you want to create a snapshot image.
+ required: True
+ state:
+ description:
+ - Whether a new snapshot image should be created or oldest be deleted.
+ required: True
+ choices: ['create', 'remove']
+"""
+EXAMPLES = """
+ - name: Create Snapshot
+ netapp_e_snapshot_images:
+ ssid: "{{ ssid }}"
+ api_url: "{{ netapp_api_url }}"
+ api_username: "{{ netapp_api_username }}"
+ api_password: "{{ netapp_api_password }}"
+ validate_certs: "{{ validate_certs }}"
+ snapshot_group: "3300000060080E5000299C24000005B656D9F394"
+ state: 'create'
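+
+    # Illustrative: state 'remove' deletes the oldest image in the group.
+    - name: Remove Snapshot
+      netapp_e_snapshot_images:
+        ssid: "{{ ssid }}"
+        api_url: "{{ netapp_api_url }}"
+        api_username: "{{ netapp_api_username }}"
+        api_password: "{{ netapp_api_password }}"
+        validate_certs: "{{ validate_certs }}"
+        snapshot_group: "3300000060080E5000299C24000005B656D9F394"
+        state: 'remove'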
+"""
+RETURN = """
+msg:
+    description: State of the operation
+    returned: always
+    type: string
+    sample: "Created snapshot image"
+image_id:
+    description: ID of the snapshot image that was created
+    returned: when state is create
+    type: string
+    sample: "3400000060080E5000299B640063074057BC5C5E"
+"""
+
+HEADERS = {
+ "Content-Type": "application/json",
+ "Accept": "application/json",
+}
+import json
+
+from ansible.module_utils.api import basic_auth_argument_spec
+from ansible.module_utils.basic import AnsibleModule
+
+from ansible.module_utils.pycompat24 import get_exception
+from ansible.module_utils.urls import open_url
+from ansible.module_utils.six.moves.urllib.error import HTTPError
+
+
+def request(url, data=None, headers=None, method='GET', use_proxy=True,
+ force=False, last_mod_time=None, timeout=10, validate_certs=True,
+ url_username=None, url_password=None, http_agent=None, force_basic_auth=True, ignore_errors=False):
+ try:
+ r = open_url(url=url, data=data, headers=headers, method=method, use_proxy=use_proxy,
+ force=force, last_mod_time=last_mod_time, timeout=timeout, validate_certs=validate_certs,
+ url_username=url_username, url_password=url_password, http_agent=http_agent,
+ force_basic_auth=force_basic_auth)
+ except HTTPError:
+ err = get_exception()
+ r = err.fp
+
+ try:
+ raw_data = r.read()
+ if raw_data:
+ data = json.loads(raw_data)
+ else:
+ raw_data = None
+ except:
+ if ignore_errors:
+ pass
+ else:
+ raise Exception(raw_data)
+
+ resp_code = r.getcode()
+
+ if resp_code >= 400 and not ignore_errors:
+ raise Exception(resp_code, data)
+ else:
+ return resp_code, data
+
+
+def snapshot_group_from_name(module, ssid, api_url, api_pwd, api_usr, name):
+ snap_groups = 'storage-systems/%s/snapshot-groups' % ssid
+ snap_groups_url = api_url + snap_groups
+ (ret, snapshot_groups) = request(snap_groups_url, url_username=api_usr, url_password=api_pwd, headers=HEADERS,
+ validate_certs=module.params['validate_certs'])
+
+ snapshot_group_id = None
+ for snapshot_group in snapshot_groups:
+ if name == snapshot_group['label']:
+ snapshot_group_id = snapshot_group['pitGroupRef']
+ break
+ if snapshot_group_id is None:
+ module.fail_json(msg="Failed to lookup snapshot group. Group [%s]. Id [%s]." % (name, ssid))
+
+ return snapshot_group
+
+
+def oldest_image(module, ssid, api_url, api_pwd, api_usr, name):
+ get_status = 'storage-systems/%s/snapshot-images' % ssid
+ url = api_url + get_status
+
+ try:
+ (ret, images) = request(url, url_username=api_usr, url_password=api_pwd, headers=HEADERS,
+ validate_certs=module.params['validate_certs'])
+ except:
+ err = get_exception()
+ module.fail_json(msg="Failed to get snapshot images for group. Group [%s]. Id [%s]. Error [%s]" %
+ (name, ssid, str(err)))
+ if not images:
+ module.exit_json(msg="There are no snapshot images to remove. Group [%s]. Id [%s]." % (name, ssid))
+
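+    # pitSequenceNumber increases as images are taken, so the minimum is the
+    # oldest image on the array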
+ oldest = min(images, key=lambda x: x['pitSequenceNumber'])
+ if oldest is None or "pitRef" not in oldest:
+ module.fail_json(msg="Failed to lookup oldest snapshot group. Group [%s]. Id [%s]." % (name, ssid))
+
+ return oldest
+
+
+def create_image(module, ssid, api_url, pwd, user, p, snapshot_group):
+ snapshot_group_obj = snapshot_group_from_name(module, ssid, api_url, pwd, user, snapshot_group)
+ snapshot_group_id = snapshot_group_obj['pitGroupRef']
+ endpoint = 'storage-systems/%s/snapshot-images' % ssid
+ url = api_url + endpoint
+ post_data = json.dumps({'groupId': snapshot_group_id})
+
+ image_data = request(url, data=post_data, method='POST', url_username=user, url_password=pwd, headers=HEADERS,
+ validate_certs=module.params['validate_certs'])
+
+ if image_data[1]['status'] == 'optimal':
+ status = True
+ id = image_data[1]['id']
+ else:
+ status = False
+ id = ''
+
+ return status, id
+
+
+def delete_image(module, ssid, api_url, pwd, user, snapshot_group):
+ image = oldest_image(module, ssid, api_url, pwd, user, snapshot_group)
+ image_id = image['pitRef']
+ endpoint = 'storage-systems/%s/snapshot-images/%s' % (ssid, image_id)
+ url = api_url + endpoint
+
+    try:
+        (ret, image_data) = request(url, method='DELETE', url_username=user, url_password=pwd, headers=HEADERS,
+                                    validate_certs=module.params['validate_certs'])
+    except Exception:
+        e = get_exception()
+        # Make sure ret is defined and the error surfaces through the normal
+        # error-message path below
+        ret = None
+        image_data = (None, dict(errorMessage=str(e)))
+
+ if ret == 204:
+ deleted_status = True
+ error_message = ''
+ else:
+ deleted_status = False
+ error_message = image_data[1]['errorMessage']
+
+ return deleted_status, error_message
+
+
+def main():
+ argument_spec = basic_auth_argument_spec()
+ argument_spec.update(dict(
+ snapshot_group=dict(required=True, type='str'),
+ ssid=dict(required=True, type='str'),
+ api_url=dict(required=True),
+ api_username=dict(required=False),
+ api_password=dict(required=False, no_log=True),
+ validate_certs=dict(required=False, default=True),
+ state=dict(required=True, choices=['create', 'remove'], type='str'),
+ ))
+ module = AnsibleModule(argument_spec)
+
+ p = module.params
+
+ ssid = p.pop('ssid')
+ api_url = p.pop('api_url')
+ user = p.pop('api_username')
+ pwd = p.pop('api_password')
+ snapshot_group = p.pop('snapshot_group')
+ desired_state = p.pop('state')
+
+ if not api_url.endswith('/'):
+ api_url += '/'
+
+ if desired_state == 'create':
+ created_status, snapshot_id = create_image(module, ssid, api_url, pwd, user, p, snapshot_group)
+
+ if created_status:
+ module.exit_json(changed=True, msg='Created snapshot image', image_id=snapshot_id)
+ else:
+ module.fail_json(
+ msg="Could not create snapshot image on system %s, in snapshot group %s" % (ssid, snapshot_group))
+ else:
+ deleted, error_msg = delete_image(module, ssid, api_url, pwd, user, snapshot_group)
+
+ if deleted:
+ module.exit_json(changed=True, msg='Deleted snapshot image for snapshot group [%s]' % (snapshot_group))
+ else:
+            module.fail_json(
+                msg="Could not delete snapshot image on system %s, in snapshot group %s --- %s" % (
+                    ssid, snapshot_group, error_msg))
+
+
+if __name__ == '__main__':
+ main()
diff --git a/lib/ansible/modules/extras/storage/netapp/netapp_e_snapshot_volume.py b/lib/ansible/modules/extras/storage/netapp/netapp_e_snapshot_volume.py
new file mode 100644
index 0000000000..9a143bd412
--- /dev/null
+++ b/lib/ansible/modules/extras/storage/netapp/netapp_e_snapshot_volume.py
@@ -0,0 +1,287 @@
+#!/usr/bin/python
+
+# (c) 2016, NetApp, Inc
+#
+# This file is part of Ansible
+#
+# Ansible is free software: you can redistribute it and/or modify
+# it under the terms of the GNU General Public License as published by
+# the Free Software Foundation, either version 3 of the License, or
+# (at your option) any later version.
+#
+# Ansible is distributed in the hope that it will be useful,
+# but WITHOUT ANY WARRANTY; without even the implied warranty of
+# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+# GNU General Public License for more details.
+#
+# You should have received a copy of the GNU General Public License
+# along with Ansible. If not, see <http://www.gnu.org/licenses/>.
+#
+DOCUMENTATION = """
+---
+module: netapp_e_snapshot_volume
+short_description: Manage E/EF-Series snapshot volumes.
+description:
+ - Create, update, remove snapshot volumes for NetApp E/EF-Series storage arrays.
+version_added: '2.2'
+author: Kevin Hulquest (@hulquest)
+notes:
+  - Only I(full_threshold) is supported for update operations. If the snapshot volume already exists and the threshold matches, an C(ok) status is returned and no other changes can be made to a pre-existing snapshot volume.
+options:
+ api_username:
+ required: true
+ description:
+ - The username to authenticate with the SANtricity WebServices Proxy or embedded REST API.
+ api_password:
+ required: true
+ description:
+ - The password to authenticate with the SANtricity WebServices Proxy or embedded REST API.
+ api_url:
+ required: true
+ description:
+ - The url to the SANtricity WebServices Proxy or embedded REST API.
+ example:
+ - https://prod-1.wahoo.acme.com/devmgr/v2
+ validate_certs:
+ required: false
+ default: true
+ description:
+ - Should https certificates be validated?
+ ssid:
+ description:
+ - storage array ID
+ required: True
+ snapshot_image_id:
+ required: True
+ description:
+ - The identifier of the snapshot image used to create the new snapshot volume.
+ - "Note: You'll likely want to use the M(netapp_e_facts) module to find the ID of the image you want."
+ full_threshold:
+ description:
+ - The repository utilization warning threshold percentage
+ default: 85
+ name:
+ required: True
+ description:
+ - The name you wish to give the snapshot volume
+  view_mode:
+    required: false
+    default: readOnly
+    description:
+      - The snapshot volume access mode
+    choices:
+      - modeUnknown
+      - readWrite
+      - readOnly
+      - __Undefined
+ repo_percentage:
+ description:
+ - The size of the view in relation to the size of the base volume
+ default: 20
+ storage_pool_name:
+ description:
+ - Name of the storage pool on which to allocate the repository volume.
+ required: True
+ state:
+ description:
+ - Whether to create or remove the snapshot volume
+ required: True
+ choices:
+ - absent
+ - present
+"""
+EXAMPLES = """
+ - name: Snapshot volume
+ netapp_e_snapshot_volume:
+ ssid: "{{ ssid }}"
+      api_url: "{{ netapp_api_url }}"
+ api_username: "{{ netapp_api_username }}"
+ api_password: "{{ netapp_api_password }}"
+ state: present
+ storage_pool_name: "{{ snapshot_volume_storage_pool_name }}"
+ snapshot_image_id: "{{ snapshot_volume_image_id }}"
+ name: "{{ snapshot_volume_name }}"
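+
+  # An illustrative removal task (a minimal sketch; variable names mirror the example above):
+  - name: Remove snapshot volume
+    netapp_e_snapshot_volume:
+      ssid: "{{ ssid }}"
+      api_url: "{{ netapp_api_url }}"
+      api_username: "{{ netapp_api_username }}"
+      api_password: "{{ netapp_api_password }}"
+      state: absent
+      storage_pool_name: "{{ snapshot_volume_storage_pool_name }}"
+      snapshot_image_id: "{{ snapshot_volume_image_id }}"
+      name: "{{ snapshot_volume_name }}"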
+"""
+RETURN = """
+msg:
+ description: Success message
+ returned: success
+ type: string
+ sample: Json facts for the volume that was created.
+"""
+HEADERS = {
+ "Content-Type": "application/json",
+ "Accept": "application/json",
+}
+import json
+
+from ansible.module_utils.api import basic_auth_argument_spec
+from ansible.module_utils.basic import AnsibleModule
+
+from ansible.module_utils.pycompat24 import get_exception
+from ansible.module_utils.urls import open_url
+from ansible.module_utils.six.moves.urllib.error import HTTPError
+
+
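+# Thin wrapper around open_url: returns (http_status, parsed_json) on success and raises
+# Exception(status, body) for HTTP status >= 400 unless ignore_errors is set.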
+def request(url, data=None, headers=None, method='GET', use_proxy=True,
+ force=False, last_mod_time=None, timeout=10, validate_certs=True,
+ url_username=None, url_password=None, http_agent=None, force_basic_auth=True, ignore_errors=False):
+ try:
+ r = open_url(url=url, data=data, headers=headers, method=method, use_proxy=use_proxy,
+ force=force, last_mod_time=last_mod_time, timeout=timeout, validate_certs=validate_certs,
+ url_username=url_username, url_password=url_password, http_agent=http_agent,
+ force_basic_auth=force_basic_auth)
+ except HTTPError:
+ err = get_exception()
+ r = err.fp
+
+ try:
+ raw_data = r.read()
+ if raw_data:
+ data = json.loads(raw_data)
+ else:
+ raw_data = None
+ except:
+ if ignore_errors:
+ pass
+ else:
+ raise Exception(raw_data)
+
+ resp_code = r.getcode()
+
+ if resp_code >= 400 and not ignore_errors:
+ raise Exception(resp_code, data)
+ else:
+ return resp_code, data
+
+
+class SnapshotVolume(object):
+ def __init__(self):
+ argument_spec = basic_auth_argument_spec()
+ argument_spec.update(dict(
+ api_username=dict(type='str', required=True),
+ api_password=dict(type='str', required=True, no_log=True),
+ api_url=dict(type='str', required=True),
+ ssid=dict(type='str', required=True),
+ snapshot_image_id=dict(type='str', required=True),
+ full_threshold=dict(type='int', default=85),
+ name=dict(type='str', required=True),
+ view_mode=dict(type='str', default='readOnly',
+ choices=['readOnly', 'readWrite', 'modeUnknown', '__Undefined']),
+ repo_percentage=dict(type='int', default=20),
+ storage_pool_name=dict(type='str', required=True),
+ state=dict(type='str', required=True, choices=['absent', 'present'])
+ ))
+
+ self.module = AnsibleModule(argument_spec=argument_spec)
+ args = self.module.params
+ self.state = args['state']
+ self.ssid = args['ssid']
+ self.snapshot_image_id = args['snapshot_image_id']
+ self.full_threshold = args['full_threshold']
+ self.name = args['name']
+ self.view_mode = args['view_mode']
+ self.repo_percentage = args['repo_percentage']
+ self.storage_pool_name = args['storage_pool_name']
+ self.url = args['api_url']
+ self.user = args['api_username']
+ self.pwd = args['api_password']
+ self.certs = args['validate_certs']
+
+ if not self.url.endswith('/'):
+ self.url += '/'
+
+ @property
+ def pool_id(self):
+ pools = 'storage-systems/%s/storage-pools' % self.ssid
+ url = self.url + pools
+ (rc, data) = request(url, headers=HEADERS, url_username=self.user, url_password=self.pwd,
+ validate_certs=self.certs)
+
+ for pool in data:
+ if pool['name'] == self.storage_pool_name:
+ self.pool_data = pool
+ return pool['id']
+
+ self.module.fail_json(msg="No storage pool with the name: '%s' was found" % self.name)
+
+ @property
+ def ss_vol_exists(self):
+ rc, ss_vols = request(self.url + 'storage-systems/%s/snapshot-volumes' % self.ssid, headers=HEADERS,
+ url_username=self.user, url_password=self.pwd, validate_certs=self.certs)
+        if ss_vols:
+            # check every volume; returning False on the first non-match would skip the rest of the list
+            for ss_vol in ss_vols:
+                if ss_vol['name'] == self.name:
+                    self.ss_vol = ss_vol
+                    return True
+
+        return False
+
+ @property
+ def ss_vol_needs_update(self):
+ if self.ss_vol['fullWarnThreshold'] != self.full_threshold:
+ return True
+ else:
+ return False
+
+ def create_ss_vol(self):
+ post_data = dict(
+ snapshotImageId=self.snapshot_image_id,
+ fullThreshold=self.full_threshold,
+ name=self.name,
+ viewMode=self.view_mode,
+ repositoryPercentage=self.repo_percentage,
+ repositoryPoolId=self.pool_id
+ )
+
+ rc, create_resp = request(self.url + 'storage-systems/%s/snapshot-volumes' % self.ssid,
+ data=json.dumps(post_data), headers=HEADERS, url_username=self.user,
+ url_password=self.pwd, validate_certs=self.certs, method='POST')
+
+ self.ss_vol = create_resp
+ # Doing a check after creation because the creation call fails to set the specified warning threshold
+ if self.ss_vol_needs_update:
+ self.update_ss_vol()
+ else:
+ self.module.exit_json(changed=True, **create_resp)
+
+ def update_ss_vol(self):
+ post_data = dict(
+ fullThreshold=self.full_threshold,
+ )
+
+ rc, resp = request(self.url + 'storage-systems/%s/snapshot-volumes/%s' % (self.ssid, self.ss_vol['id']),
+ data=json.dumps(post_data), headers=HEADERS, url_username=self.user, url_password=self.pwd,
+ method='POST', validate_certs=self.certs)
+
+ self.module.exit_json(changed=True, **resp)
+
+ def remove_ss_vol(self):
+ rc, resp = request(self.url + 'storage-systems/%s/snapshot-volumes/%s' % (self.ssid, self.ss_vol['id']),
+ headers=HEADERS, url_username=self.user, url_password=self.pwd, validate_certs=self.certs,
+ method='DELETE')
+ self.module.exit_json(changed=True, msg="Volume successfully deleted")
+
+ def apply(self):
+ if self.state == 'present':
+ if self.ss_vol_exists:
+ if self.ss_vol_needs_update:
+ self.update_ss_vol()
+ else:
+ self.module.exit_json(changed=False, **self.ss_vol)
+ else:
+ self.create_ss_vol()
+ else:
+ if self.ss_vol_exists:
+ self.remove_ss_vol()
+ else:
+ self.module.exit_json(changed=False, msg="Volume already absent")
+
+
+def main():
+ sv = SnapshotVolume()
+ sv.apply()
+
+
+if __name__ == '__main__':
+ main()
diff --git a/lib/ansible/modules/extras/storage/netapp/netapp_e_storage_system.py b/lib/ansible/modules/extras/storage/netapp/netapp_e_storage_system.py
new file mode 100644
index 0000000000..40ef893ad9
--- /dev/null
+++ b/lib/ansible/modules/extras/storage/netapp/netapp_e_storage_system.py
@@ -0,0 +1,306 @@
+#!/usr/bin/python
+
+# (c) 2016, NetApp, Inc
+#
+# This file is part of Ansible
+#
+# Ansible is free software: you can redistribute it and/or modify
+# it under the terms of the GNU General Public License as published by
+# the Free Software Foundation, either version 3 of the License, or
+# (at your option) any later version.
+#
+# Ansible is distributed in the hope that it will be useful,
+# but WITHOUT ANY WARRANTY; without even the implied warranty of
+# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+# GNU General Public License for more details.
+#
+# You should have received a copy of the GNU General Public License
+# along with Ansible. If not, see <http://www.gnu.org/licenses/>.
+#
+DOCUMENTATION = '''
+module: netapp_e_storage_system
+version_added: "2.2"
+short_description: Add/remove arrays from the Web Services Proxy
+description:
+- Manage the arrays accessible via a NetApp Web Services Proxy for NetApp E-series storage arrays.
+options:
+ api_username:
+ required: true
+ description:
+ - The username to authenticate with the SANtricity WebServices Proxy or embedded REST API.
+ api_password:
+ required: true
+ description:
+ - The password to authenticate with the SANtricity WebServices Proxy or embedded REST API.
+ api_url:
+ required: true
+ description:
+ - The url to the SANtricity WebServices Proxy or embedded REST API.
+ example:
+ - https://prod-1.wahoo.acme.com/devmgr/v2
+ validate_certs:
+ required: false
+ default: true
+ description:
+ - Should https certificates be validated?
+ ssid:
+ required: true
+ description:
+ - The ID of the array to manage. This value must be unique for each array.
+ state:
+ required: true
+ description:
+ - Whether the specified array should be configured on the Web Services Proxy or not.
+ choices: ['present', 'absent']
+ controller_addresses:
+ required: true
+ description:
+      - The list of addresses for the out-of-band management adapter or the agent host. Mutually exclusive of array_wwn parameter.
+ array_wwn:
+ required: false
+ description:
+ - The WWN of the array to manage. Only necessary if in-band managing multiple arrays on the same agent host. Mutually exclusive of controller_addresses parameter.
+ array_password:
+ required: false
+ description:
+ - The management password of the array to manage, if set.
+ enable_trace:
+ required: false
+ default: false
+ description:
+ - Enable trace logging for SYMbol calls to the storage system.
+ meta_tags:
+ required: false
+ default: None
+ description:
+      - Optional meta tags to associate with this storage system
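+  array_status_timeout_sec:
+    required: false
+    default: 60
+    description:
+      - Number of seconds to wait for the proxy to establish contact with a newly added array before failing.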
+author: Kevin Hulquest (@hulquest)
+'''
+
+EXAMPLES = '''
+---
+ - name: Presence of storage system
+ netapp_e_storage_system:
+ ssid: "{{ item.key }}"
+ state: present
+ api_url: "{{ netapp_api_url }}"
+ api_username: "{{ netapp_api_username }}"
+ api_password: "{{ netapp_api_password }}"
+ validate_certs: "{{ netapp_api_validate_certs }}"
+ controller_addresses:
+ - "{{ item.value.address1 }}"
+ - "{{ item.value.address2 }}"
+ with_dict: "{{ storage_systems }}"
+ when: check_storage_system
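+
+  # An illustrative removal task (a sketch; only the array ID and credentials are
+  # required when state is absent):
+  - name: Absence of storage system
+    netapp_e_storage_system:
+      ssid: "{{ item.key }}"
+      state: absent
+      api_url: "{{ netapp_api_url }}"
+      api_username: "{{ netapp_api_username }}"
+      api_password: "{{ netapp_api_password }}"
+      validate_certs: "{{ netapp_api_validate_certs }}"
+    with_dict: "{{ storage_systems }}"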
+'''
+
+RETURN = '''
+msg:
+    description: Success message ("Storage system added." or "Storage system removed.")
+    returned: success
+    type: string
+    sample: Storage system removed.
+'''
+import json
+from datetime import datetime as dt, timedelta
+from time import sleep
+
+from ansible.module_utils.api import basic_auth_argument_spec
+from ansible.module_utils.basic import AnsibleModule
+from ansible.module_utils.pycompat24 import get_exception
+from ansible.module_utils.urls import open_url
+from ansible.module_utils.six.moves.urllib.error import HTTPError
+
+
+def request(url, data=None, headers=None, method='GET', use_proxy=True,
+ force=False, last_mod_time=None, timeout=10, validate_certs=True,
+ url_username=None, url_password=None, http_agent=None, force_basic_auth=True, ignore_errors=False):
+ try:
+ r = open_url(url=url, data=data, headers=headers, method=method, use_proxy=use_proxy,
+ force=force, last_mod_time=last_mod_time, timeout=timeout, validate_certs=validate_certs,
+ url_username=url_username, url_password=url_password, http_agent=http_agent,
+ force_basic_auth=force_basic_auth)
+ except HTTPError:
+ err = get_exception()
+ r = err.fp
+
+ try:
+ raw_data = r.read()
+ if raw_data:
+ data = json.loads(raw_data)
+ else:
+ raw_data = None
+ except:
+ if ignore_errors:
+ pass
+ else:
+ raise Exception(raw_data)
+
+ resp_code = r.getcode()
+
+ if resp_code >= 400 and not ignore_errors:
+ raise Exception(resp_code, data)
+ else:
+ return resp_code, data
+
+
+def do_post(ssid, api_url, post_headers, api_usr, api_pwd, validate_certs, request_body, timeout):
+ (rc, resp) = request(api_url + "/storage-systems", data=request_body, headers=post_headers,
+ method='POST', url_username=api_usr, url_password=api_pwd,
+ validate_certs=validate_certs)
+ status = None
+ return_resp = resp
+ if 'status' in resp:
+ status = resp['status']
+
+ if rc == 201:
+ status = 'neverContacted'
+ fail_after_time = dt.utcnow() + timedelta(seconds=timeout)
+
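+        # a 201 means the proxy accepted the array but has not contacted it yet; poll its
+        # status once per second until it changes or the timeout window elapses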
+ while status == 'neverContacted':
+ if dt.utcnow() > fail_after_time:
+ raise Exception("web proxy timed out waiting for array status")
+
+ sleep(1)
+ (rc, system_resp) = request(api_url + "/storage-systems/%s" % ssid,
+ headers=dict(Accept="application/json"), url_username=api_usr,
+ url_password=api_pwd, validate_certs=validate_certs,
+ ignore_errors=True)
+ status = system_resp['status']
+ return_resp = system_resp
+
+ return status, return_resp
+
+
+def main():
+ argument_spec = basic_auth_argument_spec()
+ argument_spec.update(dict(
+ state=dict(required=True, choices=['present', 'absent']),
+ ssid=dict(required=True, type='str'),
+ controller_addresses=dict(type='list'),
+ array_wwn=dict(required=False, type='str'),
+ array_password=dict(required=False, type='str', no_log=True),
+ array_status_timeout_sec=dict(default=60, type='int'),
+ enable_trace=dict(default=False, type='bool'),
+ meta_tags=dict(type='list')
+ ))
+ module = AnsibleModule(
+ argument_spec=argument_spec,
+ supports_check_mode=True,
+ mutually_exclusive=[['controller_addresses', 'array_wwn']],
+ required_if=[('state', 'present', ['controller_addresses'])]
+ )
+
+ p = module.params
+
+ state = p['state']
+ ssid = p['ssid']
+ controller_addresses = p['controller_addresses']
+ array_wwn = p['array_wwn']
+ array_password = p['array_password']
+ array_status_timeout_sec = p['array_status_timeout_sec']
+ validate_certs = p['validate_certs']
+ meta_tags = p['meta_tags']
+ enable_trace = p['enable_trace']
+
+ api_usr = p['api_username']
+ api_pwd = p['api_password']
+ api_url = p['api_url']
+
+ changed = False
+ array_exists = False
+
+ try:
+ (rc, resp) = request(api_url + "/storage-systems/%s" % ssid, headers=dict(Accept="application/json"),
+ url_username=api_usr, url_password=api_pwd, validate_certs=validate_certs,
+ ignore_errors=True)
+ except:
+ err = get_exception()
+ module.fail_json(msg="Error accessing storage-system with id [%s]. Error [%s]" % (ssid, str(err)))
+
+ array_exists = True
+ array_detail = resp
+
+ if rc == 200:
+ if state == 'absent':
+ changed = True
+ array_exists = False
+ elif state == 'present':
+ current_addresses = frozenset(i for i in (array_detail['ip1'], array_detail['ip2']) if i)
+ if set(controller_addresses) != current_addresses:
+ changed = True
+ if array_detail['wwn'] != array_wwn and array_wwn is not None:
+ module.fail_json(
+ msg='It seems you may have specified a bad WWN. The storage system ID you specified, %s, currently has the WWN of %s' % (ssid, array_detail['wwn']))
+ elif rc == 404:
+ if state == 'present':
+ changed = True
+ array_exists = False
+ else:
+ changed = False
+ module.exit_json(changed=changed, msg="Storage system was not present.")
+
+ if changed and not module.check_mode:
+ if state == 'present':
+ if not array_exists:
+ # add the array
+ array_add_req = dict(
+ id=ssid,
+ controllerAddresses=controller_addresses,
+ metaTags=meta_tags,
+ enableTrace=enable_trace
+ )
+
+ if array_wwn:
+ array_add_req['wwn'] = array_wwn
+
+ if array_password:
+ array_add_req['password'] = array_password
+
+ post_headers = dict(Accept="application/json")
+ post_headers['Content-Type'] = 'application/json'
+ request_data = json.dumps(array_add_req)
+
+ try:
+ (rc, resp) = do_post(ssid, api_url, post_headers, api_usr, api_pwd, validate_certs, request_data,
+ array_status_timeout_sec)
+ except:
+ err = get_exception()
+ module.fail_json(msg="Failed to add storage system. Id[%s]. Request body [%s]. Error[%s]." %
+ (ssid, request_data, str(err)))
+
+ else: # array exists, modify...
+ post_headers = dict(Accept="application/json")
+ post_headers['Content-Type'] = 'application/json'
+ post_body = dict(
+ controllerAddresses=controller_addresses,
+ removeAllTags=True,
+ enableTrace=enable_trace,
+ metaTags=meta_tags
+ )
+
+ try:
+                # serialize the body like the add path does; request() passes it straight to open_url
+                (rc, resp) = do_post(ssid, api_url, post_headers, api_usr, api_pwd, validate_certs,
+                                     json.dumps(post_body), array_status_timeout_sec)
+ except:
+ err = get_exception()
+ module.fail_json(msg="Failed to update storage system. Id[%s]. Request body [%s]. Error[%s]." %
+ (ssid, post_body, str(err)))
+
+ elif state == 'absent':
+ # delete the array
+ try:
+ (rc, resp) = request(api_url + "/storage-systems/%s" % ssid, method='DELETE',
+ url_username=api_usr,
+ url_password=api_pwd, validate_certs=validate_certs)
+ except:
+ err = get_exception()
+ module.fail_json(msg="Failed to remove storage array. Id[%s]. Error[%s]." % (ssid, str(err)))
+
+ if rc == 422:
+ module.exit_json(changed=changed, msg="Storage system was not presnt.")
+ if rc == 204:
+ module.exit_json(changed=changed, msg="Storage system removed.")
+
+ module.exit_json(changed=changed, **resp)
+
+
+if __name__ == '__main__':
+ main()
diff --git a/lib/ansible/modules/extras/storage/netapp/netapp_e_storagepool.py b/lib/ansible/modules/extras/storage/netapp/netapp_e_storagepool.py
new file mode 100644
index 0000000000..1d86ef46f6
--- /dev/null
+++ b/lib/ansible/modules/extras/storage/netapp/netapp_e_storagepool.py
@@ -0,0 +1,884 @@
+#!/usr/bin/python
+
+# (c) 2016, NetApp, Inc
+#
+# This file is part of Ansible
+#
+# Ansible is free software: you can redistribute it and/or modify
+# it under the terms of the GNU General Public License as published by
+# the Free Software Foundation, either version 3 of the License, or
+# (at your option) any later version.
+#
+# Ansible is distributed in the hope that it will be useful,
+# but WITHOUT ANY WARRANTY; without even the implied warranty of
+# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+# GNU General Public License for more details.
+#
+# You should have received a copy of the GNU General Public License
+# along with Ansible. If not, see <http://www.gnu.org/licenses/>.
+
+DOCUMENTATION = '''
+---
+module: netapp_e_storagepool
+short_description: Manage disk groups and disk pools
+version_added: '2.2'
+description:
+ - Create or remove disk groups and disk pools for NetApp E-series storage arrays.
+options:
+ api_username:
+ required: true
+ description:
+ - The username to authenticate with the SANtricity WebServices Proxy or embedded REST API.
+ api_password:
+ required: true
+ description:
+ - The password to authenticate with the SANtricity WebServices Proxy or embedded REST API.
+ api_url:
+ required: true
+ description:
+ - The url to the SANtricity WebServices Proxy or embedded REST API.
+ example:
+ - https://prod-1.wahoo.acme.com/devmgr/v2
+ validate_certs:
+ required: false
+ default: true
+ description:
+ - Should https certificates be validated?
+ ssid:
+ required: true
+ description:
+ - The ID of the array to manage (as configured on the web services proxy).
+ state:
+ required: true
+ description:
+ - Whether the specified storage pool should exist or not.
+ - Note that removing a storage pool currently requires the removal of all defined volumes first.
+ choices: ['present', 'absent']
+ name:
+ required: true
+ description:
+ - The name of the storage pool to manage
+ criteria_drive_count:
+ description:
+ - The number of disks to use for building the storage pool. The pool will be expanded if this number exceeds the number of disks already in place
+ criteria_drive_type:
+ description:
+ - The type of disk (hdd or ssd) to use when searching for candidates to use.
+ choices: ['hdd','ssd']
+ criteria_size_unit:
+ description:
+ - The unit used to interpret size parameters
+ choices: ['bytes', 'b', 'kb', 'mb', 'gb', 'tb', 'pb', 'eb', 'zb', 'yb']
+ default: 'gb'
+ criteria_drive_min_size:
+ description:
+ - The minimum individual drive size (in size_unit) to consider when choosing drives for the storage pool.
+ criteria_min_usable_capacity:
+ description:
+      - The minimum size of the storage pool (in size_unit). The pool will be expanded if this value exceeds its current size.
+ criteria_drive_interface_type:
+ description:
+ - The interface type to use when selecting drives for the storage pool (no value means all interface types will be considered)
+ choices: ['sas', 'sas4k', 'fibre', 'fibre520b', 'scsi', 'sata', 'pata']
+ criteria_drive_require_fde:
+ description:
+ - Whether full disk encryption ability is required for drives to be added to the storage pool
+  raid_level:
+    required: false
+    choices: ['raidAll', 'raid0', 'raid1', 'raid3', 'raid5', 'raid6', 'raidDiskPool']
+    description:
+      - "Only required when the requested state is 'present'. The RAID level of the storage pool to be created."
+ erase_secured_drives:
+ required: false
+ choices: ['true', 'false']
+ description:
+ - Whether to erase secured disks before adding to storage pool
+ secure_pool:
+ required: false
+ choices: ['true', 'false']
+ description:
+ - Whether to convert to a secure storage pool. Will only work if all drives in the pool are security capable.
+ reserve_drive_count:
+ required: false
+ description:
+      - Set the number of drives reserved by the storage pool for reconstruction operations. Only valid on raid disk pools.
+ remove_volumes:
+ required: false
+ default: False
+ description:
+ - Prior to removing a storage pool, delete all volumes in the pool.
+author: Kevin Hulquest (@hulquest)
+
+'''
+EXAMPLES = '''
+ - name: No disk groups
+ netapp_e_storagepool:
+ ssid: "{{ ssid }}"
+ name: "{{ item }}"
+ state: absent
+ api_url: "{{ netapp_api_url }}"
+ api_username: "{{ netapp_api_username }}"
+ api_password: "{{ netapp_api_password }}"
+ validate_certs: "{{ netapp_api_validate_certs }}"
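+
+  # An illustrative creation task (a sketch with placeholder values; raid_level is
+  # required when state is present, and disk pools need at least 11 drives):
+  - name: Disk pool present
+    netapp_e_storagepool:
+      ssid: "{{ ssid }}"
+      name: ansible_pool
+      state: present
+      raid_level: raidDiskPool
+      criteria_drive_count: 11
+      api_url: "{{ netapp_api_url }}"
+      api_username: "{{ netapp_api_username }}"
+      api_password: "{{ netapp_api_password }}"
+      validate_certs: "{{ netapp_api_validate_certs }}"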
+'''
+RETURN = '''
+msg:
+ description: Success message
+ returned: success
+ type: string
+ sample: Json facts for the pool that was created.
+'''
+
+import json
+import logging
+from traceback import format_exc
+
+from ansible.module_utils.api import basic_auth_argument_spec
+from ansible.module_utils.basic import AnsibleModule
+from ansible.module_utils.pycompat24 import get_exception
+from ansible.module_utils.urls import open_url
+from ansible.module_utils.six.moves.urllib.error import HTTPError
+
+
+def request(url, data=None, headers=None, method='GET', use_proxy=True,
+ force=False, last_mod_time=None, timeout=10, validate_certs=True,
+ url_username=None, url_password=None, http_agent=None, force_basic_auth=True, ignore_errors=False):
+ try:
+ r = open_url(url=url, data=data, headers=headers, method=method, use_proxy=use_proxy,
+ force=force, last_mod_time=last_mod_time, timeout=timeout, validate_certs=validate_certs,
+ url_username=url_username, url_password=url_password, http_agent=http_agent,
+ force_basic_auth=force_basic_auth)
+ except HTTPError:
+ err = get_exception()
+ r = err.fp
+
+ try:
+ raw_data = r.read()
+ if raw_data:
+ data = json.loads(raw_data)
+ else:
+ raw_data = None
+ except:
+ if ignore_errors:
+ pass
+ else:
+ raise Exception(raw_data)
+
+ resp_code = r.getcode()
+
+ if resp_code >= 400 and not ignore_errors:
+ raise Exception(resp_code, data)
+ else:
+ return resp_code, data
+
+
+def select(predicate, iterable):
+ # python 2, 3 generic filtering.
+ if predicate is None:
+ predicate = bool
+ for x in iterable:
+ if predicate(x):
+ yield x
+
+
+class groupby(object):
+ # python 2, 3 generic grouping.
+ def __init__(self, iterable, key=None):
+ if key is None:
+ key = lambda x: x
+ self.keyfunc = key
+ self.it = iter(iterable)
+ self.tgtkey = self.currkey = self.currvalue = object()
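+        # all three start as the same shared sentinel object, so the first call to
+        # next() is guaranteed to advance the underlying iterator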
+
+ def __iter__(self):
+ return self
+
+ def next(self):
+ while self.currkey == self.tgtkey:
+ self.currvalue = next(self.it) # Exit on StopIteration
+ self.currkey = self.keyfunc(self.currvalue)
+ self.tgtkey = self.currkey
+        return (self.currkey, self._grouper(self.tgtkey))
+
+    __next__ = next  # Python 3 iterator protocol looks for __next__
+
+ def _grouper(self, tgtkey):
+ while self.currkey == tgtkey:
+ yield self.currvalue
+ self.currvalue = next(self.it) # Exit on StopIteration
+ self.currkey = self.keyfunc(self.currvalue)
+
+
+class NetAppESeriesStoragePool(object):
+ def __init__(self):
+ self._sp_drives_cached = None
+
+ self._size_unit_map = dict(
+ bytes=1,
+ b=1,
+ kb=1024,
+ mb=1024 ** 2,
+ gb=1024 ** 3,
+ tb=1024 ** 4,
+ pb=1024 ** 5,
+ eb=1024 ** 6,
+ zb=1024 ** 7,
+ yb=1024 ** 8
+ )
+
+ argument_spec = basic_auth_argument_spec()
+ argument_spec.update(dict(
+ api_username=dict(type='str', required=True),
+ api_password=dict(type='str', required=True, no_log=True),
+ api_url=dict(type='str', required=True),
+ state=dict(required=True, choices=['present', 'absent'], type='str'),
+ ssid=dict(required=True, type='str'),
+ name=dict(required=True, type='str'),
+ criteria_size_unit=dict(default='gb', type='str'),
+ criteria_drive_count=dict(type='int'),
+ criteria_drive_interface_type=dict(choices=['sas', 'sas4k', 'fibre', 'fibre520b', 'scsi', 'sata', 'pata'],
+ type='str'),
+ criteria_drive_type=dict(choices=['ssd', 'hdd'], type='str'),
+ criteria_drive_min_size=dict(type='int'),
+ criteria_drive_require_fde=dict(type='bool'),
+ criteria_min_usable_capacity=dict(type='int'),
+ raid_level=dict(
+ choices=['raidUnsupported', 'raidAll', 'raid0', 'raid1', 'raid3', 'raid5', 'raid6', 'raidDiskPool']),
+ erase_secured_drives=dict(type='bool'),
+ log_path=dict(type='str'),
+ remove_drives=dict(type='list'),
+ secure_pool=dict(type='bool', default=False),
+ reserve_drive_count=dict(type='int'),
+ remove_volumes=dict(type='bool', default=False)
+ ))
+
+ self.module = AnsibleModule(
+ argument_spec=argument_spec,
+ required_if=[
+ ('state', 'present', ['raid_level'])
+ ],
+ mutually_exclusive=[
+
+ ],
+ # TODO: update validation for various selection criteria
+ supports_check_mode=True
+ )
+
+ p = self.module.params
+
+ log_path = p['log_path']
+
+ # logging setup
+ self._logger = logging.getLogger(self.__class__.__name__)
+ self.debug = self._logger.debug
+
+ if log_path:
+ logging.basicConfig(level=logging.DEBUG, filename=log_path)
+
+ self.state = p['state']
+ self.ssid = p['ssid']
+ self.name = p['name']
+ self.validate_certs = p['validate_certs']
+
+ self.criteria_drive_count = p['criteria_drive_count']
+ self.criteria_drive_type = p['criteria_drive_type']
+ self.criteria_size_unit = p['criteria_size_unit']
+ self.criteria_drive_min_size = p['criteria_drive_min_size']
+ self.criteria_min_usable_capacity = p['criteria_min_usable_capacity']
+ self.criteria_drive_interface_type = p['criteria_drive_interface_type']
+ self.criteria_drive_require_fde = p['criteria_drive_require_fde']
+
+ self.raid_level = p['raid_level']
+ self.erase_secured_drives = p['erase_secured_drives']
+ self.remove_drives = p['remove_drives']
+ self.secure_pool = p['secure_pool']
+ self.reserve_drive_count = p['reserve_drive_count']
+ self.remove_volumes = p['remove_volumes']
+
+ try:
+ self.api_usr = p['api_username']
+ self.api_pwd = p['api_password']
+ self.api_url = p['api_url']
+ except KeyError:
+ self.module.fail_json(msg="You must pass in api_username "
+ "and api_password and api_url to the module.")
+
+ self.post_headers = dict(Accept="application/json")
+ self.post_headers['Content-Type'] = 'application/json'
+
+ # Quick and dirty drive selector, since the one provided by web service proxy is broken for min_disk_size as of 2016-03-12.
+ # Doesn't really need to be a class once this is in module_utils or retired- just groups everything together so we
+ # can copy/paste to other modules more easily.
+ # Filters all disks by specified criteria, then groups remaining disks by capacity, interface and disk type, and selects
+ # the first set that matches the specified count and/or aggregate capacity.
+ # class DriveSelector(object):
+ def filter_drives(
+ self,
+ drives, # raw drives resp
+ interface_type=None, # sas, sata, fibre, etc
+ drive_type=None, # ssd/hdd
+ spindle_speed=None, # 7200, 10000, 15000, ssd (=0)
+ min_drive_size=None,
+ max_drive_size=None,
+ fde_required=None,
+ size_unit='gb',
+ min_total_capacity=None,
+ min_drive_count=None,
+ exact_drive_count=None,
+ raid_level=None
+ ):
+ if min_total_capacity is None and exact_drive_count is None:
+ raise Exception("One of criteria_min_total_capacity or criteria_drive_count must be specified.")
+
+ if min_total_capacity:
+ min_total_capacity = min_total_capacity * self._size_unit_map[size_unit]
+
+ # filter clearly invalid/unavailable drives first
+ drives = select(lambda d: self._is_valid_drive(d), drives)
+
+ if interface_type:
+ drives = select(lambda d: d['phyDriveType'] == interface_type, drives)
+
+ if drive_type:
+ drives = select(lambda d: d['driveMediaType'] == drive_type, drives)
+
+ if spindle_speed is not None: # 0 is valid for ssds
+ drives = select(lambda d: d['spindleSpeed'] == spindle_speed, drives)
+
+ if min_drive_size:
+ min_drive_size_bytes = min_drive_size * self._size_unit_map[size_unit]
+ drives = select(lambda d: int(d['rawCapacity']) >= min_drive_size_bytes, drives)
+
+ if max_drive_size:
+ max_drive_size_bytes = max_drive_size * self._size_unit_map[size_unit]
+ drives = select(lambda d: int(d['rawCapacity']) <= max_drive_size_bytes, drives)
+
+ if fde_required:
+ drives = select(lambda d: d['fdeCapable'], drives)
+
+ # initial implementation doesn't have a preference for any of these values...
+ # just return the first set we find that matches the requested disk count and/or minimum total capacity
+ for (cur_capacity, drives_by_capacity) in groupby(drives, lambda d: int(d['rawCapacity'])):
+ for (cur_interface_type, drives_by_interface_type) in groupby(drives_by_capacity,
+ lambda d: d['phyDriveType']):
+ for (cur_drive_type, drives_by_drive_type) in groupby(drives_by_interface_type,
+ lambda d: d['driveMediaType']):
+ # listify so we can consume more than once
+ drives_by_drive_type = list(drives_by_drive_type)
+ candidate_set = list() # reset candidate list on each iteration of the innermost loop
+
+ if exact_drive_count:
+ if len(drives_by_drive_type) < exact_drive_count:
+ continue # we know this set is too small, move on
+
+ for drive in drives_by_drive_type:
+ candidate_set.append(drive)
+ if self._candidate_set_passes(candidate_set, min_capacity_bytes=min_total_capacity,
+ min_drive_count=min_drive_count,
+ exact_drive_count=exact_drive_count, raid_level=raid_level):
+ return candidate_set
+
+ raise Exception("couldn't find an available set of disks to match specified criteria")
+
+ def _is_valid_drive(self, d):
+ is_valid = d['available'] \
+ and d['status'] == 'optimal' \
+ and not d['pfa'] \
+ and not d['removed'] \
+ and not d['uncertified'] \
+ and not d['invalidDriveData'] \
+ and not d['nonRedundantAccess']
+
+ return is_valid
+
+ def _candidate_set_passes(self, candidate_set, min_capacity_bytes=None, min_drive_count=None,
+ exact_drive_count=None, raid_level=None):
+ if not self._is_drive_count_valid(len(candidate_set), min_drive_count=min_drive_count,
+ exact_drive_count=exact_drive_count, raid_level=raid_level):
+ return False
+ # TODO: this assumes candidate_set is all the same size- if we want to allow wastage, need to update to use min size of set
+ if min_capacity_bytes is not None and self._calculate_usable_capacity(int(candidate_set[0]['rawCapacity']),
+ len(candidate_set),
+ raid_level=raid_level) < min_capacity_bytes:
+ return False
+
+ return True
+
+ def _calculate_usable_capacity(self, disk_size_bytes, disk_count, raid_level=None):
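+        # usable capacity for n equally sized disks: raid0 keeps all n, raid1 mirrors (n/2),
+        # raid3/raid5 lose one disk to parity, raid6/raidDiskPool lose two.
+        # e.g. raid5 with 4 x 1 TB disks -> 3 TB usable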
+ if raid_level in [None, 'raid0']:
+ return disk_size_bytes * disk_count
+ if raid_level == 'raid1':
+ return (disk_size_bytes * disk_count) / 2
+ if raid_level in ['raid3', 'raid5']:
+ return (disk_size_bytes * disk_count) - disk_size_bytes
+ if raid_level in ['raid6', 'raidDiskPool']:
+ return (disk_size_bytes * disk_count) - (disk_size_bytes * 2)
+ raise Exception("unsupported raid_level: %s" % raid_level)
+
+ def _is_drive_count_valid(self, drive_count, min_drive_count=0, exact_drive_count=None, raid_level=None):
+ if exact_drive_count and exact_drive_count != drive_count:
+ return False
+ if raid_level == 'raidDiskPool':
+ if drive_count < 11:
+ return False
+ if raid_level == 'raid1':
+ if drive_count % 2 != 0:
+ return False
+ if raid_level in ['raid3', 'raid5']:
+ if drive_count < 3:
+ return False
+ if raid_level == 'raid6':
+ if drive_count < 4:
+ return False
+ if min_drive_count and drive_count < min_drive_count:
+ return False
+
+ return True
+
+ def get_storage_pool(self, storage_pool_name):
+ self.debug("fetching storage pools")
+ # map the storage pool name to its id
+ try:
+ (rc, resp) = request(self.api_url + "/storage-systems/%s/storage-pools" % (self.ssid),
+ headers=dict(Accept="application/json"), url_username=self.api_usr,
+ url_password=self.api_pwd, validate_certs=self.validate_certs)
+ except Exception:
+ err = get_exception()
+ rc = err.args[0]
+ if rc == 404 and self.state == 'absent':
+ self.module.exit_json(
+ msg="Storage pool [%s] did not exist." % (self.name))
+ else:
+ err = get_exception()
+ self.module.exit_json(
+ msg="Failed to get storage pools. Array id [%s]. Error[%s]. State[%s]. RC[%s]." %
+ (self.ssid, str(err), self.state, rc))
+
+ self.debug("searching for storage pool '%s'" % storage_pool_name)
+
+ pool_detail = next(select(lambda a: a['name'] == storage_pool_name, resp), None)
+
+ if pool_detail:
+ found = 'found'
+ else:
+ found = 'not found'
+ self.debug(found)
+
+ return pool_detail
+
+ def get_candidate_disks(self):
+ self.debug("getting candidate disks...")
+
+ # driveCapacityMin is broken on /drives POST. Per NetApp request we built our own
+ # switch back to commented code below if it gets fixed
+ # drives_req = dict(
+ # driveCount = self.criteria_drive_count,
+ # sizeUnit = 'mb',
+ # raidLevel = self.raid_level
+ # )
+ #
+ # if self.criteria_drive_type:
+ # drives_req['driveType'] = self.criteria_drive_type
+ # if self.criteria_disk_min_aggregate_size_mb:
+ # drives_req['targetUsableCapacity'] = self.criteria_disk_min_aggregate_size_mb
+ #
+ # # TODO: this arg appears to be ignored, uncomment if it isn't
+ # #if self.criteria_disk_min_size_gb:
+ # # drives_req['driveCapacityMin'] = self.criteria_disk_min_size_gb * 1024
+ # (rc,drives_resp) = request(self.api_url + "/storage-systems/%s/drives" % (self.ssid), data=json.dumps(drives_req), headers=self.post_headers, method='POST', url_username=self.api_usr, url_password=self.api_pwd, validate_certs=self.validate_certs)
+ #
+ # if rc == 204:
+ # self.module.fail_json(msg='Cannot find disks to match requested criteria for storage pool')
+
+ # disk_ids = [d['id'] for d in drives_resp]
+
+ try:
+ (rc, drives_resp) = request(self.api_url + "/storage-systems/%s/drives" % (self.ssid), method='GET',
+ url_username=self.api_usr, url_password=self.api_pwd,
+ validate_certs=self.validate_certs)
+ except:
+ err = get_exception()
+ self.module.exit_json(
+ msg="Failed to fetch disk drives. Array id [%s]. Error[%s]." % (self.ssid, str(err)))
+
+ try:
+ candidate_set = self.filter_drives(drives_resp,
+ exact_drive_count=self.criteria_drive_count,
+ drive_type=self.criteria_drive_type,
+ min_drive_size=self.criteria_drive_min_size,
+ raid_level=self.raid_level,
+ size_unit=self.criteria_size_unit,
+ min_total_capacity=self.criteria_min_usable_capacity,
+ interface_type=self.criteria_drive_interface_type,
+ fde_required=self.criteria_drive_require_fde
+ )
+ except:
+ err = get_exception()
+ self.module.fail_json(
+ msg="Failed to allocate adequate drive count. Id [%s]. Error [%s]." % (self.ssid, str(err)))
+
+ disk_ids = [d['id'] for d in candidate_set]
+
+ return disk_ids
+
+ def create_storage_pool(self):
+ self.debug("creating storage pool...")
+
+ sp_add_req = dict(
+ raidLevel=self.raid_level,
+ diskDriveIds=self.disk_ids,
+ name=self.name
+ )
+
+ if self.erase_secured_drives:
+ sp_add_req['eraseSecuredDrives'] = self.erase_secured_drives
+
+ try:
+ (rc, resp) = request(self.api_url + "/storage-systems/%s/storage-pools" % (self.ssid),
+ data=json.dumps(sp_add_req), headers=self.post_headers, method='POST',
+ url_username=self.api_usr, url_password=self.api_pwd,
+ validate_certs=self.validate_certs,
+ timeout=120)
+ except:
+ err = get_exception()
+ pool_id = self.pool_detail['id']
+ self.module.exit_json(
+ msg="Failed to create storage pool. Pool id [%s]. Array id [%s]. Error[%s]." % (pool_id,
+ self.ssid,
+ str(err)))
+
+ self.pool_detail = self.get_storage_pool(self.name)
+
+ if self.secure_pool:
+ secure_pool_data = dict(securePool=True)
+ try:
+ (retc, r) = request(
+ self.api_url + "/storage-systems/%s/storage-pools/%s" % (self.ssid, self.pool_detail['id']),
+ data=json.dumps(secure_pool_data), headers=self.post_headers, method='POST',
+ url_username=self.api_usr,
+ url_password=self.api_pwd, validate_certs=self.validate_certs, timeout=120, ignore_errors=True)
+ except:
+ err = get_exception()
+ pool_id = self.pool_detail['id']
+ self.module.exit_json(
+ msg="Failed to update storage pool. Pool id [%s]. Array id [%s]. Error[%s]." % (pool_id,
+ self.ssid,
+ str(err)))
+
+ @property
+ def needs_raid_level_migration(self):
+ current_raid_level = self.pool_detail['raidLevel']
+ needs_migration = self.raid_level != current_raid_level
+
+ if needs_migration: # sanity check some things so we can fail early/check-mode
+ if current_raid_level == 'raidDiskPool':
+ self.module.fail_json(msg="raid level cannot be changed for disk pools")
+
+ return needs_migration
+
+ def migrate_raid_level(self):
+ self.debug("migrating storage pool to raid level '%s'..." % self.raid_level)
+ sp_raid_migrate_req = dict(
+ raidLevel=self.raid_level
+ )
+ try:
+ (rc, resp) = request(
+ self.api_url + "/storage-systems/%s/storage-pools/%s/raid-type-migration" % (self.ssid,
+ self.name),
+ data=json.dumps(sp_raid_migrate_req), headers=self.post_headers, method='POST',
+ url_username=self.api_usr,
+ url_password=self.api_pwd, validate_certs=self.validate_certs, timeout=120)
+ except:
+ err = get_exception()
+ pool_id = self.pool_detail['id']
+ self.module.exit_json(
+ msg="Failed to change the raid level of storage pool. Pool id [%s]. Array id [%s]. Error[%s]." % (
+ pool_id, self.ssid, str(err)))
+
+ @property
+ def sp_drives(self, exclude_hotspares=True):
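+        # note: as a property this is always accessed without arguments, so
+        # exclude_hotspares effectively always uses its default of True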
+ if not self._sp_drives_cached:
+
+ self.debug("fetching drive list...")
+ try:
+ (rc, resp) = request(self.api_url + "/storage-systems/%s/drives" % (self.ssid), method='GET',
+ url_username=self.api_usr, url_password=self.api_pwd,
+ validate_certs=self.validate_certs)
+ except:
+ err = get_exception()
+ pool_id = self.pool_detail['id']
+ self.module.exit_json(
+ msg="Failed to fetch disk drives. Pool id [%s]. Array id [%s]. Error[%s]." % (pool_id, self.ssid, str(err)))
+
+ sp_id = self.pool_detail['id']
+ if exclude_hotspares:
+ self._sp_drives_cached = [d for d in resp if d['currentVolumeGroupRef'] == sp_id and not d['hotSpare']]
+ else:
+ self._sp_drives_cached = [d for d in resp if d['currentVolumeGroupRef'] == sp_id]
+
+ return self._sp_drives_cached
+
+ @property
+ def reserved_drive_count_differs(self):
+ if int(self.pool_detail['volumeGroupData']['diskPoolData'][
+ 'reconstructionReservedDriveCount']) != self.reserve_drive_count:
+ return True
+ return False
+
+ @property
+ def needs_expansion(self):
+        if self.criteria_drive_count and self.criteria_drive_count > len(self.sp_drives):
+ return True
+ # TODO: is totalRaidedSpace the best attribute for "how big is this SP"?
+ if self.criteria_min_usable_capacity and \
+ (self.criteria_min_usable_capacity * self._size_unit_map[self.criteria_size_unit]) > int(self.pool_detail['totalRaidedSpace']):
+ return True
+
+ return False
+
+ def get_expansion_candidate_drives(self):
+ # sanity checks; don't call this if we can't/don't need to expand
+ if not self.needs_expansion:
+ self.module.fail_json(msg="can't get expansion candidates when pool doesn't need expansion")
+
+ self.debug("fetching expansion candidate drives...")
+ try:
+ (rc, resp) = request(
+ self.api_url + "/storage-systems/%s/storage-pools/%s/expand" % (self.ssid,
+ self.pool_detail['id']),
+ method='GET', url_username=self.api_usr, url_password=self.api_pwd, validate_certs=self.validate_certs,
+ timeout=120)
+ except:
+ err = get_exception()
+ pool_id = self.pool_detail['id']
+ self.module.exit_json(
+ msg="Failed to fetch candidate drives for storage pool. Pool id [%s]. Array id [%s]. Error[%s]." % (
+ pool_id, self.ssid, str(err)))
+
+ current_drive_count = len(self.sp_drives)
+ current_capacity_bytes = int(self.pool_detail['totalRaidedSpace']) # TODO: is this the right attribute to use?
+
+ if self.criteria_min_usable_capacity:
+ requested_capacity_bytes = self.criteria_min_usable_capacity * self._size_unit_map[self.criteria_size_unit]
+ else:
+ requested_capacity_bytes = current_capacity_bytes
+
+ if self.criteria_drive_count:
+ minimum_disks_to_add = max((self.criteria_drive_count - current_drive_count), 1)
+ else:
+ minimum_disks_to_add = 1
+
+ minimum_bytes_to_add = max(requested_capacity_bytes - current_capacity_bytes, 0)
+
+ # FUTURE: allow more control over expansion candidate selection?
+ # loop over candidate disk sets and add until we've met both criteria
+
+ added_drive_count = 0
+ added_capacity_bytes = 0
+
+ drives_to_add = set()
+
+ for s in resp:
+ # don't trust the API not to give us duplicate drives across candidate sets, especially in multi-drive sets
+ candidate_drives = s['drives']
+ if len(drives_to_add.intersection(candidate_drives)) != 0:
+ # duplicate, skip
+ continue
+ drives_to_add.update(candidate_drives)
+ added_drive_count += len(candidate_drives)
+ added_capacity_bytes += int(s['usableCapacity'])
+
+ if added_drive_count >= minimum_disks_to_add and added_capacity_bytes >= minimum_bytes_to_add:
+ break
+
+ if (added_drive_count < minimum_disks_to_add) or (added_capacity_bytes < minimum_bytes_to_add):
+ self.module.fail_json(
+ msg="unable to find at least %s drives to add that would add at least %s bytes of capacity" % (
+ minimum_disks_to_add, minimum_bytes_to_add))
+
+ return list(drives_to_add)
+
+ def expand_storage_pool(self):
+ drives_to_add = self.get_expansion_candidate_drives()
+
+ self.debug("adding %s drives to storage pool..." % len(drives_to_add))
+ sp_expand_req = dict(
+ drives=drives_to_add
+ )
+ try:
+ request(
+ self.api_url + "/storage-systems/%s/storage-pools/%s/expand" % (self.ssid,
+ self.pool_detail['id']),
+ data=json.dumps(sp_expand_req), headers=self.post_headers, method='POST', url_username=self.api_usr,
+ url_password=self.api_pwd, validate_certs=self.validate_certs, timeout=120)
+ except:
+ err = get_exception()
+ pool_id = self.pool_detail['id']
+            self.module.exit_json(
+                msg="Failed to add drives to storage pool. Pool id [%s]. Array id [%s]. Error[%s]." % (
+                    pool_id, self.ssid, str(err)))
+
+ # TODO: check response
+ # TODO: support blocking wait?
+
+ def reduce_drives(self, drive_list):
+        # every drive requested for removal must already be part of this storage pool
+        if not all(drive in self.sp_drives for drive in drive_list):
+            self.module.fail_json(
+                msg="One of the drives you wish to remove does not currently exist in the storage pool you specified")
+
+ try:
+ (rc, resp) = request(
+ self.api_url + "/storage-systems/%s/storage-pools/%s/reduction" % (self.ssid,
+ self.pool_detail['id']),
+ data=json.dumps(drive_list), headers=self.post_headers, method='POST', url_username=self.api_usr,
+ url_password=self.api_pwd, validate_certs=self.validate_certs, timeout=120)
+ except:
+ err = get_exception()
+ pool_id = self.pool_detail['id']
+ self.module.exit_json(
+ msg="Failed to remove drives from storage pool. Pool id [%s]. Array id [%s]. Error[%s]." % (
+ pool_id, self.ssid, str(err)))
+
+ def update_reserve_drive_count(self, qty):
+ data = dict(reservedDriveCount=qty)
+ try:
+ (rc, resp) = request(
+ self.api_url + "/storage-systems/%s/storage-pools/%s" % (self.ssid, self.pool_detail['id']),
+ data=json.dumps(data), headers=self.post_headers, method='POST', url_username=self.api_usr,
+ url_password=self.api_pwd, validate_certs=self.validate_certs, timeout=120)
+ except:
+ err = get_exception()
+ pool_id = self.pool_detail['id']
+            self.module.exit_json(
+                msg="Failed to update reserve drive count. Pool id [%s]. Array id [%s]. Error[%s]." % (
+                    pool_id, self.ssid, str(err)))
+
+ def apply(self):
+ changed = False
+ pool_exists = False
+
+ self.pool_detail = self.get_storage_pool(self.name)
+
+ if self.pool_detail:
+ pool_exists = True
+ pool_id = self.pool_detail['id']
+
+ if self.state == 'absent':
+ self.debug("CHANGED: storage pool exists, but requested state is 'absent'")
+ changed = True
+ elif self.state == 'present':
+ # sanity checks first- we can't change these, so we'll bomb if they're specified
+ if self.criteria_drive_type and self.criteria_drive_type != self.pool_detail['driveMediaType']:
+ self.module.fail_json(
+ msg="drive media type %s cannot be changed to %s" % (self.pool_detail['driveMediaType'],
+ self.criteria_drive_type))
+
+ # now the things we can change...
+ if self.needs_expansion:
+ self.debug("CHANGED: storage pool needs expansion")
+ changed = True
+
+ if self.needs_raid_level_migration:
+ self.debug(
+ "CHANGED: raid level migration required; storage pool uses '%s', requested is '%s'" % (
+ self.pool_detail['raidLevel'], self.raid_level))
+ changed = True
+
+ # if self.reserved_drive_count_differs:
+ # changed = True
+
+ # TODO: validate other state details? (pool priority, alert threshold)
+
+ # per FPoole and others, pool reduce operations will not be supported. Automatic "smart" reduction
+ # presents a difficult parameter issue, as the disk count can increase due to expansion, so we
+ # can't just use disk count > criteria_drive_count.
+
+ else: # pool does not exist
+ if self.state == 'present':
+ self.debug("CHANGED: storage pool does not exist, but requested state is 'present'")
+ changed = True
+
+ # ensure we can get back a workable set of disks
+ # (doing this early so candidate selection runs under check mode)
+ self.disk_ids = self.get_candidate_disks()
+ else:
+ self.module.exit_json(msg="Storage pool [%s] did not exist." % (self.name))
+
+ if changed and not self.module.check_mode:
+ # apply changes
+ if self.state == 'present':
+ if not pool_exists:
+ self.create_storage_pool()
+ else: # pool exists but differs, modify...
+ if self.needs_expansion:
+ self.expand_storage_pool()
+
+ if self.remove_drives:
+ self.reduce_drives(self.remove_drives)
+
+ if self.needs_raid_level_migration:
+ self.migrate_raid_level()
+
+ # if self.reserved_drive_count_differs:
+ # self.update_reserve_drive_count(self.reserve_drive_count)
+
+ if self.secure_pool:
+ secure_pool_data = dict(securePool=True)
+ try:
+ (retc, r) = request(
+ self.api_url + "/storage-systems/%s/storage-pools/%s" % (self.ssid,
+ self.pool_detail[
+ 'id']),
+ data=json.dumps(secure_pool_data), headers=self.post_headers, method='POST',
+ url_username=self.api_usr, url_password=self.api_pwd,
+ validate_certs=self.validate_certs, timeout=120, ignore_errors=True)
+ except:
+ err = get_exception()
+                    self.module.exit_json(
+                        msg="Failed to secure storage pool. Pool id [%s]. Array id [%s]. Error[%s]." % (
+                            pool_id, self.ssid, str(err)))
+
+ if int(retc) == 422:
+ self.module.fail_json(
+ msg="Error in enabling secure pool. One of the drives in the specified storage pool is likely not security capable")
+
+ elif self.state == 'absent':
+ # delete the storage pool
+ try:
+ remove_vol_opt = ''
+ if self.remove_volumes:
+ remove_vol_opt = '?delete-volumes=true'
+ (rc, resp) = request(
+ self.api_url + "/storage-systems/%s/storage-pools/%s%s" % (self.ssid, pool_id,
+ remove_vol_opt),
+ method='DELETE',
+ url_username=self.api_usr, url_password=self.api_pwd, validate_certs=self.validate_certs,
+ timeout=120)
+ except:
+ err = get_exception()
+ self.module.exit_json(
+ msg="Failed to delete storage pool. Pool id [%s]. Array id [%s]. Error[%s]." % (pool_id,
+ self.ssid,
+ str(err)))
+
+ self.module.exit_json(changed=changed, **self.pool_detail)
+
+
+def main():
+ sp = NetAppESeriesStoragePool()
+ try:
+ sp.apply()
+ except Exception:
+ e = get_exception()
+ sp.debug("Exception in apply(): \n%s" % format_exc(e))
+ raise
+
+
+if __name__ == '__main__':
+ main()
diff --git a/lib/ansible/modules/extras/storage/netapp/netapp_e_volume.py b/lib/ansible/modules/extras/storage/netapp/netapp_e_volume.py
new file mode 100644
index 0000000000..09825c5201
--- /dev/null
+++ b/lib/ansible/modules/extras/storage/netapp/netapp_e_volume.py
@@ -0,0 +1,618 @@
+#!/usr/bin/python
+
+# (c) 2016, NetApp, Inc
+#
+# This file is part of Ansible
+#
+# Ansible is free software: you can redistribute it and/or modify
+# it under the terms of the GNU General Public License as published by
+# the Free Software Foundation, either version 3 of the License, or
+# (at your option) any later version.
+#
+# Ansible is distributed in the hope that it will be useful,
+# but WITHOUT ANY WARRANTY; without even the implied warranty of
+# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+# GNU General Public License for more details.
+#
+# You should have received a copy of the GNU General Public License
+# along with Ansible. If not, see <http://www.gnu.org/licenses/>.
+#
+
+from ansible.module_utils.api import basic_auth_argument_spec
+
+DOCUMENTATION = '''
+---
+module: netapp_e_volume
+version_added: "2.2"
+short_description: Manage storage volumes (standard and thin)
+description:
+ - Create or remove volumes (standard and thin) for NetApp E/EF-series storage arrays.
+options:
+ api_username:
+ required: true
+ description:
+ - The username to authenticate with the SANtricity WebServices Proxy or embedded REST API.
+ api_password:
+ required: true
+ description:
+ - The password to authenticate with the SANtricity WebServices Proxy or embedded REST API.
+ api_url:
+ required: true
+ description:
+ - The url to the SANtricity WebServices Proxy or embedded REST API.
+ example:
+ - https://prod-1.wahoo.acme.com/devmgr/v2
+ validate_certs:
+ required: false
+ default: true
+ description:
+ - Should https certificates be validated?
+ ssid:
+ required: true
+ description:
+ - The ID of the array to manage (as configured on the web services proxy).
+ state:
+ required: true
+ description:
+ - Whether the specified volume should exist or not.
+ choices: ['present', 'absent']
+ name:
+ required: true
+ description:
+ - The name of the volume to manage
+ storage_pool_name:
+ required: true
+ description:
+ - "Required only when requested state is 'present'. The name of the storage pool the volume should exist on."
+ size_unit:
+ description:
+ - The unit used to interpret the size parameter
+ choices: ['bytes', 'b', 'kb', 'mb', 'gb', 'tb', 'pb', 'eb', 'zb', 'yb']
+ default: 'gb'
+ size:
+ required: true
+ description:
+ - "Required only when state = 'present'. The size of the volume in (size_unit)."
+  segment_size_kb:
+    description:
+      - The segment size of the new volume
+    default: 128
+ thin_provision:
+ description:
+ - Whether the volume should be thin provisioned. Thin volumes can only be created on disk pools (raidDiskPool).
+ default: False
+ choices: ['yes','no','true','false']
+  thin_volume_repo_size:
+    description:
+      - Initial size of the thin volume repository volume (in size_unit)
+      - Required when I(thin_provision=true)
+    required: False
+ thin_volume_max_repo_size:
+ description:
+ - Maximum size that the thin volume repository volume will automatically expand to
+ default: same as size (in size_unit)
+ ssd_cache_enabled:
+ description:
+ - Whether an existing SSD cache should be enabled on the volume (fails if no SSD cache defined)
+ default: None (ignores existing SSD cache setting)
+ choices: ['yes','no','true','false']
+ data_assurance_enabled:
+ description:
+ - If data assurance should be enabled for the volume
+ default: false
+
+# TODO: doc thin volume parameters
+
+author: Kevin Hulquest (@hulquest)
+
+'''
+EXAMPLES = '''
+ - name: No thin volume
+ netapp_e_volume:
+ ssid: "{{ ssid }}"
+ name: NewThinVolumeByAnsible
+ state: absent
+ log_path: /tmp/volume.log
+ api_url: "{{ netapp_api_url }}"
+ api_username: "{{ netapp_api_username }}"
+ api_password: "{{ netapp_api_password }}"
+ validate_certs: "{{ netapp_api_validate_certs }}"
+ when: check_volume
+
+
+ - name: No fat volume
+ netapp_e_volume:
+ ssid: "{{ ssid }}"
+ name: NewVolumeByAnsible
+ state: absent
+ log_path: /tmp/volume.log
+ api_url: "{{ netapp_api_url }}"
+ api_username: "{{ netapp_api_username }}"
+ api_password: "{{ netapp_api_password }}"
+ validate_certs: "{{ netapp_api_validate_certs }}"
+ when: check_volume
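+
+  # An illustrative creation task (a sketch with placeholder names; storage_pool_name
+  # and size are required when state is present):
+  - name: Standard volume present
+    netapp_e_volume:
+      ssid: "{{ ssid }}"
+      name: NewVolumeByAnsible
+      state: present
+      storage_pool_name: ansible_pool
+      size: 10
+      size_unit: gb
+      api_url: "{{ netapp_api_url }}"
+      api_username: "{{ netapp_api_username }}"
+      api_password: "{{ netapp_api_password }}"
+      validate_certs: "{{ netapp_api_validate_certs }}"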
+'''
+RETURN = '''
+msg:
+    description: >
+        Success message, one of "Standard volume [name] has been created.", "Thin volume [name] has been created.",
+        "Volume [name] has been expanded.", "Volume [name] has been deleted.", "Volume [name] did not exist.",
+        or "Volume [name] already exists."
+    returned: success
+    type: string
+    sample: Standard volume [workload_vol_1] has been created.
+'''
+
+import json
+import logging
+import time
+from traceback import format_exc
+
+from ansible.module_utils.basic import AnsibleModule
+from ansible.module_utils.pycompat24 import get_exception
+from ansible.module_utils.urls import open_url
+from ansible.module_utils.six.moves.urllib.error import HTTPError
+
+
+def request(url, data=None, headers=None, method='GET', use_proxy=True,
+ force=False, last_mod_time=None, timeout=10, validate_certs=True,
+ url_username=None, url_password=None, http_agent=None, force_basic_auth=True, ignore_errors=False):
+ try:
+ r = open_url(url=url, data=data, headers=headers, method=method, use_proxy=use_proxy,
+ force=force, last_mod_time=last_mod_time, timeout=timeout, validate_certs=validate_certs,
+ url_username=url_username, url_password=url_password, http_agent=http_agent,
+ force_basic_auth=force_basic_auth)
+ except HTTPError:
+ err = get_exception()
+ r = err.fp
+
+ try:
+ raw_data = r.read()
+ if raw_data:
+ data = json.loads(raw_data)
+ else:
+            raw_data = None
+ except:
+ if ignore_errors:
+ pass
+ else:
+ raise Exception(raw_data)
+
+ resp_code = r.getcode()
+
+ if resp_code >= 400 and not ignore_errors:
+ raise Exception(resp_code, data)
+ else:
+ return resp_code, data
+
+
+def ifilter(predicate, iterable):
+ # python 2, 3 generic filtering.
+ if predicate is None:
+ predicate = bool
+ for x in iterable:
+ if predicate(x):
+ yield x
+
+
+class NetAppESeriesVolume(object):
+ def __init__(self):
+ self._size_unit_map = dict(
+ bytes=1,
+ b=1,
+ kb=1024,
+ mb=1024 ** 2,
+ gb=1024 ** 3,
+ tb=1024 ** 4,
+ pb=1024 ** 5,
+ eb=1024 ** 6,
+ zb=1024 ** 7,
+ yb=1024 ** 8
+ )
+
+ self._post_headers = dict(Accept="application/json")
+ self._post_headers['Content-Type'] = 'application/json'
+
+ argument_spec = basic_auth_argument_spec()
+ argument_spec.update(dict(
+ state=dict(required=True, choices=['present', 'absent']),
+ ssid=dict(required=True, type='str'),
+ name=dict(required=True, type='str'),
+ storage_pool_name=dict(type='str'),
+ size_unit=dict(default='gb', choices=['bytes', 'b', 'kb', 'mb', 'gb', 'tb', 'pb', 'eb', 'zb', 'yb'],
+ type='str'),
+ size=dict(type='int'),
+ segment_size_kb=dict(default=128, choices=[8, 16, 32, 64, 128, 256, 512], type='int'),
+ ssd_cache_enabled=dict(type='bool'), # no default, leave existing setting alone
+ data_assurance_enabled=dict(default=False, type='bool'),
+ thin_provision=dict(default=False, type='bool'),
+ thin_volume_repo_size=dict(type='int'),
+ thin_volume_max_repo_size=dict(type='int'),
+ # TODO: add cache, owning controller support, thin expansion policy, etc
+ log_path=dict(type='str'),
+ api_url=dict(type='str'),
+ api_username=dict(type='str'),
+ api_password=dict(type='str'),
+ validate_certs=dict(type='bool'),
+ ))
+
+ self.module = AnsibleModule(argument_spec=argument_spec,
+ required_if=[
+ ('state', 'present', ['storage_pool_name', 'size']),
+                                        ('thin_provision', True, ['thin_volume_repo_size'])
+ ],
+ supports_check_mode=True)
+ p = self.module.params
+
+ log_path = p['log_path']
+
+ # logging setup
+ self._logger = logging.getLogger(self.__class__.__name__)
+ self.debug = self._logger.debug
+
+ if log_path:
+ logging.basicConfig(level=logging.DEBUG, filename=log_path)
+
+ self.state = p['state']
+ self.ssid = p['ssid']
+ self.name = p['name']
+ self.storage_pool_name = p['storage_pool_name']
+ self.size_unit = p['size_unit']
+ self.size = p['size']
+ self.segment_size_kb = p['segment_size_kb']
+ self.ssd_cache_enabled = p['ssd_cache_enabled']
+ self.data_assurance_enabled = p['data_assurance_enabled']
+ self.thin_provision = p['thin_provision']
+ self.thin_volume_repo_size = p['thin_volume_repo_size']
+ self.thin_volume_max_repo_size = p['thin_volume_max_repo_size']
+
+ if not self.thin_volume_max_repo_size:
+ self.thin_volume_max_repo_size = self.size
+
+ self.validate_certs = p['validate_certs']
+
+        self.api_usr = p['api_username']
+        self.api_pwd = p['api_password']
+        self.api_url = p['api_url']
+
+        if not (self.api_usr and self.api_pwd and self.api_url):
+            self.module.fail_json(msg="You must pass in api_username "
+                                      "and api_password and api_url to the module.")
+
+ def get_volume(self, volume_name):
+ self.debug('fetching volumes')
+ # fetch the list of volume objects and look for one with a matching name (we'll need to merge volumes and thin-volumes)
+ try:
+ (rc, volumes) = request(self.api_url + "/storage-systems/%s/volumes" % (self.ssid),
+ headers=dict(Accept="application/json"), url_username=self.api_usr,
+ url_password=self.api_pwd, validate_certs=self.validate_certs)
+ except Exception:
+ err = get_exception()
+ self.module.fail_json(
+ msg="Failed to obtain list of standard/thick volumes. Array Id [%s]. Error[%s]." % (self.ssid,
+ str(err)))
+
+ try:
+ self.debug('fetching thin-volumes')
+ (rc, thinvols) = request(self.api_url + "/storage-systems/%s/thin-volumes" % (self.ssid),
+ headers=dict(Accept="application/json"), url_username=self.api_usr,
+ url_password=self.api_pwd, validate_certs=self.validate_certs)
+ except Exception:
+ err = get_exception()
+ self.module.fail_json(
+ msg="Failed to obtain list of thin volumes. Array Id [%s]. Error[%s]." % (self.ssid, str(err)))
+
+ volumes.extend(thinvols)
+
+ self.debug("searching for volume '%s'" % volume_name)
+ volume_detail = next(ifilter(lambda a: a['name'] == volume_name, volumes), None)
+
+ if volume_detail:
+ self.debug('found')
+ else:
+ self.debug('not found')
+
+ return volume_detail
+
+ def get_storage_pool(self, storage_pool_name):
+ self.debug("fetching storage pools")
+ # map the storage pool name to its id
+ try:
+ (rc, resp) = request(self.api_url + "/storage-systems/%s/storage-pools" % (self.ssid),
+ headers=dict(Accept="application/json"), url_username=self.api_usr,
+ url_password=self.api_pwd, validate_certs=self.validate_certs)
+ except Exception:
+ err = get_exception()
+ self.module.fail_json(
+ msg="Failed to obtain list of storage pools. Array Id [%s]. Error[%s]." % (self.ssid, str(err)))
+
+ self.debug("searching for storage pool '%s'" % storage_pool_name)
+ pool_detail = next(ifilter(lambda a: a['name'] == storage_pool_name, resp), None)
+
+ if pool_detail:
+ self.debug('found')
+ else:
+ self.debug('not found')
+
+ return pool_detail
+
+ def create_volume(self, pool_id, name, size_unit, size, segment_size_kb, data_assurance_enabled):
+ volume_add_req = dict(
+ name=name,
+ poolId=pool_id,
+ sizeUnit=size_unit,
+ size=size,
+ segSize=segment_size_kb,
+ dataAssuranceEnabled=data_assurance_enabled,
+ )
+
+ self.debug("creating volume '%s'" % name)
+ try:
+ (rc, resp) = request(self.api_url + "/storage-systems/%s/volumes" % (self.ssid),
+ data=json.dumps(volume_add_req), headers=self._post_headers, method='POST',
+ url_username=self.api_usr, url_password=self.api_pwd,
+ validate_certs=self.validate_certs,
+ timeout=120)
+ except Exception:
+ err = get_exception()
+ self.module.fail_json(
+ msg="Failed to create volume. Volume [%s]. Array Id [%s]. Error[%s]." % (self.name, self.ssid,
+ str(err)))
+
+ def create_thin_volume(self, pool_id, name, size_unit, size, thin_volume_repo_size,
+ thin_volume_max_repo_size, data_assurance_enabled):
+ thin_volume_add_req = dict(
+ name=name,
+ poolId=pool_id,
+ sizeUnit=size_unit,
+ virtualSize=size,
+ repositorySize=thin_volume_repo_size,
+ maximumRepositorySize=thin_volume_max_repo_size,
+ dataAssuranceEnabled=data_assurance_enabled,
+ )
+
+ self.debug("creating thin-volume '%s'" % name)
+ try:
+ (rc, resp) = request(self.api_url + "/storage-systems/%s/thin-volumes" % (self.ssid),
+ data=json.dumps(thin_volume_add_req), headers=self._post_headers, method='POST',
+ url_username=self.api_usr, url_password=self.api_pwd,
+ validate_certs=self.validate_certs,
+ timeout=120)
+ except Exception:
+ err = get_exception()
+ self.module.fail_json(
+ msg="Failed to create thin volume. Volume [%s]. Array Id [%s]. Error[%s]." % (self.name,
+ self.ssid,
+ str(err)))
+
+ def delete_volume(self):
+ # delete the volume
+ self.debug("deleting volume '%s'" % self.volume_detail['name'])
+ try:
+ (rc, resp) = request(
+ self.api_url + "/storage-systems/%s/%s/%s" % (self.ssid, self.volume_resource_name,
+ self.volume_detail['id']),
+ method='DELETE', url_username=self.api_usr, url_password=self.api_pwd,
+ validate_certs=self.validate_certs, timeout=120)
+ except Exception:
+ err = get_exception()
+ self.module.fail_json(
+ msg="Failed to delete volume. Volume [%s]. Array Id [%s]. Error[%s]." % (self.name, self.ssid,
+ str(err)))
+
+ @property
+ def volume_resource_name(self):
+ if self.volume_detail['thinProvisioned']:
+ return 'thin-volumes'
+ else:
+ return 'volumes'
+
+ @property
+ def volume_properties_changed(self):
+ return self.volume_ssdcache_setting_changed # or with other props here when extended
+
+ # TODO: add support for r/w cache settings, owning controller, scan settings, expansion policy, growth alert threshold
+
+ @property
+ def volume_ssdcache_setting_changed(self):
+ # None means ignore existing setting
+ if self.ssd_cache_enabled is not None and self.ssd_cache_enabled != self.volume_detail['flashCached']:
+ self.debug("flash cache setting changed")
+ return True
+
+ def update_volume_properties(self):
+ update_volume_req = dict()
+
+ # conditionally add values so we ignore unspecified props
+ if self.volume_ssdcache_setting_changed:
+ update_volume_req['flashCache'] = self.ssd_cache_enabled
+
+ self.debug("updating volume properties...")
+ try:
+ (rc, resp) = request(
+ self.api_url + "/storage-systems/%s/%s/%s/" % (self.ssid, self.volume_resource_name,
+ self.volume_detail['id']),
+ data=json.dumps(update_volume_req), headers=self._post_headers, method='POST',
+ url_username=self.api_usr, url_password=self.api_pwd, validate_certs=self.validate_certs,
+ timeout=120)
+ except Exception:
+ err = get_exception()
+ self.module.fail_json(
+ msg="Failed to update volume properties. Volume [%s]. Array Id [%s]. Error[%s]." % (self.name,
+ self.ssid,
+ str(err)))
+
+ @property
+ def volume_needs_expansion(self):
+ current_size_bytes = int(self.volume_detail['capacity'])
+ requested_size_bytes = self.size * self._size_unit_map[self.size_unit]
+
+ # TODO: check requested/current repo volume size for thin-volumes as well
+
+ # TODO: do we need to build any kind of slop factor in here?
+ return requested_size_bytes > current_size_bytes
+
+ def expand_volume(self):
+ is_thin = self.volume_detail['thinProvisioned']
+ if is_thin:
+ # TODO: support manual repo expansion as well
+ self.debug('expanding thin volume')
+ thin_volume_expand_req = dict(
+ newVirtualSize=self.size,
+ sizeUnit=self.size_unit
+ )
+ try:
+ (rc, resp) = request(self.api_url + "/storage-systems/%s/thin-volumes/%s/expand" % (self.ssid,
+ self.volume_detail[
+ 'id']),
+ data=json.dumps(thin_volume_expand_req), headers=self._post_headers, method='POST',
+ url_username=self.api_usr, url_password=self.api_pwd,
+ validate_certs=self.validate_certs, timeout=120)
+ except Exception:
+ err = get_exception()
+ self.module.fail_json(
+ msg="Failed to expand thin volume. Volume [%s]. Array Id [%s]. Error[%s]." % (self.name,
+ self.ssid,
+ str(err)))
+
+ # TODO: check return code
+ else:
+ self.debug('expanding volume')
+ volume_expand_req = dict(
+ expansionSize=self.size,
+ sizeUnit=self.size_unit
+ )
+ try:
+ (rc, resp) = request(
+ self.api_url + "/storage-systems/%s/volumes/%s/expand" % (self.ssid,
+ self.volume_detail['id']),
+ data=json.dumps(volume_expand_req), headers=self._post_headers, method='POST',
+ url_username=self.api_usr, url_password=self.api_pwd, validate_certs=self.validate_certs,
+ timeout=120)
+ except Exception:
+ err = get_exception()
+ self.module.fail_json(
+ msg="Failed to expand volume. Volume [%s]. Array Id [%s]. Error[%s]." % (self.name,
+ self.ssid,
+ str(err)))
+
+ self.debug('polling for completion...')
+
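+        # Poll the expand endpoint until the array reports action == 'none',
+        # i.e. no expansion operation remains in progress.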
+ while True:
+ try:
+ (rc, resp) = request(self.api_url + "/storage-systems/%s/volumes/%s/expand" % (self.ssid,
+ self.volume_detail[
+ 'id']),
+ method='GET', url_username=self.api_usr, url_password=self.api_pwd,
+ validate_certs=self.validate_certs)
+ except Exception:
+ err = get_exception()
+ self.module.fail_json(
+ msg="Failed to get volume expansion progress. Volume [%s]. Array Id [%s]. Error[%s]." % (
+ self.name, self.ssid, str(err)))
+
+ action = resp['action']
+ percent_complete = resp['percentComplete']
+
+ self.debug('expand action %s, %s complete...' % (action, percent_complete))
+
+ if action == 'none':
+ self.debug('expand complete')
+ break
+ else:
+ time.sleep(5)
+
+ def apply(self):
+ changed = False
+ volume_exists = False
+ msg = None
+
+ self.volume_detail = self.get_volume(self.name)
+
+ if self.volume_detail:
+ volume_exists = True
+
+ if self.state == 'absent':
+ self.debug("CHANGED: volume exists, but requested state is 'absent'")
+ changed = True
+ elif self.state == 'present':
+ # check requested volume size, see if expansion is necessary
+ if self.volume_needs_expansion:
+ self.debug(
+ "CHANGED: requested volume size %s%s is larger than current size %sb" % (self.size,
+ self.size_unit,
+ self.volume_detail[
+ 'capacity']))
+ changed = True
+
+ if self.volume_properties_changed:
+ self.debug("CHANGED: one or more volume properties have changed")
+ changed = True
+
+ else:
+ if self.state == 'present':
+ self.debug("CHANGED: volume does not exist, but requested state is 'present'")
+ changed = True
+
+ if changed:
+ if self.module.check_mode:
+ self.debug('skipping changes due to check mode')
+ else:
+ if self.state == 'present':
+ if not volume_exists:
+ pool_detail = self.get_storage_pool(self.storage_pool_name)
+
+ if not pool_detail:
+ self.module.fail_json(msg='Requested storage pool (%s) not found' % self.storage_pool_name)
+
+ if self.thin_provision and not pool_detail['diskPool']:
+ self.module.fail_json(
+ msg='Thin provisioned volumes can only be located on disk pools (not volume groups)')
+
+ pool_id = pool_detail['id']
+
+ if not self.thin_provision:
+ self.create_volume(pool_id, self.name, self.size_unit, self.size, self.segment_size_kb,
+ self.data_assurance_enabled)
+ msg = "Standard volume [%s] has been created." % (self.name)
+
+ else:
+ self.create_thin_volume(pool_id, self.name, self.size_unit, self.size,
+ self.thin_volume_repo_size, self.thin_volume_max_repo_size,
+ self.data_assurance_enabled)
+ msg = "Thin volume [%s] has been created." % (self.name)
+
+ else: # volume exists but differs, modify...
+ if self.volume_needs_expansion:
+ self.expand_volume()
+ msg = "Volume [%s] has been expanded." % (self.name)
+
+ # this stuff always needs to run on present (since props can't be set on creation)
+ if self.volume_properties_changed:
+ self.update_volume_properties()
+                        msg = "Properties of volume [%s] have been updated." % (self.name)
+
+ elif self.state == 'absent':
+ self.delete_volume()
+ msg = "Volume [%s] has been deleted." % (self.name)
+ else:
+ self.debug("exiting with no changes")
+ if self.state == 'absent':
+ msg = "Volume [%s] did not exist." % (self.name)
+ else:
+ msg = "Volume [%s] already exists." % (self.name)
+
+ self.module.exit_json(msg=msg, changed=changed)
+
+
+def main():
+ v = NetAppESeriesVolume()
+
+ try:
+ v.apply()
+ except Exception:
+ e = get_exception()
+        v.debug("Exception in apply(): \n%s" % format_exc())
+ v.module.fail_json(msg="Module failed. Error [%s]." % (str(e)))
+
+
+if __name__ == '__main__':
+ main()
diff --git a/lib/ansible/modules/extras/storage/netapp/netapp_e_volume_copy.py b/lib/ansible/modules/extras/storage/netapp/netapp_e_volume_copy.py
new file mode 100644
index 0000000000..f715c84088
--- /dev/null
+++ b/lib/ansible/modules/extras/storage/netapp/netapp_e_volume_copy.py
@@ -0,0 +1,439 @@
+#!/usr/bin/python
+
+# (c) 2016, NetApp, Inc
+#
+# This file is part of Ansible
+#
+# Ansible is free software: you can redistribute it and/or modify
+# it under the terms of the GNU General Public License as published by
+# the Free Software Foundation, either version 3 of the License, or
+# (at your option) any later version.
+#
+# Ansible is distributed in the hope that it will be useful,
+# but WITHOUT ANY WARRANTY; without even the implied warranty of
+# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+# GNU General Public License for more details.
+#
+# You should have received a copy of the GNU General Public License
+# along with Ansible. If not, see <http://www.gnu.org/licenses/>.
+#
+DOCUMENTATION = """
+---
+module: netapp_e_volume_copy
+short_description: Create volume copy pairs
+description:
+    - Create and delete volume copy pairs on NetApp E-series storage arrays.
+version_added: '2.2'
+author: Kevin Hulquest (@hulquest)
+options:
+ api_username:
+ required: true
+ description:
+ - The username to authenticate with the SANtricity WebServices Proxy or embedded REST API.
+ api_password:
+ required: true
+ description:
+ - The password to authenticate with the SANtricity WebServices Proxy or embedded REST API.
+ api_url:
+ required: true
+ description:
+ - The url to the SANtricity WebServices Proxy or embedded REST API.
+ example:
+ - https://prod-1.wahoo.acme.com/devmgr/v2
+ validate_certs:
+ required: false
+ default: true
+ description:
+ - Should https certificates be validated?
+ source_volume_id:
+ description:
+      - The id of the volume copy source.
+ - If used, must be paired with destination_volume_id
+ - Mutually exclusive with volume_copy_pair_id, and search_volume_id
+ destination_volume_id:
+ description:
+      - The id of the volume copy destination.
+ - If used, must be paired with source_volume_id
+ - Mutually exclusive with volume_copy_pair_id, and search_volume_id
+ volume_copy_pair_id:
+ description:
+      - The id of a given volume copy pair
+      - Mutually exclusive with destination_volume_id, source_volume_id, and search_volume_id
+      - Can be used to delete or check the presence of volume copy pairs
+      - Must specify this or (destination_volume_id and source_volume_id)
+  status:
+ description:
+ - Whether the specified volume copy pair should exist or not.
+ required: True
+ choices: ['present', 'absent']
+ create_copy_pair_if_does_not_exist:
+ description:
+ - Defines if a copy pair will be created if it does not exist.
+ - If set to True destination_volume_id and source_volume_id are required.
+ choices: [True, False]
+ default: True
+ start_stop_copy:
+ description:
+      - Starts a re-copy or stops a copy in progress
+      - "Note: If you stop the initial copy before it is done, the copy pair will be destroyed"
+ - Requires volume_copy_pair_id
+ search_volume_id:
+ description:
+ - Searches for all valid potential target and source volumes that could be used in a copy_pair
+ - Mutually exclusive with volume_copy_pair_id, destination_volume_id and source_volume_id
+"""
+EXAMPLES = """
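+---
+# Minimal sketches; the inventory variables and volume ids below are
+# illustrative assumptions, not values shipped with the module.
+- name: Ensure a volume copy pair exists
+  netapp_e_volume_copy:
+    ssid: "{{ ssid }}"
+    api_url: "{{ netapp_api_url }}"
+    api_username: "{{ netapp_api_username }}"
+    api_password: "{{ netapp_api_password }}"
+    validate_certs: "{{ netapp_api_validate_certs }}"
+    source_volume_id: "{{ source_volume_id }}"
+    destination_volume_id: "{{ destination_volume_id }}"
+    status: present
+
+- name: List valid copy pair candidates for a volume
+  netapp_e_volume_copy:
+    ssid: "{{ ssid }}"
+    api_url: "{{ netapp_api_url }}"
+    api_username: "{{ netapp_api_username }}"
+    api_password: "{{ netapp_api_password }}"
+    validate_certs: "{{ netapp_api_validate_certs }}"
+    search_volume_id: "{{ search_volume_id }}"
+    status: present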
+"""
+RETURN = """
+msg:
+ description: Success message
+ returned: success
+ type: string
+ sample: Created Volume Copy Pair with ID
+"""
+
+import json
+
+from ansible.module_utils.basic import AnsibleModule
+from ansible.module_utils.pycompat24 import get_exception
+from ansible.module_utils.urls import open_url
+from ansible.module_utils.six.moves.urllib.error import HTTPError
+
+HEADERS = {
+ "Content-Type": "application/json",
+ "Accept": "application/json",
+}
+
+
+def request(url, data=None, headers=None, method='GET', use_proxy=True,
+ force=False, last_mod_time=None, timeout=10, validate_certs=True,
+ url_username=None, url_password=None, http_agent=None, force_basic_auth=True, ignore_errors=False):
+ try:
+ r = open_url(url=url, data=data, headers=headers, method=method, use_proxy=use_proxy,
+ force=force, last_mod_time=last_mod_time, timeout=timeout, validate_certs=validate_certs,
+ url_username=url_username, url_password=url_password, http_agent=http_agent,
+ force_basic_auth=force_basic_auth)
+ except HTTPError:
+ err = get_exception()
+ r = err.fp
+
+ try:
+ raw_data = r.read()
+ if raw_data:
+ data = json.loads(raw_data)
+ else:
+            data = None
+    except Exception:
+ if ignore_errors:
+ pass
+ else:
+ raise Exception(raw_data)
+
+ resp_code = r.getcode()
+
+ if resp_code >= 400 and not ignore_errors:
+ raise Exception(resp_code, data)
+ else:
+ return resp_code, data
+
+
+def find_volume_copy_pair_id_from_source_volume_id_and_destination_volume_id(params):
+ get_status = 'storage-systems/%s/volume-copy-jobs' % params['ssid']
+ url = params['api_url'] + get_status
+
+ (rc, resp) = request(url, method='GET', url_username=params['api_username'],
+ url_password=params['api_password'], headers=HEADERS,
+ validate_certs=params['validate_certs'])
+
+ volume_copy_pair_id = None
+ for potential_copy_pair in resp:
+        if potential_copy_pair['sourceVolume'] == params['source_volume_id']:
+            # assumes 'targetVolume' is the destination field on volume-copy-jobs objects
+            if potential_copy_pair['targetVolume'] == params['destination_volume_id']:
+                volume_copy_pair_id = potential_copy_pair['id']
+
+ return volume_copy_pair_id
+
+
+def create_copy_pair(params):
+ get_status = 'storage-systems/%s/volume-copy-jobs' % params['ssid']
+ url = params['api_url'] + get_status
+
+ rData = {
+ "sourceId": params['source_volume_id'],
+ "targetId": params['destination_volume_id']
+ }
+
+ (rc, resp) = request(url, data=json.dumps(rData), ignore_errors=True, method='POST',
+ url_username=params['api_username'], url_password=params['api_password'], headers=HEADERS,
+ validate_certs=params['validate_certs'])
+ if rc != 200:
+ return False, (rc, resp)
+ else:
+ return True, (rc, resp)
+
+
+def delete_copy_pair_by_copy_pair_id(params):
+ get_status = 'storage-systems/%s/volume-copy-jobs/%s?retainRepositories=false' % (
+ params['ssid'], params['volume_copy_pair_id'])
+ url = params['api_url'] + get_status
+
+ (rc, resp) = request(url, ignore_errors=True, method='DELETE',
+ url_username=params['api_username'], url_password=params['api_password'], headers=HEADERS,
+ validate_certs=params['validate_certs'])
+ if rc != 204:
+ return False, (rc, resp)
+ else:
+ return True, (rc, resp)
+
+
+def find_volume_copy_pair_id_by_volume_copy_pair_id(params):
+    get_status = 'storage-systems/%s/volume-copy-jobs/%s' % (
+        params['ssid'], params['volume_copy_pair_id'])
+    url = params['api_url'] + get_status
+
+    # existence check only, so this must be a read-only GET
+    (rc, resp) = request(url, ignore_errors=True, method='GET',
+ url_username=params['api_username'], url_password=params['api_password'], headers=HEADERS,
+ validate_certs=params['validate_certs'])
+ if rc != 200:
+ return False, (rc, resp)
+ else:
+ return True, (rc, resp)
+
+
+def start_stop_copy(params):
+ get_status = 'storage-systems/%s/volume-copy-jobs-control/%s?control=%s' % (
+ params['ssid'], params['volume_copy_pair_id'], params['start_stop_copy'])
+ url = params['api_url'] + get_status
+
+ (response_code, response_data) = request(url, ignore_errors=True, method='POST',
+ url_username=params['api_username'], url_password=params['api_password'],
+ headers=HEADERS,
+ validate_certs=params['validate_certs'])
+
+ if response_code == 200:
+ return True, response_data[0]['percentComplete']
+ else:
+ return False, response_data
+
+
+def check_copy_status(params):
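+    # The volume-copy-jobs-control resource reports percentComplete == -1
+    # when no copy operation is currently running for the pair.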
+ get_status = 'storage-systems/%s/volume-copy-jobs-control/%s' % (
+ params['ssid'], params['volume_copy_pair_id'])
+ url = params['api_url'] + get_status
+
+ (response_code, response_data) = request(url, ignore_errors=True, method='GET',
+ url_username=params['api_username'], url_password=params['api_password'],
+ headers=HEADERS,
+ validate_certs=params['validate_certs'])
+
+    if response_code == 200:
+        if response_data['percentComplete'] != -1:
+            return True, response_data['percentComplete']
+        else:
+            return False, response_data['percentComplete']
+ else:
+ return False, response_data
+
+
+def find_valid_copy_pair_targets_and_sources(params):
+ get_status = 'storage-systems/%s/volumes' % params['ssid']
+ url = params['api_url'] + get_status
+
+ (response_code, response_data) = request(url, ignore_errors=True, method='GET',
+ url_username=params['api_username'], url_password=params['api_password'],
+ headers=HEADERS,
+ validate_certs=params['validate_certs'])
+
+ if response_code == 200:
+ source_capacity = None
+ candidates = []
+ for volume in response_data:
+ if volume['id'] == params['search_volume_id']:
+ source_capacity = volume['capacity']
+ else:
+ candidates.append(volume)
+
+ potential_sources = []
+ potential_targets = []
+
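+        # Volumes larger than the search volume can serve as copy targets;
+        # smaller ones only as sources. Either way, skip volumes that are
+        # already participating in a copy pair.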
+ for volume in candidates:
+ if volume['capacity'] > source_capacity:
+ if volume['volumeCopyTarget'] is False:
+ if volume['volumeCopySource'] is False:
+ potential_targets.append(volume['id'])
+ else:
+ if volume['volumeCopyTarget'] is False:
+ if volume['volumeCopySource'] is False:
+ potential_sources.append(volume['id'])
+
+ return potential_targets, potential_sources
+
+ else:
+ raise Exception("Response [%s]" % response_code)
+
+
+def main():
+ module = AnsibleModule(argument_spec=dict(
+ source_volume_id=dict(type='str'),
+ destination_volume_id=dict(type='str'),
+ copy_priority=dict(required=False, default=0, type='int'),
+ ssid=dict(required=True, type='str'),
+ api_url=dict(required=True),
+ api_username=dict(required=False),
+ api_password=dict(required=False, no_log=True),
+        validate_certs=dict(required=False, default=True, type='bool'),
+ targetWriteProtected=dict(required=False, default=True, type='bool'),
+ onlineCopy=dict(required=False, default=False, type='bool'),
+ volume_copy_pair_id=dict(type='str'),
+ status=dict(required=True, choices=['present', 'absent'], type='str'),
+ create_copy_pair_if_does_not_exist=dict(required=False, default=True, type='bool'),
+ start_stop_copy=dict(required=False, choices=['start', 'stop'], type='str'),
+ search_volume_id=dict(type='str'),
+ ),
+ mutually_exclusive=[['volume_copy_pair_id', 'destination_volume_id'],
+ ['volume_copy_pair_id', 'source_volume_id'],
+ ['volume_copy_pair_id', 'search_volume_id'],
+ ['search_volume_id', 'destination_volume_id'],
+ ['search_volume_id', 'source_volume_id'],
+ ],
+ required_together=[['source_volume_id', 'destination_volume_id'],
+ ],
+ required_if=[["create_copy_pair_if_does_not_exist", True, ['source_volume_id', 'destination_volume_id'], ],
+ ["start_stop_copy", 'stop', ['volume_copy_pair_id'], ],
+ ["start_stop_copy", 'start', ['volume_copy_pair_id'], ],
+ ]
+
+ )
+ params = module.params
+
+ if not params['api_url'].endswith('/'):
+ params['api_url'] += '/'
+
+ # Check if we want to search
+ if params['search_volume_id'] is not None:
+ try:
+ potential_targets, potential_sources = find_valid_copy_pair_targets_and_sources(params)
+ except:
+ e = get_exception()
+ module.fail_json(msg="Failed to find valid copy pair candidates. Error [%s]" % str(e))
+
+ module.exit_json(changed=False,
+                         msg='Valid source devices found: %s; valid target devices found: %s' % (len(potential_sources), len(potential_targets)),
+ search_volume_id=params['search_volume_id'],
+ valid_targets=potential_targets,
+ valid_sources=potential_sources)
+
+ # Check if we want to start or stop a copy operation
+ if params['start_stop_copy'] == 'start' or params['start_stop_copy'] == 'stop':
+
+ # Get the current status info
+        currently_running, status_info = check_copy_status(params)
+
+ # If we want to start
+ if params['start_stop_copy'] == 'start':
+
+ # If we have already started
+            if currently_running is True:
+                module.exit_json(changed=False, msg='Volume Copy Pair copy is already in progress.',
+                                 volume_copy_pair_id=params['volume_copy_pair_id'], percent_done=status_info)
+ # If we need to start
+ else:
+
+ start_status, info = start_stop_copy(params)
+
+ if start_status is True:
+ module.exit_json(changed=True, msg='Volume Copy Pair copy has started.',
+ volume_copy_pair_id=params['volume_copy_pair_id'], percent_done=info)
+ else:
+ module.fail_json(msg="Could not start volume copy pair Error: %s" % info)
+
+ # If we want to stop
+ else:
+ # If it has already stopped
+            if currently_running is False:
+ module.exit_json(changed=False, msg='Volume Copy Pair copy is stopped.',
+ volume_copy_pair_id=params['volume_copy_pair_id'])
+
+ # If we need to stop it
+ else:
+ start_status, info = start_stop_copy(params)
+
+ if start_status is True:
+ module.exit_json(changed=True, msg='Volume Copy Pair copy has been stopped.',
+ volume_copy_pair_id=params['volume_copy_pair_id'])
+ else:
+ module.fail_json(msg="Could not stop volume copy pair Error: %s" % info)
+
+ # If we want the copy pair to exist we do this stuff
+ if params['status'] == 'present':
+
+ # We need to check if it exists first
+ if params['volume_copy_pair_id'] is None:
+ params['volume_copy_pair_id'] = find_volume_copy_pair_id_from_source_volume_id_and_destination_volume_id(
+ params)
+
+        # If no volume copy pair is found we need to make it.
+ if params['volume_copy_pair_id'] is None:
+
+            # Creation requires source and destination volume ids; it cannot be done from a volume_copy_pair_id alone
+
+ copy_began_status, (rc, resp) = create_copy_pair(params)
+
+ if copy_began_status is True:
+ module.exit_json(changed=True, msg='Created Volume Copy Pair with ID: %s' % resp['id'])
+ else:
+ module.fail_json(msg="Could not create volume copy pair Code: %s Error: %s" % (rc, resp))
+
+ # If it does exist we do nothing
+ else:
+ # We verify that it exists
+ exist_status, (exist_status_code, exist_status_data) = find_volume_copy_pair_id_by_volume_copy_pair_id(
+ params)
+
+ if exist_status:
+ module.exit_json(changed=False,
+                                 msg='Volume Copy Pair with ID: %s exists' % params['volume_copy_pair_id'])
+ else:
+ if exist_status_code == 404:
+ module.fail_json(
+                    msg='Volume Copy Pair with ID: %s does not exist. Cannot create without source_volume_id and destination_volume_id' %
+ params['volume_copy_pair_id'])
+ else:
+ module.fail_json(msg="Could not find volume copy pair Code: %s Error: %s" % (
+ exist_status_code, exist_status_data))
+
+
+ # If we want it to not exist we do this
+ else:
+
+ if params['volume_copy_pair_id'] is None:
+ params['volume_copy_pair_id'] = find_volume_copy_pair_id_from_source_volume_id_and_destination_volume_id(
+ params)
+
+ # We delete it by the volume_copy_pair_id
+ delete_status, (delete_status_code, delete_status_data) = delete_copy_pair_by_copy_pair_id(params)
+
+ if delete_status is True:
+ module.exit_json(changed=True,
+                         msg='Volume Copy Pair with ID: %s was deleted' % params['volume_copy_pair_id'])
+ else:
+ if delete_status_code == 404:
+ module.exit_json(changed=False,
+                             msg='Volume Copy Pair with ID: %s does not exist' % params['volume_copy_pair_id'])
+ else:
+ module.fail_json(msg="Could not delete volume copy pair Code: %s Error: %s" % (
+ delete_status_code, delete_status_data))
+
+
+if __name__ == '__main__':
+ main()
diff --git a/lib/ansible/modules/extras/system/__init__.py b/lib/ansible/modules/extras/system/__init__.py
new file mode 100644
index 0000000000..e69de29bb2
--- /dev/null
+++ b/lib/ansible/modules/extras/system/__init__.py
diff --git a/lib/ansible/modules/extras/system/alternatives.py b/lib/ansible/modules/extras/system/alternatives.py
new file mode 100644
index 0000000000..09c8d8ad3e
--- /dev/null
+++ b/lib/ansible/modules/extras/system/alternatives.py
@@ -0,0 +1,158 @@
+#!/usr/bin/python
+# -*- coding: utf-8 -*-
+
+"""
+Ansible module to manage symbolic link alternatives.
+(c) 2014, Gabe Mulley <gabe.mulley@gmail.com>
+(c) 2015, David Wittman <dwittman@gmail.com>
+
+This file is part of Ansible
+
+Ansible is free software: you can redistribute it and/or modify
+it under the terms of the GNU General Public License as published by
+the Free Software Foundation, either version 3 of the License, or
+(at your option) any later version.
+
+Ansible is distributed in the hope that it will be useful,
+but WITHOUT ANY WARRANTY; without even the implied warranty of
+MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+GNU General Public License for more details.
+
+You should have received a copy of the GNU General Public License
+along with Ansible. If not, see <http://www.gnu.org/licenses/>.
+"""
+
+DOCUMENTATION = '''
+---
+module: alternatives
+short_description: Manages alternative programs for common commands
+description:
+ - Manages symbolic links using the 'update-alternatives' tool
+ - Useful when multiple programs are installed but provide similar functionality (e.g. different editors).
+version_added: "1.6"
+author:
+ - "David Wittman (@DavidWittman)"
+ - "Gabe Mulley (@mulby)"
+options:
+ name:
+ description:
+ - The generic name of the link.
+ required: true
+ path:
+ description:
+ - The path to the real executable that the link should point to.
+ required: true
+ link:
+ description:
+ - The path to the symbolic link that should point to the real executable.
+ - This option is required on RHEL-based distributions
+ required: false
+ priority:
+ description:
+ - The priority of the alternative
+ required: false
+ default: 50
+ version_added: "2.2"
+requirements: [ update-alternatives ]
+'''
+
+EXAMPLES = '''
+- name: correct java version selected
+ alternatives: name=java path=/usr/lib/jvm/java-7-openjdk-amd64/jre/bin/java
+
+- name: alternatives link created
+ alternatives: name=hadoop-conf link=/etc/hadoop/conf path=/etc/hadoop/conf.ansible
+
+- name: make java 32 bit an alternative with low priority
+ alternatives: name=java path=/usr/lib/jvm/java-7-openjdk-i386/jre/bin/java priority=-10
+'''
+
+import re
+import subprocess
+
+from ansible.module_utils.basic import *
+from ansible.module_utils.pycompat24 import get_exception
+
+
+def main():
+
+ module = AnsibleModule(
+ argument_spec = dict(
+ name = dict(required=True),
+ path = dict(required=True, type='path'),
+ link = dict(required=False, type='path'),
+ priority = dict(required=False, type='int',
+ default=50),
+ ),
+ supports_check_mode=True,
+ )
+
+ params = module.params
+ name = params['name']
+ path = params['path']
+ link = params['link']
+ priority = params['priority']
+
+ UPDATE_ALTERNATIVES = module.get_bin_path('update-alternatives',True)
+
+ current_path = None
+ all_alternatives = []
+
+ # Run `update-alternatives --display <name>` to find existing alternatives
+ (rc, display_output, _) = module.run_command(
+ ['env', 'LC_ALL=C', UPDATE_ALTERNATIVES, '--display', name]
+ )
+
+ if rc == 0:
+ # Alternatives already exist for this link group
+ # Parse the output to determine the current path of the symlink and
+ # available alternatives
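+        # Typical `--display` lines these regexes target (Debian format):
+        #   link currently points to /usr/bin/vim.basic
+        #   /usr/bin/vim.tiny - priority 10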
+ current_path_regex = re.compile(r'^\s*link currently points to (.*)$',
+ re.MULTILINE)
+ alternative_regex = re.compile(r'^(\/.*)\s-\spriority', re.MULTILINE)
+
+ current_path = current_path_regex.search(display_output).group(1)
+ all_alternatives = alternative_regex.findall(display_output)
+
+ if not link:
+ # Read the current symlink target from `update-alternatives --query`
+ # in case we need to install the new alternative before setting it.
+ #
+ # This is only compatible on Debian-based systems, as the other
+ # alternatives don't have --query available
+ rc, query_output, _ = module.run_command(
+ ['env', 'LC_ALL=C', UPDATE_ALTERNATIVES, '--query', name]
+ )
+ if rc == 0:
+ for line in query_output.splitlines():
+ if line.startswith('Link:'):
+ link = line.split()[1]
+ break
+
+ if current_path != path:
+ if module.check_mode:
+ module.exit_json(changed=True, current_path=current_path)
+ try:
+ # install the requested path if necessary
+ if path not in all_alternatives:
+ if not link:
+ module.fail_json(msg="Needed to install the alternative, but unable to do so as we are missing the link")
+
+ module.run_command(
+ [UPDATE_ALTERNATIVES, '--install', link, name, path, str(priority)],
+ check_rc=True
+ )
+
+ # select the requested path
+ module.run_command(
+ [UPDATE_ALTERNATIVES, '--set', name, path],
+ check_rc=True
+ )
+
+ module.exit_json(changed=True)
+ except subprocess.CalledProcessError:
+ e = get_exception()
+            module.fail_json(msg=str(e))
+ else:
+ module.exit_json(changed=False)
+
+
+main()
diff --git a/lib/ansible/modules/extras/system/at.py b/lib/ansible/modules/extras/system/at.py
new file mode 100644
index 0000000000..0ce9ff2c7d
--- /dev/null
+++ b/lib/ansible/modules/extras/system/at.py
@@ -0,0 +1,200 @@
+#!/usr/bin/python
+# -*- coding: utf-8 -*-
+#
+# (c) 2014, Richard Isaacson <richard.c.isaacson@gmail.com>
+#
+# This file is part of Ansible
+#
+# Ansible is free software: you can redistribute it and/or modify
+# it under the terms of the GNU General Public License as published by
+# the Free Software Foundation, either version 3 of the License, or
+# (at your option) any later version.
+#
+# Ansible is distributed in the hope that it will be useful,
+# but WITHOUT ANY WARRANTY; without even the implied warranty of
+# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+# GNU General Public License for more details.
+#
+# You should have received a copy of the GNU General Public License
+# along with Ansible. If not, see <http://www.gnu.org/licenses/>.
+
+DOCUMENTATION = '''
+---
+module: at
+short_description: Schedule the execution of a command or script file via the at command.
+description:
+ - Use this module to schedule a command or script file to run once in the future.
+ - All jobs are executed in the 'a' queue.
+version_added: "1.5"
+options:
+ command:
+ description:
+ - A command to be executed in the future.
+ required: false
+ default: null
+ script_file:
+ description:
+ - An existing script file to be executed in the future.
+ required: false
+ default: null
+ count:
+ description:
+ - The count of units in the future to execute the command or script file.
+ required: true
+ units:
+ description:
+ - The type of units in the future to execute the command or script file.
+ required: true
+ choices: ["minutes", "hours", "days", "weeks"]
+ state:
+ description:
+      - The state dictates if the command or script file should be evaluated as present (added) or absent (deleted).
+ required: false
+ choices: ["present", "absent"]
+ default: "present"
+ unique:
+ description:
+ - If a matching job is present a new job will not be added.
+ required: false
+ default: false
+requirements:
+ - at
+author: "Richard Isaacson (@risaacson)"
+'''
+
+EXAMPLES = '''
+# Schedule a command to execute in 20 minutes as root.
+- at: command="ls -d / > /dev/null" count=20 units="minutes"
+
+# Match a command to an existing job and delete the job.
+- at: command="ls -d / > /dev/null" state="absent"
+
+# Schedule a command to execute in 20 minutes making sure it is unique in the queue.
+- at: command="ls -d / > /dev/null" unique=true count=20 units="minutes"
+'''
+
+import os
+import tempfile
+
+
+def add_job(module, result, at_cmd, count, units, command, script_file):
+ at_command = "%s -f %s now + %s %s" % (at_cmd, script_file, count, units)
+ rc, out, err = module.run_command(at_command, check_rc=True)
+ if command:
+ os.unlink(script_file)
+ result['changed'] = True
+
+
+def delete_job(module, result, at_cmd, command, script_file):
+ for matching_job in get_matching_jobs(module, at_cmd, script_file):
+ at_command = "%s -d %s" % (at_cmd, matching_job)
+ rc, out, err = module.run_command(at_command, check_rc=True)
+ result['changed'] = True
+ if command:
+ os.unlink(script_file)
+ module.exit_json(**result)
+
+
+def get_matching_jobs(module, at_cmd, script_file):
+ matching_jobs = []
+
+ atq_cmd = module.get_bin_path('atq', True)
+
+ # Get list of job numbers for the user.
+ atq_command = "%s" % atq_cmd
+ rc, out, err = module.run_command(atq_command, check_rc=True)
+ current_jobs = out.splitlines()
+ if len(current_jobs) == 0:
+ return matching_jobs
+
+ # Read script_file into a string.
+ script_file_string = open(script_file).read().strip()
+
+ # Loop through the jobs.
+ # If the script text is contained in a job add job number to list.
+ for current_job in current_jobs:
+ split_current_job = current_job.split()
+ at_command = "%s -c %s" % (at_cmd, split_current_job[0])
+ rc, out, err = module.run_command(at_command, check_rc=True)
+ if script_file_string in out:
+ matching_jobs.append(split_current_job[0])
+
+ # Return the list.
+ return matching_jobs
+
+
+def create_tempfile(command):
+ filed, script_file = tempfile.mkstemp(prefix='at')
+ fileh = os.fdopen(filed, 'w')
+ fileh.write(command)
+ fileh.close()
+ return script_file
+
+
+def main():
+
+ module = AnsibleModule(
+ argument_spec = dict(
+ command=dict(required=False,
+ type='str'),
+ script_file=dict(required=False,
+ type='str'),
+ count=dict(required=False,
+ type='int'),
+ units=dict(required=False,
+ default=None,
+ choices=['minutes', 'hours', 'days', 'weeks'],
+ type='str'),
+ state=dict(required=False,
+ default='present',
+ choices=['present', 'absent'],
+ type='str'),
+ unique=dict(required=False,
+ default=False,
+ type='bool')
+ ),
+ mutually_exclusive=[['command', 'script_file']],
+ required_one_of=[['command', 'script_file']],
+ supports_check_mode=False
+ )
+
+ at_cmd = module.get_bin_path('at', True)
+
+ command = module.params['command']
+ script_file = module.params['script_file']
+ count = module.params['count']
+ units = module.params['units']
+ state = module.params['state']
+ unique = module.params['unique']
+
+ if (state == 'present') and (not count or not units):
+ module.fail_json(msg="present state requires count and units")
+
+ result = {'state': state, 'changed': False}
+
+ # If command transform it into a script_file
+ if command:
+ script_file = create_tempfile(command)
+
+ # if absent remove existing and return
+ if state == 'absent':
+ delete_job(module, result, at_cmd, command, script_file)
+
+ # if unique if existing return unchanged
+ if unique:
+ if len(get_matching_jobs(module, at_cmd, script_file)) != 0:
+ if command:
+ os.unlink(script_file)
+ module.exit_json(**result)
+
+ result['script_file'] = script_file
+ result['count'] = count
+ result['units'] = units
+
+ add_job(module, result, at_cmd, count, units, command, script_file)
+
+ module.exit_json(**result)
+
+# import module snippets
+from ansible.module_utils.basic import *
+main()
diff --git a/lib/ansible/modules/extras/system/capabilities.py b/lib/ansible/modules/extras/system/capabilities.py
new file mode 100644
index 0000000000..aa0785f6f6
--- /dev/null
+++ b/lib/ansible/modules/extras/system/capabilities.py
@@ -0,0 +1,186 @@
+#!/usr/bin/python
+# -*- coding: utf-8 -*-
+
+# (c) 2014, Nate Coraor <nate@bx.psu.edu>
+#
+# This file is part of Ansible
+#
+# Ansible is free software: you can redistribute it and/or modify
+# it under the terms of the GNU General Public License as published by
+# the Free Software Foundation, either version 3 of the License, or
+# (at your option) any later version.
+#
+# Ansible is distributed in the hope that it will be useful,
+# but WITHOUT ANY WARRANTY; without even the implied warranty of
+# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+# GNU General Public License for more details.
+#
+# You should have received a copy of the GNU General Public License
+# along with Ansible. If not, see <http://www.gnu.org/licenses/>.
+#
+
+DOCUMENTATION = '''
+---
+module: capabilities
+short_description: Manage Linux capabilities
+description:
+ - This module manipulates files privileges using the Linux capabilities(7) system.
+version_added: "1.6"
+options:
+ path:
+ description:
+ - Specifies the path to the file to be managed.
+ required: true
+ default: null
+ capability:
+ description:
+ - Desired capability to set (with operator and flags, if state is C(present)) or remove (if state is C(absent))
+ required: true
+ default: null
+ aliases: [ 'cap' ]
+ state:
+ description:
+ - Whether the entry should be present or absent in the file's capabilities.
+ choices: [ "present", "absent" ]
+ default: present
+notes:
+ - The capabilities system will automatically transform operators and flags
+ into the effective set, so (for example, cap_foo=ep will probably become
+ cap_foo+ep). This module does not attempt to determine the final operator
+ and flags to compare, so you will want to ensure that your capabilities
+ argument matches the final capabilities.
+requirements: []
+author: "Nate Coraor (@natefoo)"
+'''
+
+EXAMPLES = '''
+# Set cap_sys_chroot+ep on /foo
+- capabilities: path=/foo capability=cap_sys_chroot+ep state=present
+
+# Remove cap_net_bind_service from /bar
+- capabilities: path=/bar capability=cap_net_bind_service state=absent
+'''
+
+
+OPS = ( '=', '-', '+' )
+
+# ==============================================================
+
+import os
+import tempfile
+import re
+
+class CapabilitiesModule(object):
+
+ platform = 'Linux'
+ distribution = None
+
+ def __init__(self, module):
+ self.module = module
+ self.path = module.params['path'].strip()
+ self.capability = module.params['capability'].strip().lower()
+ self.state = module.params['state']
+ self.getcap_cmd = module.get_bin_path('getcap', required=True)
+ self.setcap_cmd = module.get_bin_path('setcap', required=True)
+ self.capability_tup = self._parse_cap(self.capability, op_required=self.state=='present')
+
+ self.run()
+
+ def run(self):
+
+ current = self.getcap(self.path)
+ caps = [ cap[0] for cap in current ]
+
+ if self.state == 'present' and self.capability_tup not in current:
+ # need to add capability
+ if self.module.check_mode:
+ self.module.exit_json(changed=True, msg='capabilities changed')
+ else:
+ # remove from current cap list if it's already set (but op/flags differ)
+                current = list(filter(lambda x: x[0] != self.capability_tup[0], current))
+ # add new cap with correct op/flags
+ current.append( self.capability_tup )
+ self.module.exit_json(changed=True, state=self.state, msg='capabilities changed', stdout=self.setcap(self.path, current))
+ elif self.state == 'absent' and self.capability_tup[0] in caps:
+ # need to remove capability
+ if self.module.check_mode:
+ self.module.exit_json(changed=True, msg='capabilities changed')
+ else:
+ # remove from current cap list and then set current list
+                current = list(filter(lambda x: x[0] != self.capability_tup[0], current))
+ self.module.exit_json(changed=True, state=self.state, msg='capabilities changed', stdout=self.setcap(self.path, current))
+ self.module.exit_json(changed=False, state=self.state)
+
+ def getcap(self, path):
+ rval = []
+ cmd = "%s -v %s" % (self.getcap_cmd, path)
+ rc, stdout, stderr = self.module.run_command(cmd)
+ # If file xattrs are set but no caps are set the output will be:
+ # '/foo ='
+ # If file xattrs are unset the output will be:
+ # '/foo'
+        # If the file does not exist the output will be (with rc == 0...):
+ # '/foo (No such file or directory)'
+ if rc != 0 or (stdout.strip() != path and stdout.count(' =') != 1):
+ self.module.fail_json(msg="Unable to get capabilities of %s" % path, stdout=stdout.strip(), stderr=stderr)
+ if stdout.strip() != path:
+ caps = stdout.split(' =')[1].strip().split()
+ for cap in caps:
+ cap = cap.lower()
+ # getcap condenses capabilities with the same op/flags into a
+ # comma-separated list, so we have to parse that
+ if ',' in cap:
+ cap_group = cap.split(',')
+ cap_group[-1], op, flags = self._parse_cap(cap_group[-1])
+ for subcap in cap_group:
+ rval.append( ( subcap, op, flags ) )
+ else:
+ rval.append(self._parse_cap(cap))
+ return rval
+
+ def setcap(self, path, caps):
+ caps = ' '.join([ ''.join(cap) for cap in caps ])
+ cmd = "%s '%s' %s" % (self.setcap_cmd, caps, path)
+ rc, stdout, stderr = self.module.run_command(cmd)
+ if rc != 0:
+ self.module.fail_json(msg="Unable to set capabilities of %s" % path, stdout=stdout, stderr=stderr)
+ else:
+ return stdout
+
+ def _parse_cap(self, cap, op_required=True):
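+        # Split a capability clause into (name, operator, flags),
+        # e.g. 'cap_net_raw+ep' -> ('cap_net_raw', '+', 'ep')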
+ opind = -1
+ try:
+ i = 0
+ while opind == -1:
+ opind = cap.find(OPS[i])
+ i += 1
+        except IndexError:
+ if op_required:
+ self.module.fail_json(msg="Couldn't find operator (one of: %s)" % str(OPS))
+ else:
+ return (cap, None, None)
+ op = cap[opind]
+ cap, flags = cap.split(op)
+ return (cap, op, flags)
+
+# ==============================================================
+# main
+
+def main():
+
+ # defining module
+ module = AnsibleModule(
+ argument_spec = dict(
+ path = dict(aliases=['key'], required=True),
+ capability = dict(aliases=['cap'], required=True),
+ state = dict(default='present', choices=['present', 'absent']),
+ ),
+ supports_check_mode=True
+ )
+
+ CapabilitiesModule(module)
+
+
+# import module snippets
+from ansible.module_utils.basic import *
+main()
diff --git a/lib/ansible/modules/extras/system/cronvar.py b/lib/ansible/modules/extras/system/cronvar.py
new file mode 100644
index 0000000000..21f92be964
--- /dev/null
+++ b/lib/ansible/modules/extras/system/cronvar.py
@@ -0,0 +1,433 @@
+#!/usr/bin/python
+# -*- coding: utf-8 -*-
+#
+# This file is part of Ansible
+#
+# Ansible is free software: you can redistribute it and/or modify
+# it under the terms of the GNU General Public License as published by
+# the Free Software Foundation, either version 3 of the License, or
+# (at your option) any later version.
+#
+# Ansible is distributed in the hope that it will be useful,
+# but WITHOUT ANY WARRANTY; without even the implied warranty of
+# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+# GNU General Public License for more details.
+#
+# You should have received a copy of the GNU General Public License
+# along with Ansible. If not, see <http://www.gnu.org/licenses/>.
+#
+# Cronvar Plugin: The goal of this plugin is to provide an idempotent
+# method for setting cron variable values. It should play well with the
+# existing cron module as well as allow for manually added variables.
+# Each variable entered will be preceded with a comment describing the
+# variable so that it can be found later. This is required to be
+# present in order for this plugin to find/modify the variable
+#
+# This module is based on the crontab module.
+#
+
+DOCUMENTATION = """
+---
+module: cronvar
+short_description: Manage variables in crontabs
+description:
+ - Use this module to manage crontab variables. This module allows
+ you to create, update, or delete cron variable definitions.
+version_added: "2.0"
+options:
+ name:
+ description:
+ - Name of the crontab variable.
+ default: null
+ required: true
+ value:
+ description:
+ - The value to set this variable to. Required if state=present.
+ required: false
+ default: null
+ insertafter:
+ required: false
+ default: null
+ description:
+ - Used with C(state=present). If specified, the variable will be inserted
+ after the variable specified.
+ insertbefore:
+ required: false
+ default: null
+ description:
+ - Used with C(state=present). If specified, the variable will be inserted
+ just before the variable specified.
+ state:
+ description:
+ - Whether to ensure that the variable is present or absent.
+ required: false
+ default: present
+ choices: [ "present", "absent" ]
+ user:
+ description:
+ - The specific user whose crontab should be modified.
+ required: false
+ default: root
+ cron_file:
+ description:
+ - If specified, uses this file instead of an individual user's crontab.
+ Without a leading /, this is assumed to be in /etc/cron.d. With a leading
+ /, this is taken as absolute.
+ required: false
+ default: null
+ backup:
+ description:
+ - If set, create a backup of the crontab before it is modified.
+ The location of the backup is returned in the C(backup) variable by this module.
+ required: false
+ default: false
+requirements:
+ - cron
+author: "Doug Luce (@dougluce)"
+"""
+
+EXAMPLES = '''
+# Ensure a variable exists.
+# Creates an entry like "EMAIL=doug@ansibmod.con.com"
+- cronvar: name="EMAIL" value="doug@ansibmod.con.com"
+
+# Make sure a variable is gone. This will remove any variable named
+# "LEGACY"
+- cronvar: name="LEGACY" state=absent
+
+# Adds a variable to a file under /etc/cron.d
+- cronvar: name="LOGFILE" value="/var/log/yum-autoupdate.log"
+ user="root" cron_file=ansible_yum-autoupdate
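+
+# Position a new variable relative to an existing one (this example assumes
+# a SHELL variable is already defined in the crontab)
+- cronvar: name="MAILTO" value="ops@example.com" insertafter="SHELL"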
+'''
+
+import os
+import re
+import tempfile
+import platform
+import pipes
+import shlex
+import sys
+from ansible.module_utils.basic import *
+from ansible.module_utils.pycompat24 import get_exception
+
+CRONCMD = "/usr/bin/crontab"
+
+class CronVarError(Exception):
+ pass
+
+class CronVar(object):
+ """
+ CronVar object to write variables to crontabs.
+
+ user - the user of the crontab (defaults to root)
+ cron_file - a cron file under /etc/cron.d
+ """
+ def __init__(self, module, user=None, cron_file=None):
+ self.module = module
+ self.user = user
+ if self.user is None:
+ self.user = 'root'
+ self.lines = None
+ self.wordchars = ''.join(chr(x) for x in range(128) if chr(x) not in ('=', "'", '"', ))
+
+ if cron_file:
+ self.cron_file = ""
+ if os.path.isabs(cron_file):
+ self.cron_file = cron_file
+ else:
+ self.cron_file = os.path.join('/etc/cron.d', cron_file)
+ else:
+ self.cron_file = None
+
+ self.read()
+
+ def read(self):
+ # Read in the crontab from the system
+ self.lines = []
+ if self.cron_file:
+ # read the cronfile
+ try:
+ f = open(self.cron_file, 'r')
+ self.lines = f.read().splitlines()
+ f.close()
+ except IOError:
+ e = get_exception()
+ # cron file does not exist
+ return
+ except:
+ raise CronVarError("Unexpected error:", sys.exc_info()[0])
+ else:
+ # using safely quoted shell for now, but this really should be two non-shell calls instead. FIXME
+ (rc, out, err) = self.module.run_command(self._read_user_execute(), use_unsafe_shell=True)
+
+ if rc != 0 and rc != 1: # 1 can mean that there are no jobs.
+ raise CronVarError("Unable to read crontab")
+
+ lines = out.splitlines()
+ count = 0
+ for l in lines:
+ if count > 2 or (not re.match( r'# DO NOT EDIT THIS FILE - edit the master and reinstall.', l) and
+ not re.match( r'# \(/tmp/.*installed on.*\)', l) and
+ not re.match( r'# \(.*version.*\)', l)):
+ self.lines.append(l)
+ count += 1
+
+ def log_message(self, message):
+ self.module.debug('ansible: "%s"' % message)
+
+ def write(self, backup_file=None):
+ """
+ Write the crontab to the system. Saves all information.
+ """
+ if backup_file:
+ fileh = open(backup_file, 'w')
+ elif self.cron_file:
+ fileh = open(self.cron_file, 'w')
+ else:
+ filed, path = tempfile.mkstemp(prefix='crontab')
+ fileh = os.fdopen(filed, 'w')
+
+ fileh.write(self.render())
+ fileh.close()
+
+ # return if making a backup
+ if backup_file:
+ return
+
+ # Add the entire crontab back to the user crontab
+ if not self.cron_file:
+ # quoting shell args for now but really this should be two non-shell calls. FIXME
+ (rc, out, err) = self.module.run_command(self._write_execute(path), use_unsafe_shell=True)
+ os.unlink(path)
+
+ if rc != 0:
+ self.module.fail_json(msg=err)
+
+ def remove_variable_file(self):
+ try:
+ os.unlink(self.cron_file)
+ return True
+ except OSError:
+ e = get_exception()
+ # cron file does not exist
+ return False
+ except:
+ raise CronVarError("Unexpected error:", sys.exc_info()[0])
+
+ def parse_for_var(self, line):
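+        # Use shlex to decide whether a crontab line is a VAR=value
+        # definition, e.g. 'PATH=/usr/bin' -> ('PATH', '/usr/bin')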
+ lexer = shlex.shlex(line)
+ lexer.wordchars = self.wordchars
+ varname = lexer.get_token()
+ is_env_var = lexer.get_token() == '='
+ value = ''.join(lexer)
+ if is_env_var:
+ return (varname, value)
+ raise CronVarError("Not a variable.")
+
+ def find_variable(self, name):
+ comment = None
+ for l in self.lines:
+ try:
+ (varname, value) = self.parse_for_var(l)
+ if varname == name:
+ return value
+ except CronVarError:
+ pass
+ return None
+
+ def get_var_names(self):
+ var_names = []
+ for l in self.lines:
+ try:
+ (var_name, _) = self.parse_for_var(l)
+ var_names.append(var_name)
+ except CronVarError:
+ pass
+ return var_names
+
+ def add_variable(self, name, value, insertbefore, insertafter):
+ if insertbefore is None and insertafter is None:
+ # Add the variable to the top of the file.
+ self.lines.insert(0, "%s=%s" % (name, value))
+ else:
+ newlines = []
+ for l in self.lines:
+ try:
+ (varname, _) = self.parse_for_var(l) # Throws if not a var line
+ if varname == insertbefore:
+ newlines.append("%s=%s" % (name, value))
+ newlines.append(l)
+ elif varname == insertafter:
+ newlines.append(l)
+ newlines.append("%s=%s" % (name, value))
+ else:
+ raise CronVarError # Append.
+ except CronVarError:
+ newlines.append(l)
+
+ self.lines = newlines
+
+ def remove_variable(self, name):
+ self.update_variable(name, None, remove=True)
+
+ def update_variable(self, name, value, remove=False):
+ newlines = []
+ for l in self.lines:
+ try:
+ (varname, _) = self.parse_for_var(l) # Throws if not a var line
+ if varname != name:
+ raise CronVarError # Append.
+ if not remove:
+ newlines.append("%s=%s" % (name, value))
+ except CronVarError:
+ newlines.append(l)
+
+ self.lines = newlines
+
+ def render(self):
+ """
+ Render a proper crontab
+ """
+ result = '\n'.join(self.lines)
+ if result and result[-1] not in ['\n', '\r']:
+ result += '\n'
+ return result
+
+ def _read_user_execute(self):
+ """
+ Returns the command line for reading a crontab
+ """
+ user = ''
+
+ if self.user:
+ if platform.system() == 'SunOS':
+ return "su %s -c '%s -l'" % (pipes.quote(self.user), pipes.quote(CRONCMD))
+ elif platform.system() == 'AIX':
+ return "%s -l %s" % (pipes.quote(CRONCMD), pipes.quote(self.user))
+ elif platform.system() == 'HP-UX':
+ return "%s %s %s" % (CRONCMD , '-l', pipes.quote(self.user))
+ else:
+ user = '-u %s' % pipes.quote(self.user)
+ return "%s %s %s" % (CRONCMD , user, '-l')
+
+ def _write_execute(self, path):
+ """
+ Return the command line for writing a crontab
+ """
+ user = ''
+ if self.user:
+ if platform.system() in ['SunOS', 'HP-UX', 'AIX']:
+ return "chown %s %s ; su '%s' -c '%s %s'" % (pipes.quote(self.user), pipes.quote(path), pipes.quote(self.user), CRONCMD, pipes.quote(path))
+ else:
+ user = '-u %s' % pipes.quote(self.user)
+ return "%s %s %s" % (CRONCMD , user, pipes.quote(path))
+
+#==================================================
+
+def main():
+ # The following example playbooks:
+ #
+ # - cronvar: name="SHELL" value="/bin/bash"
+ #
+ # - name: Set the email
+ # cronvar: name="EMAILTO" value="doug@ansibmod.con.com"
+ #
+ # - name: Get rid of the old new host variable
+ # cronvar: name="NEW_HOST" state=absent
+ #
+ # Would produce:
+ # SHELL = /bin/bash
+ # EMAILTO = doug@ansibmod.con.com
+
+ module = AnsibleModule(
+ argument_spec=dict(
+ name=dict(required=True),
+ value=dict(required=False),
+ user=dict(required=False),
+ cron_file=dict(required=False),
+ insertafter=dict(default=None),
+ insertbefore=dict(default=None),
+ state=dict(default='present', choices=['present', 'absent']),
+ backup=dict(default=False, type='bool'),
+ ),
+ mutually_exclusive=[['insertbefore', 'insertafter']],
+ supports_check_mode=False,
+ )
+
+ name = module.params['name']
+ value = module.params['value']
+ user = module.params['user']
+ cron_file = module.params['cron_file']
+ insertafter = module.params['insertafter']
+ insertbefore = module.params['insertbefore']
+ state = module.params['state']
+ backup = module.params['backup']
+ ensure_present = state == 'present'
+
+ changed = False
+ res_args = dict()
+
+ # Ensure all files generated are only writable by the owning user. Primarily relevant for the cron_file option.
+ os.umask(int('022',8))
+ cronvar = CronVar(module, user, cron_file)
+
+ module.debug('cronvar instantiated - name: "%s"' % name)
+
+ # --- user input validation ---
+
+ if name is None and ensure_present:
+ module.fail_json(msg="You must specify 'name' to insert a new cron variabale")
+
+ if value is None and ensure_present:
+ module.fail_json(msg="You must specify 'value' to insert a new cron variable")
+
+ if name is None and not ensure_present:
+ module.fail_json(msg="You must specify 'name' to remove a cron variable")
+
+ # if requested make a backup before making a change
+ if backup:
+ (_, backup_file) = tempfile.mkstemp(prefix='cronvar')
+ cronvar.write(backup_file)
+
+ if cronvar.cron_file and not name and not ensure_present:
+ changed = cronvar.remove_job_file()
+ module.exit_json(changed=changed, cron_file=cron_file, state=state)
+
+ old_value = cronvar.find_variable(name)
+
+ if ensure_present:
+ if old_value is None:
+ cronvar.add_variable(name, value, insertbefore, insertafter)
+ changed = True
+ elif old_value != value:
+ cronvar.update_variable(name, value)
+ changed = True
+ else:
+ if old_value is not None:
+ cronvar.remove_variable(name)
+ changed = True
+
+ res_args = {
+ "vars": cronvar.get_var_names(),
+ "changed": changed
+ }
+
+ if changed:
+ cronvar.write()
+
+ # retain the backup only if crontab or cron file have changed
+ if backup:
+ if changed:
+ res_args['backup_file'] = backup_file
+ else:
+ os.unlink(backup_file)
+
+ if cron_file:
+ res_args['cron_file'] = cron_file
+
+ module.exit_json(**res_args)
+
+ # --- should never get here
+ module.exit_json(msg="Unable to execute cronvar task.")
+
+
+main()
diff --git a/lib/ansible/modules/extras/system/crypttab.py b/lib/ansible/modules/extras/system/crypttab.py
new file mode 100644
index 0000000000..ea9698a12c
--- /dev/null
+++ b/lib/ansible/modules/extras/system/crypttab.py
@@ -0,0 +1,365 @@
+#!/usr/bin/python
+# -*- coding: utf-8 -*-
+
+# (c) 2014, Steve <yo@groks.org>
+#
+# This file is part of Ansible
+#
+# Ansible is free software: you can redistribute it and/or modify
+# it under the terms of the GNU General Public License as published by
+# the Free Software Foundation, either version 3 of the License, or
+# (at your option) any later version.
+#
+# Ansible is distributed in the hope that it will be useful,
+# but WITHOUT ANY WARRANTY; without even the implied warranty of
+# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+# GNU General Public License for more details.
+#
+# You should have received a copy of the GNU General Public License
+# along with Ansible. If not, see <http://www.gnu.org/licenses/>.
+
+DOCUMENTATION = '''
+---
+module: crypttab
+short_description: Encrypted Linux block devices
+description:
+ - Control Linux encrypted block devices that are set up during system boot in C(/etc/crypttab).
+version_added: "1.9"
+options:
+ name:
+ description:
+ - Name of the encrypted block device as it appears in the C(/etc/crypttab) file, or
+        optionally prefixed with C(/dev/mapper/), as it appears in the filesystem. I(/dev/mapper/)
+ will be stripped from I(name).
+ required: true
+ default: null
+ aliases: []
+ state:
+ description:
+      - Use I(present) to add a line to C(/etc/crypttab) or update its definition
+ if already present. Use I(absent) to remove a line with matching I(name).
+ Use I(opts_present) to add options to those already present; options with
+ different values will be updated. Use I(opts_absent) to remove options from
+ the existing set.
+ required: true
+ choices: [ "present", "absent", "opts_present", "opts_absent"]
+ default: null
+ backing_device:
+ description:
+ - Path to the underlying block device or file, or the UUID of a block-device
+ prefixed with I(UUID=)
+ required: false
+ default: null
+ password:
+ description:
+      - Encryption password, the path to a file containing the password, or
+ 'none' or '-' if the password should be entered at boot.
+ required: false
+ default: "none"
+ opts:
+ description:
+      - A comma-delimited list of options. See C(crypttab(5)) for details.
+ required: false
+ path:
+ description:
+ - Path to file to use instead of C(/etc/crypttab). This might be useful
+ in a chroot environment.
+ required: false
+ default: /etc/crypttab
+
+notes: []
+requirements: []
+author: "Steve (@groks)"
+'''
+
+EXAMPLES = '''
+- name: Set the options explicitly for a device, which must already exist
+ crypttab: name=luks-home state=present opts=discard,cipher=aes-cbc-essiv:sha256
+
+- name: Add the 'discard' option to any existing options for all devices
+ crypttab: name={{ item.device }} state=opts_present opts=discard
+ with_items: ansible_mounts
+  when: "'/dev/mapper/luks-' in item.device"
+'''
+
+from ansible.module_utils.basic import *
+from ansible.module_utils.pycompat24 import get_exception
+
+def main():
+
+ module = AnsibleModule(
+ argument_spec = dict(
+ name = dict(required=True),
+ state = dict(required=True, choices=['present', 'absent', 'opts_present', 'opts_absent']),
+ backing_device = dict(default=None),
+ password = dict(default=None),
+ opts = dict(default=None),
+ path = dict(default='/etc/crypttab')
+ ),
+ supports_check_mode = True
+ )
+
+ backing_device = module.params['backing_device']
+ password = module.params['password']
+ opts = module.params['opts']
+ state = module.params['state']
+ path = module.params['path']
+ name = module.params['name']
+ if name.startswith('/dev/mapper/'):
+ name = name[len('/dev/mapper/'):]
+
+
+ if state != 'absent' and backing_device is None and password is None and opts is None:
+ module.fail_json(msg="expected one or more of 'backing_device', 'password' or 'opts'",
+ **module.params)
+
+ if 'opts' in state and (backing_device is not None or password is not None):
+ module.fail_json(msg="cannot update 'backing_device' or 'password' when state=%s" % state,
+ **module.params)
+
+ for arg_name, arg in (('name', name),
+ ('backing_device', backing_device),
+ ('password', password),
+ ('opts', opts)):
+ if (arg is not None
+ and (' ' in arg or '\t' in arg or arg == '')):
+ module.fail_json(msg="invalid '%s': contains white space or is empty" % arg_name,
+ **module.params)
+
+ try:
+ crypttab = Crypttab(path)
+ existing_line = crypttab.match(name)
+ except Exception:
+ e = get_exception()
+ module.fail_json(msg="failed to open and parse crypttab file: %s" % e,
+ **module.params)
+
+ if 'present' in state and existing_line is None and backing_device is None:
+ module.fail_json(msg="'backing_device' required to add a new entry",
+ **module.params)
+
+ changed, reason = False, '?'
+
+ if state == 'absent':
+ if existing_line is not None:
+ changed, reason = existing_line.remove()
+
+ elif state == 'present':
+ if existing_line is not None:
+ changed, reason = existing_line.set(backing_device, password, opts)
+ else:
+ changed, reason = crypttab.add(Line(None, name, backing_device, password, opts))
+
+ elif state == 'opts_present':
+ if existing_line is not None:
+ changed, reason = existing_line.opts.add(opts)
+ else:
+ changed, reason = crypttab.add(Line(None, name, backing_device, password, opts))
+
+ elif state == 'opts_absent':
+ if existing_line is not None:
+ changed, reason = existing_line.opts.remove(opts)
+
+
+    if changed and not module.check_mode:
+        # Open outside try/finally so a failed open() does not leave 'f'
+        # unbound in the finally block.
+        f = open(path, 'wb')
+        try:
+            f.write(str(crypttab))
+        finally:
+            f.close()
+
+ module.exit_json(changed=changed, msg=reason, **module.params)
+
+
+class Crypttab(object):
+
+    def __init__(self, path):
+        self.path = path
+        # Keep lines on the instance; a mutable class attribute would be
+        # shared between instances.
+        self._lines = []
+        if not os.path.exists(path):
+            if not os.path.exists(os.path.dirname(path)):
+                os.makedirs(os.path.dirname(path))
+            open(path, 'a').close()
+
+        f = open(path, 'r')
+        try:
+            for line in f.readlines():
+                self._lines.append(Line(line))
+        finally:
+            f.close()
+
+ def add(self, line):
+ self._lines.append(line)
+ return True, 'added line'
+
+ def lines(self):
+ for line in self._lines:
+ if line.valid():
+ yield line
+
+ def match(self, name):
+ for line in self.lines():
+ if line.name == name:
+ return line
+ return None
+
+ def __str__(self):
+ lines = []
+ for line in self._lines:
+ lines.append(str(line))
+ crypttab = '\n'.join(lines)
+ if len(crypttab) == 0:
+ crypttab += '\n'
+ if crypttab[-1] != '\n':
+ crypttab += '\n'
+ return crypttab
+
+
+class Line(object):
+
+ def __init__(self, line=None, name=None, backing_device=None, password=None, opts=None):
+ self.line = line
+ self.name = name
+ self.backing_device = backing_device
+ self.password = password
+ self.opts = Options(opts)
+
+ if line is not None:
+ if self._line_valid(line):
+ self.name, backing_device, password, opts = self._split_line(line)
+
+ self.set(backing_device, password, opts)
+
+ def set(self, backing_device, password, opts):
+ changed = False
+
+ if backing_device is not None and self.backing_device != backing_device:
+ self.backing_device = backing_device
+ changed = True
+
+ if password is not None and self.password != password:
+ self.password = password
+ changed = True
+
+ if opts is not None:
+ opts = Options(opts)
+ if opts != self.opts:
+ self.opts = opts
+ changed = True
+
+ return changed, 'updated line'
+
+ def _line_valid(self, line):
+ if not line.strip() or line.startswith('#') or len(line.split()) not in (2, 3, 4):
+ return False
+ return True
+
+ def _split_line(self, line):
+ fields = line.split()
+ try:
+ field2 = fields[2]
+ except IndexError:
+ field2 = None
+ try:
+ field3 = fields[3]
+ except IndexError:
+ field3 = None
+
+ return (fields[0],
+ fields[1],
+ field2,
+ field3)
+
+ def remove(self):
+ self.line, self.name, self.backing_device = '', None, None
+ return True, 'removed line'
+
+ def valid(self):
+ if self.name is not None and self.backing_device is not None:
+ return True
+ return False
+
+ def __str__(self):
+ if self.valid():
+ fields = [self.name, self.backing_device]
+            if self.password is not None or self.opts:
+                if self.password is not None:
+                    fields.append(self.password)
+                else:
+                    # A password field must precede opts; 'none' is the
+                    # crypttab placeholder.
+                    fields.append('none')
+ if self.opts:
+ fields.append(str(self.opts))
+ return ' '.join(fields)
+ return self.line
+
+
+class Options(dict):
+ """opts_string looks like: 'discard,foo=bar,baz=greeble' """
+
+ def __init__(self, opts_string):
+ super(Options, self).__init__()
+ self.itemlist = []
+ if opts_string is not None:
+ for opt in opts_string.split(','):
+ kv = opt.split('=')
+ if len(kv) > 1:
+ k, v = (kv[0], kv[1])
+ else:
+ k, v = (kv[0], None)
+ self[k] = v
+
+ def add(self, opts_string):
+ changed = False
+ for k, v in Options(opts_string).items():
+            if k in self:
+ if self[k] != v:
+ changed = True
+ else:
+ changed = True
+ self[k] = v
+ return changed, 'updated options'
+
+ def remove(self, opts_string):
+ changed = False
+ for k in Options(opts_string):
+            if k in self:
+ del self[k]
+ changed = True
+ return changed, 'removed options'
+
+ def keys(self):
+ return self.itemlist
+
+ def values(self):
+ return [self[key] for key in self]
+
+ def items(self):
+ return [(key, self[key]) for key in self]
+
+ def __iter__(self):
+ return iter(self.itemlist)
+
+ def __setitem__(self, key, value):
+        if key not in self:
+ self.itemlist.append(key)
+ super(Options, self).__setitem__(key, value)
+
+ def __delitem__(self, key):
+ self.itemlist.remove(key)
+ super(Options, self).__delitem__(key)
+
+ def __ne__(self, obj):
+ return not (isinstance(obj, Options)
+ and sorted(self.items()) == sorted(obj.items()))
+
+ def __str__(self):
+ ret = []
+ for k, v in self.items():
+ if v is None:
+ ret.append(k)
+ else:
+ ret.append('%s=%s' % (k, v))
+ return ','.join(ret)
+
+main()
diff --git a/lib/ansible/modules/extras/system/debconf.py b/lib/ansible/modules/extras/system/debconf.py
new file mode 100644
index 0000000000..05e545a7ed
--- /dev/null
+++ b/lib/ansible/modules/extras/system/debconf.py
@@ -0,0 +1,178 @@
+#!/usr/bin/python
+# -*- coding: utf-8 -*-
+
+"""
+Ansible module to configure .deb packages.
+(c) 2014, Brian Coca <briancoca+ansible@gmail.com>
+
+This file is part of Ansible
+
+Ansible is free software: you can redistribute it and/or modify
+it under the terms of the GNU General Public License as published by
+the Free Software Foundation, either version 3 of the License, or
+(at your option) any later version.
+
+Ansible is distributed in the hope that it will be useful,
+but WITHOUT ANY WARRANTY; without even the implied warranty of
+MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+GNU General Public License for more details.
+
+You should have received a copy of the GNU General Public License
+along with Ansible. If not, see <http://www.gnu.org/licenses/>.
+"""
+
+DOCUMENTATION = '''
+---
+module: debconf
+short_description: Configure a .deb package
+description:
+    - Configure a .deb package using debconf-set-selections, or just query
+ existing selections.
+version_added: "1.6"
+notes:
+ - This module requires the command line debconf tools.
+ - A number of questions have to be answered (depending on the package).
+ Use 'debconf-show <package>' on any Debian or derivative with the package
+ installed to see questions/settings available.
+ - Some distros will always record tasks involving the setting of passwords as changed. This is due to debconf-get-selections masking passwords.
+requirements: [ debconf, debconf-utils ]
+options:
+ name:
+ description:
+ - Name of package to configure.
+ required: true
+ default: null
+ aliases: ['pkg']
+ question:
+ description:
+ - A debconf configuration setting
+ required: false
+ default: null
+ aliases: ['setting', 'selection']
+ vtype:
+ description:
+ - The type of the value supplied.
+ - C(seen) was added in 2.2.
+ required: false
+ default: null
+ choices: [string, password, boolean, select, multiselect, note, error, title, text, seen]
+ value:
+ description:
+ - Value to set the configuration to
+ required: false
+ default: null
+ aliases: ['answer']
+ unseen:
+ description:
+ - Do not set 'seen' flag when pre-seeding
+ required: false
+ default: False
+author: "Brian Coca (@bcoca)"
+
+'''
+
+EXAMPLES = '''
+# Set default locale to fr_FR.UTF-8
+debconf: name=locales question='locales/default_environment_locale' value=fr_FR.UTF-8 vtype='select'
+
+# set to generate locales:
+debconf: name=locales question='locales/locales_to_be_generated' value='en_US.UTF-8 UTF-8, fr_FR.UTF-8 UTF-8' vtype='multiselect'
+
+# Accept oracle license
+debconf: name='oracle-java7-installer' question='shared/accepted-oracle-license-v1-1' value='true' vtype='select'
+
+# By specifying a package, you can register/return the list of questions and current values
+debconf: name='tzdata'
+'''
+
+def get_selections(module, pkg):
+ cmd = [module.get_bin_path('debconf-show', True), pkg]
+    rc, out, err = module.run_command(cmd)
+
+ if rc != 0:
+ module.fail_json(msg=err)
+
+ selections = {}
+
+ for line in out.splitlines():
+ (key, value) = line.split(':', 1)
+ selections[ key.strip('*').strip() ] = value.strip()
+
+ return selections
+
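+# debconf-show output parsed by get_selections() looks roughly like this
+# (illustrative values):
+#   * locales/default_environment_locale: fr_FR.UTF-8
+#     tzdata/Areas: Europe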
+
+def set_selection(module, pkg, question, vtype, value, unseen):
+
+ setsel = module.get_bin_path('debconf-set-selections', True)
+ cmd = [setsel]
+ if unseen:
+ cmd.append('-u')
+
+ if vtype == 'boolean':
+ if value == 'True':
+ value = 'true'
+ elif value == 'False':
+ value = 'false'
+ data = ' '.join([pkg, question, vtype, value])
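+    # The line handed to debconf-set-selections has the form
+    # '<pkg> <question> <vtype> <value>', e.g. (illustrative):
+    #   locales locales/default_environment_locale select fr_FR.UTF-8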
+
+ return module.run_command(cmd, data=data)
+
+def main():
+
+ module = AnsibleModule(
+ argument_spec = dict(
+ name = dict(required=True, aliases=['pkg'], type='str'),
+ question = dict(required=False, aliases=['setting', 'selection'], type='str'),
+ vtype = dict(required=False, type='str', choices=['string', 'password', 'boolean', 'select', 'multiselect', 'note', 'error', 'title', 'text', 'seen']),
+ value = dict(required=False, type='str', aliases=['answer']),
+ unseen = dict(required=False, type='bool'),
+ ),
+ required_together = ( ['question','vtype', 'value'],),
+ supports_check_mode=True,
+ )
+
+ #TODO: enable passing array of options and/or debconf file from get-selections dump
+ pkg = module.params["name"]
+ question = module.params["question"]
+ vtype = module.params["vtype"]
+ value = module.params["value"]
+ unseen = module.params["unseen"]
+
+ prev = get_selections(module, pkg)
+
+ changed = False
+ msg = ""
+
+ if question is not None:
+ if vtype is None or value is None:
+ module.fail_json(msg="when supplying a question you must supply a valid vtype and value")
+
+        if question not in prev or prev[question] != value:
+ changed = True
+
+ if changed:
+ if not module.check_mode:
+ rc, msg, e = set_selection(module, pkg, question, vtype, value, unseen)
+ if rc:
+ module.fail_json(msg=e)
+
+ curr = { question: value }
+ if question in prev:
+ prev = {question: prev[question]}
+ else:
+ prev[question] = ''
+ if module._diff:
+ after = prev.copy()
+ after.update(curr)
+ diff_dict = {'before': prev, 'after': after}
+ else:
+ diff_dict = {}
+
+ module.exit_json(changed=changed, msg=msg, current=curr, previous=prev, diff=diff_dict)
+
+ module.exit_json(changed=changed, msg=msg, current=prev)
+
+# import module snippets
+from ansible.module_utils.basic import *
+
+main()
diff --git a/lib/ansible/modules/extras/system/facter.py b/lib/ansible/modules/extras/system/facter.py
new file mode 100644
index 0000000000..b594836df9
--- /dev/null
+++ b/lib/ansible/modules/extras/system/facter.py
@@ -0,0 +1,61 @@
+#!/usr/bin/python
+# -*- coding: utf-8 -*-
+
+# (c) 2012, Michael DeHaan <michael.dehaan@gmail.com>
+#
+# This file is part of Ansible
+#
+# Ansible is free software: you can redistribute it and/or modify
+# it under the terms of the GNU General Public License as published by
+# the Free Software Foundation, either version 3 of the License, or
+# (at your option) any later version.
+#
+# Ansible is distributed in the hope that it will be useful,
+# but WITHOUT ANY WARRANTY; without even the implied warranty of
+# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+# GNU General Public License for more details.
+#
+# You should have received a copy of the GNU General Public License
+# along with Ansible. If not, see <http://www.gnu.org/licenses/>.
+#
+
+
+DOCUMENTATION = '''
+---
+module: facter
+short_description: Runs the discovery program I(facter) on the remote system
+description:
+ - Runs the I(facter) discovery program
+ (U(https://github.com/puppetlabs/facter)) on the remote system, returning
+ JSON data that can be useful for inventory purposes.
+version_added: "0.2"
+options: {}
+notes: []
+requirements: [ "facter", "ruby-json" ]
+author:
+ - "Ansible Core Team"
+ - "Michael DeHaan"
+'''
+
+EXAMPLES = '''
+# Example command-line invocation
+ansible www.example.net -m facter
+'''
+
+def main():
+ module = AnsibleModule(
+ argument_spec = dict()
+ )
+
+ facter_path = module.get_bin_path('facter', opt_dirs=['/opt/puppetlabs/bin'])
+
+ cmd = [facter_path, "--puppet", "--json"]
+
+ rc, out, err = module.run_command(cmd, check_rc=True)
+ module.exit_json(**json.loads(out))
+
+# import module snippets
+from ansible.module_utils.basic import *
+
+main()
+
diff --git a/lib/ansible/modules/extras/system/filesystem.py b/lib/ansible/modules/extras/system/filesystem.py
new file mode 100644
index 0000000000..10fa5afbb1
--- /dev/null
+++ b/lib/ansible/modules/extras/system/filesystem.py
@@ -0,0 +1,256 @@
+#!/usr/bin/python
+# -*- coding: utf-8 -*-
+
+# (c) 2013, Alexander Bulimov <lazywolf0@gmail.com>
+#
+# This file is part of Ansible
+#
+# Ansible is free software: you can redistribute it and/or modify
+# it under the terms of the GNU General Public License as published by
+# the Free Software Foundation, either version 3 of the License, or
+# (at your option) any later version.
+#
+# Ansible is distributed in the hope that it will be useful,
+# but WITHOUT ANY WARRANTY; without even the implied warranty of
+# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+# GNU General Public License for more details.
+#
+# You should have received a copy of the GNU General Public License
+# along with Ansible. If not, see <http://www.gnu.org/licenses/>.
+
+DOCUMENTATION = '''
+---
+author: "Alexander Bulimov (@abulimov)"
+module: filesystem
+short_description: Makes file system on block device
+description:
+  - This module creates a file system.
+version_added: "1.2"
+options:
+ fstype:
+ description:
+ - File System type to be created.
+ - reiserfs support was added in 2.2.
+ required: true
+ dev:
+ description:
+ - Target block device.
+ required: true
+ force:
+ choices: [ "yes", "no" ]
+ default: "no"
+ description:
+      - If yes, allows creating a new filesystem on a device that already has one.
+ required: false
+ resizefs:
+ choices: [ "yes", "no" ]
+ default: "no"
+ description:
+      - If yes, and the block device and filesystem sizes differ, grow the filesystem into the space. Note: XFS will only grow if mounted.
+ required: false
+ version_added: "2.0"
+ opts:
+ description:
+ - List of options to be passed to mkfs command.
+notes:
+ - uses mkfs command
+'''
+
+EXAMPLES = '''
+# Create a ext2 filesystem on /dev/sdb1.
+- filesystem: fstype=ext2 dev=/dev/sdb1
+
+# Create a ext4 filesystem on /dev/sdb1 and check disk blocks.
+- filesystem: fstype=ext4 dev=/dev/sdb1 opts="-cc"
+'''
+
+def _get_dev_size(dev, module):
+ """ Return size in bytes of device. Returns int """
+ blockdev_cmd = module.get_bin_path("blockdev", required=True)
+ rc, devsize_in_bytes, err = module.run_command("%s %s %s" % (blockdev_cmd, "--getsize64", dev))
+ return int(devsize_in_bytes)
+
+
+def _get_fs_size(fssize_cmd, dev, module):
+ """ Return size in bytes of filesystem on device. Returns int """
+ cmd = module.get_bin_path(fssize_cmd, required=True)
+ if 'tune2fs' == fssize_cmd:
+ # Get Block count and Block size
+ rc, size, err = module.run_command("%s %s %s" % (cmd, '-l', dev))
+ if rc == 0:
+ for line in size.splitlines():
+ if 'Block count:' in line:
+ block_count = int(line.split(':')[1].strip())
+ elif 'Block size:' in line:
+ block_size = int(line.split(':')[1].strip())
+ break
+ else:
+ module.fail_json(msg="Failed to get block count and block size of %s with %s" % (dev, cmd), rc=rc, err=err )
+ elif 'xfs_info' == fssize_cmd:
+ # Get Block count and Block size
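+        # Illustrative xfs_info line the loop below parses (values assumed):
+        #   data     =     bsize=4096   blocks=26214400, imaxpct=25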
+ rc, size, err = module.run_command("%s %s" % (cmd, dev))
+ if rc == 0:
+ for line in size.splitlines():
+                if 'data ' in line:
+ block_size = int(line.split('=')[2].split()[0])
+ block_count = int(line.split('=')[3].split(',')[0])
+ break
+ else:
+ module.fail_json(msg="Failed to get block count and block size of %s with %s" % (dev, cmd), rc=rc, err=err )
+ elif 'btrfs' == fssize_cmd:
+ #ToDo
+ # There is no way to get the blocksize and blockcount for btrfs filesystems
+ block_size = 1
+ block_count = 1
+
+
+ return block_size*block_count
+
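+# Illustrative (assumed values): tune2fs reporting 'Block count: 26214400' and
+# 'Block size: 4096' gives _get_fs_size() == 26214400 * 4096 = 107374182400 bytes.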
+
+def main():
+ module = AnsibleModule(
+ argument_spec = dict(
+ fstype=dict(required=True, aliases=['type']),
+ dev=dict(required=True, aliases=['device']),
+ opts=dict(),
+ force=dict(type='bool', default='no'),
+ resizefs=dict(type='bool', default='no'),
+ ),
+ supports_check_mode=True,
+ )
+
+ # There is no "single command" to manipulate filesystems, so we map them all out and their options
+ fs_cmd_map = {
+ 'ext2' : {
+ 'mkfs' : 'mkfs.ext2',
+ 'grow' : 'resize2fs',
+ 'grow_flag' : None,
+ 'force_flag' : '-F',
+ 'fsinfo': 'tune2fs',
+ },
+ 'ext3' : {
+ 'mkfs' : 'mkfs.ext3',
+ 'grow' : 'resize2fs',
+ 'grow_flag' : None,
+ 'force_flag' : '-F',
+ 'fsinfo': 'tune2fs',
+ },
+ 'ext4' : {
+ 'mkfs' : 'mkfs.ext4',
+ 'grow' : 'resize2fs',
+ 'grow_flag' : None,
+ 'force_flag' : '-F',
+ 'fsinfo': 'tune2fs',
+ },
+ 'reiserfs' : {
+ 'mkfs' : 'mkfs.reiserfs',
+ 'grow' : 'resize_reiserfs',
+ 'grow_flag' : None,
+ 'force_flag' : '-f',
+ 'fsinfo': 'reiserfstune',
+ },
+ 'ext4dev' : {
+ 'mkfs' : 'mkfs.ext4',
+ 'grow' : 'resize2fs',
+ 'grow_flag' : None,
+ 'force_flag' : '-F',
+ 'fsinfo': 'tune2fs',
+ },
+ 'xfs' : {
+ 'mkfs' : 'mkfs.xfs',
+ 'grow' : 'xfs_growfs',
+ 'grow_flag' : None,
+ 'force_flag' : '-f',
+ 'fsinfo': 'xfs_info',
+ },
+ 'btrfs' : {
+ 'mkfs' : 'mkfs.btrfs',
+ 'grow' : 'btrfs',
+ 'grow_flag' : 'filesystem resize',
+ 'force_flag' : '-f',
+ 'fsinfo': 'btrfs',
+ }
+ }
+
+ dev = module.params['dev']
+ fstype = module.params['fstype']
+ opts = module.params['opts']
+ force = module.boolean(module.params['force'])
+ resizefs = module.boolean(module.params['resizefs'])
+
+ changed = False
+
+ try:
+ _ = fs_cmd_map[fstype]
+ except KeyError:
+ module.exit_json(changed=False, msg="WARNING: module does not support this filesystem yet. %s" % fstype)
+
+ mkfscmd = fs_cmd_map[fstype]['mkfs']
+ force_flag = fs_cmd_map[fstype]['force_flag']
+ growcmd = fs_cmd_map[fstype]['grow']
+ fssize_cmd = fs_cmd_map[fstype]['fsinfo']
+
+ if not os.path.exists(dev):
+ module.fail_json(msg="Device %s not found."%dev)
+
+ cmd = module.get_bin_path('blkid', required=True)
+
+ rc,raw_fs,err = module.run_command("%s -c /dev/null -o value -s TYPE %s" % (cmd, dev))
+ fs = raw_fs.strip()
+
+    if fs == fstype and not resizefs and not force:
+        module.exit_json(changed=False)
+    elif fs == fstype and resizefs:
+ # Get dev and fs size and compare
+ devsize_in_bytes = _get_dev_size(dev, module)
+ fssize_in_bytes = _get_fs_size(fssize_cmd, dev, module)
+        fs_smaller = fssize_in_bytes < devsize_in_bytes
+
+
+ if module.check_mode and fs_smaller:
+ module.exit_json(changed=True, msg="Resizing filesystem %s on device %s" % (fstype,dev))
+ elif module.check_mode and not fs_smaller:
+ module.exit_json(changed=False, msg="%s filesystem is using the whole device %s" % (fstype, dev))
+ elif fs_smaller:
+ cmd = module.get_bin_path(growcmd, required=True)
+ rc,out,err = module.run_command("%s %s" % (cmd, dev))
+ # Sadly there is no easy way to determine if this has changed. For now, just say "true" and move on.
+ # in the future, you would have to parse the output to determine this.
+ # thankfully, these are safe operations if no change is made.
+ if rc == 0:
+ module.exit_json(changed=True, msg=out)
+ else:
+ module.fail_json(msg="Resizing filesystem %s on device '%s' failed"%(fstype,dev), rc=rc, err=err)
+ else:
+ module.exit_json(changed=False, msg="%s filesystem is using the whole device %s" % (fstype, dev))
+ elif fs and not force:
+ module.fail_json(msg="'%s' is already used as %s, use force=yes to overwrite"%(dev,fs), rc=rc, err=err)
+
+ ### create fs
+
+ if module.check_mode:
+ changed = True
+ else:
+ mkfs = module.get_bin_path(mkfscmd, required=True)
+ cmd = None
+
+ if opts is None:
+ cmd = "%s %s '%s'" % (mkfs, force_flag, dev)
+ else:
+ cmd = "%s %s %s '%s'" % (mkfs, force_flag, opts, dev)
+ rc,_,err = module.run_command(cmd)
+ if rc == 0:
+ changed = True
+ else:
+ module.fail_json(msg="Creating filesystem %s on device '%s' failed"%(fstype,dev), rc=rc, err=err)
+
+ module.exit_json(changed=changed)
+
+# import module snippets
+from ansible.module_utils.basic import *
+if __name__ == '__main__':
+ main()
diff --git a/lib/ansible/modules/extras/system/firewalld.py b/lib/ansible/modules/extras/system/firewalld.py
new file mode 100644
index 0000000000..eefaa45dd9
--- /dev/null
+++ b/lib/ansible/modules/extras/system/firewalld.py
@@ -0,0 +1,641 @@
+#!/usr/bin/python
+# -*- coding: utf-8 -*-
+
+# (c) 2013, Adam Miller (maxamillion@fedoraproject.org)
+#
+# This file is part of Ansible
+#
+# Ansible is free software: you can redistribute it and/or modify
+# it under the terms of the GNU General Public License as published by
+# the Free Software Foundation, either version 3 of the License, or
+# (at your option) any later version.
+#
+# Ansible is distributed in the hope that it will be useful,
+# but WITHOUT ANY WARRANTY; without even the implied warranty of
+# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+# GNU General Public License for more details.
+#
+# You should have received a copy of the GNU General Public License
+# along with Ansible. If not, see <http://www.gnu.org/licenses/>.
+
+DOCUMENTATION = '''
+---
+module: firewalld
+short_description: Manage arbitrary ports/services with firewalld
+description:
+ - This module allows for addition or deletion of services and ports either tcp or udp in either running or permanent firewalld rules.
+version_added: "1.4"
+options:
+ service:
+ description:
+ - "Name of a service to add/remove to/from firewalld - service must be listed in /etc/services."
+ required: false
+ default: null
+ port:
+ description:
+ - "Name of a port or port range to add/remove to/from firewalld. Must be in the form PORT/PROTOCOL or PORT-PORT/PROTOCOL for port ranges."
+ required: false
+ default: null
+ rich_rule:
+ description:
+ - "Rich rule to add/remove to/from firewalld."
+ required: false
+ default: null
+ source:
+ description:
+ - 'The source/network you would like to add/remove to/from firewalld'
+ required: false
+ default: null
+ version_added: "2.0"
+ interface:
+ description:
+ - 'The interface you would like to add/remove to/from a zone in firewalld'
+ required: false
+ default: null
+ version_added: "2.1"
+ zone:
+ description:
+ - 'The firewalld zone to add/remove to/from (NOTE: default zone can be configured per system but "public" is default from upstream. Available choices can be extended based on per-system configs, listed here are "out of the box" defaults).'
+ required: false
+ default: system-default(public)
+ choices: [ "work", "drop", "internal", "external", "trusted", "home", "dmz", "public", "block" ]
+ permanent:
+ description:
+ - "Should this configuration be in the running firewalld configuration or persist across reboots."
+ required: false
+ default: null
+ immediate:
+ description:
+ - "Should this configuration be applied immediately, if set as permanent"
+ required: false
+ default: false
+ version_added: "1.9"
+ state:
+ description:
+ - "Should this port accept(enabled) or reject(disabled) connections."
+ required: true
+ choices: [ "enabled", "disabled" ]
+ timeout:
+ description:
+ - "The amount of time the rule should be in effect for when non-permanent."
+ required: false
+ default: 0
+ masquerade:
+ description:
+ - 'The masquerade setting you would like to enable/disable to/from zones within firewalld'
+ required: false
+ default: null
+ version_added: "2.1"
+notes:
+ - Not tested on any Debian based system.
+ - Requires the python2 bindings of firewalld, which may not be installed by default if the distribution switched to python 3
+requirements: [ 'firewalld >= 0.2.11' ]
+author: "Adam Miller (@maxamillion)"
+'''
+
+EXAMPLES = '''
+- firewalld: service=https permanent=true state=enabled
+- firewalld: port=8081/tcp permanent=true state=disabled
+- firewalld: port=161-162/udp permanent=true state=enabled
+- firewalld: zone=dmz service=http permanent=true state=enabled
+- firewalld: rich_rule='rule service name="ftp" audit limit value="1/m" accept' permanent=true state=enabled
+- firewalld: source='192.0.2.0/24' zone=internal state=enabled
+- firewalld: zone=trusted interface=eth2 permanent=true state=enabled
+- firewalld: masquerade=yes state=enabled permanent=true zone=dmz
+'''
+
+import os
+import re
+
+try:
+ import firewall.config
+ FW_VERSION = firewall.config.VERSION
+
+ from firewall.client import Rich_Rule
+ from firewall.client import FirewallClient
+ fw = FirewallClient()
+ HAS_FIREWALLD = True
+except ImportError:
+ HAS_FIREWALLD = False
+
+
+#####################
+# masquerade handling
+#
+def get_masquerade_enabled(zone):
+ if fw.queryMasquerade(zone) == True:
+ return True
+ else:
+ return False
+
+def get_masquerade_enabled_permanent(zone):
+ fw_zone = fw.config().getZoneByName(zone)
+ fw_settings = fw_zone.getSettings()
+ if fw_settings.getMasquerade() == True:
+ return True
+ else:
+ return False
+
+def set_masquerade_enabled(zone):
+ fw.addMasquerade(zone)
+
+def set_masquerade_disabled(zone):
+ fw.removeMasquerade(zone)
+
+def set_masquerade_permanent(zone, masquerade):
+ fw_zone = fw.config().getZoneByName(zone)
+ fw_settings = fw_zone.getSettings()
+ fw_settings.setMasquerade(masquerade)
+ fw_zone.update(fw_settings)
+
+################
+# port handling
+#
+def get_port_enabled(zone, port_proto):
+ if port_proto in fw.getPorts(zone):
+ return True
+ else:
+ return False
+
+def set_port_enabled(zone, port, protocol, timeout):
+ fw.addPort(zone, port, protocol, timeout)
+
+def set_port_disabled(zone, port, protocol):
+ fw.removePort(zone, port, protocol)
+
+def get_port_enabled_permanent(zone, port_proto):
+ fw_zone = fw.config().getZoneByName(zone)
+ fw_settings = fw_zone.getSettings()
+ if tuple(port_proto) in fw_settings.getPorts():
+ return True
+ else:
+ return False
+
+def set_port_enabled_permanent(zone, port, protocol):
+ fw_zone = fw.config().getZoneByName(zone)
+ fw_settings = fw_zone.getSettings()
+ fw_settings.addPort(port, protocol)
+ fw_zone.update(fw_settings)
+
+def set_port_disabled_permanent(zone, port, protocol):
+ fw_zone = fw.config().getZoneByName(zone)
+ fw_settings = fw_zone.getSettings()
+ fw_settings.removePort(port, protocol)
+ fw_zone.update(fw_settings)
+
+####################
+# source handling
+#
+def get_source(zone, source):
+ fw_zone = fw.config().getZoneByName(zone)
+ fw_settings = fw_zone.getSettings()
+ if source in fw_settings.getSources():
+ return True
+ else:
+ return False
+
+def add_source(zone, source):
+ fw_zone = fw.config().getZoneByName(zone)
+ fw_settings = fw_zone.getSettings()
+ fw_settings.addSource(source)
+ fw_zone.update(fw_settings)
+
+def remove_source(zone, source):
+ fw_zone = fw.config().getZoneByName(zone)
+ fw_settings = fw_zone.getSettings()
+ fw_settings.removeSource(source)
+ fw_zone.update(fw_settings)
+
+####################
+# interface handling
+#
+def get_interface(zone, interface):
+ if interface in fw.getInterfaces(zone):
+ return True
+ else:
+ return False
+
+def change_zone_of_interface(zone, interface):
+ fw.changeZoneOfInterface(zone, interface)
+
+def remove_interface(zone, interface):
+ fw.removeInterface(zone, interface)
+
+def get_interface_permanent(zone, interface):
+ fw_zone = fw.config().getZoneByName(zone)
+ fw_settings = fw_zone.getSettings()
+ if interface in fw_settings.getInterfaces():
+ return True
+ else:
+ return False
+
+def change_zone_of_interface_permanent(zone, interface):
+ fw_zone = fw.config().getZoneByName(zone)
+ fw_settings = fw_zone.getSettings()
+ old_zone_name = fw.config().getZoneOfInterface(interface)
+ if old_zone_name != zone:
+ if old_zone_name:
+ old_zone_obj = fw.config().getZoneByName(old_zone_name)
+ old_zone_settings = old_zone_obj.getSettings()
+ old_zone_settings.removeInterface(interface) # remove from old
+ old_zone_obj.update(old_zone_settings)
+ fw_settings.addInterface(interface) # add to new
+ fw_zone.update(fw_settings)
+
+def remove_interface_permanent(zone, interface):
+ fw_zone = fw.config().getZoneByName(zone)
+ fw_settings = fw_zone.getSettings()
+ fw_settings.removeInterface(interface)
+ fw_zone.update(fw_settings)
+
+####################
+# service handling
+#
+def get_service_enabled(zone, service):
+ if service in fw.getServices(zone):
+ return True
+ else:
+ return False
+
+def set_service_enabled(zone, service, timeout):
+ fw.addService(zone, service, timeout)
+
+def set_service_disabled(zone, service):
+ fw.removeService(zone, service)
+
+def get_service_enabled_permanent(zone, service):
+ fw_zone = fw.config().getZoneByName(zone)
+ fw_settings = fw_zone.getSettings()
+ if service in fw_settings.getServices():
+ return True
+ else:
+ return False
+
+def set_service_enabled_permanent(zone, service):
+ fw_zone = fw.config().getZoneByName(zone)
+ fw_settings = fw_zone.getSettings()
+ fw_settings.addService(service)
+ fw_zone.update(fw_settings)
+
+def set_service_disabled_permanent(zone, service):
+ fw_zone = fw.config().getZoneByName(zone)
+ fw_settings = fw_zone.getSettings()
+ fw_settings.removeService(service)
+ fw_zone.update(fw_settings)
+
+
+####################
+# rich rule handling
+#
+def get_rich_rule_enabled(zone, rule):
+ # Convert the rule string to standard format
+ # before checking whether it is present
+ rule = str(Rich_Rule(rule_str=rule))
+ if rule in fw.getRichRules(zone):
+ return True
+ else:
+ return False
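+# Note (assumed firewalld behaviour): round-tripping a rule through Rich_Rule
+# canonicalises spacing and quoting, so textually different but equivalent
+# rule strings compare equal against firewalld's stored rules.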
+
+def set_rich_rule_enabled(zone, rule, timeout):
+ fw.addRichRule(zone, rule, timeout)
+
+def set_rich_rule_disabled(zone, rule):
+ fw.removeRichRule(zone, rule)
+
+def get_rich_rule_enabled_permanent(zone, rule):
+ fw_zone = fw.config().getZoneByName(zone)
+ fw_settings = fw_zone.getSettings()
+ # Convert the rule string to standard format
+ # before checking whether it is present
+ rule = str(Rich_Rule(rule_str=rule))
+ if rule in fw_settings.getRichRules():
+ return True
+ else:
+ return False
+
+def set_rich_rule_enabled_permanent(zone, rule):
+ fw_zone = fw.config().getZoneByName(zone)
+ fw_settings = fw_zone.getSettings()
+ fw_settings.addRichRule(rule)
+ fw_zone.update(fw_settings)
+
+def set_rich_rule_disabled_permanent(zone, rule):
+ fw_zone = fw.config().getZoneByName(zone)
+ fw_settings = fw_zone.getSettings()
+ fw_settings.removeRichRule(rule)
+ fw_zone.update(fw_settings)
+
+
+def main():
+
+ module = AnsibleModule(
+ argument_spec = dict(
+ service=dict(required=False,default=None),
+ port=dict(required=False,default=None),
+ rich_rule=dict(required=False,default=None),
+ zone=dict(required=False,default=None),
+ immediate=dict(type='bool',default=False),
+ source=dict(required=False,default=None),
+ permanent=dict(type='bool',required=False,default=None),
+ state=dict(choices=['enabled', 'disabled'], required=True),
+ timeout=dict(type='int',required=False,default=0),
+ interface=dict(required=False,default=None),
+ masquerade=dict(required=False,default=None),
+ ),
+ supports_check_mode=True
+ )
+ if module.params['source'] == None and module.params['permanent'] == None:
+ module.fail_json(msg='permanent is a required parameter')
+
+ if module.params['interface'] != None and module.params['zone'] == None:
+        module.fail_json(msg='zone is a required parameter')
+
+ if not HAS_FIREWALLD:
+ module.fail_json(msg='firewalld and its python 2 module are required for this module')
+
+ ## Pre-run version checking
+ if FW_VERSION < "0.2.11":
+        module.fail_json(msg='unsupported version of firewalld, requires >= 0.2.11')
+ ## Check for firewalld running
+ try:
+ if fw.connected == False:
+ module.fail_json(msg='firewalld service must be running')
+ except AttributeError:
+ module.fail_json(msg="firewalld connection can't be established,\
+            installed version (%s) likely too old. Requires firewalld >= 0.2.11" % FW_VERSION)
+
+ ## Global Vars
+ changed=False
+ msgs = []
+ service = module.params['service']
+ rich_rule = module.params['rich_rule']
+ source = module.params['source']
+
+    if module.params['port'] != None:
+        if '/' not in module.params['port']:
+            # split() below would raise ValueError rather than yield protocol=None
+            module.fail_json(msg='improper port format (missing protocol?)')
+        port, protocol = module.params['port'].split('/')
+    else:
+        port = None
+
+ if module.params['zone'] != None:
+ zone = module.params['zone']
+ else:
+ zone = fw.getDefaultZone()
+
+ permanent = module.params['permanent']
+ desired_state = module.params['state']
+ immediate = module.params['immediate']
+ timeout = module.params['timeout']
+ interface = module.params['interface']
+ masquerade = module.params['masquerade']
+
+ modification_count = 0
+ if service != None:
+ modification_count += 1
+ if port != None:
+ modification_count += 1
+ if rich_rule != None:
+ modification_count += 1
+ if interface != None:
+ modification_count += 1
+ if masquerade != None:
+ modification_count += 1
+
+ if modification_count > 1:
+        module.fail_json(msg='can only operate on one of port, service, rich_rule, interface or masquerade at a time')
+
+ if service != None:
+ if permanent:
+ is_enabled = get_service_enabled_permanent(zone, service)
+ msgs.append('Permanent operation')
+
+ if desired_state == "enabled":
+ if is_enabled == False:
+ if module.check_mode:
+ module.exit_json(changed=True)
+
+ set_service_enabled_permanent(zone, service)
+ changed=True
+ elif desired_state == "disabled":
+ if is_enabled == True:
+ if module.check_mode:
+ module.exit_json(changed=True)
+
+ set_service_disabled_permanent(zone, service)
+ changed=True
+ if immediate or not permanent:
+ is_enabled = get_service_enabled(zone, service)
+ msgs.append('Non-permanent operation')
+
+
+ if desired_state == "enabled":
+ if is_enabled == False:
+ if module.check_mode:
+ module.exit_json(changed=True)
+
+ set_service_enabled(zone, service, timeout)
+ changed=True
+ elif desired_state == "disabled":
+ if is_enabled == True:
+ if module.check_mode:
+ module.exit_json(changed=True)
+
+ set_service_disabled(zone, service)
+ changed=True
+
+ if changed == True:
+ msgs.append("Changed service %s to %s" % (service, desired_state))
+
+ if source != None:
+ is_enabled = get_source(zone, source)
+ if desired_state == "enabled":
+ if is_enabled == False:
+ if module.check_mode:
+ module.exit_json(changed=True)
+
+ add_source(zone, source)
+ changed=True
+ msgs.append("Added %s to zone %s" % (source, zone))
+ elif desired_state == "disabled":
+ if is_enabled == True:
+ if module.check_mode:
+ module.exit_json(changed=True)
+
+ remove_source(zone, source)
+ changed=True
+ msgs.append("Removed %s from zone %s" % (source, zone))
+
+ if port != None:
+ if permanent:
+ is_enabled = get_port_enabled_permanent(zone, [port, protocol])
+ msgs.append('Permanent operation')
+
+ if desired_state == "enabled":
+ if is_enabled == False:
+ if module.check_mode:
+ module.exit_json(changed=True)
+
+ set_port_enabled_permanent(zone, port, protocol)
+ changed=True
+ elif desired_state == "disabled":
+ if is_enabled == True:
+ if module.check_mode:
+ module.exit_json(changed=True)
+
+ set_port_disabled_permanent(zone, port, protocol)
+ changed=True
+ if immediate or not permanent:
+ is_enabled = get_port_enabled(zone, [port,protocol])
+ msgs.append('Non-permanent operation')
+
+ if desired_state == "enabled":
+ if is_enabled == False:
+ if module.check_mode:
+ module.exit_json(changed=True)
+
+ set_port_enabled(zone, port, protocol, timeout)
+ changed=True
+ elif desired_state == "disabled":
+ if is_enabled == True:
+ if module.check_mode:
+ module.exit_json(changed=True)
+
+ set_port_disabled(zone, port, protocol)
+ changed=True
+
+ if changed == True:
+ msgs.append("Changed port %s to %s" % ("%s/%s" % (port, protocol), \
+ desired_state))
+
+ if rich_rule != None:
+ if permanent:
+ is_enabled = get_rich_rule_enabled_permanent(zone, rich_rule)
+ msgs.append('Permanent operation')
+
+ if desired_state == "enabled":
+ if is_enabled == False:
+ if module.check_mode:
+ module.exit_json(changed=True)
+
+ set_rich_rule_enabled_permanent(zone, rich_rule)
+ changed=True
+ elif desired_state == "disabled":
+ if is_enabled == True:
+ if module.check_mode:
+ module.exit_json(changed=True)
+
+ set_rich_rule_disabled_permanent(zone, rich_rule)
+ changed=True
+ if immediate or not permanent:
+ is_enabled = get_rich_rule_enabled(zone, rich_rule)
+ msgs.append('Non-permanent operation')
+
+ if desired_state == "enabled":
+ if is_enabled == False:
+ if module.check_mode:
+ module.exit_json(changed=True)
+
+ set_rich_rule_enabled(zone, rich_rule, timeout)
+ changed=True
+ elif desired_state == "disabled":
+ if is_enabled == True:
+ if module.check_mode:
+ module.exit_json(changed=True)
+
+ set_rich_rule_disabled(zone, rich_rule)
+ changed=True
+
+ if changed == True:
+ msgs.append("Changed rich_rule %s to %s" % (rich_rule, desired_state))
+
+ if interface != None:
+ if permanent:
+ is_enabled = get_interface_permanent(zone, interface)
+ msgs.append('Permanent operation')
+ if desired_state == "enabled":
+ if is_enabled == False:
+ if module.check_mode:
+ module.exit_json(changed=True)
+
+ change_zone_of_interface_permanent(zone, interface)
+ changed=True
+ msgs.append("Changed %s to zone %s" % (interface, zone))
+ elif desired_state == "disabled":
+ if is_enabled == True:
+ if module.check_mode:
+ module.exit_json(changed=True)
+
+ remove_interface_permanent(zone, interface)
+ changed=True
+ msgs.append("Removed %s from zone %s" % (interface, zone))
+ if immediate or not permanent:
+ is_enabled = get_interface(zone, interface)
+ msgs.append('Non-permanent operation')
+ if desired_state == "enabled":
+ if is_enabled == False:
+ if module.check_mode:
+ module.exit_json(changed=True)
+
+ change_zone_of_interface(zone, interface)
+ changed=True
+ msgs.append("Changed %s to zone %s" % (interface, zone))
+ elif desired_state == "disabled":
+ if is_enabled == True:
+ if module.check_mode:
+ module.exit_json(changed=True)
+
+ remove_interface(zone, interface)
+ changed=True
+ msgs.append("Removed %s from zone %s" % (interface, zone))
+
+ if masquerade != None:
+
+ if permanent:
+ is_enabled = get_masquerade_enabled_permanent(zone)
+ msgs.append('Permanent operation')
+
+ if desired_state == "enabled":
+ if is_enabled == False:
+ if module.check_mode:
+ module.exit_json(changed=True)
+
+ set_masquerade_permanent(zone, True)
+ changed=True
+ msgs.append("Added masquerade to zone %s" % (zone))
+ elif desired_state == "disabled":
+ if is_enabled == True:
+ if module.check_mode:
+ module.exit_json(changed=True)
+
+ set_masquerade_permanent(zone, False)
+ changed=True
+ msgs.append("Removed masquerade from zone %s" % (zone))
+ if immediate or not permanent:
+ is_enabled = get_masquerade_enabled(zone)
+ msgs.append('Non-permanent operation')
+
+ if desired_state == "enabled":
+ if is_enabled == False:
+ if module.check_mode:
+ module.exit_json(changed=True)
+
+ set_masquerade_enabled(zone)
+ changed=True
+ msgs.append("Added masquerade to zone %s" % (zone))
+ elif desired_state == "disabled":
+ if is_enabled == True:
+ if module.check_mode:
+ module.exit_json(changed=True)
+
+ set_masquerade_disabled(zone)
+ changed=True
+ msgs.append("Removed masquerade from zone %s" % (zone))
+
+ module.exit_json(changed=changed, msg=', '.join(msgs))
+
+
+#################################################
+# import module snippets
+from ansible.module_utils.basic import *
+main()
diff --git a/lib/ansible/modules/extras/system/getent.py b/lib/ansible/modules/extras/system/getent.py
new file mode 100644
index 0000000000..37bfc244de
--- /dev/null
+++ b/lib/ansible/modules/extras/system/getent.py
@@ -0,0 +1,145 @@
+#!/usr/bin/python
+# -*- coding: utf-8 -*-
+
+# (c) 2014, Brian Coca <brian.coca+dev@gmail.com>
+#
+# This file is part of Ansible
+#
+# Ansible is free software: you can redistribute it and/or modify
+# it under the terms of the GNU General Public License as published by
+# the Free Software Foundation, either version 3 of the License, or
+# (at your option) any later version.
+#
+# Ansible is distributed in the hope that it will be useful,
+# but WITHOUT ANY WARRANTY; without even the implied warranty of
+# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+# GNU General Public License for more details.
+#
+# You should have received a copy of the GNU General Public License
+# along with Ansible. If not, see <http://www.gnu.org/licenses/>.
+#
+
+
+DOCUMENTATION = '''
+---
+module: getent
+short_description: a wrapper to the unix getent utility
+description:
+    - Runs getent against one of its various databases and returns information into
+ the host's facts, in a getent_<database> prefixed variable
+version_added: "1.8"
+options:
+ database:
+ required: True
+ description:
+ - the name of a getent database supported by the target system (passwd, group,
+ hosts, etc).
+ key:
+ required: False
+ default: ''
+ description:
+ - key from which to return values from the specified database, otherwise the
+ full contents are returned.
+ split:
+ required: False
+ default: None
+ description:
+ - "character used to split the database values into lists/arrays such as ':' or '\t', otherwise it will try to pick one depending on the database"
+ fail_key:
+ required: False
+ default: True
+ description:
+      - If True, the task fails when the supplied key is missing from the database.
+
+notes:
+ - "Not all databases support enumeration, check system documentation for details"
+requirements: [ ]
+author: "Brian Coca (@bcoca)"
+'''
+
+EXAMPLES = '''
+# get root user info
+- getent: database=passwd key=root
+- debug: var=getent_passwd
+
+# get all groups
+- getent: database=group split=':'
+- debug: var=getent_group
+
+# get all hosts; default split is whitespace
+- getent: database=hosts
+- debug: var=getent_hosts
+
+# get http service info, no error if missing
+- getent: database=services key=http fail_key=False
+- debug: var=getent_services
+
+# get user password hash (requires sudo/root)
+- getent: database=shadow key=www-data split=:
+- debug: var=getent_shadow
+
+'''
+
+from ansible.module_utils.basic import *
+from ansible.module_utils.pycompat24 import get_exception
+
+def main():
+ module = AnsibleModule(
+ argument_spec = dict(
+ database = dict(required=True),
+ key = dict(required=False, default=None),
+ split = dict(required=False, default=None),
+ fail_key = dict(required=False, type='bool', default=True),
+ ),
+ supports_check_mode = True,
+ )
+
+ colon = [ 'passwd', 'shadow', 'group', 'gshadow' ]
+
+ database = module.params['database']
+ key = module.params.get('key')
+ split = module.params.get('split')
+ fail_key = module.params.get('fail_key')
+
+ getent_bin = module.get_bin_path('getent', True)
+
+ if key is not None:
+ cmd = [ getent_bin, database, key ]
+ else:
+ cmd = [ getent_bin, database ]
+
+ if split is None and database in colon:
+ split = ':'
+
+ try:
+ rc, out, err = module.run_command(cmd)
+ except Exception:
+ e = get_exception()
+ module.fail_json(msg=str(e))
+
+ msg = "Unexpected failure!"
+ dbtree = 'getent_%s' % database
+ results = { dbtree: {} }
+
+ if rc == 0:
+ for line in out.splitlines():
+ record = line.split(split)
+ results[dbtree][record[0]] = record[1:]
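+        # Illustrative facts for 'getent passwd root' with split=':':
+        #   getent_passwd: {'root': ['x', '0', '0', 'root', '/root', '/bin/bash']}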
+
+ module.exit_json(ansible_facts=results)
+
+ elif rc == 1:
+ msg = "Missing arguments, or database unknown."
+ elif rc == 2:
+ msg = "One or more supplied key could not be found in the database."
+ if not fail_key:
+ results[dbtree][key] = None
+ module.exit_json(ansible_facts=results, msg=msg)
+ elif rc == 3:
+ msg = "Enumeration not supported on this database."
+
+ module.fail_json(msg=msg)
+
+
+main()
+
diff --git a/lib/ansible/modules/extras/system/gluster_volume.py b/lib/ansible/modules/extras/system/gluster_volume.py
new file mode 100644
index 0000000000..85271d94ea
--- /dev/null
+++ b/lib/ansible/modules/extras/system/gluster_volume.py
@@ -0,0 +1,492 @@
+#!/usr/bin/python
+# -*- coding: utf-8 -*-
+
+# (c) 2014, Taneli Leppä <taneli@crasman.fi>
+#
+# This file is part of Ansible (sort of)
+#
+# Ansible is free software: you can redistribute it and/or modify
+# it under the terms of the GNU General Public License as published by
+# the Free Software Foundation, either version 3 of the License, or
+# (at your option) any later version.
+#
+# Ansible is distributed in the hope that it will be useful,
+# but WITHOUT ANY WARRANTY; without even the implied warranty of
+# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+# GNU General Public License for more details.
+#
+# You should have received a copy of the GNU General Public License
+# along with Ansible. If not, see <http://www.gnu.org/licenses/>.
+
+
+DOCUMENTATION = """
+module: gluster_volume
+short_description: Manage GlusterFS volumes
+description:
+ - Create, remove, start, stop and tune GlusterFS volumes
+version_added: "1.9"
+options:
+ name:
+ required: true
+ description:
+ - The volume name
+ state:
+ required: true
+ choices: [ 'present', 'absent', 'started', 'stopped' ]
+ description:
+      - Use present/absent to ensure whether a volume exists,
+        use started/stopped to control its availability.
+ cluster:
+ required: false
+ default: null
+ description:
+ - List of hosts to use for probing and brick setup
+ host:
+ required: false
+ default: null
+ description:
+ - Override local hostname (for peer probing purposes)
+ replicas:
+ required: false
+ default: null
+ description:
+ - Replica count for volume
+ stripes:
+ required: false
+ default: null
+ description:
+ - Stripe count for volume
+ disperses:
+ required: false
+ default: null
+ description:
+ - Disperse count for volume
+ version_added: "2.2"
+ redundancies:
+ required: false
+ default: null
+ description:
+ - Redundancy count for volume
+ version_added: "2.2"
+ transport:
+ required: false
+ choices: [ 'tcp', 'rdma', 'tcp,rdma' ]
+ default: 'tcp'
+ description:
+ - Transport type for volume
+ bricks:
+ required: false
+ default: null
+ description:
+ - Brick paths on servers. Multiple brick paths can be separated by commas
+ aliases: ['brick']
+ start_on_create:
+ choices: [ 'yes', 'no']
+ required: false
+ default: 'yes'
+ description:
+ - Controls whether the volume is started after creation or not, defaults to yes
+ rebalance:
+ choices: [ 'yes', 'no']
+ required: false
+ default: 'no'
+ description:
+ - Controls whether the cluster is rebalanced after changes
+ directory:
+ required: false
+ default: null
+ description:
+ - Directory for limit-usage
+ options:
+ required: false
+ default: null
+ description:
+ - A dictionary/hash with options/settings for the volume
+ quota:
+ required: false
+ default: null
+ description:
+ - Quota value for limit-usage (be sure to use 10.0MB instead of 10MB, see quota list)
+ force:
+ required: false
+ default: null
+ description:
+      - If the brick is being created in the root partition, the module
+        will fail. Set force to true to override this behaviour.
+notes:
+ - "Requires cli tools for GlusterFS on servers"
+ - "Will add new bricks, but not remove them"
+author: "Taneli Leppä (@rosmo)"
+"""
+
+EXAMPLES = """
+- name: create gluster volume
+ gluster_volume: state=present name=test1 bricks=/bricks/brick1/g1 rebalance=yes cluster="192.0.2.10,192.0.2.11"
+ run_once: true
+
+- name: tune
+ gluster_volume: state=present name=test1 options='{performance.cache-size: 256MB}'
+
+- name: start gluster volume
+ gluster_volume: state=started name=test1
+
+- name: limit usage
+ gluster_volume: state=present name=test1 directory=/foo quota=20.0MB
+
+- name: stop gluster volume
+ gluster_volume: state=stopped name=test1
+
+- name: remove gluster volume
+ gluster_volume: state=absent name=test1
+
+- name: create gluster volume with multiple bricks
+ gluster_volume: state=present name=test2 bricks="/bricks/brick1/g2,/bricks/brick2/g2" cluster="192.0.2.10,192.0.2.11"
+ run_once: true
+"""
+
+import shutil
+import time
+import socket
+from ansible.module_utils.pycompat24 import get_exception
+from ansible.module_utils.basic import *
+
+glusterbin = ''
+
+def run_gluster(gargs, **kwargs):
+ global glusterbin
+ global module
+ args = [glusterbin]
+ args.extend(gargs)
+ try:
+ rc, out, err = module.run_command(args, **kwargs)
+ if rc != 0:
+ module.fail_json(msg='error running gluster (%s) command (rc=%d): %s' % (' '.join(args), rc, out or err))
+ except Exception:
+ e = get_exception()
+ module.fail_json(msg='error running gluster (%s) command: %s' % (' '.join(args), str(e)))
+ return out
+
+def run_gluster_nofail(gargs, **kwargs):
+ global glusterbin
+ global module
+ args = [glusterbin]
+ args.extend(gargs)
+ rc, out, err = module.run_command(args, **kwargs)
+ if rc != 0:
+ return None
+ return out
+
+def run_gluster_yes(gargs):
+ global glusterbin
+ global module
+ args = [glusterbin]
+ args.extend(gargs)
+ rc, out, err = module.run_command(args, data='y\n')
+ if rc != 0:
+ module.fail_json(msg='error running gluster (%s) command (rc=%d): %s' % (' '.join(args), rc, out or err))
+ return out
+
+def get_peers():
+ out = run_gluster([ 'peer', 'status'])
+ i = 0
+ peers = {}
+ hostname = None
+ uuid = None
+ state = None
+ shortNames = False
+ for row in out.split('\n'):
+ if ': ' in row:
+ key, value = row.split(': ')
+ if key.lower() == 'hostname':
+ hostname = value
+ shortNames = False
+ if key.lower() == 'uuid':
+ uuid = value
+ if key.lower() == 'state':
+ state = value
+ peers[hostname] = [ uuid, state ]
+ elif row.lower() == 'other names:':
+ shortNames = True
+ elif row != '' and shortNames == True:
+ peers[row] = [ uuid, state ]
+ elif row == '':
+ shortNames = False
+ return peers
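+# get_peers() parses 'gluster peer status' output of roughly this shape
+# (illustrative):
+#   Hostname: 192.0.2.11
+#   Uuid: 2a70f3a6-...
+#   State: Peer in Cluster (Connected)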
+
+def get_volumes():
+ out = run_gluster([ 'volume', 'info' ])
+
+ volumes = {}
+ volume = {}
+ for row in out.split('\n'):
+ if ': ' in row:
+ key, value = row.split(': ')
+ if key.lower() == 'volume name':
+ volume['name'] = value
+ volume['options'] = {}
+ volume['quota'] = False
+ if key.lower() == 'volume id':
+ volume['id'] = value
+ if key.lower() == 'status':
+ volume['status'] = value
+ if key.lower() == 'transport-type':
+ volume['transport'] = value
+ if key.lower() != 'bricks' and key.lower()[:5] == 'brick':
+ if not 'bricks' in volume:
+ volume['bricks'] = []
+ volume['bricks'].append(value)
+ # Volume options
+ if '.' in key:
+ if not 'options' in volume:
+ volume['options'] = {}
+ volume['options'][key] = value
+ if key == 'features.quota' and value == 'on':
+ volume['quota'] = True
+ else:
+ if row.lower() != 'bricks:' and row.lower() != 'options reconfigured:':
+ if len(volume) > 0:
+ volumes[volume['name']] = volume
+ volume = {}
+ return volumes
+
+def get_quotas(name, nofail):
+ quotas = {}
+ if nofail:
+ out = run_gluster_nofail([ 'volume', 'quota', name, 'list' ])
+ if not out:
+ return quotas
+ else:
+ out = run_gluster([ 'volume', 'quota', name, 'list' ])
+ for row in out.split('\n'):
+ if row[:1] == '/':
+ q = re.split(r'\s+', row)
+ quotas[q[0]] = q[1]
+ return quotas
+
+def wait_for_peer(host):
+ for x in range(0, 4):
+ peers = get_peers()
+ if host in peers and peers[host][1].lower().find('peer in cluster') != -1:
+ return True
+ time.sleep(1)
+ return False
+
+def probe(host, myhostname):
+ global module
+ out = run_gluster([ 'peer', 'probe', host ])
+ # probing ourselves reports 'localhost' and needs no wait; for a real peer,
+ # wait until 'peer status' shows it in the cluster
+ if 'localhost' not in out and not wait_for_peer(host):
+ module.fail_json(msg='failed to probe peer %s on %s' % (host, myhostname))
+
+def probe_all_peers(hosts, peers, myhostname):
+ for host in hosts:
+ host = host.strip() # Clean up any extra space for exact comparison
+ if host not in peers:
+ probe(host, myhostname)
+
+def create_volume(name, stripe, replica, disperse, redundancy, transport, hosts, bricks, force):
+ args = [ 'volume', 'create' ]
+ args.append(name)
+ if stripe:
+ args.append('stripe')
+ args.append(str(stripe))
+ if replica:
+ args.append('replica')
+ args.append(str(replica))
+ if disperse:
+ args.append('disperse')
+ args.append(str(disperse))
+ if redundancy:
+ args.append('redundancy')
+ args.append(str(redundancy))
+ args.append('transport')
+ args.append(transport)
+ for brick in bricks:
+ for host in hosts:
+ args.append(('%s:%s' % (host, brick)))
+ if force:
+ args.append('force')
+ run_gluster(args)
+
+def start_volume(name):
+ run_gluster([ 'volume', 'start', name ])
+
+def stop_volume(name):
+ run_gluster_yes([ 'volume', 'stop', name ])
+
+def set_volume_option(name, option, parameter):
+ run_gluster([ 'volume', 'set', name, option, parameter ])
+
+def add_bricks(name, new_bricks, force):
+ args = [ 'volume', 'add-brick', name ]
+ args.extend(new_bricks)
+ if force:
+ args.append('force')
+ run_gluster(args)
+
+def do_rebalance(name):
+ run_gluster([ 'volume', 'rebalance', name, 'start' ])
+
+def enable_quota(name):
+ run_gluster([ 'volume', 'quota', name, 'enable' ])
+
+def set_quota(name, directory, value):
+ run_gluster([ 'volume', 'quota', name, 'limit-usage', directory, value ])
+
+
+def main():
+ ### MAIN ###
+
+ global module
+ module = AnsibleModule(
+ argument_spec=dict(
+ name=dict(required=True, aliases=['volume']),
+ state=dict(required=True, choices=[ 'present', 'absent', 'started', 'stopped', 'rebalanced' ]),
+ cluster=dict(required=False, default=None, type='list'),
+ host=dict(required=False, default=None),
+ stripes=dict(required=False, default=None, type='int'),
+ replicas=dict(required=False, default=None, type='int'),
+ disperses=dict(required=False, default=None, type='int'),
+ redundancies=dict(required=False, default=None, type='int'),
+ transport=dict(required=False, default='tcp', choices=[ 'tcp', 'rdma', 'tcp,rdma' ]),
+ bricks=dict(required=False, default=None, aliases=['brick']),
+ start_on_create=dict(required=False, default=True, type='bool'),
+ rebalance=dict(required=False, default=False, type='bool'),
+ options=dict(required=False, default={}, type='dict'),
+ quota=dict(required=False),
+ directory=dict(required=False, default=None),
+ force=dict(required=False, default=False, type='bool'),
+ )
+ )
+
+ global glusterbin
+ glusterbin = module.get_bin_path('gluster', True)
+
+ changed = False
+
+ action = module.params['state']
+ volume_name = module.params['name']
+ cluster = module.params['cluster']
+ brick_paths = module.params['bricks']
+ stripes = module.params['stripes']
+ replicas = module.params['replicas']
+ disperses = module.params['disperses']
+ redundancies = module.params['redundancies']
+ transport = module.params['transport']
+ myhostname = module.params['host']
+ start_on_create = module.boolean(module.params['start_on_create'])
+ rebalance = module.boolean(module.params['rebalance'])
+ force = module.boolean(module.params['force'])
+
+ if not myhostname:
+ myhostname = socket.gethostname()
+
+ # Clean up if last element is empty. Consider that yml can look like this:
+ # cluster="{% for host in groups['glusterfs'] %}{{ hostvars[host]['private_ip'] }},{% endfor %}"
+ if cluster is not None and len(cluster) > 1 and cluster[-1] == '':
+ cluster = cluster[0:-1]
+
+ if cluster is None or cluster[0] == '':
+ cluster = [myhostname]
+
+ if brick_paths is not None and "," in brick_paths:
+ brick_paths = brick_paths.split(",")
+ else:
+ brick_paths = [brick_paths]
+
+ options = module.params['options']
+ quota = module.params['quota']
+ directory = module.params['directory']
+
+
+ # get current state info
+ peers = get_peers()
+ volumes = get_volumes()
+ quotas = {}
+ if volume_name in volumes and volumes[volume_name]['quota'] and volumes[volume_name]['status'].lower() == 'started':
+ quotas = get_quotas(volume_name, True)
+
+ # do the work!
+ if action == 'absent':
+ if volume_name in volumes:
+ if volumes[volume_name]['status'].lower() != 'stopped':
+ stop_volume(volume_name)
+ run_gluster_yes([ 'volume', 'delete', volume_name ])
+ changed = True
+
+ if action == 'present':
+ probe_all_peers(cluster, peers, myhostname)
+
+ # create if it doesn't exist
+ if volume_name not in volumes:
+ create_volume(volume_name, stripes, replicas, disperses, redundancies, transport, cluster, brick_paths, force)
+ volumes = get_volumes()
+ changed = True
+
+ if volume_name in volumes:
+ if volumes[volume_name]['status'].lower() != 'started' and start_on_create:
+ start_volume(volume_name)
+ changed = True
+
+ # switch bricks
+ new_bricks = []
+ removed_bricks = []
+ all_bricks = []
+ for node in cluster:
+ for brick_path in brick_paths:
+ brick = '%s:%s' % (node, brick_path)
+ all_bricks.append(brick)
+ if brick not in volumes[volume_name]['bricks']:
+ new_bricks.append(brick)
+
+ # this module does not yet remove bricks, but we check those anyways
+ for brick in volumes[volume_name]['bricks']:
+ if brick not in all_bricks:
+ removed_bricks.append(brick)
+
+ if new_bricks:
+ add_bricks(volume_name, new_bricks, force)
+ changed = True
+
+ # handle quotas
+ if quota:
+ if not volumes[volume_name]['quota']:
+ enable_quota(volume_name)
+ quotas = get_quotas(volume_name, False)
+ if directory not in quotas or quotas[directory] != quota:
+ set_quota(volume_name, directory, quota)
+ changed = True
+
+ # set options
+ for option in options.keys():
+ if option not in volumes[volume_name]['options'] or volumes[volume_name]['options'][option] != options[option]:
+ set_volume_option(volume_name, option, options[option])
+ changed = True
+
+ else:
+ module.fail_json(msg='failed to create volume %s' % volume_name)
+
+ if action != 'absent' and volume_name not in volumes:
+ module.fail_json(msg='volume %s not found' % volume_name)
+
+ if action == 'started':
+ if volumes[volume_name]['status'].lower() != 'started':
+ start_volume(volume_name)
+ changed = True
+
+ if action == 'stopped':
+ if volumes[volume_name]['status'].lower() != 'stopped':
+ stop_volume(volume_name)
+ changed = True
+
+ if changed:
+ volumes = get_volumes()
+ if rebalance:
+ do_rebalance(volume_name)
+
+ facts = {}
+ facts['glusterfs'] = { 'peers': peers, 'volumes': volumes, 'quotas': quotas }
+
+ module.exit_json(changed=changed, ansible_facts=facts)
+
+main()
diff --git a/lib/ansible/modules/extras/system/iptables.py b/lib/ansible/modules/extras/system/iptables.py
new file mode 100644
index 0000000000..5d05518236
--- /dev/null
+++ b/lib/ansible/modules/extras/system/iptables.py
@@ -0,0 +1,536 @@
+#!/usr/bin/python
+# -*- coding: utf-8 -*-
+#
+# (c) 2015, Linus Unnebäck <linus@folkdatorn.se>
+#
+# This file is part of Ansible
+#
+# This module is free software: you can redistribute it and/or modify
+# it under the terms of the GNU General Public License as published by
+# the Free Software Foundation, either version 3 of the License, or
+# (at your option) any later version.
+#
+# This software is distributed in the hope that it will be useful,
+# but WITHOUT ANY WARRANTY; without even the implied warranty of
+# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+# GNU General Public License for more details.
+#
+# You should have received a copy of the GNU General Public License
+# along with this software. If not, see <http://www.gnu.org/licenses/>.
+
+BINS = dict(
+ ipv4='iptables',
+ ipv6='ip6tables',
+)
+
+DOCUMENTATION = '''
+---
+module: iptables
+short_description: Modify the system's iptables
+requirements: []
+version_added: "2.0"
+author: Linus Unnebäck (@LinusU) <linus@folkdatorn.se>
+description:
+ - Iptables is used to set up, maintain, and inspect the tables of IP packet
+ filter rules in the Linux kernel. This module does not handle the saving
+ and/or loading of rules, but rather only manipulates the current rules
+ that are present in memory. This is the same as the behaviour of the
+ "iptables" and "ip6tables" command which this module uses internally.
+notes:
+ - This module just deals with individual rules. If you need advanced
+ chaining of rules the recommended way is to template the iptables restore
+ file.
+options:
+ table:
+ description:
+ - This option specifies the packet matching table which the command
+ should operate on. If the kernel is configured with automatic module
+ loading, an attempt will be made to load the appropriate module for
+ that table if it is not already there.
+ required: false
+ default: filter
+ choices: [ "filter", "nat", "mangle", "raw", "security" ]
+ state:
+ description:
+ - Whether the rule should be absent or present.
+ required: false
+ default: present
+ choices: [ "present", "absent" ]
+ action:
+ version_added: "2.2"
+ description:
+ - Whether the rule should be appended at the bottom or inserted at the
+ top. If the rule already exists the chain won't be modified.
+ required: false
+ default: append
+ choices: [ "append", "insert" ]
+ ip_version:
+ description:
+ - Which version of the IP protocol this rule should apply to.
+ required: false
+ default: ipv4
+ choices: [ "ipv4", "ipv6" ]
+ chain:
+ description:
+ - "Chain to operate on. This option can either be the name of a user
+ defined chain or any of the builtin chains: 'INPUT', 'FORWARD',
+ 'OUTPUT', 'PREROUTING', 'POSTROUTING', 'SECMARK', 'CONNSECMARK'."
+ required: false
+ protocol:
+ description:
+ - The protocol of the rule or of the packet to check. The specified
+ protocol can be one of tcp, udp, udplite, icmp, esp, ah, sctp or the
+ special keyword "all", or it can be a numeric value, representing one
+ of these protocols or a different one. A protocol name from
+ /etc/protocols is also allowed. A "!" argument before the protocol
+ inverts the test. The number zero is equivalent to all. "all" will
+ match with all protocols and is taken as default when this option is
+ omitted.
+ required: false
+ default: null
+ source:
+ description:
+ - Source specification. Address can be either a network name,
+ a hostname, a network IP address (with /mask), or a plain IP address.
+ Hostnames will be resolved once only, before the rule is submitted to
+ the kernel. Please note that specifying any name to be resolved with
+ a remote query such as DNS is a really bad idea. The mask can be
+ either a network mask or a plain number, specifying the number of 1's
+ at the left side of the network mask. Thus, a mask of 24 is equivalent
+ to 255.255.255.0. A "!" argument before the address specification
+ inverts the sense of the address.
+ required: false
+ default: null
+ destination:
+ description:
+ - Destination specification. Address can be either a network name,
+ a hostname, a network IP address (with /mask), or a plain IP address.
+ Hostnames will be resolved once only, before the rule is submitted to
+ the kernel. Please note that specifying any name to be resolved with
+ a remote query such as DNS is a really bad idea. The mask can be
+ either a network mask or a plain number, specifying the number of 1's
+ at the left side of the network mask. Thus, a mask of 24 is equivalent
+ to 255.255.255.0. A "!" argument before the address specification
+ inverts the sense of the address.
+ required: false
+ default: null
+ match:
+ description:
+ - Specifies a match to use, that is, an extension module that tests for
+ a specific property. The set of matches make up the condition under
+ which a target is invoked. Matches are evaluated first to last if
+ specified as an array and work in short-circuit fashion, i.e. if one
+ extension yields false, evaluation will stop.
+ required: false
+ default: []
+ jump:
+ description:
+ - This specifies the target of the rule; i.e., what to do if the packet
+ matches it. The target can be a user-defined chain (other than the one
+ this rule is in), one of the special builtin targets which decide the
+ fate of the packet immediately, or an extension (see EXTENSIONS
+ below). If this option is omitted in a rule (and the goto parameter
+ is not used), then matching the rule will have no effect on the
+ packet's fate, but the counters on the rule will be incremented.
+ required: false
+ default: null
+ goto:
+ description:
+ - This specifies that the processing should continue in a user specified
+ chain. Unlike the jump argument return will not continue processing in
+ this chain but instead in the chain that called us via jump.
+ required: false
+ default: null
+ in_interface:
+ description:
+ - Name of an interface via which a packet was received (only for packets
+ entering the INPUT, FORWARD and PREROUTING chains). When the "!"
+ argument is used before the interface name, the sense is inverted. If
+ the interface name ends in a "+", then any interface which begins with
+ this name will match. If this option is omitted, any interface name
+ will match.
+ required: false
+ default: null
+ out_interface:
+ description:
+ - Name of an interface via which a packet is going to be sent (for
+ packets entering the FORWARD, OUTPUT and POSTROUTING chains). When the
+ "!" argument is used before the interface name, the sense is inverted.
+ If the interface name ends in a "+", then any interface which begins
+ with this name will match. If this option is omitted, any interface
+ name will match.
+ required: false
+ default: null
+ fragment:
+ description:
+ - This means that the rule only refers to second and further fragments
+ of fragmented packets. Since there is no way to tell the source or
+ destination ports of such a packet (or ICMP type), such a packet will
+ not match any rules which specify them. When the "!" argument precedes
+ fragment argument, the rule will only match head fragments, or
+ unfragmented packets.
+ required: false
+ default: null
+ set_counters:
+ description:
+ - This enables the administrator to initialize the packet and byte
+ counters of a rule (during INSERT, APPEND, REPLACE operations).
+ required: false
+ default: null
+ source_port:
+ description:
+ - "Source port or port range specification. This can either be a service
+ name or a port number. An inclusive range can also be specified, using
+ the format first:last. If the first port is omitted, '0' is assumed;
+ if the last is omitted, '65535' is assumed. If the first port is
+ greater than the second one they will be swapped."
+ required: false
+ default: null
+ destination_port:
+ description:
+ - "Destination port or port range specification. This can either be
+ a service name or a port number. An inclusive range can also be
+ specified, using the format first:last. If the first port is omitted,
+ '0' is assumed; if the last is omitted, '65535' is assumed. If the
+ first port is greater than the second one they will be swapped."
+ required: false
+ default: null
+ to_ports:
+ description:
+ - "This specifies a destination port or range of ports to use: without
+ this, the destination port is never altered. This is only valid if the
+ rule also specifies one of the following protocols: tcp, udp, dccp or
+ sctp."
+ required: false
+ default: null
+ to_destination:
+ version_added: "2.1"
+ description:
+ - "This specifies a destination address to use with DNAT: without
+ this, the destination address is never altered."
+ required: false
+ default: null
+ to_source:
+ version_added: "2.2"
+ description:
+ - "This specifies a source address to use with SNAT: without
+ this, the source address is never altered."
+ required: false
+ default: null
+ set_dscp_mark:
+ version_added: "2.1"
+ description:
+ - "This allows specifying a DSCP mark to be added to packets.
+ It takes either an integer or hex value. Mutually exclusive with
+ C(set_dscp_mark_class)."
+ required: false
+ default: null
+ set_dscp_mark_class:
+ version_added: "2.1"
+ description:
+ - "This allows specifying a predefined DiffServ class which will be
+ translated to the corresponding DSCP mark. Mutually exclusive with
+ C(set_dscp_mark)."
+ required: false
+ default: null
+ comment:
+ description:
+ - "This specifies a comment that will be added to the rule"
+ required: false
+ default: null
+ ctstate:
+ description:
+ - "ctstate is a list of the connection states to match in the conntrack
+ module.
+ Possible states are: 'INVALID', 'NEW', 'ESTABLISHED', 'RELATED',
+ 'UNTRACKED', 'SNAT', 'DNAT'"
+ required: false
+ default: []
+ limit:
+ description:
+ - "Specifies the maximum average number of matches to allow per second.
+ The number can specify units explicitly, using `/second', `/minute',
+ `/hour' or `/day', or parts of them (so `5/second' is the same as
+ `5/s')."
+ required: false
+ default: null
+ limit_burst:
+ version_added: "2.1"
+ description:
+ - "Specifies the maximum burst before the above limit kicks in."
+ required: false
+ default: null
+ uid_owner:
+ version_added: "2.1"
+ description:
+ - "Specifies the UID or username to use in match by owner rule."
+ required: false
+ reject_with:
+ version_added: "2.1"
+ description:
+ - "Specifies the error packet type to return while rejecting."
+ required: false
+ icmp_type:
+ version_added: "2.2"
+ description:
+ - "This allows specification of the ICMP type, which can be a numeric
+ ICMP type, type/code pair, or one of the ICMP type names shown by the
+ command 'iptables -p icmp -h'"
+ required: false
+ flush:
+ version_added: "2.2"
+ description:
+ - "Flushes the specified table and chain of all rules. If no chain is
+ specified then the entire table is purged. Ignores all other
+ parameters."
+ required: false
+ policy:
+ version_added: "2.2"
+ description:
+ - "Set the policy for the chain to the given target. Valid targets are
+ ACCEPT, DROP, QUEUE, RETURN. Only built in chains can have policies.
+ This parameter requires the chain parameter. Ignores all other
+ parameters."
+'''
+
+EXAMPLES = '''
+# Block specific IP
+- iptables: chain=INPUT source=8.8.8.8 jump=DROP
+ become: yes
+
+# Forward port 80 to 8600
+- iptables: table=nat chain=PREROUTING in_interface=eth0 protocol=tcp match=tcp destination_port=80 jump=REDIRECT to_ports=8600 comment="Redirect web traffic to port 8600"
+ become: yes
+
+# Allow related and established connections
+- iptables: chain=INPUT ctstate=ESTABLISHED,RELATED jump=ACCEPT
+ become: yes
+
+# Tag all outbound tcp packets with DSCP mark 8
+- iptables: chain=OUTPUT jump=DSCP table=mangle set_dscp_mark=8 protocol=tcp
+
+# Tag all outbound tcp packets with DSCP DiffServ class CS1
+- iptables: chain=OUTPUT jump=DSCP table=mangle set_dscp_mark_class=CS1 protocol=tcp
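+
+# Rate-limit new inbound SSH connections (a hedged sketch combining the ctstate,
+# limit and limit_burst options documented above; the numbers are illustrative)
+- iptables: chain=INPUT protocol=tcp match=tcp destination_port=22 ctstate=NEW limit=10/minute limit_burst=20 jump=ACCEPT
+ become: yes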
+'''
+
+
+def append_param(rule, param, flag, is_list):
+ if is_list:
+ for item in param:
+ append_param(rule, item, flag, False)
+ else:
+ if param is not None:
+ rule.extend([flag, param])
+
+
+def append_csv(rule, param, flag):
+ if param:
+ rule.extend([flag, ','.join(param)])
+
+
+def append_match(rule, param, match):
+ if param:
+ rule.extend(['-m', match])
+
+
+def append_jump(rule, param, jump):
+ if param:
+ rule.extend(['-j', jump])
+
+
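+# Translates module parameters into an iptables argument vector; for instance,
+# the "Block specific IP" example above becomes ['-s', '8.8.8.8', '-j', 'DROP'].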
+def construct_rule(params):
+ rule = []
+ append_param(rule, params['protocol'], '-p', False)
+ append_param(rule, params['source'], '-s', False)
+ append_param(rule, params['destination'], '-d', False)
+ append_param(rule, params['match'], '-m', True)
+ append_param(rule, params['jump'], '-j', False)
+ append_param(rule, params['to_destination'], '--to-destination', False)
+ append_param(rule, params['to_source'], '--to-source', False)
+ append_param(rule, params['goto'], '-g', False)
+ append_param(rule, params['in_interface'], '-i', False)
+ append_param(rule, params['out_interface'], '-o', False)
+ append_param(rule, params['fragment'], '-f', False)
+ append_param(rule, params['set_counters'], '-c', False)
+ append_param(rule, params['source_port'], '--source-port', False)
+ append_param(rule, params['destination_port'], '--destination-port', False)
+ append_param(rule, params['to_ports'], '--to-ports', False)
+ append_param(rule, params['set_dscp_mark'], '--set-dscp', False)
+ append_param(rule, params['set_dscp_mark_class'], '--set-dscp-class', False)
+ append_match(rule, params['comment'], 'comment')
+ append_param(rule, params['comment'], '--comment', False)
+ append_match(rule, params['ctstate'], 'state')
+ append_csv(rule, params['ctstate'], '--state')
+ append_match(rule, params['limit'] or params['limit_burst'], 'limit')
+ append_param(rule, params['limit'], '--limit', False)
+ append_param(rule, params['limit_burst'], '--limit-burst', False)
+ append_match(rule, params['uid_owner'], 'owner')
+ append_param(rule, params['uid_owner'], '--uid-owner', False)
+ append_jump(rule, params['reject_with'], 'REJECT')
+ append_param(rule, params['reject_with'], '--reject-with', False)
+ append_param(rule, params['icmp_type'], '--icmp-type', False)
+ return rule
+
+
+def push_arguments(iptables_path, action, params, make_rule=True):
+ cmd = [iptables_path]
+ cmd.extend(['-t', params['table']])
+ cmd.extend([action, params['chain']])
+ if make_rule:
+ cmd.extend(construct_rule(params))
+ return cmd
+
+
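+# 'iptables -C' exits 0 when an exact matching rule already exists in the
+# chain and non-zero otherwise, which is what makes this module idempotent.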
+def check_present(iptables_path, module, params):
+ cmd = push_arguments(iptables_path, '-C', params)
+ rc, _, __ = module.run_command(cmd, check_rc=False)
+ return (rc == 0)
+
+
+def append_rule(iptables_path, module, params):
+ cmd = push_arguments(iptables_path, '-A', params)
+ module.run_command(cmd, check_rc=True)
+
+
+def insert_rule(iptables_path, module, params):
+ cmd = push_arguments(iptables_path, '-I', params)
+ module.run_command(cmd, check_rc=True)
+
+
+def remove_rule(iptables_path, module, params):
+ cmd = push_arguments(iptables_path, '-D', params)
+ module.run_command(cmd, check_rc=True)
+
+
+def flush_table(iptables_path, module, params):
+ cmd = push_arguments(iptables_path, '-F', params, make_rule=False)
+ module.run_command(cmd, check_rc=True)
+
+
+def set_chain_policy(iptables_path, module, params):
+ cmd = push_arguments(iptables_path, '-P', params, make_rule=False)
+ cmd.append(params['policy'])
+ module.run_command(cmd, check_rc=True)
+
+
+def main():
+ module = AnsibleModule(
+ supports_check_mode=True,
+ argument_spec=dict(
+ table=dict(
+ required=False,
+ default='filter',
+ choices=['filter', 'nat', 'mangle', 'raw', 'security']),
+ state=dict(
+ required=False,
+ default='present',
+ choices=['present', 'absent']),
+ action=dict(
+ required=False,
+ default='append',
+ type='str',
+ choices=['append', 'insert']),
+ ip_version=dict(
+ required=False,
+ default='ipv4',
+ choices=['ipv4', 'ipv6']),
+ chain=dict(required=False, default=None, type='str'),
+ protocol=dict(required=False, default=None, type='str'),
+ source=dict(required=False, default=None, type='str'),
+ to_source=dict(required=False, default=None, type='str'),
+ destination=dict(required=False, default=None, type='str'),
+ to_destination=dict(required=False, default=None, type='str'),
+ match=dict(required=False, default=[], type='list'),
+ jump=dict(required=False, default=None, type='str'),
+ goto=dict(required=False, default=None, type='str'),
+ in_interface=dict(required=False, default=None, type='str'),
+ out_interface=dict(required=False, default=None, type='str'),
+ fragment=dict(required=False, default=None, type='str'),
+ set_counters=dict(required=False, default=None, type='str'),
+ source_port=dict(required=False, default=None, type='str'),
+ destination_port=dict(required=False, default=None, type='str'),
+ to_ports=dict(required=False, default=None, type='str'),
+ set_dscp_mark=dict(required=False, default=None, type='str'),
+ set_dscp_mark_class=dict(required=False, default=None, type='str'),
+ comment=dict(required=False, default=None, type='str'),
+ ctstate=dict(required=False, default=[], type='list'),
+ limit=dict(required=False, default=None, type='str'),
+ limit_burst=dict(required=False, default=None, type='str'),
+ uid_owner=dict(required=False, default=None, type='str'),
+ reject_with=dict(required=False, default=None, type='str'),
+ icmp_type=dict(required=False, default=None, type='str'),
+ flush=dict(required=False, default=False, type='bool'),
+ policy=dict(
+ required=False,
+ default=None,
+ type='str',
+ choices=['ACCEPT', 'DROP', 'QUEUE', 'RETURN']),
+ ),
+ mutually_exclusive=(
+ ['set_dscp_mark', 'set_dscp_mark_class'],
+ ['flush', 'policy'],
+ ),
+ )
+ args = dict(
+ changed=False,
+ failed=False,
+ ip_version=module.params['ip_version'],
+ table=module.params['table'],
+ chain=module.params['chain'],
+ flush=module.params['flush'],
+ rule=' '.join(construct_rule(module.params)),
+ state=module.params['state'],
+ )
+
+ ip_version = module.params['ip_version']
+ iptables_path = module.get_bin_path(BINS[ip_version], True)
+
+ # Check if chain option is required
+ if args['flush'] is False and args['chain'] is None:
+ module.fail_json(
+ msg="Either chain or flush parameter must be specified.")
+
+ # Flush the table
+ if args['flush'] is True:
+ flush_table(iptables_path, module, module.params)
+ module.exit_json(**args)
+
+ # Set the policy
+ if module.params['policy']:
+ set_chain_policy(iptables_path, module, module.params)
+ module.exit_json(**args)
+
+ insert = (module.params['action'] == 'insert')
+ rule_is_present = check_present(iptables_path, module, module.params)
+ should_be_present = (args['state'] == 'present')
+
+ # Check if target is up to date
+ args['changed'] = (rule_is_present != should_be_present)
+
+ # Check only; don't modify
+ if module.check_mode:
+ module.exit_json(changed=args['changed'])
+
+ # Target is already up to date
+ if args['changed'] is False:
+ module.exit_json(**args)
+
+ if should_be_present:
+ if insert:
+ insert_rule(iptables_path, module, module.params)
+ else:
+ append_rule(iptables_path, module, module.params)
+ else:
+ remove_rule(iptables_path, module, module.params)
+
+ module.exit_json(**args)
+
+# import module snippets
+from ansible.module_utils.basic import *
+
+if __name__ == '__main__':
+ main()
diff --git a/lib/ansible/modules/extras/system/kernel_blacklist.py b/lib/ansible/modules/extras/system/kernel_blacklist.py
new file mode 100644
index 0000000000..296a082a2e
--- /dev/null
+++ b/lib/ansible/modules/extras/system/kernel_blacklist.py
@@ -0,0 +1,141 @@
+#!/usr/bin/python
+# -*- coding: utf-8 -*-
+
+# (c) 2013, Matthias Vogelgesang <matthias.vogelgesang@gmail.com>
+#
+# This file is part of Ansible
+#
+# Ansible is free software: you can redistribute it and/or modify
+# it under the terms of the GNU General Public License as published by
+# the Free Software Foundation, either version 3 of the License, or
+# (at your option) any later version.
+#
+# Ansible is distributed in the hope that it will be useful,
+# but WITHOUT ANY WARRANTY; without even the implied warranty of
+# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+# GNU General Public License for more details.
+#
+# You should have received a copy of the GNU General Public License
+# along with Ansible. If not, see <http://www.gnu.org/licenses/>.
+
+import os
+import re
+
+
+DOCUMENTATION = '''
+---
+module: kernel_blacklist
+author: "Matthias Vogelgesang (@matze)"
+version_added: 1.4
+short_description: Blacklist kernel modules
+description:
+ - Add or remove kernel modules from blacklist.
+options:
+ name:
+ required: true
+ description:
+ - Name of kernel module to black- or whitelist.
+ state:
+ required: false
+ default: "present"
+ choices: [ present, absent ]
+ description:
+ - Whether the module should be present in the blacklist or absent.
+ blacklist_file:
+ required: false
+ description:
+ - If specified, use this blacklist file instead of
+ C(/etc/modprobe.d/blacklist-ansible.conf).
+ default: null
+requirements: []
+'''
+
+EXAMPLES = '''
+# Blacklist the nouveau driver module
+- kernel_blacklist: name=nouveau state=present
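+
+# Blacklist into a custom file (the path shown is only illustrative)
+- kernel_blacklist: name=pcspkr state=present blacklist_file=/etc/modprobe.d/site-blacklist.conf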
+'''
+
+
+class Blacklist(object):
+ def __init__(self, module_name, filename):
+ if not os.path.exists(filename):
+ open(filename, 'a').close()
+
+ self.filename = filename
+ self.module_name = module_name
+
+ def get_pattern(self):
+ return r'^blacklist\s+' + self.module_name + '$'
+
+ def readlines(self):
+ f = open(self.filename, 'r')
+ lines = f.readlines()
+ f.close()
+ return lines
+
+ def module_listed(self):
+ lines = self.readlines()
+ pattern = self.get_pattern()
+
+ for line in lines:
+ stripped = line.strip()
+ if stripped.startswith('#'):
+ continue
+
+ if re.match(pattern, stripped):
+ return True
+
+ return False
+
+ def remove_module(self):
+ lines = self.readlines()
+ pattern = self.get_pattern()
+
+ f = open(self.filename, 'w')
+
+ for line in lines:
+ if not re.match(pattern, line.strip()):
+ f.write(line)
+
+ f.close()
+
+ def add_module(self):
+ f = open(self.filename, 'a')
+ f.write('blacklist %s\n' % self.module_name)
+ f.close()
+
+
+def main():
+ module = AnsibleModule(
+ argument_spec=dict(
+ name=dict(required=True),
+ state=dict(required=False, choices=['present', 'absent'],
+ default='present'),
+ blacklist_file=dict(required=False, default=None)
+ ),
+ supports_check_mode=False,
+ )
+
+ args = dict(changed=False, failed=False,
+ name=module.params['name'], state=module.params['state'])
+
+ filename = '/etc/modprobe.d/blacklist-ansible.conf'
+
+ if module.params['blacklist_file']:
+ filename = module.params['blacklist_file']
+
+ blacklist = Blacklist(args['name'], filename)
+
+ if blacklist.module_listed():
+ if args['state'] == 'absent':
+ blacklist.remove_module()
+ args['changed'] = True
+ else:
+ if args['state'] == 'present':
+ blacklist.add_module()
+ args['changed'] = True
+
+ module.exit_json(**args)
+
+# import module snippets
+from ansible.module_utils.basic import *
+main()
diff --git a/lib/ansible/modules/extras/system/known_hosts.py b/lib/ansible/modules/extras/system/known_hosts.py
new file mode 100644
index 0000000000..0c9f24f4c2
--- /dev/null
+++ b/lib/ansible/modules/extras/system/known_hosts.py
@@ -0,0 +1,283 @@
+#!/usr/bin/python
+
+"""
+Ansible module to manage the ssh known_hosts file.
+Copyright(c) 2014, Matthew Vernon <mcv21@cam.ac.uk>
+
+This module is free software: you can redistribute it and/or modify
+it under the terms of the GNU General Public License as published by
+the Free Software Foundation, either version 3 of the License, or
+(at your option) any later version.
+
+This module is distributed in the hope that it will be useful,
+but WITHOUT ANY WARRANTY; without even the implied warranty of
+MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+GNU General Public License for more details.
+
+You should have received a copy of the GNU General Public License
+along with this module. If not, see <http://www.gnu.org/licenses/>.
+"""
+
+DOCUMENTATION = '''
+---
+module: known_hosts
+short_description: Add or remove a host from the C(known_hosts) file
+description:
+ - The M(known_hosts) module lets you add or remove host keys from the C(known_hosts) file.
+ - Starting at Ansible 2.2, multiple entries per host are allowed, but only one for each key type supported by ssh.
+ This is useful if you're going to want to use the M(git) module over ssh, for example.
+ - If you have a very large number of host keys to manage, you will find the M(template) module more useful.
+version_added: "1.9"
+options:
+ name:
+ aliases: [ 'host' ]
+ description:
+ - The host to add or remove (must match a host specified in key)
+ required: true
+ default: null
+ key:
+ description:
+ - The SSH public host key, as a string (required if state=present, optional when state=absent, in which case all keys for the host are removed). The key must be in the right format for ssh (see sshd(1), section "SSH_KNOWN_HOSTS FILE FORMAT")
+ required: false
+ default: null
+ path:
+ description:
+ - The known_hosts file to edit
+ required: no
+ default: "(homedir)+/.ssh/known_hosts"
+ state:
+ description:
+ - I(present) to add the host key, I(absent) to remove it.
+ choices: [ "present", "absent" ]
+ required: no
+ default: present
+requirements: [ ]
+author: "Matthew Vernon (@mcv21)"
+'''
+
+EXAMPLES = '''
+# Example using a file lookup to set the system known_hosts file
+- name: tell the host about our servers it might want to ssh to
+ known_hosts: path='/etc/ssh/ssh_known_hosts'
+ name='foo.com.invalid'
+ key="{{ lookup('file', 'pubkeys/foo.com.invalid') }}"
+'''
+
+# Makes sure public host keys are present or absent in the given known_hosts
+# file.
+#
+# Arguments
+# =========
+# name = hostname whose key should be added (alias: host)
+# key = line(s) to add to known_hosts file
+# path = the known_hosts file to edit (default: ~/.ssh/known_hosts)
+# state = absent|present (default: present)
+
+import os
+import os.path
+import tempfile
+import errno
+import re
+from ansible.module_utils.pycompat24 import get_exception
+from ansible.module_utils.basic import *
+
+def enforce_state(module, params):
+ """
+ Add or remove key.
+ """
+
+ host = params["name"]
+ key = params.get("key",None)
+ port = params.get("port",None)
+ path = params.get("path")
+ state = params.get("state")
+ #Find the ssh-keygen binary
+ sshkeygen = module.get_bin_path("ssh-keygen",True)
+
+ # Trailing newline in files gets lost, so re-add if necessary
+ if key and key[-1] != '\n':
+ key+='\n'
+
+ if key is None and state != "absent":
+ module.fail_json(msg="No key specified when adding a host")
+
+ sanity_check(module,host,key,sshkeygen)
+
+ found,replace_or_add,found_line=search_for_host_key(module,host,key,path,sshkeygen)
+
+ #We will change state if found==True & state!="present"
+ #or found==False & state=="present"
+ #i.e found XOR (state=="present")
+ #Alternatively, if replace is true (i.e. key present, and we must change it)
+ if module.check_mode:
+ module.exit_json(changed = replace_or_add or (state=="present") != found)
+
+ #Now do the work.
+
+ #Only remove whole host if found and no key provided
+ if found and key is None and state=="absent":
+ module.run_command([sshkeygen,'-R',host,'-f',path], check_rc=True)
+ params['changed'] = True
+
+ #Next, add a new (or replacing) entry
+ if replace_or_add or found != (state=="present"):
+ try:
+ inf=open(path,"r")
+ except IOError:
+ e = get_exception()
+ if e.errno == errno.ENOENT:
+ inf=None
+ else:
+ module.fail_json(msg="Failed to read %s: %s" % \
+ (path,str(e)))
+ try:
+ outf=tempfile.NamedTemporaryFile(dir=os.path.dirname(path))
+ if inf is not None:
+ for line_number, line in enumerate(inf, start=1):
+ if found_line==line_number and (replace_or_add or state=='absent'):
+ continue # skip this line to replace its key
+ outf.write(line)
+ inf.close()
+ if state == 'present':
+ outf.write(key)
+ outf.flush()
+ module.atomic_move(outf.name,path)
+ except (IOError,OSError):
+ e = get_exception()
+ module.fail_json(msg="Failed to write to file %s: %s" % \
+ (path,str(e)))
+
+ try:
+ outf.close()
+ except:
+ pass
+
+ params['changed'] = True
+
+ return params
+
+def sanity_check(module,host,key,sshkeygen):
+ '''Check supplied key is sensible
+
+ host and key are parameters provided by the user; If the host
+ provided is inconsistent with the key supplied, then this function
+ quits, providing an error to the user.
+ sshkeygen is the path to ssh-keygen, found earlier with get_bin_path
+ '''
+ #If no key supplied, we're doing a removal, and have nothing to check here.
+ if key is None:
+ return
+ #Rather than parsing the key ourselves, get ssh-keygen to do it
+ #(this is essential for hashed keys, but otherwise useful, as the
+ #key question is whether ssh-keygen thinks the key matches the host).
+
+ #The approach is to write the key to a temporary file,
+ #and then attempt to look up the specified host in that file.
+ try:
+ outf=tempfile.NamedTemporaryFile()
+ outf.write(key)
+ outf.flush()
+ except IOError:
+ e = get_exception()
+ module.fail_json(msg="Failed to write to temporary file %s: %s" % \
+ (outf.name,str(e)))
+ rc,stdout,stderr=module.run_command([sshkeygen,'-F',host,
+ '-f',outf.name],
+ check_rc=True)
+ try:
+ outf.close()
+ except:
+ pass
+
+ if stdout=='': #host not found
+ module.fail_json(msg="Host parameter does not match hashed host field in supplied key")
+
+def search_for_host_key(module,host,key,path,sshkeygen):
+ '''search_for_host_key(module,host,key,path,sshkeygen) -> (found,replace_or_add,found_line)
+
+ Looks up host and keytype in the known_hosts file path; if it's there, looks to see
+ if one of those entries matches key. Returns:
+ found (Boolean): is host found in path?
+ replace_or_add (Boolean): is the key in path different to that supplied by user?
+ found_line (int or None): the line where a key of the same type was found
+ if found=False, then replace is always False.
+ sshkeygen is the path to ssh-keygen, found earlier with get_bin_path
+ '''
+ if not os.path.exists(path):
+ return False, False, None
+ #openssh >=6.4 has changed ssh-keygen behaviour such that it returns
+ #1 if no host is found, whereas previously it returned 0
+ rc,stdout,stderr=module.run_command([sshkeygen,'-F',host,'-f',path],
+ check_rc=False)
+ if stdout=='' and stderr=='' and (rc==0 or rc==1):
+ return False, False, None #host not found, no other errors
+ if rc!=0: #something went wrong
+ module.fail_json(msg="ssh-keygen failed (rc=%d,stdout='%s',stderr='%s')" % (rc,stdout,stderr))
+
+ #If user supplied no key, we don't want to try and replace anything with it
+ if key is None:
+ return True, False, None
+
+ lines=stdout.split('\n')
+ new_key = normalize_known_hosts_key(key, host)
+
+ for l in lines:
+ if l=='':
+ continue
+ elif l[0]=='#': # info output from ssh-keygen; contains the line number where key was found
+ try:
+ # This output format has been hardcoded in ssh-keygen since at least OpenSSH 4.0
+ # It always outputs the non-localized comment before the found key
+ found_line = int(re.search(r'found: line (\d+)', l).group(1))
+ except AttributeError:
+ # re.search() returned None: the comment carried no line number
+ module.fail_json(msg="failed to parse output of ssh-keygen for line number: '%s'" % l)
+ else:
+ found_key = normalize_known_hosts_key(l,host)
+ if new_key==found_key: #found a match
+ return True, False, found_line #found exactly the same key, don't replace
+ elif new_key['type'] == found_key['type']: # found a different key for the same key type
+ return True, True, found_line
+ #No match found, return found and replace, but no line
+ return True, True, None
+
+def normalize_known_hosts_key(key, host):
+ '''
+ Transform a key, either taken from a known_host file or provided by the
+ user, into a normalized form.
+ The host part (which might include multiple hostnames or be hashed) gets
+ replaced by the provided host. Also, any spurious information gets removed
+ from the end (like the username@host tag usually present in hostkeys, but
+ absent in known_hosts files)
+ '''
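+ # e.g. (illustrative) '@cert-authority *.example.org ssh-rsa AAAB3Nza... comment'
+ # -> {'options': '@cert-authority', 'host': host, 'type': 'ssh-rsa', 'key': 'AAAB3Nza...'}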
+ k = key.strip() # trim trailing newline
+ k = k.split()
+ d = dict()
+ #The optional "marker" field, used for @cert-authority or @revoked
+ if k[0][0] == '@':
+ d['options'] = k[0]
+ d['host']=host
+ d['type']=k[2]
+ d['key']=k[3]
+ else:
+ d['host']=host
+ d['type']=k[1]
+ d['key']=k[2]
+ return d
+
+def main():
+
+ module = AnsibleModule(
+ argument_spec = dict(
+ name = dict(required=True, type='str', aliases=['host']),
+ key = dict(required=False, type='str'),
+ path = dict(default="~/.ssh/known_hosts", type='path'),
+ state = dict(default='present', choices=['absent','present']),
+ ),
+ supports_check_mode = True
+ )
+
+ results = enforce_state(module,module.params)
+ module.exit_json(**results)
+
+main()
diff --git a/lib/ansible/modules/extras/system/locale_gen.py b/lib/ansible/modules/extras/system/locale_gen.py
new file mode 100644
index 0000000000..9aa732f57c
--- /dev/null
+++ b/lib/ansible/modules/extras/system/locale_gen.py
@@ -0,0 +1,239 @@
+#!/usr/bin/python
+# -*- coding: utf-8 -*-
+#
+# This file is part of Ansible
+#
+# Ansible is free software: you can redistribute it and/or modify
+# it under the terms of the GNU General Public License as published by
+# the Free Software Foundation, either version 3 of the License, or
+# (at your option) any later version.
+#
+# Ansible is distributed in the hope that it will be useful,
+# but WITHOUT ANY WARRANTY; without even the implied warranty of
+# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+# GNU General Public License for more details.
+#
+# You should have received a copy of the GNU General Public License
+# along with Ansible. If not, see <http://www.gnu.org/licenses/>.
+
+DOCUMENTATION = '''
+---
+module: locale_gen
+short_description: Creates or removes locales.
+description:
+ - Manages locales by editing /etc/locale.gen and invoking locale-gen.
+version_added: "1.6"
+author: "Augustus Kling (@AugustusKling)"
+options:
+ name:
+ description:
+ - Name and encoding of the locale, such as "en_GB.UTF-8".
+ required: true
+ default: null
+ aliases: []
+ state:
+ description:
+ - Whether the locale shall be present.
+ required: false
+ choices: ["present", "absent"]
+ default: "present"
+'''
+
+EXAMPLES = '''
+# Ensure a locale exists.
+- locale_gen: name=de_CH.UTF-8 state=present
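+
+# Remove the locale again; on non-Ubuntu systems this comments the entry
+# out in /etc/locale.gen and re-runs locale-gen.
+- locale_gen: name=de_CH.UTF-8 state=absent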
+'''
+
+import os
+import os.path
+from subprocess import Popen, PIPE, call
+import re
+
+from ansible.module_utils.basic import *
+from ansible.module_utils.pycompat24 import get_exception
+
+LOCALE_NORMALIZATION = {
+ ".utf8": ".UTF-8",
+ ".eucjp": ".EUC-JP",
+ ".iso885915": ".ISO-8859-15",
+ ".cp1251": ".CP1251",
+ ".koi8r": ".KOI8-R",
+ ".armscii8": ".ARMSCII-8",
+ ".euckr": ".EUC-KR",
+ ".gbk": ".GBK",
+ ".gb18030": ".GB18030",
+ ".euctw": ".EUC-TW",
+}
+
+# ===========================================
+# location module specific support methods.
+#
+
+def is_available(name, ubuntuMode):
+ """Check if the given locale is available on the system. This is done by
+ checking either :
+ * if the locale is present in /etc/locale.gen
+ * or if the locale is present in /usr/share/i18n/SUPPORTED"""
+ if ubuntuMode:
+ __regexp = r'^(?P<locale>\S+_\S+) (?P<charset>\S+)\s*$'
+ __locales_available = '/usr/share/i18n/SUPPORTED'
+ else:
+ __regexp = r'^#{0,1}\s*(?P<locale>\S+_\S+) (?P<charset>\S+)\s*$'
+ __locales_available = '/etc/locale.gen'
+
+ re_compiled = re.compile(__regexp)
+ fd = open(__locales_available, 'r')
+ for line in fd:
+ result = re_compiled.match(line)
+ if result and result.group('locale') == name:
+ fd.close()
+ return True
+ fd.close()
+ return False
+
+def is_present(name):
+ """Checks if the given locale is currently installed."""
+ output = Popen(["locale", "-a"], stdout=PIPE).communicate()[0]
+ return any(fix_case(name) == fix_case(line) for line in output.splitlines())
+
+def fix_case(name):
+ """locale -a might return the encoding in either lower or upper case.
+ Passing through this function makes them uniform for comparisons."""
+ for s, r in LOCALE_NORMALIZATION.iteritems():
+ name = name.replace(s, r)
+ return name
+
+def replace_line(existing_line, new_line):
+ """Replaces lines in /etc/locale.gen"""
+ try:
+ f = open("/etc/locale.gen", "r")
+ lines = [line.replace(existing_line, new_line) for line in f]
+ finally:
+ f.close()
+ try:
+ f = open("/etc/locale.gen", "w")
+ f.write("".join(lines))
+ finally:
+ f.close()
+
+def set_locale(name, enabled=True):
+ """ Sets the state of the locale. Defaults to enabled. """
+ search_string = r'#{0,1}\s*%s (?P<charset>.+)' % name
+ if enabled:
+ new_string = r'%s \g<charset>' % (name)
+ else:
+ new_string = r'# %s \g<charset>' % (name)
+ try:
+ f = open("/etc/locale.gen", "r")
+ lines = [re.sub(search_string, new_string, line) for line in f]
+ finally:
+ f.close()
+ try:
+ f = open("/etc/locale.gen", "w")
+ f.write("".join(lines))
+ finally:
+ f.close()
+
+def apply_change(targetState, name):
+ """Create or remove locale.
+
+ Keyword arguments:
+ targetState -- Desired state, either present or absent.
+ name -- Name including encoding such as de_CH.UTF-8.
+ """
+ if targetState=="present":
+ # Create locale.
+ set_locale(name, enabled=True)
+ else:
+ # Delete locale.
+ set_locale(name, enabled=False)
+
+ localeGenExitValue = call("locale-gen")
+ if localeGenExitValue!=0:
+ raise EnvironmentError(localeGenExitValue, "locale-gen failed to execute, it returned "+str(localeGenExitValue))
+
+def apply_change_ubuntu(targetState, name):
+ """Create or remove locale.
+
+ Keyword arguments:
+ targetState -- Desired state, either present or absent.
+ name -- Name including encoding such as de_CH.UTF-8.
+ """
+ if targetState=="present":
+ # Create locale.
+ # Ubuntu's patched locale-gen automatically adds the new locale to /var/lib/locales/supported.d/local
+ localeGenExitValue = call(["locale-gen", name])
+ else:
+ # Delete locale involves discarding the locale from /var/lib/locales/supported.d/local and regenerating all locales.
+ try:
+ f = open("/var/lib/locales/supported.d/local", "r")
+ content = f.readlines()
+ finally:
+ f.close()
+ try:
+ f = open("/var/lib/locales/supported.d/local", "w")
+ for line in content:
+ locale, charset = line.split(' ')
+ if locale != name:
+ f.write(line)
+ finally:
+ f.close()
+ # Purge locales and regenerate.
+ # Please provide a patch if you know how to avoid regenerating the locales to keep!
+ localeGenExitValue = call(["locale-gen", "--purge"])
+
+ if localeGenExitValue!=0:
+ raise EnvironmentError(localeGenExitValue, "locale-gen failed to execute, it returned "+str(localeGenExitValue))
+
+# ==============================================================
+# main
+
+def main():
+
+ module = AnsibleModule(
+ argument_spec = dict(
+ name = dict(required=True),
+ state = dict(choices=['present','absent'], default='present'),
+ ),
+ supports_check_mode=True
+ )
+
+ name = module.params['name']
+ state = module.params['state']
+
+ if not os.path.exists("/etc/locale.gen"):
+ if os.path.exists("/var/lib/locales/supported.d/"):
+ # Ubuntu created its own system to manage locales.
+ ubuntuMode = True
+ else:
+ module.fail_json(msg="/etc/locale.gen and /var/lib/locales/supported.d/local are missing. Is the package \"locales\" installed?")
+ else:
+ # We found the common way to manage locales.
+ ubuntuMode = False
+
+ if not is_available(name, ubuntuMode):
+ module.fail_json(msg="The locales you've entered is not available "
+ "on your system.")
+
+ if is_present(name):
+ prev_state = "present"
+ else:
+ prev_state = "absent"
+ changed = (prev_state!=state)
+
+ if module.check_mode:
+ module.exit_json(changed=changed)
+ else:
+ if changed:
+ try:
+ if not ubuntuMode:
+ apply_change(state, name)
+ else:
+ apply_change_ubuntu(state, name)
+ except EnvironmentError:
+ e = get_exception()
+ module.fail_json(msg=e.strerror, exitValue=e.errno)
+
+ module.exit_json(name=name, changed=changed, msg="OK")
+
+
+main()
diff --git a/lib/ansible/modules/extras/system/lvg.py b/lib/ansible/modules/extras/system/lvg.py
new file mode 100644
index 0000000000..d22f3750b7
--- /dev/null
+++ b/lib/ansible/modules/extras/system/lvg.py
@@ -0,0 +1,255 @@
+#!/usr/bin/python
+# -*- coding: utf-8 -*-
+
+# (c) 2013, Alexander Bulimov <lazywolf0@gmail.com>
+# based on lvol module by Jeroen Hoekx <jeroen.hoekx@dsquare.be>
+#
+# This file is part of Ansible
+#
+# Ansible is free software: you can redistribute it and/or modify
+# it under the terms of the GNU General Public License as published by
+# the Free Software Foundation, either version 3 of the License, or
+# (at your option) any later version.
+#
+# Ansible is distributed in the hope that it will be useful,
+# but WITHOUT ANY WARRANTY; without even the implied warranty of
+# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+# GNU General Public License for more details.
+#
+# You should have received a copy of the GNU General Public License
+# along with Ansible. If not, see <http://www.gnu.org/licenses/>.
+
+DOCUMENTATION = '''
+---
+author: "Alexander Bulimov (@abulimov)"
+module: lvg
+short_description: Configure LVM volume groups
+description:
+ - This module creates, removes or resizes volume groups.
+version_added: "1.1"
+options:
+ vg:
+ description:
+ - The name of the volume group.
+ required: true
+ pvs:
+ description:
+ - List of comma-separated devices to use as physical devices in this volume group. Required when creating or resizing volume group.
+ - The module will take care of running pvcreate if needed.
+ required: false
+ pesize:
+ description:
+ - The size of the physical extent in megabytes. Must be a power of 2.
+ default: 4
+ required: false
+ vg_options:
+ description:
+ - Additional options to pass to C(vgcreate) when creating the volume group.
+ default: null
+ required: false
+ version_added: "1.6"
+ state:
+ choices: [ "present", "absent" ]
+ default: present
+ description:
+ - Control if the volume group exists.
+ required: false
+ force:
+ choices: [ "yes", "no" ]
+ default: "no"
+ description:
+ - If yes, allows to remove volume group with logical volumes.
+ required: false
+notes:
+ - module does not modify PE size for already present volume group
+'''
+
+EXAMPLES = '''
+# Create a volume group on top of /dev/sda1 with physical extent size = 32MB.
+- lvg: vg=vg.services pvs=/dev/sda1 pesize=32
+
+# Create or resize a volume group on top of /dev/sdb1 and /dev/sdc5.
+# If, for example, we already have VG vg.services on top of /dev/sdb1,
+# this VG will be extended by /dev/sdc5. Or if vg.services was created on
+# top of /dev/sda5, we first extend it with /dev/sdb1 and /dev/sdc5,
+# and then reduce by /dev/sda5.
+- lvg: vg=vg.services pvs=/dev/sdb1,/dev/sdc5
+
+# Remove a volume group with name vg.services.
+- lvg: vg=vg.services state=absent
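+
+# Pass extra options through to vgcreate (the flag shown is only illustrative).
+- lvg: vg=vg.tagged pvs=/dev/sdd1 vg_options='--addtag backup'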
+'''
+
+def parse_vgs(data):
+ vgs = []
+ for line in data.splitlines():
+ parts = line.strip().split(';')
+ vgs.append({
+ 'name': parts[0],
+ 'pv_count': int(parts[1]),
+ 'lv_count': int(parts[2]),
+ })
+ return vgs
+
+def find_mapper_device_name(module, dm_device):
+ dmsetup_cmd = module.get_bin_path('dmsetup', True)
+ mapper_prefix = '/dev/mapper/'
+ rc, dm_name, err = module.run_command("%s info -C --noheadings -o name %s" % (dmsetup_cmd, dm_device))
+ if rc != 0:
+ module.fail_json(msg="Failed executing dmsetup command.", rc=rc, err=err)
+ mapper_device = mapper_prefix + dm_name.rstrip()
+ return mapper_device
+
+def parse_pvs(module, data):
+ pvs = []
+ dm_prefix = '/dev/dm-'
+ for line in data.splitlines():
+ parts = line.strip().split(';')
+ if parts[0].startswith(dm_prefix):
+ parts[0] = find_mapper_device_name(module, parts[0])
+ pvs.append({
+ 'name': parts[0],
+ 'vg_name': parts[1],
+ })
+ return pvs
+
+def main():
+ module = AnsibleModule(
+ argument_spec = dict(
+ vg=dict(required=True),
+ pvs=dict(type='list'),
+ pesize=dict(type='int', default=4),
+ vg_options=dict(default=''),
+ state=dict(choices=["absent", "present"], default='present'),
+ force=dict(type='bool', default=False),
+ ),
+ supports_check_mode=True,
+ )
+
+ vg = module.params['vg']
+ state = module.params['state']
+ force = module.boolean(module.params['force'])
+ pesize = module.params['pesize']
+ vgoptions = module.params['vg_options'].split()
+
+ dev_list = []
+ if module.params['pvs']:
+ dev_list = module.params['pvs']
+ elif state == 'present':
+ module.fail_json(msg="No physical volumes given.")
+
+ # LVM always uses real paths not symlinks so replace symlinks with actual path
+ for idx, dev in enumerate(dev_list):
+ dev_list[idx] = os.path.realpath(dev)
+
+ if state=='present':
+ ### check given devices
+ for test_dev in dev_list:
+ if not os.path.exists(test_dev):
+ module.fail_json(msg="Device %s not found."%test_dev)
+
+ ### get pv list
+ pvs_cmd = module.get_bin_path('pvs', True)
+ rc,current_pvs,err = module.run_command("%s --noheadings -o pv_name,vg_name --separator ';'" % pvs_cmd)
+ if rc != 0:
+ module.fail_json(msg="Failed executing pvs command.",rc=rc, err=err)
+
+ ### check pv for devices
+ pvs = parse_pvs(module, current_pvs)
+ used_pvs = [ pv for pv in pvs if pv['name'] in dev_list and pv['vg_name'] and pv['vg_name'] != vg ]
+ if used_pvs:
+ module.fail_json(msg="Device %s is already in %s volume group."%(used_pvs[0]['name'],used_pvs[0]['vg_name']))
+
+ vgs_cmd = module.get_bin_path('vgs', True)
+ rc,current_vgs,err = module.run_command("%s --noheadings -o vg_name,pv_count,lv_count --separator ';'" % vgs_cmd)
+
+ if rc != 0:
+ module.fail_json(msg="Failed executing vgs command.",rc=rc, err=err)
+
+ changed = False
+
+ vgs = parse_vgs(current_vgs)
+
+ for test_vg in vgs:
+ if test_vg['name'] == vg:
+ this_vg = test_vg
+ break
+ else:
+ this_vg = None
+
+ if this_vg is None:
+ if state == 'present':
+ ### create VG
+ if module.check_mode:
+ changed = True
+ else:
+ ### create PV
+ pvcreate_cmd = module.get_bin_path('pvcreate', True)
+ for current_dev in dev_list:
+ rc,_,err = module.run_command("%s -f %s" % (pvcreate_cmd,current_dev))
+ if rc == 0:
+ changed = True
+ else:
+ module.fail_json(msg="Creating physical volume '%s' failed" % current_dev, rc=rc, err=err)
+ vgcreate_cmd = module.get_bin_path('vgcreate', True)
+ rc,_,err = module.run_command([vgcreate_cmd] + vgoptions + ['-s', str(pesize), vg] + dev_list)
+ if rc == 0:
+ changed = True
+ else:
+ module.fail_json(msg="Creating volume group '%s' failed"%vg, rc=rc, err=err)
+ else:
+ if state == 'absent':
+ if module.check_mode:
+ module.exit_json(changed=True)
+ else:
+ if this_vg['lv_count'] == 0 or force:
+ ### remove VG
+ vgremove_cmd = module.get_bin_path('vgremove', True)
+ rc,_,err = module.run_command("%s --force %s" % (vgremove_cmd, vg))
+ if rc == 0:
+ module.exit_json(changed=True)
+ else:
+ module.fail_json(msg="Failed to remove volume group %s"%(vg),rc=rc, err=err)
+ else:
+ module.fail_json(msg="Refuse to remove non-empty volume group %s without force=yes"%(vg))
+
+ ### resize VG
+ current_devs = [ os.path.realpath(pv['name']) for pv in pvs if pv['vg_name'] == vg ]
+ devs_to_remove = list(set(current_devs) - set(dev_list))
+ devs_to_add = list(set(dev_list) - set(current_devs))
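+ # e.g. current_devs=['/dev/sdb1', '/dev/sda5'] with dev_list=['/dev/sdb1', '/dev/sdc5']
+ # gives devs_to_add=['/dev/sdc5'] and devs_to_remove=['/dev/sda5'], i.e. the
+ # extend-then-reduce behaviour described in the examples above.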
+
+ if devs_to_add or devs_to_remove:
+ if module.check_mode:
+ changed = True
+ else:
+ if devs_to_add:
+ devs_to_add_string = ' '.join(devs_to_add)
+ ### create PV
+ pvcreate_cmd = module.get_bin_path('pvcreate', True)
+ for current_dev in devs_to_add:
+ rc,_,err = module.run_command("%s -f %s" % (pvcreate_cmd, current_dev))
+ if rc == 0:
+ changed = True
+ else:
+ module.fail_json(msg="Creating physical volume '%s' failed"%current_dev, rc=rc, err=err)
+ ### add PV to our VG
+ vgextend_cmd = module.get_bin_path('vgextend', True)
+ rc,_,err = module.run_command("%s %s %s" % (vgextend_cmd, vg, devs_to_add_string))
+ if rc == 0:
+ changed = True
+ else:
+ module.fail_json(msg="Unable to extend %s by %s."%(vg, devs_to_add_string),rc=rc,err=err)
+
+ ### remove some PV from our VG
+ if devs_to_remove:
+ devs_to_remove_string = ' '.join(devs_to_remove)
+ vgreduce_cmd = module.get_bin_path('vgreduce', True)
+ rc,_,err = module.run_command("%s --force %s %s" % (vgreduce_cmd, vg, devs_to_remove_string))
+ if rc == 0:
+ changed = True
+ else:
+ module.fail_json(msg="Unable to reduce %s by %s."%(vg, devs_to_remove_string),rc=rc,err=err)
+
+ module.exit_json(changed=changed)
+
+# import module snippets
+from ansible.module_utils.basic import *
+main()
diff --git a/lib/ansible/modules/extras/system/lvol.py b/lib/ansible/modules/extras/system/lvol.py
new file mode 100644
index 0000000000..978ce7d1c5
--- /dev/null
+++ b/lib/ansible/modules/extras/system/lvol.py
@@ -0,0 +1,434 @@
+#!/usr/bin/python
+# -*- coding: utf-8 -*-
+
+# (c) 2013, Jeroen Hoekx <jeroen.hoekx@dsquare.be>, Alexander Bulimov <lazywolf0@gmail.com>
+#
+# This file is part of Ansible
+#
+# Ansible is free software: you can redistribute it and/or modify
+# it under the terms of the GNU General Public License as published by
+# the Free Software Foundation, either version 3 of the License, or
+# (at your option) any later version.
+#
+# Ansible is distributed in the hope that it will be useful,
+# but WITHOUT ANY WARRANTY; without even the implied warranty of
+# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+# GNU General Public License for more details.
+#
+# You should have received a copy of the GNU General Public License
+# along with Ansible. If not, see <http://www.gnu.org/licenses/>.
+
+DOCUMENTATION = '''
+---
+author:
+ - "Jeroen Hoekx (@jhoekx)"
+ - "Alexander Bulimov (@abulimov)"
+module: lvol
+short_description: Configure LVM logical volumes
+description:
+ - This module creates, removes or resizes logical volumes.
+version_added: "1.1"
+options:
+ vg:
+ description:
+ - The volume group this logical volume is part of.
+ required: true
+ lv:
+ description:
+ - The name of the logical volume.
+ required: true
+ size:
+ description:
+ - The size of the logical volume, according to lvcreate(8) --size, by
+ default in megabytes or optionally with one of [bBsSkKmMgGtTpPeE] units; or
+ according to lvcreate(8) --extents as a percentage of [VG|PVS|FREE].
+ Float values must begin with a digit.
+ Resizing using percentage values was not supported prior to 2.1.
+ state:
+ choices: [ "present", "absent" ]
+ default: present
+ description:
+ - Control if the logical volume exists. If C(present) and the
+ volume does not already exist then the C(size) option is required.
+ required: false
+ active:
+ version_added: "2.2"
+ choices: [ "yes", "no" ]
+ default: "yes"
+ description:
+ - Whether the volume is active and visible to the host.
+ required: false
+ force:
+ version_added: "1.5"
+ choices: [ "yes", "no" ]
+ default: "no"
+ description:
+ - Shrink or remove operations of volumes require this switch. Ensures that
+ filesystems never get corrupted/destroyed by mistake.
+ required: false
+ opts:
+ version_added: "2.0"
+ description:
+ - Free-form options to be passed to the lvcreate command
+ snapshot:
+ version_added: "2.1"
+ description:
+ - The name of the snapshot volume
+ required: false
+ pvs:
+ version_added: "2.2"
+ description:
+ - Comma-separated list of physical volumes, e.g. /dev/sda,/dev/sdb
+ required: false
+ shrink:
+ version_added: "2.2"
+ description:
+ - Shrink if current size is higher than the size requested.
+ required: false
+ default: yes
+notes:
+ - Filesystems on top of the volume are not resized.
+'''
+
+EXAMPLES = '''
+# Create a logical volume of 512m.
+- lvol: vg=firefly lv=test size=512
+
+# Create a logical volume of 512m with disks /dev/sda and /dev/sdb
+- lvol: vg=firefly lv=test size=512 pvs=/dev/sda,/dev/sdb
+
+# Create cache pool logical volume
+- lvol: vg=firefly lv=lvcache size=512m opts='--type cache-pool'
+
+# Create a logical volume of 512g.
+- lvol: vg=firefly lv=test size=512g
+
+# Create a logical volume the size of all remaining space in the volume group
+- lvol: vg=firefly lv=test size=100%FREE
+
+# Create a logical volume with special options
+- lvol: vg=firefly lv=test size=512g opts="-r 16"
+
+# Extend the logical volume to 1024m.
+- lvol: vg=firefly lv=test size=1024
+
+# Extend the logical volume to consume all remaining space in the volume group
+- lvol: vg=firefly lv=test size=+100%FREE
+
+# Extend the logical volume to take all remaining space of the PVs
+- lvol: vg=firefly lv=test size=100%PVS
+
+# Resize the logical volume to % of VG
+- lvol: vg=firefly lv=test size=80%VG force=yes
+
+# Reduce the logical volume to 512m
+- lvol: vg=firefly lv=test size=512 force=yes
+
+# Set the logical volume to 512m and do not try to shrink if size is lower than current one
+- lvol: vg=firefly lv=test size=512 shrink=no
+
+# Remove the logical volume.
+- lvol: vg=firefly lv=test state=absent force=yes
+
+# Create a snapshot volume of the test logical volume.
+- lvol: vg=firefly lv=test snapshot=snap1 size=100m
+
+# Deactivate a logical volume
+- lvol: vg=firefly lv=test active=false
+
+# Create a deactivated logical volume
+- lvol: vg=firefly lv=test size=512g active=false
+'''
+
+import re
+
+decimal_point = re.compile(r"(\d+)")
+
+def mkversion(major, minor, patch):
+ return (1000 * 1000 * int(major)) + (1000 * int(minor)) + int(patch)
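+# Illustrative only: mkversion() packs a dotted version into one comparable
+# integer, e.g. mkversion(2, 2, 99) -> 2*1000000 + 2*1000 + 99 = 2002099,
+# so a plain integer comparison decides whether the installed LVM is new enough.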
+
+def parse_lvs(data):
+ lvs = []
+ for line in data.splitlines():
+ parts = line.strip().split(';')
+ lvs.append({
+ 'name': parts[0].replace('[','').replace(']',''),
+ 'size': int(decimal_point.match(parts[1]).group(1)),
+ 'active': (parts[2][4] == 'a')
+ })
+ return lvs
+
+def parse_vgs(data):
+ vgs = []
+ for line in data.splitlines():
+ parts = line.strip().split(';')
+ vgs.append({
+ 'name': parts[0],
+ 'size': int(decimal_point.match(parts[1]).group(1)),
+ 'free': int(decimal_point.match(parts[2]).group(1)),
+ 'ext_size': int(decimal_point.match(parts[3]).group(1))
+ })
+ return vgs
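+# Sketch of the expected input (sample values are hypothetical): one line of
+# `vgs --noheadings -o vg_name,size,free,vg_extent_size --separator ';'`
+# output such as "  firefly;508.00m;504.00m;4.00m" parses to
+# {'name': 'firefly', 'size': 508, 'free': 504, 'ext_size': 4};
+# decimal_point keeps only the leading integer of each numeric field.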
+
+
+def get_lvm_version(module):
+ ver_cmd = module.get_bin_path("lvm", required=True)
+ rc, out, err = module.run_command("%s version" % (ver_cmd))
+ if rc != 0:
+ return None
+ m = re.search(r"LVM version:\s+(\d+)\.(\d+)\.(\d+).*(\d{4}-\d{2}-\d{2})", out)
+ if not m:
+ return None
+ return mkversion(m.group(1), m.group(2), m.group(3))
+
+
+def main():
+ module = AnsibleModule(
+ argument_spec=dict(
+ vg=dict(required=True),
+ lv=dict(required=True),
+ size=dict(type='str'),
+ opts=dict(type='str'),
+ state=dict(choices=["absent", "present"], default='present'),
+ force=dict(type='bool', default=False),
+ shrink=dict(type='bool', default=True),
+ active=dict(type='bool', default=True),
+ snapshot=dict(type='str', default=None),
+ pvs=dict(type='str')
+ ),
+ supports_check_mode=True,
+ )
+
+ # Determine if the "--yes" option should be used
+ version_found = get_lvm_version(module)
+ if version_found is None:
+ module.fail_json(msg="Failed to get LVM version number")
+ version_yesopt = mkversion(2, 2, 99) # First LVM with the "--yes" option
+ if version_found >= version_yesopt:
+ yesopt = "--yes"
+ else:
+ yesopt = ""
+
+ vg = module.params['vg']
+ lv = module.params['lv']
+ size = module.params['size']
+ opts = module.params['opts']
+ state = module.params['state']
+ force = module.boolean(module.params['force'])
+ shrink = module.boolean(module.params['shrink'])
+ active = module.boolean(module.params['active'])
+ size_opt = 'L'
+ size_unit = 'm'
+ snapshot = module.params['snapshot']
+ pvs = module.params['pvs']
+
+ if pvs is None:
+ pvs = ""
+ else:
+ pvs = pvs.replace(",", " ")
+
+ if opts is None:
+ opts = ""
+
+ # Add --test option when running in check-mode
+ if module.check_mode:
+ test_opt = ' --test'
+ else:
+ test_opt = ''
+
+ if size:
+ # LVCREATE(8) -l --extents option with percentage
+ if '%' in size:
+ size_parts = size.split('%', 1)
+ size_percent = int(size_parts[0])
+ if size_percent > 100:
+ module.fail_json(msg="Size percentage cannot be larger than 100%")
+ size_whole = size_parts[1]
+ if size_whole == 'ORIGIN':
+ module.fail_json(msg="Snapshot Volumes are not supported")
+ elif size_whole not in ['VG', 'PVS', 'FREE']:
+ module.fail_json(msg="Specify extents as a percentage of VG|PVS|FREE")
+ size_opt = 'l'
+ size_unit = ''
+
+ if '%' not in size:
+ # LVCREATE(8) -L --size option unit
+ if size[-1].lower() in 'bskmgtpe':
+ size_unit = size[-1].lower()
+ size = size[0:-1]
+
+ try:
+ float(size)
+ if not size[0].isdigit():
+ raise ValueError()
+ except ValueError:
+ module.fail_json(msg="Bad size specification of '%s'" % size)
+
+ # when no unit, megabytes by default
+ if size_opt == 'l':
+ unit = 'm'
+ else:
+ unit = size_unit
+
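+ # Worked examples of the parsing above (values hypothetical):
+ # size='512g' -> size_opt='L', size='512', size_unit='g', unit='g'
+ # size='80%VG' -> size_opt='l', size_percent=80, size_whole='VG', unit='m'
+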
+ # Get information on volume group requested
+ vgs_cmd = module.get_bin_path("vgs", required=True)
+ rc, current_vgs, err = module.run_command(
+ "%s --noheadings -o vg_name,size,free,vg_extent_size --units %s --separator ';' %s" % (vgs_cmd, unit, vg))
+
+ if rc != 0:
+ if state == 'absent':
+ module.exit_json(changed=False, stdout="Volume group %s does not exist." % vg)
+ else:
+ module.fail_json(msg="Volume group %s does not exist." % vg, rc=rc, err=err)
+
+ vgs = parse_vgs(current_vgs)
+ this_vg = vgs[0]
+
+ # Get information on logical volume requested
+ lvs_cmd = module.get_bin_path("lvs", required=True)
+ rc, current_lvs, err = module.run_command(
+ "%s -a --noheadings --nosuffix -o lv_name,size,lv_attr --units %s --separator ';' %s" % (lvs_cmd, unit, vg))
+
+ if rc != 0:
+ if state == 'absent':
+ module.exit_json(changed=False, stdout="Volume group %s does not exist." % vg)
+ else:
+ module.fail_json(msg="Volume group %s does not exist." % vg, rc=rc, err=err)
+
+ changed = False
+
+ lvs = parse_lvs(current_lvs)
+
+ if snapshot is None:
+ check_lv = lv
+ else:
+ check_lv = snapshot
+ for test_lv in lvs:
+ if test_lv['name'] == check_lv:
+ this_lv = test_lv
+ break
+ else:
+ this_lv = None
+
+ if state == 'present' and not size:
+ if this_lv is None:
+ module.fail_json(msg="No size given.")
+
+ msg = ''
+ if this_lv is None:
+ if state == 'present':
+ ### create LV
+ lvcreate_cmd = module.get_bin_path("lvcreate", required=True)
+ if snapshot is not None:
+ cmd = "%s %s %s -%s %s%s -s -n %s %s %s/%s" % (lvcreate_cmd, test_opt, yesopt, size_opt, size, size_unit, snapshot, opts, vg, lv)
+ else:
+ cmd = "%s %s %s -n %s -%s %s%s %s %s %s" % (lvcreate_cmd, test_opt, yesopt, lv, size_opt, size, size_unit, opts, vg, pvs)
+ rc, _, err = module.run_command(cmd)
+ if rc == 0:
+ changed = True
+ else:
+ module.fail_json(msg="Creating logical volume '%s' failed" % lv, rc=rc, err=err)
+ else:
+ if state == 'absent':
+ ### remove LV
+ if not force:
+ module.fail_json(msg="Sorry, no removal of logical volume %s without force=yes." % (this_lv['name']))
+ lvremove_cmd = module.get_bin_path("lvremove", required=True)
+ rc, _, err = module.run_command("%s %s --force %s/%s" % (lvremove_cmd, test_opt, vg, this_lv['name']))
+ if rc == 0:
+ module.exit_json(changed=True)
+ else:
+ module.fail_json(msg="Failed to remove logical volume %s" % (lv), rc=rc, err=err)
+
+ elif not size:
+ pass
+
+ elif size_opt == 'l':
+ ### Resize LV based on % value
+ tool = None
+ size_free = this_vg['free']
+ if size_whole == 'VG' or size_whole == 'PVS':
+ size_requested = size_percent * this_vg['size'] / 100
+ else: # size_whole == 'FREE':
+ size_requested = size_percent * this_vg['free'] / 100
+ if '+' in size:
+ size_requested += this_lv['size']
+ if this_lv['size'] < size_requested:
+ if (size_free > 0) and (('+' not in size) or (size_free >= (size_requested - this_lv['size']))):
+ tool = module.get_bin_path("lvextend", required=True)
+ else:
+ module.fail_json(msg="Logical Volume %s could not be extended. Not enough free space left (%s%s required / %s%s available)" % (this_lv['name'], (size_requested - this_lv['size']), unit, size_free, unit))
+ elif shrink and this_lv['size'] > size_requested + this_vg['ext_size']: # more than an extent too large
+ if size_requested == 0:
+ module.fail_json(msg="Sorry, no shrinking of %s to 0 permitted." % (this_lv['name']))
+ elif not force:
+ module.fail_json(msg="Sorry, no shrinking of %s without force=yes" % (this_lv['name']))
+ else:
+ tool = module.get_bin_path("lvreduce", required=True)
+ tool = '%s %s' % (tool, '--force')
+
+ if tool:
+ cmd = "%s %s -%s %s%s %s/%s %s" % (tool, test_opt, size_opt, size, size_unit, vg, this_lv['name'], pvs)
+ rc, out, err = module.run_command(cmd)
+ if "Reached maximum COW size" in out:
+ module.fail_json(msg="Unable to resize %s to %s%s" % (lv, size, size_unit), rc=rc, err=err, out=out)
+ elif rc == 0:
+ changed = True
+ msg = "Volume %s resized to %s%s" % (this_lv['name'], size_requested, unit)
+ elif "matches existing size" in err:
+ module.exit_json(changed=False, vg=vg, lv=this_lv['name'], size=this_lv['size'])
+ elif "not larger than existing size" in err:
+ module.exit_json(changed=False, vg=vg, lv=this_lv['name'], size=this_lv['size'], msg="Original size is larger than requested size", err=err)
+ else:
+ module.fail_json(msg="Unable to resize %s to %s%s" % (lv, size, size_unit), rc=rc, err=err)
+
+ else:
+ ### resize LV based on absolute values
+ tool = None
+ if int(size) > this_lv['size']:
+ tool = module.get_bin_path("lvextend", required=True)
+ elif shrink and int(size) < this_lv['size']:
+ if int(size) == 0:
+ module.fail_json(msg="Sorry, no shrinking of %s to 0 permitted." % (this_lv['name']))
+ if not force:
+ module.fail_json(msg="Sorry, no shrinking of %s without force=yes." % (this_lv['name']))
+ else:
+ tool = module.get_bin_path("lvreduce", required=True)
+ tool = '%s %s' % (tool, '--force')
+
+ if tool:
+ cmd = "%s %s -%s %s%s %s/%s %s" % (tool, test_opt, size_opt, size, size_unit, vg, this_lv['name'], pvs)
+ rc, out, err = module.run_command(cmd)
+ if "Reached maximum COW size" in out:
+ module.fail_json(msg="Unable to resize %s to %s%s" % (lv, size, size_unit), rc=rc, err=err, out=out)
+ elif rc == 0:
+ changed = True
+ elif "matches existing size" in err:
+ module.exit_json(changed=False, vg=vg, lv=this_lv['name'], size=this_lv['size'])
+ elif "not larger than existing size" in err:
+ module.exit_json(changed=False, vg=vg, lv=this_lv['name'], size=this_lv['size'], msg="Original size is larger than requested size", err=err)
+ else:
+ module.fail_json(msg="Unable to resize %s to %s%s" % (lv, size, size_unit), rc=rc, err=err)
+
+ if this_lv is not None:
+ if active:
+ lvchange_cmd = module.get_bin_path("lvchange", required=True)
+ rc, _, err = module.run_command("%s -ay %s/%s" % (lvchange_cmd, vg, this_lv['name']))
+ if rc == 0:
+ module.exit_json(changed=((not this_lv['active']) or changed), vg=vg, lv=this_lv['name'], size=this_lv['size'])
+ else:
+ module.fail_json(msg="Failed to activate logical volume %s" % (lv), rc=rc, err=err)
+ else:
+ lvchange_cmd = module.get_bin_path("lvchange", required=True)
+ rc, _, err = module.run_command("%s -an %s/%s" % (lvchange_cmd, vg, this_lv['name']))
+ if rc == 0:
+ module.exit_json(changed=(this_lv['active'] or changed), vg=vg, lv=this_lv['name'], size=this_lv['size'])
+ else:
+ module.fail_json(msg="Failed to deactivate logical volume %s" % (lv), rc=rc, err=err)
+
+ module.exit_json(changed=changed, msg=msg)
+
+# import module snippets
+from ansible.module_utils.basic import *
+
+if __name__ == '__main__':
+ main()
diff --git a/lib/ansible/modules/extras/system/make.py b/lib/ansible/modules/extras/system/make.py
new file mode 100644
index 0000000000..ee8d07be74
--- /dev/null
+++ b/lib/ansible/modules/extras/system/make.py
@@ -0,0 +1,127 @@
+#!/usr/bin/python
+# -*- coding: utf-8 -*-
+
+# (c) 2015, Linus Unnebäck <linus@folkdatorn.se>
+#
+# This file is part of Ansible
+#
+# This module is free software: you can redistribute it and/or modify
+# it under the terms of the GNU General Public License as published by
+# the Free Software Foundation, either version 3 of the License, or
+# (at your option) any later version.
+#
+# This software is distributed in the hope that it will be useful,
+# but WITHOUT ANY WARRANTY; without even the implied warranty of
+# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+# GNU General Public License for more details.
+#
+# You should have received a copy of the GNU General Public License
+# along with this software. If not, see <http://www.gnu.org/licenses/>.
+
+DOCUMENTATION = '''
+---
+module: make
+short_description: Run targets in a Makefile
+requirements: [ make ]
+version_added: "2.1"
+author: Linus Unnebäck (@LinusU) <linus@folkdatorn.se>
+description:
+ - Run targets in a Makefile.
+options:
+ target:
+ description:
+ - The target to run
+ required: false
+ default: none
+ params:
+ description:
+ - Any extra parameters to pass to make
+ required: false
+ default: none
+ chdir:
+ description:
+ - cd into this directory before running make
+ required: true
+'''
+
+EXAMPLES = '''
+# Build the default target
+- make: chdir=/home/ubuntu/cool-project
+
+# Run `install` target as root
+- make: chdir=/home/ubuntu/cool-project target=install
+ become: yes
+
+# Pass in extra arguments to build
+- make:
+ chdir: /home/ubuntu/cool-project
+ target: all
+ params:
+ NUM_THREADS: 4
+ BACKEND: lapack
+'''
+
+# TODO: Disabled the RETURN as it was breaking docs building. Someone needs to
+# fix this
+RETURN = '''# '''
+
+
+def format_params(params):
+ return [k + '=' + str(v) for k, v in params.iteritems()]
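+# e.g. (sketch): format_params({'NUM_THREADS': 4, 'BACKEND': 'lapack'})
+# returns ['NUM_THREADS=4', 'BACKEND=lapack'] (ordering follows dict order).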
+
+
+def push_arguments(cmd, args):
+ if args['target'] is not None:
+ cmd.append(args['target'])
+ if args['params'] is not None:
+ cmd.extend(format_params(args['params']))
+ return cmd
+
+
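+# `make --question` builds nothing: it exits 0 when the target is already up
+# to date and non-zero when work would be done, which maps directly onto
+# Ansible's changed/unchanged semantics.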
+def check_changed(make_path, module, args):
+ cmd = push_arguments([make_path, '--question'], args)
+ rc, _, __ = module.run_command(cmd, check_rc=False, cwd=args['chdir'])
+ return (rc != 0)
+
+
+def run_make(make_path, module, args):
+ cmd = push_arguments([make_path], args)
+ module.run_command(cmd, check_rc=True, cwd=args['chdir'])
+
+
+def main():
+ module = AnsibleModule(
+ supports_check_mode=True,
+ argument_spec=dict(
+ target=dict(required=False, default=None, type='str'),
+ params=dict(required=False, default=None, type='dict'),
+ chdir=dict(required=True, type='str'),
+ ),
+ )
+ args = dict(
+ changed=False,
+ failed=False,
+ target=module.params['target'],
+ params=module.params['params'],
+ chdir=module.params['chdir'],
+ )
+ make_path = module.get_bin_path('make', True)
+
+ # Check if target is up to date
+ args['changed'] = check_changed(make_path, module, args)
+
+ # Check only; don't modify
+ if module.check_mode:
+ module.exit_json(changed=args['changed'])
+
+ # Target is already up to date
+ if not args['changed']:
+ module.exit_json(**args)
+
+ run_make(make_path, module, args)
+ module.exit_json(**args)
+
+from ansible.module_utils.basic import *
+
+if __name__ == '__main__':
+ main()
diff --git a/lib/ansible/modules/extras/system/modprobe.py b/lib/ansible/modules/extras/system/modprobe.py
new file mode 100644
index 0000000000..1bb1d3f70b
--- /dev/null
+++ b/lib/ansible/modules/extras/system/modprobe.py
@@ -0,0 +1,124 @@
+#!/usr/bin/python
+#coding: utf-8 -*-
+
+# (c) 2013, David Stygstra <david.stygstra@gmail.com>
+#
+# This file is part of Ansible
+#
+# This module is free software: you can redistribute it and/or modify
+# it under the terms of the GNU General Public License as published by
+# the Free Software Foundation, either version 3 of the License, or
+# (at your option) any later version.
+#
+# This software is distributed in the hope that it will be useful,
+# but WITHOUT ANY WARRANTY; without even the implied warranty of
+# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+# GNU General Public License for more details.
+#
+# You should have received a copy of the GNU General Public License
+# along with this software. If not, see <http://www.gnu.org/licenses/>.
+
+
+DOCUMENTATION = '''
+---
+module: modprobe
+short_description: Add or remove kernel modules
+requirements: []
+version_added: 1.4
+author:
+ - "David Stygstra (@stygstra)"
+ - "Julien Dauphant"
+ - "Matt Jeffery"
+description:
+ - Add or remove kernel modules.
+options:
+ name:
+ required: true
+ description:
+ - Name of kernel module to manage.
+ state:
+ required: false
+ default: "present"
+ choices: [ present, absent ]
+ description:
+ - Whether the module should be present or absent.
+ params:
+ required: false
+ default: ""
+ version_added: "1.6"
+ description:
+ - Module parameters.
+'''
+
+EXAMPLES = '''
+# Add the 802.1q module
+- modprobe: name=8021q state=present
+
+# Add the dummy module
+- modprobe: name=dummy state=present params="numdummies=2"
+'''
+
+from ansible.module_utils.basic import *
+from ansible.module_utils.pycompat24 import get_exception
+import shlex
+
+
+def main():
+ module = AnsibleModule(
+ argument_spec={
+ 'name': {'required': True},
+ 'state': {'default': 'present', 'choices': ['present', 'absent']},
+ 'params': {'default': ''},
+ },
+ supports_check_mode=True,
+ )
+ args = {
+ 'changed': False,
+ 'failed': False,
+ 'name': module.params['name'],
+ 'state': module.params['state'],
+ 'params': module.params['params'],
+ }
+
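+ # Each /proc/modules line begins with the module name, e.g. (sketch):
+ # "8021q 33080 0 - Live 0x0000000000000000"
+ # so a simple prefix match on "<name> " suffices; dashes in the requested
+ # name are normalized to underscores, the way the kernel reports them.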
+ # Check if module is present
+ try:
+ modules = open('/proc/modules')
+ present = False
+ module_name = args['name'].replace('-', '_') + ' '
+ for line in modules:
+ if line.startswith(module_name):
+ present = True
+ break
+ modules.close()
+ except IOError:
+ e = get_exception()
+ module.fail_json(msg=str(e), **args)
+
+ # Check only; don't modify
+ if module.check_mode:
+ if args['state'] == 'present' and not present:
+ changed = True
+ elif args['state'] == 'absent' and present:
+ changed = True
+ else:
+ changed = False
+ module.exit_json(changed=changed)
+
+ # Add/remove module as needed
+ if args['state'] == 'present':
+ if not present:
+ command = [module.get_bin_path('modprobe', True), args['name']]
+ command.extend(shlex.split(args['params']))
+ rc, _, err = module.run_command(command)
+ if rc != 0:
+ module.fail_json(msg=err, **args)
+ args['changed'] = True
+ elif args['state'] == 'absent':
+ if present:
+ rc, _, err = module.run_command([module.get_bin_path('modprobe', True), '-r', args['name']])
+ if rc != 0:
+ module.fail_json(msg=err, **args)
+ args['changed'] = True
+
+ module.exit_json(**args)
+
+main()
diff --git a/lib/ansible/modules/extras/system/ohai.py b/lib/ansible/modules/extras/system/ohai.py
new file mode 100644
index 0000000000..d71d581b62
--- /dev/null
+++ b/lib/ansible/modules/extras/system/ohai.py
@@ -0,0 +1,56 @@
+#!/usr/bin/python
+# -*- coding: utf-8 -*-
+
+# (c) 2012, Michael DeHaan <michael.dehaan@gmail.com>
+#
+# This file is part of Ansible
+#
+# Ansible is free software: you can redistribute it and/or modify
+# it under the terms of the GNU General Public License as published by
+# the Free Software Foundation, either version 3 of the License, or
+# (at your option) any later version.
+#
+# Ansible is distributed in the hope that it will be useful,
+# but WITHOUT ANY WARRANTY; without even the implied warranty of
+# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+# GNU General Public License for more details.
+#
+# You should have received a copy of the GNU General Public License
+# along with Ansible. If not, see <http://www.gnu.org/licenses/>.
+#
+
+DOCUMENTATION = '''
+---
+module: ohai
+short_description: Returns inventory data from I(Ohai)
+description:
+ - Similar to the M(facter) module, this runs the I(Ohai) discovery program
+ (U(http://wiki.opscode.com/display/chef/Ohai)) on the remote host and
+ returns JSON inventory data.
+ I(Ohai) data is a bit more verbose and nested than I(facter).
+version_added: "0.6"
+options: {}
+notes: []
+requirements: [ "ohai" ]
+author:
+ - "Ansible Core Team"
+ - "Michael DeHaan (@mpdehaan)"
+'''
+
+EXAMPLES = '''
+# Retrieve Ohai data from all web servers and store it in one file per host
+ansible webservers -m ohai --tree=/tmp/ohaidata
+'''
+
+def main():
+ module = AnsibleModule(
+ argument_spec = dict()
+ )
+ cmd = ["/usr/bin/env", "ohai"]
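+ # Ohai prints a single JSON document on stdout; parse it and return its
+ # top-level keys directly as the module's result.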
+ rc, out, err = module.run_command(cmd, check_rc=True)
+ module.exit_json(**json.loads(out))
+
+# import module snippets
+from ansible.module_utils.basic import *
+
+main()
diff --git a/lib/ansible/modules/extras/system/open_iscsi.py b/lib/ansible/modules/extras/system/open_iscsi.py
new file mode 100644
index 0000000000..74349ce868
--- /dev/null
+++ b/lib/ansible/modules/extras/system/open_iscsi.py
@@ -0,0 +1,375 @@
+#!/usr/bin/python
+# -*- coding: utf-8 -*-
+
+# (c) 2013, Serge van Ginderachter <serge@vanginderachter.be>
+#
+# This file is part of Ansible
+#
+# Ansible is free software: you can redistribute it and/or modify
+# it under the terms of the GNU General Public License as published by
+# the Free Software Foundation, either version 3 of the License, or
+# (at your option) any later version.
+#
+# Ansible is distributed in the hope that it will be useful,
+# but WITHOUT ANY WARRANTY; without even the implied warranty of
+# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+# GNU General Public License for more details.
+#
+# You should have received a copy of the GNU General Public License
+# along with Ansible. If not, see <http://www.gnu.org/licenses/>.
+
+DOCUMENTATION = '''
+---
+module: open_iscsi
+author: "Serge van Ginderachter (@srvg)"
+version_added: "1.4"
+short_description: Manage iscsi targets with open-iscsi
+description:
+ - Discover targets on given portal, (dis)connect targets, mark targets to
+ manually or auto start, return device nodes of connected targets.
+requirements:
+ - open_iscsi library and tools (iscsiadm)
+options:
+ portal:
+ required: false
+ aliases: [ip]
+ description:
+ - the ip address of the iscsi target
+ port:
+ required: false
+ default: 3260
+ description:
+ - the port on which the iscsi target process listens
+ target:
+ required: false
+ aliases: [name, targetname]
+ description:
+ - the iscsi target name
+ login:
+ required: false
+ choices: [true, false]
+ description:
+ - whether the target node should be connected
+ node_auth:
+ required: false
+ default: CHAP
+ description:
+ - discovery.sendtargets.auth.authmethod
+ node_user:
+ required: false
+ description:
+ - discovery.sendtargets.auth.username
+ node_pass:
+ required: false
+ description:
+ - discovery.sendtargets.auth.password
+ auto_node_startup:
+ aliases: [automatic]
+ required: false
+ choices: [true, false]
+ description:
+ - whether the target node should be automatically connected at startup
+ discover:
+ required: false
+ choices: [true, false]
+ description:
+ - whether the list of target nodes on the portal should be
+ (re)discovered and added to the persistent iscsi database.
+ Keep in mind that iscsiadm discovery resets configuration, like node.startup
+ to manual; hence, combined with auto_node_startup=yes, this will always return
+ a changed state.
+ show_nodes:
+ required: false
+ choices: [true, false]
+ description:
+ - whether the list of nodes in the persistent iscsi database should be
+ returned by the module
+'''
+
+EXAMPLES = '''
+# perform a discovery on 10.1.2.3 and show available target nodes
+- open_iscsi: show_nodes=yes discover=yes portal=10.1.2.3
+
+# discover targets on portal and login to the one available
+# (only works if exactly one target is exported to the initiator)
+- open_iscsi: portal={{iscsi_target}} login=yes discover=yes
+
+# description: connect to the named target, after updating the local
+# persistent database (cache)
+- open_iscsi: login=yes target=iqn.1986-03.com.sun:02:f8c1f9e0-c3ec-ec84-c9c9-8bfb0cd5de3d
+
+# description: disconnect from the cached named target
+- open_iscsi: login=no target=iqn.1986-03.com.sun:02:f8c1f9e0-c3ec-ec84-c9c9-8bfb0cd5de3d
+'''
+
+import glob
+import time
+
+ISCSIADM = 'iscsiadm'
+
+
+def compare_nodelists(l1, l2):
+
+ l1.sort()
+ l2.sort()
+ return l1 == l2
+
+
+def iscsi_get_cached_nodes(module, portal=None):
+
+ cmd = '%s --mode node' % iscsiadm_cmd
+ (rc, out, err) = module.run_command(cmd)
+
+ if rc == 0:
+ lines = out.splitlines()
+ nodes = []
+ for line in lines:
+ # line format is "ip:port,target_portal_group_tag targetname"
+ parts = line.split()
+ if len(parts) > 2:
+ module.fail_json(msg='error parsing output', cmd=cmd)
+ target = parts[1]
+ parts = parts[0].split(':')
+ target_portal = parts[0]
+
+ if portal is None or portal == target_portal:
+ nodes.append(target)
+
+ # older versions of iscsiadm don't have nice return codes
+ # for newer versions see iscsiadm(8); also usr/iscsiadm.c for details
+ # err can contain [N|n]o records...
+ elif rc == 21 or (rc == 255 and "o records found" in err):
+ nodes = []
+ else:
+ module.fail_json(cmd=cmd, rc=rc, msg=err)
+
+ return nodes
+
+
+def iscsi_discover(module, portal, port):
+
+ cmd = '%s --mode discovery --type sendtargets --portal %s:%s' % (iscsiadm_cmd, portal, port)
+ (rc, out, err) = module.run_command(cmd)
+
+ if rc > 0:
+ module.fail_json(cmd=cmd, rc=rc, msg=err)
+
+
+def target_loggedon(module, target):
+
+ cmd = '%s --mode session' % iscsiadm_cmd
+ (rc, out, err) = module.run_command(cmd)
+
+ if rc == 0:
+ return target in out
+ elif rc == 21:
+ return False
+ else:
+ module.fail_json(cmd=cmd, rc=rc, msg=err)
+
+
+def target_login(module, target):
+
+ node_auth = module.params['node_auth']
+ node_user = module.params['node_user']
+ node_pass = module.params['node_pass']
+
+ if node_user:
+ params = [('node.session.auth.authmethod', node_auth),
+ ('node.session.auth.username', node_user),
+ ('node.session.auth.password', node_pass)]
+ for (name, value) in params:
+ cmd = '%s --mode node --targetname %s --op=update --name %s --value %s' % (iscsiadm_cmd, target, name, value)
+ (rc, out, err) = module.run_command(cmd)
+ if rc > 0:
+ module.fail_json(cmd=cmd, rc=rc, msg=err)
+
+ cmd = '%s --mode node --targetname %s --login' % (iscsiadm_cmd, target)
+ (rc, out, err) = module.run_command(cmd)
+
+ if rc > 0:
+ module.fail_json(cmd=cmd, rc=rc, msg=err)
+
+
+def target_logout(module, target):
+
+ cmd = '%s --mode node --targetname %s --logout' % (iscsiadm_cmd, target)
+ (rc, out, err) = module.run_command(cmd)
+
+ if rc > 0:
+ module.fail_json(cmd=cmd, rc=rc, msg=err)
+
+
+def target_device_node(module, target):
+
+ # if anyone knows a better way to find out which device nodes get created for
+ # a given target...
+
+ devices = glob.glob('/dev/disk/by-path/*%s*' % target)
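+ # A matching by-path link typically looks like (hypothetical):
+ # /dev/disk/by-path/ip-10.1.2.3:3260-iscsi-<target>-lun-0 -> ../../sda
+ # and os.path.realpath() below resolves it to the backing /dev/sdX node.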
+ devdisks = []
+ for dev in devices:
+ # exclude partitions
+ if "-part" not in dev:
+ devdisk = os.path.realpath(dev)
+ # only add once (multi-path?)
+ if devdisk not in devdisks:
+ devdisks.append(devdisk)
+ return devdisks
+
+
+def target_isauto(module, target):
+
+ cmd = '%s --mode node --targetname %s' % (iscsiadm_cmd, target)
+ (rc, out, err) = module.run_command(cmd)
+
+ if rc == 0:
+ lines = out.splitlines()
+ for line in lines:
+ if 'node.startup' in line:
+ return 'automatic' in line
+ return False
+ else:
+ module.fail_json(cmd=cmd, rc=rc, msg=err)
+
+
+def target_setauto(module, target):
+
+ cmd = '%s --mode node --targetname %s --op=update --name node.startup --value automatic' % (iscsiadm_cmd, target)
+ (rc, out, err) = module.run_command(cmd)
+
+ if rc > 0:
+ module.fail_json(cmd=cmd, rc=rc, msg=err)
+
+
+def target_setmanual(module, target):
+
+ cmd = '%s --mode node --targetname %s --op=update --name node.startup --value manual' % (iscsiadm_cmd, target)
+ (rc, out, err) = module.run_command(cmd)
+
+ if rc > 0:
+ module.fail_json(cmd=cmd, rc=rc, msg=err)
+
+
+def main():
+
+ # load ansible module object
+ module = AnsibleModule(
+ argument_spec = dict(
+
+ # target
+ portal = dict(required=False, aliases=['ip']),
+ port = dict(required=False, default=3260),
+ target = dict(required=False, aliases=['name', 'targetname']),
+ node_auth = dict(required=False, default='CHAP'),
+ node_user = dict(required=False),
+ node_pass = dict(required=False),
+
+ # actions
+ login = dict(type='bool', aliases=['state']),
+ auto_node_startup = dict(type='bool', aliases=['automatic']),
+ discover = dict(type='bool', default=False),
+ show_nodes = dict(type='bool', default=False)
+ ),
+
+ required_together=[['discover_user', 'discover_pass'],
+ ['node_user', 'node_pass']],
+ supports_check_mode=True
+ )
+
+ global iscsiadm_cmd
+ iscsiadm_cmd = module.get_bin_path('iscsiadm', required=True)
+
+ # parameters
+ portal = module.params['portal']
+ target = module.params['target']
+ port = module.params['port']
+ login = module.params['login']
+ automatic = module.params['auto_node_startup']
+ discover = module.params['discover']
+ show_nodes = module.params['show_nodes']
+
+ check = module.check_mode
+
+ cached = iscsi_get_cached_nodes(module, portal)
+
+ # return json dict
+ result = {}
+ result['changed'] = False
+
+ if discover:
+ if portal is None:
+ module.fail_json(msg = "Need to specify at least the portal (ip) to discover")
+ elif check:
+ nodes = cached
+ else:
+ iscsi_discover(module, portal, port)
+ nodes = iscsi_get_cached_nodes(module, portal)
+ if not compare_nodelists(cached, nodes):
+ result['changed'] |= True
+ result['cache_updated'] = True
+ else:
+ nodes = cached
+
+ if login is not None or automatic is not None:
+ if target is None:
+ if len(nodes) > 1:
+ module.fail_json(msg = "Need to specify a target")
+ else:
+ target = nodes[0]
+ else:
+ # check given target is in cache
+ check_target = False
+ for node in nodes:
+ if node == target:
+ check_target = True
+ break
+ if not check_target:
+ module.fail_json(msg = "Specified target not found")
+
+ if show_nodes:
+ result['nodes'] = nodes
+
+ if login is not None:
+ loggedon = target_loggedon(module, target)
+ if (login and loggedon) or (not login and not loggedon):
+ result['changed'] |= False
+ if login:
+ result['devicenodes'] = target_device_node(module, target)
+ elif not check:
+ if login:
+ target_login(module, target)
+ # give udev some time
+ time.sleep(1)
+ result['devicenodes'] = target_device_node(module, target)
+ else:
+ target_logout(module, target)
+ result['changed'] |= True
+ result['connection_changed'] = True
+ else:
+ result['changed'] |= True
+ result['connection_changed'] = True
+
+ if automatic is not None:
+ isauto = target_isauto(module, target)
+ if (automatic and isauto) or (not automatic and not isauto):
+ result['changed'] |= False
+ result['automatic_changed'] = False
+ elif not check:
+ if automatic:
+ target_setauto(module, target)
+ else:
+ target_setmanual(module, target)
+ result['changed'] |= True
+ result['automatic_changed'] = True
+ else:
+ result['changed'] |= True
+ result['automatic_changed'] = True
+
+ module.exit_json(**result)
+
+
+# import module snippets
+from ansible.module_utils.basic import *
+
+main()
+
diff --git a/lib/ansible/modules/extras/system/osx_defaults.py b/lib/ansible/modules/extras/system/osx_defaults.py
new file mode 100644
index 0000000000..93d8130586
--- /dev/null
+++ b/lib/ansible/modules/extras/system/osx_defaults.py
@@ -0,0 +1,386 @@
+#!/usr/bin/python
+# -*- coding: utf-8 -*-
+
+# (c) 2014, GeekChimp - Franck Nijhof <franck@geekchimp.com>
+#
+# Ansible is free software: you can redistribute it and/or modify
+# it under the terms of the GNU General Public License as published by
+# the Free Software Foundation, either version 3 of the License, or
+# (at your option) any later version.
+#
+# Ansible is distributed in the hope that it will be useful,
+# but WITHOUT ANY WARRANTY; without even the implied warranty of
+# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+# GNU General Public License for more details.
+#
+# You should have received a copy of the GNU General Public License
+# along with Ansible. If not, see <http://www.gnu.org/licenses/>.
+
+DOCUMENTATION = '''
+---
+module: osx_defaults
+author: Franck Nijhof (@frenck)
+short_description: osx_defaults allows users to read, write, and delete Mac OS X user defaults from Ansible
+description:
+ - osx_defaults allows users to read, write, and delete Mac OS X user defaults from Ansible scripts.
+ Mac OS X applications and other programs use the defaults system to record user preferences and other
+ information that must be maintained when the applications aren't running (such as default font for new
+ documents, or the position of an Info panel).
+version_added: "2.0"
+options:
+ domain:
+ description:
+ - The domain is a domain name of the form com.companyname.appname.
+ required: false
+ default: NSGlobalDomain
+ host:
+ description:
+ - The host on which the preference should apply. The special value "currentHost" corresponds to the
+ "-currentHost" switch of the defaults commandline tool.
+ required: false
+ default: null
+ version_added: "2.1"
+ key:
+ description:
+ - The key of the user preference
+ required: true
+ type:
+ description:
+ - The type of value to write.
+ required: false
+ default: string
+ choices: [ "array", "bool", "boolean", "date", "float", "int", "integer", "string" ]
+ array_add:
+ description:
+ - Add new elements to the array for a key which has an array as its value.
+ required: false
+ default: false
+ choices: [ "true", "false" ]
+ value:
+ description:
+ - The value to write. Only required when state = present.
+ required: false
+ default: null
+ state:
+ description:
+ - The state of the user defaults
+ required: false
+ default: present
+ choices: [ "present", "absent" ]
+notes:
+ - Apple Mac caches defaults. You may need to log out and log in to apply the changes.
+'''
+
+EXAMPLES = '''
+- osx_defaults: domain=com.apple.Safari key=IncludeInternalDebugMenu type=bool value=true state=present
+- osx_defaults: domain=NSGlobalDomain key=AppleMeasurementUnits type=string value=Centimeters state=present
+- osx_defaults: domain=com.apple.screensaver host=currentHost key=showClock type=int value=1
+- osx_defaults: key=AppleMeasurementUnits type=string value=Centimeters
+- osx_defaults:
+ key: AppleLanguages
+ type: array
+ value: ["en", "nl"]
+- osx_defaults: domain=com.geekchimp.macable key=ExampleKeyToRemove state=absent
+'''
+
+import datetime
+from ansible.module_utils.basic import *
+from ansible.module_utils.pycompat24 import get_exception
+
+# exceptions --------------------------------------------------------------- {{{
+class OSXDefaultsException(Exception):
+ pass
+# /exceptions -------------------------------------------------------------- }}}
+
+# class MacDefaults -------------------------------------------------------- {{{
+class OSXDefaults(object):
+
+ """ Class to manage Mac OS user defaults """
+
+ # init ---------------------------------------------------------------- {{{
+ """ Initialize this module. Finds 'defaults' executable and preps the parameters """
+ def __init__(self, **kwargs):
+
+ # Initial var for storing current defaults value
+ self.current_value = None
+
+ # Just set all given parameters
+ for key, val in kwargs.iteritems():
+ setattr(self, key, val)
+
+ # Try to find the defaults executable
+ self.executable = self.module.get_bin_path(
+ 'defaults',
+ required=False,
+ opt_dirs=self.path.split(':'),
+ )
+
+ if not self.executable:
+ raise OSXDefaultsException("Unable to locate defaults executable.")
+
+ # When state is present, we require a parameter
+ if self.state == "present" and self.value is None:
+ raise OSXDefaultsException("Missing value parameter")
+
+ # Ensure the value is the correct type
+ self.value = self._convert_type(self.type, self.value)
+
+ # /init --------------------------------------------------------------- }}}
+
+ # tools --------------------------------------------------------------- {{{
+ """ Converts value to given type """
+ def _convert_type(self, type, value):
+
+ if type == "string":
+ return str(value)
+ elif type in ["bool", "boolean"]:
+ if isinstance(value, basestring):
+ value = value.lower()
+ if value in [True, 1, "true", "1", "yes"]:
+ return True
+ elif value in [False, 0, "false", "0", "no"]:
+ return False
+ raise OSXDefaultsException("Invalid boolean value: {0}".format(repr(value)))
+ elif type == "date":
+ try:
+ return datetime.datetime.strptime(value.split("+")[0].strip(), "%Y-%m-%d %H:%M:%S")
+ except ValueError:
+ raise OSXDefaultsException(
+ "Invalid date value: {0}. Required format: yyyy-mm-dd hh:mm:ss.".format(repr(value))
+ )
+ elif type in ["int", "integer"]:
+ if not str(value).isdigit():
+ raise OSXDefaultsException("Invalid integer value: {0}".format(repr(value)))
+ return int(value)
+ elif type == "float":
+ try:
+ value = float(value)
+ except ValueError:
+ raise OSXDefaultsException("Invalid float value: {0}".format(repr(value)))
+ return value
+ elif type == "array":
+ if not isinstance(value, list):
+ raise OSXDefaultsException("Invalid value. Expected value to be an array")
+ return value
+
+ raise OSXDefaultsException('Type is not supported: {0}'.format(type))
+
+ """ Returns a normalized list of commandline arguments based on the "host" attribute """
+ def _host_args(self):
+ if self.host is None:
+ return []
+ elif self.host == 'currentHost':
+ return ['-currentHost']
+ else:
+ return ['-host', self.host]
+
+ """ Returns a list containing the "defaults" executable and any common base arguments """
+ def _base_command(self):
+ return [self.executable] + self._host_args()
+
+ """ Converts array output from defaults to a list """
+ @staticmethod
+ def _convert_defaults_str_to_list(value):
+
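+ # Sketch of the transformation (sample output is hypothetical):
+ # "(\n    en,\n    nl\n)" -> ['en', 'nl']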
+ # Split output of defaults. Every line contains a value
+ value = value.splitlines()
+
+ # Remove first and last item, those are not actual values
+ value.pop(0)
+ value.pop(-1)
+
+ # Remove extra spaces and comma (,) at the end of values
+ value = [re.sub(',$', '', x.strip(' ')) for x in value]
+
+ return value
+ # /tools -------------------------------------------------------------- }}}
+
+ # commands ------------------------------------------------------------ {{{
+ """ Reads value of this domain & key from defaults """
+ def read(self):
+ # First try to find out the type
+ rc, out, err = self.module.run_command(self._base_command() + ["read-type", self.domain, self.key])
+
+ # If RC is 1, the key does not exist
+ if rc == 1:
+ return None
+
+ # Any other non-zero RC means the read-type call itself failed
+ if rc != 0:
+ raise OSXDefaultsException("An error occurred while reading key type from defaults: " + out)
+
+ # OK, let's parse the type from the output
+ type = out.strip().replace('Type is ', '')
+
+ # Now get the current value
+ rc, out, err = self.module.run_command(self._base_command() + ["read", self.domain, self.key])
+
+ # Strip output
+ out = out.strip()
+
+ # A non-zero RC at this point is unexpected
+ if rc != 0:
+ raise OSXDefaultsException("An error occurred while reading key value from defaults: " + out)
+
+ # Convert string to list when type is array
+ if type == "array":
+ out = self._convert_defaults_str_to_list(out)
+
+ # Store the current_value
+ self.current_value = self._convert_type(type, out)
+
+ """ Writes value to this domain & key to defaults """
+ def write(self):
+
+ # We need to convert some values so the defaults commandline understands it
+ if type(self.value) is bool:
+ if self.value:
+ value = "TRUE"
+ else:
+ value = "FALSE"
+ elif type(self.value) is int or type(self.value) is float:
+ value = str(self.value)
+ elif self.array_add and self.current_value is not None:
+ value = list(set(self.value) - set(self.current_value))
+ elif isinstance(self.value, datetime.datetime):
+ value = self.value.strftime('%Y-%m-%d %H:%M:%S')
+ else:
+ value = self.value
+
+ # When the type is array and array_add is enabled, morph the type :)
+ if self.type == "array" and self.array_add:
+ self.type = "array-add"
+
+ # All values should be a list, for easy passing it to the command
+ if not isinstance(value, list):
+ value = [value]
+
+ rc, out, err = self.module.run_command(self._base_command() + ['write', self.domain, self.key, '-' + self.type] + value)
+
+ if rc != 0:
+ raise OSXDefaultsException('An error occurred while writing value to defaults: ' + out)
+
+ """ Deletes defaults key from domain """
+ def delete(self):
+ rc, out, err = self.module.run_command(self._base_command() + ['delete', self.domain, self.key])
+ if rc != 0:
+ raise OSXDefaultsException("An error occurred while deleting key from defaults: " + out)
+
+ # /commands ----------------------------------------------------------- }}}
+
+ # run ----------------------------------------------------------------- {{{
+ """ Does the magic! :) """
+ def run(self):
+
+ # Get the current value from defaults
+ self.read()
+
+ # Handle absent state
+ if self.state == "absent":
+ if self.current_value is None:
+ return False
+ if self.module.check_mode:
+ return True
+ self.delete()
+ return True
+
+ # There is a type mismatch! Given type does not match the type in defaults
+ if self.current_value is not None and type(self.current_value) is not type(self.value):
+ raise OSXDefaultsException("Type mismatch. Type in defaults: " + type(self.current_value).__name__)
+
+ # Current value matches the given value. Nothing needs to be done. Arrays need extra care
+ if self.type == "array" and self.current_value is not None and not self.array_add and \
+ set(self.current_value) == set(self.value):
+ return False
+ elif self.type == "array" and self.current_value is not None and self.array_add and \
+ len(list(set(self.value) - set(self.current_value))) == 0:
+ return False
+ elif self.current_value == self.value:
+ return False
+
+ if self.module.check_mode:
+ return True
+
+ # Change/Create/Set given key/value for domain in defaults
+ self.write()
+ return True
+
+ # /run ---------------------------------------------------------------- }}}
+
+# /class MacDefaults ------------------------------------------------------ }}}
+
+
+# main -------------------------------------------------------------------- {{{
+def main():
+ module = AnsibleModule(
+ argument_spec=dict(
+ domain=dict(
+ default="NSGlobalDomain",
+ required=False,
+ ),
+ host=dict(
+ default=None,
+ required=False,
+ ),
+ key=dict(
+ default=None,
+ ),
+ type=dict(
+ default="string",
+ required=False,
+ choices=[
+ "array",
+ "bool",
+ "boolean",
+ "date",
+ "float",
+ "int",
+ "integer",
+ "string",
+ ],
+ ),
+ array_add=dict(
+ default=False,
+ required=False,
+ type='bool',
+ ),
+ value=dict(
+ default=None,
+ required=False,
+ ),
+ state=dict(
+ default="present",
+ required=False,
+ choices=[
+ "absent", "present"
+ ],
+ ),
+ path=dict(
+ default="/usr/bin:/usr/local/bin",
+ required=False,
+ )
+ ),
+ supports_check_mode=True,
+ )
+
+ domain = module.params['domain']
+ host = module.params['host']
+ key = module.params['key']
+ type = module.params['type']
+ array_add = module.params['array_add']
+ value = module.params['value']
+ state = module.params['state']
+ path = module.params['path']
+
+ try:
+ defaults = OSXDefaults(module=module, domain=domain, host=host, key=key, type=type,
+ array_add=array_add, value=value, state=state, path=path)
+ changed = defaults.run()
+ module.exit_json(changed=changed)
+ except OSXDefaultsException:
+ e = get_exception()
+ module.fail_json(msg=e.message)
+
+# /main ------------------------------------------------------------------- }}}
+
+main()
diff --git a/lib/ansible/modules/extras/system/pam_limits.py b/lib/ansible/modules/extras/system/pam_limits.py
new file mode 100644
index 0000000000..8e6bdbe969
--- /dev/null
+++ b/lib/ansible/modules/extras/system/pam_limits.py
@@ -0,0 +1,270 @@
+#!/usr/bin/python
+# -*- coding: utf-8 -*-
+
+# (c) 2014, Sebastien Rohaut <sebastien.rohaut@gmail.com>
+#
+# This file is part of Ansible
+#
+# Ansible is free software: you can redistribute it and/or modify
+# it under the terms of the GNU General Public License as published by
+# the Free Software Foundation, either version 3 of the License, or
+# (at your option) any later version.
+#
+# Ansible is distributed in the hope that it will be useful,
+# but WITHOUT ANY WARRANTY; without even the implied warranty of
+# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+# GNU General Public License for more details.
+#
+# You should have received a copy of the GNU General Public License
+# along with Ansible. If not, see <http://www.gnu.org/licenses/>.
+
+import os
+import os.path
+import shutil
+import re
+import tempfile
+
+DOCUMENTATION = '''
+---
+module: pam_limits
+version_added: "2.0"
+author:
+ - "Sebastien Rohaut (@usawa)"
+short_description: Modify Linux PAM limits
+description:
+ - The M(pam_limits) module modifies PAM limits; by default it operates on /etc/security/limits.conf.
+ For the full documentation, see limits.conf(5).
+options:
+ domain:
+ description:
+ - A username, @groupname, wildcard, uid/gid range.
+ required: true
+ limit_type:
+ description:
+ - Limit type, see C(man limits) for an explanation
+ required: true
+ choices: [ "hard", "soft", "-" ]
+ limit_item:
+ description:
+ - The limit to be set
+ required: true
+ choices: [ "core", "data", "fsize", "memlock", "nofile", "rss", "stack", "cpu", "nproc", "as", "maxlogins", "maxsyslogins", "priority", "locks", "sigpending", "msgqueue", "nice", "rtprio", "chroot" ]
+ value:
+ description:
+ - The value of the limit.
+ required: true
+ backup:
+ description:
+ - Create a backup file including the timestamp information so you can get
+ the original file back if you somehow clobbered it incorrectly.
+ required: false
+ choices: [ "yes", "no" ]
+ default: "no"
+ use_min:
+ description:
+ - If set to C(yes), the minimal value will be used or conserved.
+ If the specified value is lower than the value in the file, the file content is replaced with the new value;
+ otherwise the content is not modified.
+ required: false
+ choices: [ "yes", "no" ]
+ default: "no"
+ use_max:
+ description:
+ - If set to C(yes), the maximal value will be used or conserved.
+ If the specified value is higher than the value in the file, the file content is replaced with the new value;
+ otherwise the content is not modified.
+ required: false
+ choices: [ "yes", "no" ]
+ default: "no"
+ dest:
+ description:
+ - Modify the limits.conf path.
+ required: false
+ default: "/etc/security/limits.conf"
+ comment:
+ description:
+ - Comment associated with the limit.
+ required: false
+ default: ''
+'''
+
+EXAMPLES = '''
+# Add or modify nofile soft limit for the user joe
+- pam_limits: domain=joe limit_type=soft limit_item=nofile value=64000
+
+# Add or modify fsize hard limit for the user smith. Keep or set the maximal value.
+- pam_limits: domain=smith limit_type=hard limit_item=fsize value=1000000 use_max=yes
+
+# Add or modify memlock, both soft and hard, limit for the user james with a comment.
+- pam_limits: domain=james limit_type=- limit_item=memlock value=unlimited comment="unlimited memory lock for james"
+'''
+
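+# A limits.conf entry has four whitespace-separated fields:
+#   <domain> <type> <item> <value>
+# e.g. "joe soft nofile 64000" (hypothetical); main() below rewrites the
+# matching line in place and appends a new entry when no match is found.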
+def main():
+
+ pam_items = [ 'core', 'data', 'fsize', 'memlock', 'nofile', 'rss', 'stack', 'cpu', 'nproc', 'as', 'maxlogins', 'maxsyslogins', 'priority', 'locks', 'sigpending', 'msgqueue', 'nice', 'rtprio', 'chroot' ]
+
+ pam_types = [ 'soft', 'hard', '-' ]
+
+ limits_conf = '/etc/security/limits.conf'
+
+ module = AnsibleModule(
+ # not checking because of daisy chain to file module
+ argument_spec = dict(
+ domain = dict(required=True, type='str'),
+ limit_type = dict(required=True, type='str', choices=pam_types),
+ limit_item = dict(required=True, type='str', choices=pam_items),
+ value = dict(required=True, type='str'),
+ use_max = dict(default=False, type='bool'),
+ use_min = dict(default=False, type='bool'),
+ backup = dict(default=False, type='bool'),
+ dest = dict(default=limits_conf, type='str'),
+ comment = dict(required=False, default='', type='str')
+ )
+ )
+
+ domain = module.params['domain']
+ limit_type = module.params['limit_type']
+ limit_item = module.params['limit_item']
+ value = module.params['value']
+ use_max = module.params['use_max']
+ use_min = module.params['use_min']
+ backup = module.params['backup']
+ limits_conf = module.params['dest']
+ new_comment = module.params['comment']
+
+ changed = False
+
+ if os.path.isfile(limits_conf):
+ if not os.access(limits_conf, os.W_OK):
+ module.fail_json(msg="%s is not writable. Use sudo." % limits_conf)
+ else:
+ module.fail_json(msg="%s is not visible (check presence, access rights, use sudo)" % limits_conf)
+
+ if use_max and use_min:
+ module.fail_json(msg="Cannot use use_min and use_max at the same time.")
+
+ if not (value in ['unlimited', 'infinity', '-1'] or value.isdigit()):
+ module.fail_json(msg="Argument 'value' can be one of 'unlimited', 'infinity', '-1' or a positive number. Refer to the manual pages for more details.")
+
+ # Backup
+ if backup:
+ backup_file = module.backup_local(limits_conf)
+
+ space_pattern = re.compile(r'\s+')
+
+ message = ''
+ f = open(limits_conf, 'r')
+ # Write the updated configuration to a tempfile first
+ nf = tempfile.NamedTemporaryFile(delete=False)
+
+ found = False
+ new_value = value
+
+ for line in f:
+
+ if line.startswith('#'):
+ nf.write(line)
+ continue
+
+ newline = re.sub(space_pattern, ' ', line).strip()
+ if not newline:
+ nf.write(line)
+ continue
+
+ # Remove comment in line
+ newline = newline.split('#', 1)[0]
+ try:
+ old_comment = line.split('#', 1)[1]
+ except IndexError:
+ old_comment = ''
+
+ newline = newline.rstrip()
+
+ if not new_comment:
+ new_comment = old_comment
+
+ if new_comment:
+ new_comment = "\t#"+new_comment
+
+ line_fields = newline.split(' ')
+
+ if len(line_fields) != 4:
+ nf.write(line)
+ continue
+
+ line_domain = line_fields[0]
+ line_type = line_fields[1]
+ line_item = line_fields[2]
+ actual_value = line_fields[3]
+
+ if not (actual_value in ['unlimited', 'infinity', '-1'] or actual_value.isdigit()):
+ module.fail_json(msg="Invalid configuration of '%s'. Current value of %s is unsupported." % (limits_conf, line_item))
+
+ # Found the line
+ if line_domain == domain and line_type == limit_type and line_item == limit_item:
+ found = True
+ if value == actual_value:
+ message = line
+ nf.write(line)
+ continue
+
+ actual_value_unlimited = actual_value in ['unlimited', 'infinity', '-1']
+ value_unlimited = value in ['unlimited', 'infinity', '-1']
+
+ if use_max:
+ if value.isdigit() and actual_value.isdigit():
+ new_value = str(max(int(value), int(actual_value)))
+ elif actual_value_unlimited:
+ new_value = actual_value
+ else:
+ new_value = value
+
+ if use_min:
+ if value.isdigit() and actual_value.isdigit():
+ new_value = str(min(int(value), int(actual_value)))
+ elif value_unlimited:
+ new_value = actual_value
+ else:
+ new_value = value
+
+ # Change line only if value has changed
+ if new_value != actual_value:
+ changed = True
+ new_limit = domain + "\t" + limit_type + "\t" + limit_item + "\t" + new_value + new_comment + "\n"
+ message = new_limit
+ nf.write(new_limit)
+ else:
+ message = line
+ nf.write(line)
+ else:
+ nf.write(line)
+
+ if not found:
+ changed = True
+ new_limit = domain + "\t" + limit_type + "\t" + limit_item + "\t" + new_value + new_comment + "\n"
+ message = new_limit
+ nf.write(new_limit)
+
+ f.close()
+ nf.flush()
+
+ # Atomically move the tempfile over the original limits file
+ module.atomic_move(nf.name, limits_conf)
+
+ try:
+ nf.close()
+ except:
+ pass
+
+ res_args = dict(
+ changed = changed, msg = message
+ )
+
+ if backup:
+ res_args['backup_file'] = backup_file
+
+ module.exit_json(**res_args)
+
+
+# import module snippets
+from ansible.module_utils.basic import *
+main()
diff --git a/lib/ansible/modules/extras/system/puppet.py b/lib/ansible/modules/extras/system/puppet.py
new file mode 100644
index 0000000000..97c1a3eb38
--- /dev/null
+++ b/lib/ansible/modules/extras/system/puppet.py
@@ -0,0 +1,281 @@
+#!/usr/bin/python
+
+# Copyright (c) 2015 Hewlett-Packard Development Company, L.P.
+#
+# This module is free software: you can redistribute it and/or modify
+# it under the terms of the GNU General Public License as published by
+# the Free Software Foundation, either version 3 of the License, or
+# (at your option) any later version.
+#
+# This software is distributed in the hope that it will be useful,
+# but WITHOUT ANY WARRANTY; without even the implied warranty of
+# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+# GNU General Public License for more details.
+#
+# You should have received a copy of the GNU General Public License
+# along with this software. If not, see <http://www.gnu.org/licenses/>.
+
+import os
+import pipes
+import stat
+
+try:
+ import json
+except ImportError:
+ try:
+ import simplejson as json
+ except ImportError:
+ # Let snippet from module_utils/basic.py return a proper error in this case
+ pass
+
+
+DOCUMENTATION = '''
+---
+module: puppet
+short_description: Runs puppet
+description:
+ - Runs I(puppet) agent or apply in a reliable manner
+version_added: "2.0"
+options:
+ timeout:
+ description:
+ - How long to wait for I(puppet) to finish.
+ required: false
+ default: 30m
+ puppetmaster:
+ description:
+ - The hostname of the puppetmaster to contact.
+ required: false
+ default: None
+ manifest:
+ description:
+ - Path to the manifest file to run puppet apply on.
+ required: false
+ default: None
+ facts:
+ description:
+ - A dict of values to pass in as persistent external facter facts
+ required: false
+ default: None
+ facter_basename:
+ description:
+ - Basename of the facter output file
+ required: false
+ default: ansible
+ environment:
+ description:
+ - Puppet environment to be used.
+ required: false
+ default: None
+ logdest:
+ description:
+ - Where the puppet logs should go, if puppet apply is being used
+ required: false
+ default: stdout
+ choices: [ 'stdout', 'syslog' ]
+ version_added: "2.1"
+ certname:
+ description:
+ - The name to use when handling certificates.
+ required: false
+ default: None
+ version_added: "2.1"
+ tags:
+ description:
+ - A comma-separated list of puppet tags to be used.
+ required: false
+ default: None
+ version_added: "2.1"
+ execute:
+ description:
+ - Execute a specific piece of Puppet code. It has no effect with
+ a puppetmaster.
+ required: false
+ default: None
+ version_added: "2.1"
+requirements: [ puppet ]
+author: "Monty Taylor (@emonty)"
+'''
+
+EXAMPLES = '''
+# Run puppet agent and fail if anything goes wrong
+- puppet
+
+# Run puppet and timeout in 5 minutes
+- puppet: timeout=5m
+
+# Run puppet using a different environment
+- puppet: environment=testing
+
+# Run puppet using a specific certname
+- puppet: certname=agent01.example.com
+
+# Run puppet using a specific piece of Puppet code. Has no effect with a
+# puppetmaster.
+- puppet: execute='include ::mymodule'
+
+# Run puppet using specific tags
+- puppet: tags=update,nginx
+'''
+
+
+def _get_facter_dir():
+ if os.getuid() == 0:
+ return '/etc/facter/facts.d'
+ else:
+ return os.path.expanduser('~/.facter/facts.d')
+
+
+def _write_structured_data(basedir, basename, data):
+ if not os.path.exists(basedir):
+ os.makedirs(basedir)
+ file_path = os.path.join(basedir, "{0}.json".format(basename))
+ # This is more complex than you might normally expect because we want to
+ # open the file with only u+rw set. Also, we use the stat constants
+ # because ansible still supports python 2.4 and the octal syntax changed
+ out_file = os.fdopen(
+ os.open(
+ file_path, os.O_CREAT | os.O_WRONLY,
+ stat.S_IRUSR | stat.S_IWUSR), 'w')
+ out_file.write(json.dumps(data).encode('utf8'))
+ out_file.close()
+
+
+def main():
+ module = AnsibleModule(
+ argument_spec=dict(
+ timeout=dict(default="30m"),
+ puppetmaster=dict(required=False, default=None),
+ manifest=dict(required=False, default=None),
+ logdest=dict(
+ required=False, default='stdout',
+ choices=['stdout', 'syslog']),
+ show_diff=dict(
+ # internal code to work with --diff, do not use
+ default=False, aliases=['show-diff'], type='bool'),
+ facts=dict(default=None),
+ facter_basename=dict(default='ansible'),
+ environment=dict(required=False, default=None),
+ certname=dict(required=False, default=None),
+ tags=dict(required=False, default=None, type='list'),
+ execute=dict(required=False, default=None),
+ ),
+ supports_check_mode=True,
+        mutually_exclusive=[
+            ('puppetmaster', 'manifest', 'execute'),
+        ],
+ )
+ p = module.params
+
+ global PUPPET_CMD
+ PUPPET_CMD = module.get_bin_path("puppet", False, ['/opt/puppetlabs/bin'])
+
+ if not PUPPET_CMD:
+ module.fail_json(
+ msg="Could not find puppet. Please ensure it is installed.")
+
+ global TIMEOUT_CMD
+ TIMEOUT_CMD = module.get_bin_path("timeout", False)
+
+ if p['manifest']:
+ if not os.path.exists(p['manifest']):
+ module.fail_json(
+ msg="Manifest file %(manifest)s not found." % dict(
+ manifest=p['manifest']))
+
+ # Check if puppet is disabled here
+ if not p['manifest']:
+ rc, stdout, stderr = module.run_command(
+ PUPPET_CMD + " config print agent_disabled_lockfile")
+ if os.path.exists(stdout.strip()):
+ module.fail_json(
+ msg="Puppet agent is administratively disabled.",
+ disabled=True)
+ elif rc != 0:
+ module.fail_json(
+ msg="Puppet agent state could not be determined.")
+
+ if module.params['facts'] and not module.check_mode:
+ _write_structured_data(
+ _get_facter_dir(),
+ module.params['facter_basename'],
+ module.params['facts'])
+
+ if TIMEOUT_CMD:
+ base_cmd = "%(timeout_cmd)s -s 9 %(timeout)s %(puppet_cmd)s" % dict(
+ timeout_cmd=TIMEOUT_CMD,
+ timeout=pipes.quote(p['timeout']),
+ puppet_cmd=PUPPET_CMD)
+ else:
+ base_cmd = PUPPET_CMD
+
+ if not p['manifest']:
+ cmd = ("%(base_cmd)s agent --onetime"
+ " --ignorecache --no-daemonize --no-usecacheonfailure --no-splay"
+ " --detailed-exitcodes --verbose --color 0") % dict(
+ base_cmd=base_cmd,
+ )
+ if p['puppetmaster']:
+ cmd += " --server %s" % pipes.quote(p['puppetmaster'])
+ if p['show_diff']:
+ cmd += " --show_diff"
+ if p['environment']:
+ cmd += " --environment '%s'" % p['environment']
+ if p['tags']:
+ cmd += " --tags '%s'" % ','.join(p['tags'])
+ if p['certname']:
+ cmd += " --certname='%s'" % p['certname']
+ if module.check_mode:
+ cmd += " --noop"
+ else:
+ cmd += " --no-noop"
+ else:
+ cmd = "%s apply --detailed-exitcodes " % base_cmd
+ if p['logdest'] == 'syslog':
+ cmd += "--logdest syslog "
+ if p['environment']:
+ cmd += "--environment '%s' " % p['environment']
+ if p['certname']:
+ cmd += " --certname='%s'" % p['certname']
+ if p['execute']:
+ cmd += " --execute '%s'" % p['execute']
+ if p['tags']:
+ cmd += " --tags '%s'" % ','.join(p['tags'])
+ if module.check_mode:
+ cmd += "--noop "
+ else:
+ cmd += "--no-noop "
+ cmd += pipes.quote(p['manifest'])
+ rc, stdout, stderr = module.run_command(cmd)
+
+ if rc == 0:
+ # success
+ module.exit_json(rc=rc, changed=False, stdout=stdout, stderr=stderr)
+ elif rc == 1:
+ # rc==1 could be because it's disabled
+ # rc==1 could also mean there was a compilation failure
+ disabled = "administratively disabled" in stdout
+ if disabled:
+ msg = "puppet is disabled"
+ else:
+ msg = "puppet did not run"
+ module.exit_json(
+ rc=rc, disabled=disabled, msg=msg,
+ error=True, stdout=stdout, stderr=stderr)
+ elif rc == 2:
+ # success with changes
+ module.exit_json(rc=0, changed=True, stdout=stdout, stderr=stderr)
+ elif rc == 124:
+ # timeout
+ module.exit_json(
+ rc=rc, msg="%s timed out" % cmd, stdout=stdout, stderr=stderr)
+ else:
+ # failure
+ module.fail_json(
+ rc=rc, msg="%s failed with return code: %d" % (cmd, rc),
+ stdout=stdout, stderr=stderr)
+
+# import module snippets
+from ansible.module_utils.basic import *
+
+main()
diff --git a/lib/ansible/modules/extras/system/sefcontext.py b/lib/ansible/modules/extras/system/sefcontext.py
new file mode 100644
index 0000000000..6977ec622e
--- /dev/null
+++ b/lib/ansible/modules/extras/system/sefcontext.py
@@ -0,0 +1,246 @@
+#!/usr/bin/python
+
+# (c) 2016, Dag Wieers <dag@wieers.com>
+#
+# This file is part of Ansible
+#
+# Ansible is free software: you can redistribute it and/or modify
+# it under the terms of the GNU General Public License as published by
+# the Free Software Foundation, either version 3 of the License, or
+# (at your option) any later version.
+#
+# Ansible is distributed in the hope that it will be useful,
+# but WITHOUT ANY WARRANTY; without even the implied warranty of
+# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+# GNU General Public License for more details.
+#
+# You should have received a copy of the GNU General Public License
+# along with Ansible. If not, see <http://www.gnu.org/licenses/>.
+
+DOCUMENTATION = '''
+---
+module: sefcontext
+short_description: Manages SELinux file context mapping definitions
+description:
+ - Manages SELinux file context mapping definitions
+ - Similar to the C(semanage fcontext) command
+version_added: "2.2"
+options:
+ target:
+ description:
+ - Target path (expression).
+ required: true
+ default: null
+ aliases: ['path']
+ ftype:
+ description:
+ - File type.
+ required: false
+ default: a
+ setype:
+ description:
+ - SELinux type for the specified target.
+ required: true
+ default: null
+ seuser:
+ description:
+ - SELinux user for the specified target.
+ required: false
+ default: null
+ selevel:
+ description:
+ - SELinux range for the specified target.
+ required: false
+ default: null
+ aliases: ['serange']
+ state:
+ description:
+      - Whether the SELinux file context mapping must be C(present) or C(absent).
+ required: false
+ default: present
+ choices: [ 'present', 'absent' ]
+ reload:
+ description:
+ - Reload SELinux policy after commit.
+ required: false
+ default: yes
+notes:
+ - The changes are persistent across reboots
+requirements: [ 'libselinux-python', 'policycoreutils-python' ]
+author: Dag Wieers
+'''
+
+EXAMPLES = '''
+# Allow apache to modify files in /srv/git_repos
+- sefcontext: target='/srv/git_repos(/.*)?' setype=httpd_git_rw_content_t state=present
+'''
+
+RETURN = '''
+# Default return values
+'''
+
+from ansible.module_utils.basic import AnsibleModule
+from ansible.module_utils.pycompat24 import get_exception
+
+try:
+    import selinux
+    HAVE_SELINUX = True
+except ImportError:
+    HAVE_SELINUX = False
+
+try:
+    import seobject
+    HAVE_SEOBJECT = True
+except ImportError:
+    HAVE_SEOBJECT = False
+
+# Map the ftype option values to the file type strings used internally by seobject
+option_to_file_type_str = {
+ 'a': 'all files',
+ 'b': 'block device',
+ 'c': 'character device',
+ 'd': 'directory',
+ 'f': 'regular file',
+ 'l': 'symbolic link',
+ 's': 'socket file',
+ 'p': 'named pipe',
+}
+
+def semanage_fcontext_exists(sefcontext, target, ftype):
+ ''' Get the SELinux file context mapping definition from policy. Return None if it does not exist. '''
+ record = (target, ftype)
+ records = sefcontext.get_all()
+ try:
+ return records[record]
+ except KeyError:
+ return None
+
+def semanage_fcontext_modify(module, result, target, ftype, setype, do_reload, serange, seuser, sestore=''):
+ ''' Add or modify SELinux file context mapping definition to the policy. '''
+
+ changed = False
+ prepared_diff = ''
+
+ try:
+ sefcontext = seobject.fcontextRecords(sestore)
+ sefcontext.set_reload(do_reload)
+ exists = semanage_fcontext_exists(sefcontext, target, ftype)
+ if exists:
+ # Modify existing entry
+ orig_seuser, orig_serole, orig_setype, orig_serange = exists
+
+ if seuser is None:
+ seuser = orig_seuser
+ if serange is None:
+ serange = orig_serange
+
+ if setype != orig_setype or seuser != orig_seuser or serange != orig_serange:
+ if not module.check_mode:
+ sefcontext.modify(target, setype, ftype, serange, seuser)
+ changed = True
+
+ if module._diff:
+ prepared_diff += '# Change to semanage file context mappings\n'
+ prepared_diff += '-%s %s %s:%s:%s:%s\n' % (target, ftype, orig_seuser, orig_serole, orig_setype, orig_serange)
+ prepared_diff += '+%s %s %s:%s:%s:%s\n' % (target, ftype, seuser, orig_serole, setype, serange)
+ else:
+ # Add missing entry
+ if seuser is None:
+ seuser = 'system_u'
+ if serange is None:
+ serange = 's0'
+
+ if not module.check_mode:
+ sefcontext.add(target, setype, ftype, serange, seuser)
+ changed = True
+
+ if module._diff:
+ prepared_diff += '# Addition to semanage file context mappings\n'
+ prepared_diff += '+%s %s %s:%s:%s:%s\n' % (target, ftype, seuser, 'object_r', setype, serange)
+
+ except Exception:
+ e = get_exception()
+ module.fail_json(msg="%s: %s\n" % (e.__class__.__name__, str(e)))
+
+ if module._diff and prepared_diff:
+ result['diff'] = dict(prepared=prepared_diff)
+
+ module.exit_json(changed=changed, seuser=seuser, serange=serange, **result)
+
+def semanage_fcontext_delete(module, result, target, ftype, do_reload, sestore=''):
+ ''' Delete SELinux file context mapping definition from the policy. '''
+
+ changed = False
+ prepared_diff = ''
+
+ try:
+ sefcontext = seobject.fcontextRecords(sestore)
+ sefcontext.set_reload(do_reload)
+ exists = semanage_fcontext_exists(sefcontext, target, ftype)
+ if exists:
+ # Remove existing entry
+ orig_seuser, orig_serole, orig_setype, orig_serange = exists
+
+ if not module.check_mode:
+ sefcontext.delete(target, ftype)
+ changed = True
+
+ if module._diff:
+            prepared_diff += '# Deletion from semanage file context mappings\n'
+ prepared_diff += '-%s %s %s:%s:%s:%s\n' % (target, ftype, exists[0], exists[1], exists[2], exists[3])
+
+ except Exception:
+ e = get_exception()
+ module.fail_json(msg="%s: %s\n" % (e.__class__.__name__, str(e)))
+
+ if module._diff and prepared_diff:
+ result['diff'] = dict(prepared=prepared_diff)
+
+ module.exit_json(changed=changed, **result)
+
+
+def main():
+ module = AnsibleModule(
+ argument_spec = dict(
+ target = dict(required=True, aliases=['path']),
+        ftype = dict(required=False, choices=list(option_to_file_type_str.keys()), default='a'),
+ setype = dict(required=True),
+ seuser = dict(required=False, default=None),
+ selevel = dict(required=False, default=None, aliases=['serange']),
+ state = dict(required=False, choices=['present', 'absent'], default='present'),
+ reload = dict(required=False, type='bool', default='yes'),
+ ),
+ supports_check_mode = True,
+ )
+ if not HAVE_SELINUX:
+ module.fail_json(msg="This module requires libselinux-python")
+
+ if not HAVE_SEOBJECT:
+ module.fail_json(msg="This module requires policycoreutils-python")
+
+ if not selinux.is_selinux_enabled():
+ module.fail_json(msg="SELinux is disabled on this host.")
+
+ target = module.params['target']
+ ftype = module.params['ftype']
+ setype = module.params['setype']
+ seuser = module.params['seuser']
+ serange = module.params['selevel']
+ state = module.params['state']
+ do_reload = module.params['reload']
+
+ result = dict(target=target, ftype=ftype, setype=setype, state=state)
+
+ # Convert file types to (internally used) strings
+ ftype = option_to_file_type_str[ftype]
+
+ if state == 'present':
+ semanage_fcontext_modify(module, result, target, ftype, setype, do_reload, serange, seuser)
+ elif state == 'absent':
+ semanage_fcontext_delete(module, result, target, ftype, do_reload)
+ else:
+ module.fail_json(msg='Invalid value of argument "state": {0}'.format(state))
+
+
+if __name__ == '__main__':
+ main() \ No newline at end of file
diff --git a/lib/ansible/modules/extras/system/selinux_permissive.py b/lib/ansible/modules/extras/system/selinux_permissive.py
new file mode 100644
index 0000000000..ced9716cc0
--- /dev/null
+++ b/lib/ansible/modules/extras/system/selinux_permissive.py
@@ -0,0 +1,133 @@
+#!/usr/bin/python
+# -*- coding: utf-8 -*-
+
+# (c) 2015, Michael Scherer <misc@zarb.org>
+# inspired by code of github.com/dandiker/
+#
+# This file is part of Ansible
+#
+# Ansible is free software: you can redistribute it and/or modify
+# it under the terms of the GNU General Public License as published by
+# the Free Software Foundation, either version 3 of the License, or
+# (at your option) any later version.
+#
+# Ansible is distributed in the hope that it will be useful,
+# but WITHOUT ANY WARRANTY; without even the implied warranty of
+# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+# GNU General Public License for more details.
+#
+# You should have received a copy of the GNU General Public License
+# along with Ansible. If not, see <http://www.gnu.org/licenses/>.
+
+DOCUMENTATION = '''
+---
+module: selinux_permissive
+short_description: Change permissive domain in SELinux policy
+description:
+  - Add and remove domains from the list of permissive domains.
+version_added: "2.0"
+options:
+ domain:
+ description:
+ - "the domain that will be added or removed from the list of permissive domains"
+ required: true
+ permissive:
+ description:
+ - "indicate if the domain should or should not be set as permissive"
+ required: true
+ choices: [ 'True', 'False' ]
+ no_reload:
+ description:
+ - "automatically reload the policy after a change"
+ - "default is set to 'false' as that's what most people would want after changing one domain"
+ - "Note that this doesn't work on older version of the library (example EL 6), the module will silently ignore it in this case"
+ required: false
+ default: False
+ choices: [ 'True', 'False' ]
+ store:
+ description:
+ - "name of the SELinux policy store to use"
+ required: false
+ default: null
+notes:
+ - Requires a version of SELinux recent enough ( ie EL 6 or newer )
+requirements: [ policycoreutils-python ]
+author: Michael Scherer <misc@zarb.org>
+'''
+
+EXAMPLES = '''
+- selinux_permissive: name=httpd_t permissive=true
+'''
+
+HAVE_SEOBJECT = False
+try:
+ import seobject
+ HAVE_SEOBJECT = True
+except ImportError:
+ pass
+from ansible.module_utils.basic import *
+from ansible.module_utils.pycompat24 import get_exception
+
+
+def main():
+ module = AnsibleModule(
+ argument_spec=dict(
+ domain=dict(aliases=['name'], required=True),
+ store=dict(required=False, default=''),
+ permissive=dict(type='bool', required=True),
+ no_reload=dict(type='bool', required=False, default=False),
+ ),
+ supports_check_mode=True
+ )
+
+ # global vars
+ changed = False
+ store = module.params['store']
+ permissive = module.params['permissive']
+ domain = module.params['domain']
+ no_reload = module.params['no_reload']
+
+ if not HAVE_SEOBJECT:
+ module.fail_json(changed=False, msg="policycoreutils-python required for this module")
+
+ try:
+ permissive_domains = seobject.permissiveRecords(store)
+ except ValueError:
+ e = get_exception()
+ module.fail_json(domain=domain, msg=str(e))
+
+ # not supported on EL 6
+ if 'set_reload' in dir(permissive_domains):
+ permissive_domains.set_reload(not no_reload)
+
+ try:
+ all_domains = permissive_domains.get_all()
+ except ValueError:
+ e = get_exception()
+ module.fail_json(domain=domain, msg=str(e))
+
+ if permissive:
+ if domain not in all_domains:
+ if not module.check_mode:
+ try:
+ permissive_domains.add(domain)
+ except ValueError:
+ e = get_exception()
+ module.fail_json(domain=domain, msg=str(e))
+ changed = True
+ else:
+ if domain in all_domains:
+ if not module.check_mode:
+ try:
+ permissive_domains.delete(domain)
+ except ValueError:
+ e = get_exception()
+ module.fail_json(domain=domain, msg=str(e))
+ changed = True
+
+ module.exit_json(changed=changed, store=store,
+ permissive=permissive, domain=domain)
+
+
+
+main()
diff --git a/lib/ansible/modules/extras/system/seport.py b/lib/ansible/modules/extras/system/seport.py
new file mode 100644
index 0000000000..242661a143
--- /dev/null
+++ b/lib/ansible/modules/extras/system/seport.py
@@ -0,0 +1,305 @@
+#!/usr/bin/python
+
+# (c) 2014, Dan Keder <dan.keder@gmail.com>
+#
+# This file is part of Ansible
+#
+# Ansible is free software: you can redistribute it and/or modify
+# it under the terms of the GNU General Public License as published by
+# the Free Software Foundation, either version 3 of the License, or
+# (at your option) any later version.
+#
+# Ansible is distributed in the hope that it will be useful,
+# but WITHOUT ANY WARRANTY; without even the implied warranty of
+# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+# GNU General Public License for more details.
+#
+# You should have received a copy of the GNU General Public License
+# along with Ansible. If not, see <http://www.gnu.org/licenses/>.
+
+DOCUMENTATION = '''
+---
+module: seport
+short_description: Manages SELinux network port type definitions
+description:
+ - Manages SELinux network port type definitions.
+version_added: "2.0"
+options:
+ ports:
+ description:
+      - Ports or port ranges, separated by commas.
+ required: true
+ default: null
+ proto:
+ description:
+ - Protocol for the specified port.
+ required: true
+ default: null
+ choices: [ 'tcp', 'udp' ]
+ setype:
+ description:
+ - SELinux type for the specified port.
+ required: true
+ default: null
+  state:
+    description:
+      - Whether the SELinux port type definition should be C(present) or C(absent).
+    required: true
+    choices: [ 'present', 'absent' ]
+ reload:
+ description:
+ - Reload SELinux policy after commit.
+ required: false
+ default: yes
+notes:
+ - The changes are persistent across reboots
+    - Not tested on any Debian-based system
+requirements: [ 'libselinux-python', 'policycoreutils-python' ]
+author: Dan Keder
+'''
+
+EXAMPLES = '''
+# Allow Apache to listen on tcp port 8888
+- seport: ports=8888 proto=tcp setype=http_port_t state=present
+# Allow sshd to listen on tcp port 8991
+- seport: ports=8991 proto=tcp setype=ssh_port_t state=present
+# Allow memcached to listen on tcp ports 10000-10100 and 10112
+- seport: ports=10000-10100,10112 proto=tcp setype=memcache_port_t state=present
+'''
+
+try:
+    import selinux
+    HAVE_SELINUX = True
+except ImportError:
+    HAVE_SELINUX = False
+
+try:
+    import seobject
+    HAVE_SEOBJECT = True
+except ImportError:
+    HAVE_SEOBJECT = False
+
+from ansible.module_utils.basic import *
+from ansible.module_utils.pycompat24 import get_exception
+
+
+def semanage_port_get_ports(seport, setype, proto):
+ """ Get the list of ports that have the specified type definition.
+
+ :param seport: Instance of seobject.portRecords
+
+ :type setype: str
+ :param setype: SELinux type.
+
+ :type proto: str
+ :param proto: Protocol ('tcp' or 'udp')
+
+ :rtype: list
+ :return: List of ports that have the specified SELinux type.
+ """
+ records = seport.get_all_by_type()
+ if (setype, proto) in records:
+ return records[(setype, proto)]
+ else:
+ return []
+
+
+def semanage_port_get_type(seport, port, proto):
+ """ Get the SELinux type of the specified port.
+
+ :param seport: Instance of seobject.portRecords
+
+ :type port: str
+ :param port: Port or port range (example: "8080", "8080-9090")
+
+ :type proto: str
+ :param proto: Protocol ('tcp' or 'udp')
+
+ :rtype: tuple
+ :return: Tuple containing the SELinux type and MLS/MCS level, or None if not found.
+ """
+    # A single port is treated as a range where start == end
+    ports = port.split('-', 1)
+    if len(ports) == 1:
+        ports.extend(ports)
+    key = (int(ports[0]), int(ports[1]), proto)
+
+ records = seport.get_all()
+ if key in records:
+ return records[key]
+ else:
+ return None
+
+
+def semanage_port_add(module, ports, proto, setype, do_reload, serange='s0', sestore=''):
+ """ Add SELinux port type definition to the policy.
+
+ :type module: AnsibleModule
+ :param module: Ansible module
+
+ :type ports: list
+ :param ports: List of ports and port ranges to add (e.g. ["8080", "8080-9090"])
+
+ :type proto: str
+ :param proto: Protocol ('tcp' or 'udp')
+
+ :type setype: str
+ :param setype: SELinux type
+
+ :type do_reload: bool
+ :param do_reload: Whether to reload SELinux policy after commit
+
+ :type serange: str
+ :param serange: SELinux MLS/MCS range (defaults to 's0')
+
+ :type sestore: str
+ :param sestore: SELinux store
+
+ :rtype: bool
+ :return: True if the policy was changed, otherwise False
+ """
+ try:
+ seport = seobject.portRecords(sestore)
+ seport.set_reload(do_reload)
+ change = False
+ ports_by_type = semanage_port_get_ports(seport, setype, proto)
+ for port in ports:
+ if port not in ports_by_type:
+ change = True
+ port_type = semanage_port_get_type(seport, port, proto)
+ if port_type is None and not module.check_mode:
+ seport.add(port, proto, serange, setype)
+ elif port_type is not None and not module.check_mode:
+ seport.modify(port, proto, serange, setype)
+
+    except (ValueError, IOError, KeyError, OSError, RuntimeError):
+        e = get_exception()
+        module.fail_json(msg="%s: %s\n" % (e.__class__.__name__, str(e)))
+
+ return change
+
+
+def semanage_port_del(module, ports, proto, setype, do_reload, sestore=''):
+ """ Delete SELinux port type definition from the policy.
+
+ :type module: AnsibleModule
+ :param module: Ansible module
+
+ :type ports: list
+ :param ports: List of ports and port ranges to delete (e.g. ["8080", "8080-9090"])
+
+ :type proto: str
+ :param proto: Protocol ('tcp' or 'udp')
+
+ :type setype: str
+ :param setype: SELinux type.
+
+ :type do_reload: bool
+ :param do_reload: Whether to reload SELinux policy after commit
+
+ :type sestore: str
+ :param sestore: SELinux store
+
+ :rtype: bool
+ :return: True if the policy was changed, otherwise False
+ """
+ try:
+ seport = seobject.portRecords(sestore)
+ seport.set_reload(do_reload)
+ change = False
+ ports_by_type = semanage_port_get_ports(seport, setype, proto)
+ for port in ports:
+ if port in ports_by_type:
+ change = True
+ if not module.check_mode:
+ seport.delete(port, proto)
+
+    except (ValueError, IOError, KeyError, OSError, RuntimeError):
+        e = get_exception()
+        module.fail_json(msg="%s: %s\n" % (e.__class__.__name__, str(e)))
+
+ return change
+
+
+def main():
+ module = AnsibleModule(
+ argument_spec={
+ 'ports': {
+ 'required': True,
+ },
+ 'proto': {
+ 'required': True,
+ 'choices': ['tcp', 'udp'],
+ },
+ 'setype': {
+ 'required': True,
+ },
+ 'state': {
+ 'required': True,
+ 'choices': ['present', 'absent'],
+ },
+ 'reload': {
+ 'required': False,
+ 'type': 'bool',
+ 'default': 'yes',
+ },
+ },
+ supports_check_mode=True
+ )
+ if not HAVE_SELINUX:
+ module.fail_json(msg="This module requires libselinux-python")
+
+ if not HAVE_SEOBJECT:
+ module.fail_json(msg="This module requires policycoreutils-python")
+
+ if not selinux.is_selinux_enabled():
+ module.fail_json(msg="SELinux is disabled on this host.")
+
+ ports = [x.strip() for x in str(module.params['ports']).split(',')]
+ proto = module.params['proto']
+ setype = module.params['setype']
+ state = module.params['state']
+ do_reload = module.params['reload']
+
+ result = {
+ 'ports': ports,
+ 'proto': proto,
+ 'setype': setype,
+ 'state': state,
+ }
+
+ if state == 'present':
+ result['changed'] = semanage_port_add(module, ports, proto, setype, do_reload)
+ elif state == 'absent':
+ result['changed'] = semanage_port_del(module, ports, proto, setype, do_reload)
+ else:
+ module.fail_json(msg='Invalid value of argument "state": {0}'.format(state))
+
+ module.exit_json(**result)
+
+
+main()
diff --git a/lib/ansible/modules/extras/system/solaris_zone.py b/lib/ansible/modules/extras/system/solaris_zone.py
new file mode 100644
index 0000000000..8c8d22305b
--- /dev/null
+++ b/lib/ansible/modules/extras/system/solaris_zone.py
@@ -0,0 +1,456 @@
+#!/usr/bin/python
+
+# (c) 2015, Paul Markham <pmarkham@netrefinery.com>
+#
+# This file is part of Ansible
+#
+# Ansible is free software: you can redistribute it and/or modify
+# it under the terms of the GNU General Public License as published by
+# the Free Software Foundation, either version 3 of the License, or
+# (at your option) any later version.
+#
+# Ansible is distributed in the hope that it will be useful,
+# but WITHOUT ANY WARRANTY; without even the implied warranty of
+# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+# GNU General Public License for more details.
+#
+# You should have received a copy of the GNU General Public License
+# along with Ansible. If not, see <http://www.gnu.org/licenses/>.
+
+import os
+import platform
+import tempfile
+import time
+
+DOCUMENTATION = '''
+---
+module: solaris_zone
+short_description: Manage Solaris zones
+description:
+ - Create, start, stop and delete Solaris zones. This module doesn't currently allow
+ changing of options for a zone that's already been created.
+version_added: "2.0"
+author: Paul Markham
+requirements:
+ - Solaris 10 or 11
+options:
+ state:
+ required: true
+ description:
+ - C(present), configure and install the zone.
+ - C(installed), synonym for C(present).
+ - C(running), if the zone already exists, boot it, otherwise, configure and install
+ the zone first, then boot it.
+ - C(started), synonym for C(running).
+      - C(stopped), shut down a zone.
+      - C(absent), destroy the zone.
+      - C(configured), configure the zone so that it's ready to be attached.
+      - C(attached), attach a zone, but do not boot it.
+      - C(detached), shut down and detach a zone.
+ choices: ['present', 'installed', 'started', 'running', 'stopped', 'absent', 'configured', 'attached', 'detached']
+ default: present
+ name:
+ description:
+ - Zone name.
+ required: true
+ path:
+ description:
+ - The path where the zone will be created. This is required when the zone is created, but not
+ used otherwise.
+ required: false
+ default: null
+ sparse:
+ description:
+ - Whether to create a sparse (C(true)) or whole root (C(false)) zone.
+ required: false
+ default: false
+ root_password:
+ description:
+ - The password hash for the root account. If not specified, the zone's root account
+ will not have a password.
+ required: false
+ default: null
+ config:
+ description:
+ - 'The zonecfg configuration commands for this zone. See zonecfg(1M) for the valid options
+ and syntax. Typically this is a list of options separated by semi-colons or new lines, e.g.
+ "set auto-boot=true;add net;set physical=bge0;set address=10.1.1.1;end"'
+ required: false
+ default: empty string
+ create_options:
+ description:
+ - 'Extra options to the zonecfg(1M) create command.'
+ required: false
+ default: empty string
+ install_options:
+ description:
+ - 'Extra options to the zoneadm(1M) install command. To automate Solaris 11 zone creation,
+ use this to specify the profile XML file, e.g. install_options="-c sc_profile.xml"'
+ required: false
+ default: empty string
+ attach_options:
+ description:
+ - 'Extra options to the zoneadm attach command. For example, this can be used to specify
+ whether a minimum or full update of packages is required and if any packages need to
+ be deleted. For valid values, see zoneadm(1M)'
+ required: false
+ default: empty string
+ timeout:
+ description:
+ - Timeout, in seconds, for zone to boot.
+ required: false
+ default: 600
+'''
+
+EXAMPLES = '''
+# Create and install a zone, but don't boot it
+solaris_zone: name=zone1 state=present path=/zones/zone1 sparse=true root_password="Be9oX7OSwWoU."
+ config='set autoboot=true; add net; set physical=bge0; set address=10.1.1.1; end'
+
+# Create and install a zone and boot it
+solaris_zone: name=zone1 state=running path=/zones/zone1 root_password="Be9oX7OSwWoU."
+ config='set autoboot=true; add net; set physical=bge0; set address=10.1.1.1; end'
+
+# Boot an already installed zone
+solaris_zone: name=zone1 state=running
+
+# Stop a zone
+solaris_zone: name=zone1 state=stopped
+
+# Destroy a zone
+solaris_zone: name=zone1 state=absent
+
+# Detach a zone
+solaris_zone: name=zone1 state=detached
+
+# Configure a zone, ready to be attached
+solaris_zone: name=zone1 state=configured path=/zones/zone1 root_password="Be9oX7OSwWoU."
+ config='set autoboot=true; add net; set physical=bge0; set address=10.1.1.1; end'
+
+# Attach a zone
+solaris_zone: name=zone1 state=attached attach_options='-u'
+'''
+
+class Zone(object):
+ def __init__(self, module):
+ self.changed = False
+ self.msg = []
+
+ self.module = module
+ self.path = self.module.params['path']
+ self.name = self.module.params['name']
+ self.sparse = self.module.params['sparse']
+ self.root_password = self.module.params['root_password']
+ self.timeout = self.module.params['timeout']
+ self.config = self.module.params['config']
+ self.create_options = self.module.params['create_options']
+ self.install_options = self.module.params['install_options']
+ self.attach_options = self.module.params['attach_options']
+
+ self.zoneadm_cmd = self.module.get_bin_path('zoneadm', True)
+ self.zonecfg_cmd = self.module.get_bin_path('zonecfg', True)
+ self.ssh_keygen_cmd = self.module.get_bin_path('ssh-keygen', True)
+
+ if self.module.check_mode:
+ self.msg.append('Running in check mode')
+
+ if platform.system() != 'SunOS':
+ self.module.fail_json(msg='This module requires Solaris')
+
+ (self.os_major, self.os_minor) = platform.release().split('.')
+ if int(self.os_minor) < 10:
+ self.module.fail_json(msg='This module requires Solaris 10 or later')
+
+ def configure(self):
+ if not self.path:
+ self.module.fail_json(msg='Missing required argument: path')
+
+ if not self.module.check_mode:
+ t = tempfile.NamedTemporaryFile(delete = False)
+
+ if self.sparse:
+ t.write('create %s\n' % self.create_options)
+ self.msg.append('creating sparse-root zone')
+ else:
+ t.write('create -b %s\n' % self.create_options)
+ self.msg.append('creating whole-root zone')
+
+ t.write('set zonepath=%s\n' % self.path)
+ t.write('%s\n' % self.config)
+ t.close()
+
+ cmd = '%s -z %s -f %s' % (self.zonecfg_cmd, self.name, t.name)
+ (rc, out, err) = self.module.run_command(cmd)
+ if rc != 0:
+ self.module.fail_json(msg='Failed to create zone. %s' % (out + err))
+ os.unlink(t.name)
+
+ self.changed = True
+ self.msg.append('zone configured')
+
+ def install(self):
+ if not self.module.check_mode:
+ cmd = '%s -z %s install %s' % (self.zoneadm_cmd, self.name, self.install_options)
+ (rc, out, err) = self.module.run_command(cmd)
+ if rc != 0:
+ self.module.fail_json(msg='Failed to install zone. %s' % (out + err))
+ if int(self.os_minor) == 10:
+ self.configure_sysid()
+ self.configure_password()
+ self.configure_ssh_keys()
+ self.changed = True
+ self.msg.append('zone installed')
+
+ def uninstall(self):
+ if self.is_installed():
+ if not self.module.check_mode:
+ cmd = '%s -z %s uninstall -F' % (self.zoneadm_cmd, self.name)
+ (rc, out, err) = self.module.run_command(cmd)
+ if rc != 0:
+ self.module.fail_json(msg='Failed to uninstall zone. %s' % (out + err))
+ self.changed = True
+ self.msg.append('zone uninstalled')
+
+ def configure_sysid(self):
+ if os.path.isfile('%s/root/etc/.UNCONFIGURED' % self.path):
+ os.unlink('%s/root/etc/.UNCONFIGURED' % self.path)
+
+ open('%s/root/noautoshutdown' % self.path, 'w').close()
+
+ node = open('%s/root/etc/nodename' % self.path, 'w')
+ node.write(self.name)
+ node.close()
+
+ id = open('%s/root/etc/.sysIDtool.state' % self.path, 'w')
+ id.write('1 # System previously configured?\n')
+ id.write('1 # Bootparams succeeded?\n')
+ id.write('1 # System is on a network?\n')
+ id.write('1 # Extended network information gathered?\n')
+ id.write('0 # Autobinder succeeded?\n')
+ id.write('1 # Network has subnets?\n')
+ id.write('1 # root password prompted for?\n')
+ id.write('1 # locale and term prompted for?\n')
+ id.write('1 # security policy in place\n')
+ id.write('1 # NFSv4 domain configured\n')
+ id.write('0 # Auto Registration Configured\n')
+ id.write('vt100')
+ id.close()
+
+ def configure_ssh_keys(self):
+ rsa_key_file = '%s/root/etc/ssh/ssh_host_rsa_key' % self.path
+ dsa_key_file = '%s/root/etc/ssh/ssh_host_dsa_key' % self.path
+
+ if not os.path.isfile(rsa_key_file):
+ cmd = '%s -f %s -t rsa -N ""' % (self.ssh_keygen_cmd, rsa_key_file)
+ (rc, out, err) = self.module.run_command(cmd)
+ if rc != 0:
+ self.module.fail_json(msg='Failed to create rsa key. %s' % (out + err))
+
+ if not os.path.isfile(dsa_key_file):
+ cmd = '%s -f %s -t dsa -N ""' % (self.ssh_keygen_cmd, dsa_key_file)
+ (rc, out, err) = self.module.run_command(cmd)
+ if rc != 0:
+ self.module.fail_json(msg='Failed to create dsa key. %s' % (out + err))
+
+ def configure_password(self):
+ shadow = '%s/root/etc/shadow' % self.path
+ if self.root_password:
+ f = open(shadow, 'r')
+ lines = f.readlines()
+ f.close()
+
+ for i in range(0, len(lines)):
+ fields = lines[i].split(':')
+ if fields[0] == 'root':
+ fields[1] = self.root_password
+ lines[i] = ':'.join(fields)
+
+ f = open(shadow, 'w')
+ for line in lines:
+ f.write(line)
+ f.close()
+
+ def boot(self):
+ if not self.module.check_mode:
+ cmd = '%s -z %s boot' % (self.zoneadm_cmd, self.name)
+ (rc, out, err) = self.module.run_command(cmd)
+ if rc != 0:
+ self.module.fail_json(msg='Failed to boot zone. %s' % (out + err))
+
+ """
+ The boot command can return before the zone has fully booted. This is especially
+ true on the first boot when the zone initializes the SMF services. Unless the zone
+ has fully booted, subsequent tasks in the playbook may fail as services aren't running yet.
+ Wait until the zone's console login is running; once that's running, consider the zone booted.
+ """
+
+ elapsed = 0
+ while True:
+ if elapsed > self.timeout:
+ self.module.fail_json(msg='timed out waiting for zone to boot')
+ rc = os.system('ps -z %s -o args|grep "ttymon.*-d /dev/console" > /dev/null 2>/dev/null' % self.name)
+ if rc == 0:
+ break
+ time.sleep(10)
+ elapsed += 10
+ self.changed = True
+ self.msg.append('zone booted')
+
+ def destroy(self):
+ if self.is_running():
+ self.stop()
+ if self.is_installed():
+ self.uninstall()
+ if not self.module.check_mode:
+ cmd = '%s -z %s delete -F' % (self.zonecfg_cmd, self.name)
+ (rc, out, err) = self.module.run_command(cmd)
+ if rc != 0:
+ self.module.fail_json(msg='Failed to delete zone. %s' % (out + err))
+ self.changed = True
+ self.msg.append('zone deleted')
+
+ def stop(self):
+ if not self.module.check_mode:
+ cmd = '%s -z %s halt' % (self.zoneadm_cmd, self.name)
+ (rc, out, err) = self.module.run_command(cmd)
+ if rc != 0:
+ self.module.fail_json(msg='Failed to stop zone. %s' % (out + err))
+ self.changed = True
+ self.msg.append('zone stopped')
+
+ def detach(self):
+ if not self.module.check_mode:
+ cmd = '%s -z %s detach' % (self.zoneadm_cmd, self.name)
+ (rc, out, err) = self.module.run_command(cmd)
+ if rc != 0:
+ self.module.fail_json(msg='Failed to detach zone. %s' % (out + err))
+ self.changed = True
+ self.msg.append('zone detached')
+
+ def attach(self):
+ if not self.module.check_mode:
+ cmd = '%s -z %s attach %s' % (self.zoneadm_cmd, self.name, self.attach_options)
+ (rc, out, err) = self.module.run_command(cmd)
+ if rc != 0:
+ self.module.fail_json(msg='Failed to attach zone. %s' % (out + err))
+ self.changed = True
+ self.msg.append('zone attached')
+
+ def exists(self):
+ cmd = '%s -z %s list' % (self.zoneadm_cmd, self.name)
+ (rc, out, err) = self.module.run_command(cmd)
+ if rc == 0:
+ return True
+ else:
+ return False
+
+ def is_running(self):
+ return self.status() == 'running'
+
+ def is_installed(self):
+ return self.status() == 'installed'
+
+ def is_configured(self):
+ return self.status() == 'configured'
+
+ def status(self):
+ cmd = '%s -z %s list -p' % (self.zoneadm_cmd, self.name)
+ (rc, out, err) = self.module.run_command(cmd)
+ if rc == 0:
+ return out.split(':')[2]
+ else:
+ return 'undefined'
+
+ def state_present(self):
+ if self.exists():
+ self.msg.append('zone already exists')
+ else:
+ self.configure()
+ self.install()
+
+ def state_running(self):
+ self.state_present()
+ if self.is_running():
+ self.msg.append('zone already running')
+ else:
+ self.boot()
+
+ def state_stopped(self):
+ if self.exists():
+ self.stop()
+ else:
+ self.module.fail_json(msg='zone does not exist')
+
+ def state_absent(self):
+ if self.exists():
+ if self.is_running():
+ self.stop()
+ self.destroy()
+ else:
+ self.msg.append('zone does not exist')
+
+ def state_configured(self):
+ if self.exists():
+ self.msg.append('zone already exists')
+ else:
+ self.configure()
+
+ def state_detached(self):
+ if not self.exists():
+ self.module.fail_json(msg='zone does not exist')
+ if self.is_configured():
+ self.msg.append('zone already detached')
+ else:
+ self.stop()
+ self.detach()
+
+ def state_attached(self):
+ if not self.exists():
+            self.module.fail_json(msg='zone does not exist')
+ if self.is_configured():
+ self.attach()
+ else:
+ self.msg.append('zone already attached')
+
+def main():
+ module = AnsibleModule(
+ argument_spec = dict(
+ name = dict(required=True),
+ state = dict(default='present', choices=['running', 'started', 'present', 'installed', 'stopped', 'absent', 'configured', 'detached', 'attached']),
+            path = dict(default=None),
+ sparse = dict(default=False, type='bool'),
+ root_password = dict(default=None),
+ timeout = dict(default=600, type='int'),
+ config = dict(default=''),
+ create_options = dict(default=''),
+ install_options = dict(default=''),
+ attach_options = dict(default=''),
+ ),
+ supports_check_mode=True
+ )
+
+ zone = Zone(module)
+
+ state = module.params['state']
+
+ if state == 'running' or state == 'started':
+ zone.state_running()
+ elif state == 'present' or state == 'installed':
+ zone.state_present()
+ elif state == 'stopped':
+ zone.state_stopped()
+ elif state == 'absent':
+ zone.state_absent()
+ elif state == 'configured':
+ zone.state_configured()
+ elif state == 'detached':
+ zone.state_detached()
+ elif state == 'attached':
+ zone.state_attached()
+ else:
+ module.fail_json(msg='Invalid state: %s' % state)
+
+ module.exit_json(changed=zone.changed, msg=', '.join(zone.msg))
+
+from ansible.module_utils.basic import *
+main()
diff --git a/lib/ansible/modules/extras/system/svc.py b/lib/ansible/modules/extras/system/svc.py
new file mode 100755
index 0000000000..e82b0591d5
--- /dev/null
+++ b/lib/ansible/modules/extras/system/svc.py
@@ -0,0 +1,302 @@
+#!/usr/bin/python
+# -*- coding: utf-8 -*-
+#
+# (c) 2015, Brian Coca <bcoca@ansible.com>
+#
+# This file is part of Ansible
+#
+# Ansible is free software: you can redistribute it and/or modify
+# it under the terms of the GNU General Public License as published by
+# the Free Software Foundation, either version 3 of the License, or
+# (at your option) any later version.
+#
+# Ansible is distributed in the hope that it will be useful,
+# but WITHOUT ANY WARRANTY; without even the implied warranty of
+# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+# GNU General Public License for more details.
+#
+# You should have received a copy of the GNU General Public License
+# along with Ansible. If not, see <http://www.gnu.org/licenses/>
+
+DOCUMENTATION = '''
+---
+module: svc
+author: "Brian Coca (@bcoca)"
+version_added: "1.9"
+short_description: Manage daemontools services.
+description:
+ - Controls daemontools services on remote hosts using the svc utility.
+options:
+ name:
+ required: true
+ description:
+ - Name of the service to manage.
+ state:
+ required: false
+        choices: [ started, stopped, restarted, killed, reloaded, once ]
+ description:
+ - C(Started)/C(stopped) are idempotent actions that will not run
+ commands unless necessary. C(restarted) will always bounce the
+ svc (svc -t) and C(killed) will always bounce the svc (svc -k).
+ C(reloaded) will send a sigusr1 (svc -1).
+ C(once) will run a normally downed svc once (svc -o), not really
+ an idempotent operation.
+ downed:
+ required: false
+ choices: [ "yes", "no" ]
+ default: no
+ description:
+            - Whether a 'down' file should exist or not; if it exists it disables auto startup.
+              Defaults to no. Downed does not imply stopped.
+ enabled:
+ required: false
+ choices: [ "yes", "no" ]
+ description:
+            - Whether the service is enabled or not; if disabled it also implies stopped.
+              Note that a service can be enabled and downed (no auto restart).
+ service_dir:
+ required: false
+ default: /service
+ description:
+            - Directory svscan watches for services.
+    service_src:
+        required: false
+        description:
+            - Directory where services are defined; the source of symlinks to service_dir.
+'''
+
+EXAMPLES = '''
+# Example action to start svc dnscache, if not running
+ - svc: name=dnscache state=started
+
+# Example action to stop svc dnscache, if running
+ - svc: name=dnscache state=stopped
+
+# Example action to kill svc dnscache, in all cases
+ - svc: name=dnscache state=killed
+
+# Example action to restart svc dnscache, in all cases
+ - svc: name=dnscache state=restarted
+
+# Example action to reload svc dnscache, in all cases
+ - svc: name=dnscache state=reloaded
+
+# Example using alt svc directory location
+ - svc: name=dnscache state=reloaded service_dir=/var/service
+'''
+
+import os
+import re
+
+from ansible.module_utils.pycompat24 import get_exception
+from ansible.module_utils.basic import *
+
+def _load_dist_subclass(cls, *args, **kwargs):
+ '''
+ Used for derivative implementations
+ '''
+ subclass = None
+
+    distro = kwargs['module'].params['dist']
+
+ # get the most specific superclass for this platform
+ if distro is not None:
+ for sc in cls.__subclasses__():
+ if sc.distro is not None and sc.distro == distro:
+ subclass = sc
+ if subclass is None:
+ subclass = cls
+
+ return super(cls, subclass).__new__(subclass)
+
+class Svc(object):
+ """
+ Main class that handles daemontools, can be subclassed and overriden in case
+ we want to use a 'derivative' like encore, s6, etc
+ """
+
+
+ #def __new__(cls, *args, **kwargs):
+ # return _load_dist_subclass(cls, args, kwargs)
+
+
+
+ def __init__(self, module):
+ self.extra_paths = [ '/command', '/usr/local/bin' ]
+ self.report_vars = ['state', 'enabled', 'downed', 'svc_full', 'src_full', 'pid', 'duration', 'full_state']
+
+ self.module = module
+
+ self.name = module.params['name']
+ self.service_dir = module.params['service_dir']
+ self.service_src = module.params['service_src']
+ self.enabled = None
+ self.downed = None
+ self.full_state = None
+ self.state = None
+ self.pid = None
+ self.duration = None
+
+ self.svc_cmd = module.get_bin_path('svc', opt_dirs=self.extra_paths)
+ self.svstat_cmd = module.get_bin_path('svstat', opt_dirs=self.extra_paths)
+ self.svc_full = '/'.join([ self.service_dir, self.name ])
+ self.src_full = '/'.join([ self.service_src, self.name ])
+
+ self.enabled = os.path.lexists(self.svc_full)
+ if self.enabled:
+ self.downed = os.path.lexists('%s/down' % self.svc_full)
+ self.get_status()
+ else:
+ self.downed = os.path.lexists('%s/down' % self.src_full)
+ self.state = 'stopped'
+
+
+ def enable(self):
+ if os.path.exists(self.src_full):
+ try:
+ os.symlink(self.src_full, self.svc_full)
+ except OSError:
+ e = get_exception()
+ self.module.fail_json(path=self.src_full, msg='Error while linking: %s' % str(e))
+ else:
+ self.module.fail_json(msg="Could not find source for service to enable (%s)." % self.src_full)
+
+ def disable(self):
+ try:
+ os.unlink(self.svc_full)
+ except OSError:
+ e = get_exception()
+ self.module.fail_json(path=self.svc_full, msg='Error while unlinking: %s' % str(e))
+ self.execute_command([self.svc_cmd,'-dx',self.src_full])
+
+ src_log = '%s/log' % self.src_full
+ if os.path.exists(src_log):
+ self.execute_command([self.svc_cmd,'-dx',src_log])
+
+ def get_status(self):
+ (rc, out, err) = self.execute_command([self.svstat_cmd, self.svc_full])
+
+ if err is not None and err:
+ self.full_state = self.state = err
+ else:
+ self.full_state = out
+
+        m = re.search(r'\(pid (\d+)\)', out)
+ if m:
+ self.pid = m.group(1)
+
+        m = re.search(r'(\d+) seconds', out)
+ if m:
+ self.duration = m.group(1)
+
+ if re.search(' up ', out):
+ self.state = 'start'
+ elif re.search(' down ', out):
+ self.state = 'stopp'
+ else:
+ self.state = 'unknown'
+ return
+
+ if re.search(' want ', out):
+ self.state += 'ing'
+ else:
+ self.state += 'ed'
+
+ def start(self):
+ return self.execute_command([self.svc_cmd, '-u', self.svc_full])
+
+ def stopp(self):
+ return self.stop()
+
+ def stop(self):
+ return self.execute_command([self.svc_cmd, '-d', self.svc_full])
+
+ def once(self):
+ return self.execute_command([self.svc_cmd, '-o', self.svc_full])
+
+ def reload(self):
+ return self.execute_command([self.svc_cmd, '-1', self.svc_full])
+
+ def restart(self):
+ return self.execute_command([self.svc_cmd, '-t', self.svc_full])
+
+ def kill(self):
+ return self.execute_command([self.svc_cmd, '-k', self.svc_full])
+
+ def execute_command(self, cmd):
+ try:
+ (rc, out, err) = self.module.run_command(' '.join(cmd))
+ except Exception:
+ e = get_exception()
+ self.module.fail_json(msg="failed to execute: %s" % str(e))
+ return (rc, out, err)
+
+ def report(self):
+ self.get_status()
+ states = {}
+ for k in self.report_vars:
+ states[k] = self.__dict__[k]
+ return states
+
+# ===========================================
+# Main control flow
+
+def main():
+ module = AnsibleModule(
+ argument_spec = dict(
+ name = dict(required=True),
+ state = dict(choices=['started', 'stopped', 'restarted', 'killed', 'reloaded', 'once']),
+ enabled = dict(required=False, type='bool'),
+ downed = dict(required=False, type='bool'),
+ dist = dict(required=False, default='daemontools'),
+ service_dir = dict(required=False, default='/service'),
+ service_src = dict(required=False, default='/etc/service'),
+ ),
+ supports_check_mode=True,
+ )
+
+ module.run_command_environ_update = dict(LANG='C', LC_ALL='C', LC_MESSAGES='C', LC_CTYPE='C')
+
+ state = module.params['state']
+ enabled = module.params['enabled']
+ downed = module.params['downed']
+
+ svc = Svc(module)
+ changed = False
+ orig_state = svc.report()
+
+ if enabled is not None and enabled != svc.enabled:
+ changed = True
+ if not module.check_mode:
+ try:
+ if enabled:
+ svc.enable()
+ else:
+ svc.disable()
+ except (OSError, IOError):
+ e = get_exception()
+ module.fail_json(msg="Could change service link: %s" % str(e))
+
+ if state is not None and state != svc.state:
+ changed = True
+ if not module.check_mode:
+            if state == 'once':
+                svc.once()
+            else:
+                getattr(svc, state[:-2])()
+
+ if downed is not None and downed != svc.downed:
+ changed = True
+ if not module.check_mode:
+ d_file = "%s/down" % svc.svc_full
+ try:
+ if downed:
+ open(d_file, "a").close()
+ else:
+ os.unlink(d_file)
+ except (OSError, IOError):
+ e = get_exception()
+ module.fail_json(msg="Could change downed file: %s " % (str(e)))
+
+ module.exit_json(changed=changed, svc=svc.report())
+
+
+
+
+main()
diff --git a/lib/ansible/modules/extras/system/timezone.py b/lib/ansible/modules/extras/system/timezone.py
new file mode 100644
index 0000000000..2f04801790
--- /dev/null
+++ b/lib/ansible/modules/extras/system/timezone.py
@@ -0,0 +1,462 @@
+#!/usr/bin/python
+# -*- coding: utf-8 -*-
+
+# (c) 2016, Shinichi TAMURA (@tmshn)
+#
+# This file is part of Ansible
+#
+# Ansible is free software: you can redistribute it and/or modify
+# it under the terms of the GNU General Public License as published by
+# the Free Software Foundation, either version 3 of the License, or
+# (at your option) any later version.
+#
+# Ansible is distributed in the hope that it will be useful,
+# but WITHOUT ANY WARRANTY; without even the implied warranty of
+# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+# GNU General Public License for more details.
+#
+# You should have received a copy of the GNU General Public License
+# along with Ansible. If not, see <http://www.gnu.org/licenses/>.
+
+import os
+import re
+from ansible.module_utils.basic import AnsibleModule, get_platform
+from ansible.module_utils.six import iteritems
+
+
+DOCUMENTATION = '''
+---
+module: timezone
+short_description: Configure timezone setting
+description:
+ - This module configures the timezone setting, both of the system clock
+ and of the hardware clock. I(Currently only Linux platform is supported.)
+ It is recommended to restart C(crond) after changing the timezone,
+ otherwise the jobs may run at the wrong time.
+ It uses the C(timedatectl) command if available. Otherwise, it edits
+ C(/etc/sysconfig/clock) or C(/etc/timezone) for the system clock,
+ and uses the C(hwclock) command for the hardware clock.
+    If you want to set up NTP, use the M(service) module.
+version_added: "2.2"
+options:
+ name:
+ description:
+ - Name of the timezone for the system clock.
+ Default is to keep current setting.
+ required: false
+ hwclock:
+ description:
+ - Whether the hardware clock is in UTC or in local timezone.
+ Default is to keep current setting.
+        Note that changing this option is not recommended and may fail
+        to apply, especially in virtualized environments such as AWS.
+ required: false
+ aliases: ['rtc']
+author: "Shinichi TAMURA (@tmshn)"
+'''
+
+RETURN = '''
+diff:
+ description: The differences about the given arguments.
+ returned: success
+ type: dictionary
+ contains:
+ before:
+ description: The values before change
+ type: dict
+ after:
+ description: The values after change
+ type: dict
+'''
+
+EXAMPLES = '''
+- name: set timezone to Asia/Tokyo
+ timezone: name=Asia/Tokyo
+'''
+
+
+class Timezone(object):
+ """This is a generic Timezone manipulation class that is subclassed based on platform.
+
+ A subclass may wish to override the following action methods:
+ - get(key, phase) ... get the value from the system at `phase`
+ - set(key, value) ... set the value to the current system
+ """
+
+ def __new__(cls, module):
+ """Return the platform-specific subclass.
+
+        It does not use load_platform_subclass() because it needs to decide
+        based on whether the `timedatectl` command exists.
+
+ Args:
+ module: The AnsibleModule.
+ """
+ if get_platform() == 'Linux':
+ if module.get_bin_path('timedatectl') is not None:
+ return super(Timezone, SystemdTimezone).__new__(SystemdTimezone)
+ else:
+ return super(Timezone, NosystemdTimezone).__new__(NosystemdTimezone)
+ else:
+ # Not supported yet
+ return super(Timezone, Timezone).__new__(Timezone)
+
+ def __init__(self, module):
+ """Initialize of the class.
+
+ Args:
+ module: The AnsibleModule.
+ """
+ super(Timezone, self).__init__()
+ self.msg = []
+ # `self.value` holds the values for each params on each phases.
+        # Initially there is only info for the "planned" phase, but
+        # `self.check()` will fill it out.
+ self.value = dict()
+ for key in module.argument_spec:
+ value = module.params[key]
+ if value is not None:
+ self.value[key] = dict(planned=value)
+ self.module = module
+
+ def abort(self, msg):
+ """Abort the process with error message.
+
+ This is just the wrapper of module.fail_json().
+
+ Args:
+ msg: The error message.
+ """
+ error_msg = ['Error message:', msg]
+ if len(self.msg) > 0:
+ error_msg.append('Other message(s):')
+ error_msg.extend(self.msg)
+ self.module.fail_json(msg='\n'.join(error_msg))
+
+ def execute(self, *commands, **kwargs):
+ """Execute the shell command.
+
+ This is just the wrapper of module.run_command().
+
+ Args:
+ *commands: The command to execute.
+ It will be concatenated with single space.
+ **kwargs: Only 'log' key is checked.
+ If kwargs['log'] is true, record the command to self.msg.
+
+ Returns:
+ stdout: Standard output of the command.
+ """
+ command = ' '.join(commands)
+ (rc, stdout, stderr) = self.module.run_command(command, check_rc=True)
+ if kwargs.get('log', False):
+ self.msg.append('executed `%s`' % command)
+ return stdout
+
+ def diff(self, phase1='before', phase2='after'):
+ """Calculate the difference between given 2 phases.
+
+ Args:
+ phase1, phase2: The names of phase to compare.
+
+ Returns:
+ diff: The difference of value between phase1 and phase2.
+ This is in the format which can be used with the
+ `--diff` option of ansible-playbook.
+ """
+ diff = {phase1: {}, phase2: {}}
+ for key, value in iteritems(self.value):
+ diff[phase1][key] = value[phase1]
+ diff[phase2][key] = value[phase2]
+ return diff
+
+ def check(self, phase):
+ """Check the state in given phase and set it to `self.value`.
+
+ Args:
+ phase: The name of the phase to check.
+
+ Returns:
+ NO RETURN VALUE
+ """
+ if phase == 'planned':
+ return
+ for key, value in iteritems(self.value):
+ value[phase] = self.get(key, phase)
+
+ def change(self):
+ """Make the changes effect based on `self.value`."""
+ for key, value in iteritems(self.value):
+ if value['before'] != value['planned']:
+ self.set(key, value['planned'])
+
+ # ===========================================
+ # Platform specific methods (must be replaced by subclass).
+
+ def get(self, key, phase):
+ """Get the value for the key at the given phase.
+
+ Called from self.check().
+
+ Args:
+ key: The key to get the value
+ phase: The phase to get the value
+
+ Return:
+ value: The value for the key at the given phase.
+ """
+ self.abort('get(key, phase) is not implemented on target platform')
+
+ def set(self, key, value):
+ """Set the value for the key (of course, for the phase 'after').
+
+ Called from self.change().
+
+ Args:
+ key: Key to set the value
+ value: Value to set
+ """
+ self.abort('set(key, value) is not implemented on target platform')
+
+
+class SystemdTimezone(Timezone):
+ """This is a Timezone manipulation class systemd-powered Linux.
+
+ It uses the `timedatectl` command to check/set all arguments.
+ """
+
+ regexps = dict(
+ hwclock=re.compile(r'^\s*RTC in local TZ\s*:\s*([^\s]+)', re.MULTILINE),
+ name =re.compile(r'^\s*Time ?zone\s*:\s*([^\s]+)', re.MULTILINE)
+ )
+
+ subcmds = dict(
+ hwclock='set-local-rtc',
+ name ='set-timezone'
+ )
+
+ def __init__(self, module):
+ super(SystemdTimezone, self).__init__(module)
+ self.timedatectl = module.get_bin_path('timedatectl', required=True)
+ self.status = dict()
+ # Validate given timezone
+ if 'name' in self.value:
+ tz = self.value['name']['planned']
+ tzfile = '/usr/share/zoneinfo/%s' % tz
+ if not os.path.isfile(tzfile):
+ self.abort('given timezone "%s" is not available' % tz)
+
+ def _get_status(self, phase):
+ if phase not in self.status:
+ self.status[phase] = self.execute(self.timedatectl, 'status')
+ return self.status[phase]
+
+ def get(self, key, phase):
+ status = self._get_status(phase)
+ value = self.regexps[key].search(status).group(1)
+ if key == 'hwclock':
+ # For key='hwclock'; convert yes/no -> local/UTC
+ if self.module.boolean(value):
+ value = 'local'
+ else:
+ value = 'UTC'
+ return value
+
+ def set(self, key, value):
+ # For key='hwclock'; convert UTC/local -> yes/no
+ if key == 'hwclock':
+ if value == 'local':
+ value = 'yes'
+ else:
+ value = 'no'
+ self.execute(self.timedatectl, self.subcmds[key], value, log=True)
+
+
+class NosystemdTimezone(Timezone):
+ """This is a Timezone manipulation class for non systemd-powered Linux.
+
+ For timezone setting, it edits the following file and reflect changes:
+ - /etc/sysconfig/clock ... RHEL/CentOS
+ - /etc/timezone ... Debian/Ubuntu
+ For hwclock setting, it executes `hwclock --systohc` command with the
+ '--utc' or '--localtime' option.
+ """
+
+ conf_files = dict(
+ name =None, # To be set in __init__
+ hwclock=None, # To be set in __init__
+ adjtime='/etc/adjtime'
+ )
+
+ regexps = dict(
+ name =None, # To be set in __init__
+ hwclock=re.compile(r'^UTC\s*=\s*([^\s]+)', re.MULTILINE),
+ adjtime=re.compile(r'^(UTC|LOCAL)$', re.MULTILINE)
+ )
+
+ def __init__(self, module):
+ super(NosystemdTimezone, self).__init__(module)
+ # Validate given timezone
+ if 'name' in self.value:
+ tz = self.value['name']['planned']
+ tzfile = '/usr/share/zoneinfo/%s' % tz
+ if not os.path.isfile(tzfile):
+ self.abort('given timezone "%s" is not available' % tz)
+ self.update_timezone = self.module.get_bin_path('cp', required=True)
+ self.update_timezone += ' %s /etc/localtime' % tzfile
+ self.update_hwclock = self.module.get_bin_path('hwclock', required=True)
+ # Distribution-specific configurations
+ if self.module.get_bin_path('dpkg-reconfigure') is not None:
+ # Debian/Ubuntu
+ self.update_timezone = self.module.get_bin_path('dpkg-reconfigure', required=True)
+ self.update_timezone += ' --frontend noninteractive tzdata'
+ self.conf_files['name'] = '/etc/timezone'
+ self.conf_files['hwclock'] = '/etc/default/rcS'
+ self.regexps['name'] = re.compile(r'^([^\s]+)', re.MULTILINE)
+ self.tzline_format = '%s\n'
+ else:
+ # RHEL/CentOS
+ if self.module.get_bin_path('tzdata-update') is not None:
+ self.update_timezone = self.module.get_bin_path('tzdata-update', required=True)
+ # else:
+ # self.update_timezone = 'cp ...' <- configured above
+ self.conf_files['name'] = '/etc/sysconfig/clock'
+ self.conf_files['hwclock'] = '/etc/sysconfig/clock'
+ self.regexps['name'] = re.compile(r'^ZONE\s*=\s*"?([^"\s]+)"?', re.MULTILINE)
+ self.tzline_format = 'ZONE="%s"\n'
+ self.update_hwclock = self.module.get_bin_path('hwclock', required=True)
+
+ def _edit_file(self, filename, regexp, value):
+ """Replace the first matched line with given `value`.
+
+ If `regexp` matched more than once, other than the first line will be deleted.
+
+ Args:
+ filename: The name of the file to edit.
+ regexp: The regular expression to search with.
+ value: The line which will be inserted.
+ """
+ # Read the file
+ try:
+ file = open(filename, 'r')
+ except IOError:
+ self.abort('cannot read "%s"' % filename)
+ else:
+ lines = file.readlines()
+ file.close()
+ # Find all matched lines
+ matched_indices = []
+ for i, line in enumerate(lines):
+ if regexp.search(line):
+ matched_indices.append(i)
+ if len(matched_indices) > 0:
+ insert_line = matched_indices[0]
+ else:
+ insert_line = 0
+ # Remove all matched lines
+ for i in matched_indices[::-1]:
+ del lines[i]
+ # ...and insert the value
+ lines.insert(insert_line, value)
+ # Write the changes
+ try:
+ file = open(filename, 'w')
+ except IOError:
+ self.abort('cannot write to "%s"' % filename)
+ else:
+ file.writelines(lines)
+ file.close()
+ self.msg.append('Added 1 line and deleted %s line(s) on %s' % (len(matched_indices), filename))
+
+ def get(self, key, phase):
+ if key == 'hwclock' and os.path.isfile('/etc/adjtime'):
+ # If /etc/adjtime exists, use that file.
+ key = 'adjtime'
+
+ filename = self.conf_files[key]
+
+ try:
+ file = open(filename, mode='r')
+ except IOError:
+ self.abort('cannot read configuration file "%s" for %s' % (filename, key))
+ else:
+ status = file.read()
+ file.close()
+ try:
+ value = self.regexps[key].search(status).group(1)
+ except AttributeError:
+ self.abort('cannot find a valid value in configuration file "%s" for %s' % (filename, key))
+ else:
+ if key == 'hwclock':
+ # For key='hwclock'; convert yes/no -> UTC/local
+ if self.module.boolean(value):
+ value = 'UTC'
+ else:
+ value = 'local'
+ elif key == 'adjtime':
+ # For key='adjtime'; convert LOCAL -> local
+ if value != 'UTC':
+ value = value.lower()
+ return value
+
+ def set_timezone(self, value):
+ self._edit_file(filename=self.conf_files['name'],
+ regexp=self.regexps['name'],
+ value=self.tzline_format % value)
+ self.execute(self.update_timezone)
+
+ def set_hwclock(self, value):
+ if value == 'local':
+ option = '--localtime'
+ else:
+ option = '--utc'
+ self.execute(self.update_hwclock, '--systohc', option, log=True)
+
+ def set(self, key, value):
+ if key == 'name':
+ self.set_timezone(value)
+ elif key == 'hwclock':
+ self.set_hwclock(value)
+ else:
+ self.abort('unknown parameter "%s"' % key)
+
+
+def main():
+ # Construct 'module' and 'tz'
+ arg_spec = dict(
+ hwclock=dict(choices=['UTC', 'local'], aliases=['rtc']),
+ name =dict(),
+ )
+ module = AnsibleModule(
+ argument_spec=arg_spec,
+ required_one_of=[arg_spec.keys()],
+ supports_check_mode=True
+ )
+ tz = Timezone(module)
+
+ # Check the current state
+ tz.check(phase='before')
+ if module.check_mode:
+ diff = tz.diff('before', 'planned')
+ # In check mode, 'planned' state is treated as 'after' state
+ diff['after'] = diff.pop('planned')
+ else:
+ # Make change
+ tz.change()
+ # Check the current state
+ tz.check(phase='after')
+ # Examine if the current state matches planned state
+ diff = tz.diff('after', 'planned')
+ if diff['after'] != diff['planned']:
+ tz.abort('still not in the desired state, though changes have been made')
+ diff = tz.diff('before', 'after')
+
+ changed = (diff['before'] != diff['after'])
+ if len(tz.msg) > 0:
+ module.exit_json(changed=changed, diff=diff, msg='\n'.join(tz.msg))
+ else:
+ module.exit_json(changed=changed, diff=diff)
+
+
+if __name__ == '__main__':
+ main()
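+
+# Example playbook usage (a sketch; the timezone value is illustrative):
+#
+# - timezone:
+#     name: Asia/Tokyo
+#     hwclock: UTC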
diff --git a/lib/ansible/modules/extras/system/ufw.py b/lib/ansible/modules/extras/system/ufw.py
new file mode 100644
index 0000000000..c692211d12
--- /dev/null
+++ b/lib/ansible/modules/extras/system/ufw.py
@@ -0,0 +1,289 @@
+#!/usr/bin/python
+# -*- coding: utf-8 -*-
+
+# (c) 2014, Ahti Kitsik <ak@ahtik.com>
+# (c) 2014, Jarno Keskikangas <jarno.keskikangas@gmail.com>
+# (c) 2013, Aleksey Ovcharenko <aleksey.ovcharenko@gmail.com>
+# (c) 2013, James Martin <jmartin@basho.com>
+#
+# This file is part of Ansible
+#
+# Ansible is free software: you can redistribute it and/or modify
+# it under the terms of the GNU General Public License as published by
+# the Free Software Foundation, either version 3 of the License, or
+# (at your option) any later version.
+#
+# Ansible is distributed in the hope that it will be useful,
+# but WITHOUT ANY WARRANTY; without even the implied warranty of
+# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+# GNU General Public License for more details.
+#
+# You should have received a copy of the GNU General Public License
+# along with Ansible. If not, see <http://www.gnu.org/licenses/>.
+
+DOCUMENTATION = '''
+---
+module: ufw
+short_description: Manage firewall with UFW
+description:
+ - Manage firewall with UFW.
+version_added: 1.6
+author:
+ - "Aleksey Ovcharenko (@ovcharenko)"
+ - "Jarno Keskikangas (@pyykkis)"
+ - "Ahti Kitsik (@ahtik)"
+notes:
+ - See C(man ufw) for more examples.
+requirements:
+ - C(ufw) package
+options:
+ state:
+ description:
+ - C(enabled) reloads firewall and enables firewall on boot.
+ - C(disabled) unloads firewall and disables firewall on boot.
+ - C(reloaded) reloads firewall.
+ - C(reset) disables and resets firewall to installation defaults.
+ required: false
+ choices: ['enabled', 'disabled', 'reloaded', 'reset']
+ policy:
+ description:
+ - Change the default policy for incoming or outgoing traffic.
+ required: false
+ aliases: ['default']
+ choices: ['allow', 'deny', 'reject']
+ direction:
+ description:
+ - Select direction for a rule or default policy command.
+ required: false
+ choices: ['in', 'out', 'incoming', 'outgoing', 'routed']
+ logging:
+ description:
+ - Toggles logging. Logged packets use the LOG_KERN syslog facility.
+ choices: ['on', 'off', 'low', 'medium', 'high', 'full']
+ required: false
+ insert:
+ description:
+ - Insert the corresponding rule as rule number NUM
+ required: false
+ rule:
+ description:
+ - Add firewall rule
+ required: false
+ choices: ['allow', 'deny', 'reject', 'limit']
+ log:
+ description:
+ - Log new connections matched to this rule
+ required: false
+ choices: ['yes', 'no']
+ from_ip:
+ description:
+ - Source IP address.
+ required: false
+ aliases: ['from', 'src']
+ default: 'any'
+ from_port:
+ description:
+ - Source port.
+ required: false
+ to_ip:
+ description:
+ - Destination IP address.
+ required: false
+ aliases: ['to', 'dest']
+ default: 'any'
+ to_port:
+ description:
+ - Destination port.
+ required: false
+ aliases: ['port']
+ proto:
+ description:
+ - TCP/IP protocol.
+ choices: ['any', 'tcp', 'udp', 'ipv6', 'esp', 'ah']
+ required: false
+ name:
+ description:
+ - Use profile located in C(/etc/ufw/applications.d)
+ required: false
+ aliases: ['app']
+ delete:
+ description:
+ - Delete rule.
+ required: false
+ choices: ['yes', 'no']
+ interface:
+ description:
+ - Specify interface for rule.
+ required: false
+ aliases: ['if']
+ route:
+ description:
+ - Apply the rule to routed/forwarded packets.
+ required: false
+ choices: ['yes', 'no']
+'''
+
+EXAMPLES = '''
+# Allow everything and enable UFW
+ufw: state=enabled policy=allow
+
+# Set logging
+ufw: logging=on
+
+# Sometimes it is desirable to let the sender know when traffic is
+# being denied, rather than simply ignoring it. In these cases, use
+# reject instead of deny. In addition, log rejected connections:
+ufw: rule=reject port=auth log=yes
+
+# ufw supports connection rate limiting, which is useful for protecting
+# against brute-force login attacks. ufw will deny connections if an IP
+# address has attempted to initiate 6 or more connections in the last
+# 30 seconds. See http://www.debian-administration.org/articles/187
+# for details. Typical usage is:
+ufw: rule=limit port=ssh proto=tcp
+
+# Allow OpenSSH. (Note that as ufw manages its own state, simply removing
+# a rule=allow task can leave those ports exposed. Either use delete=yes
+# or a separate state=reset task)
+ufw: rule=allow name=OpenSSH
+
+# Delete OpenSSH rule
+ufw: rule=allow name=OpenSSH delete=yes
+
+# Deny all access to port 53:
+ufw: rule=deny port=53
+
+# Allow port range 60000-61000
+ufw: rule=allow port=60000:61000
+
+# Allow all access to tcp port 80:
+ufw: rule=allow port=80 proto=tcp
+
+# Allow all access from RFC1918 networks to this host:
+ufw: rule=allow src={{ item }}
+with_items:
+- 10.0.0.0/8
+- 172.16.0.0/12
+- 192.168.0.0/16
+
+# Deny access to udp port 514 from host 1.2.3.4:
+ufw: rule=deny proto=udp src=1.2.3.4 port=514
+
+# Allow incoming access to eth0 from 1.2.3.5 port 5469 to 1.2.3.4 port 5469
+ufw: rule=allow interface=eth0 direction=in proto=udp src=1.2.3.5 from_port=5469 dest=1.2.3.4 to_port=5469
+
+# Deny all traffic from the IPv6 2001:db8::/32 to tcp port 25 on this host.
+# Note that IPv6 must be enabled in /etc/default/ufw for IPv6 firewalling to work.
+ufw: rule=deny proto=tcp src=2001:db8::/32 port=25
+
+# Deny forwarded/routed traffic from subnet 1.2.3.0/24 to subnet 4.5.6.0/24.
+# Can be used to further restrict a global FORWARD policy set to allow
+ufw: rule=deny route=yes src=1.2.3.0/24 dest=4.5.6.0/24
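+
+# Insert a rule at a specific position in the ruleset using the insert
+# option (the rule number 1 here is illustrative):
+ufw: rule=allow insert=1 port=443 proto=tcp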
+'''
+
+from operator import itemgetter
+
+
+def main():
+ module = AnsibleModule(
+ argument_spec = dict(
+ state = dict(default=None, choices=['enabled', 'disabled', 'reloaded', 'reset']),
+ default = dict(default=None, aliases=['policy'], choices=['allow', 'deny', 'reject']),
+ logging = dict(default=None, choices=['on', 'off', 'low', 'medium', 'high', 'full']),
+ direction = dict(default=None, choices=['in', 'incoming', 'out', 'outgoing', 'routed']),
+ delete = dict(default=False, type='bool'),
+ route = dict(default=False, type='bool'),
+ insert = dict(default=None),
+ rule = dict(default=None, choices=['allow', 'deny', 'reject', 'limit']),
+ interface = dict(default=None, aliases=['if']),
+ log = dict(default=False, type='bool'),
+ from_ip = dict(default='any', aliases=['src', 'from']),
+ from_port = dict(default=None),
+ to_ip = dict(default='any', aliases=['dest', 'to']),
+ to_port = dict(default=None, aliases=['port']),
+ proto = dict(default=None, aliases=['protocol'], choices=['any', 'tcp', 'udp', 'ipv6', 'esp', 'ah']),
+ app = dict(default=None, aliases=['name'])
+ ),
+ supports_check_mode = True,
+ mutually_exclusive = [['app', 'proto', 'logging']]
+ )
+
+ cmds = []
+
+ def execute(cmd):
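+ # `cmd` arrives as a list of [condition, text] (or single-item [text])
+ # entries; keep those whose first element is truthy and join each
+ # entry's last element into the final command string.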
+ cmd = ' '.join(map(itemgetter(-1), filter(itemgetter(0), cmd)))
+
+ cmds.append(cmd)
+ (rc, out, err) = module.run_command(cmd)
+
+ if rc != 0:
+ module.fail_json(msg=err or out)
+
+ params = module.params
+
+ # Ensure at least one of the command arguments is given
+ command_keys = ['state', 'default', 'rule', 'logging']
+ commands = dict((key, params[key]) for key in command_keys if params[key])
+
+ if len(commands) < 1:
+ module.fail_json(msg="None of the command arguments (%s) were given" % ', '.join(command_keys))
+
+ if params['interface'] is not None and params['direction'] is None:
+ module.fail_json(msg="Direction must be specified when creating a rule on an interface")
+
+ # Ensure ufw is available
+ ufw_bin = module.get_bin_path('ufw', True)
+
+ # Save the pre state and rules in order to recognize changes
+ (_, pre_state, _) = module.run_command(ufw_bin + ' status verbose')
+ (_, pre_rules, _) = module.run_command("grep '^### tuple' /lib/ufw/user*.rules")
+
+ # Execute commands
+ for (command, value) in commands.iteritems():
+ cmd = [[ufw_bin], [module.check_mode, '--dry-run']]
+
+ if command == 'state':
+ states = { 'enabled': 'enable', 'disabled': 'disable',
+ 'reloaded': 'reload', 'reset': 'reset' }
+ execute(cmd + [['-f'], [states[value]]])
+
+ elif command == 'logging':
+ execute(cmd + [[command], [value]])
+
+ elif command == 'default':
+ execute(cmd + [[command], [value], [params['direction']]])
+
+ elif command == 'rule':
+ # Rules are constructed according to the long format
+ #
+ # ufw [--dry-run] [delete] [insert NUM] [route] allow|deny|reject|limit [in|out on INTERFACE] [log|log-all] \
+ # [from ADDRESS [port PORT]] [to ADDRESS [port PORT]] \
+ # [proto protocol] [app application]
+ cmd.append([module.boolean(params['delete']), 'delete'])
+ cmd.append([module.boolean(params['route']), 'route'])
+ cmd.append([params['insert'], "insert %s" % params['insert']])
+ cmd.append([value])
+ cmd.append([params['direction'], "%s" % params['direction']])
+ cmd.append([params['interface'], "on %s" % params['interface']])
+ cmd.append([module.boolean(params['log']), 'log'])
+
+ for (key, template) in [('from_ip', "from %s" ), ('from_port', "port %s" ),
+ ('to_ip', "to %s" ), ('to_port', "port %s" ),
+ ('proto', "proto %s"), ('app', "app '%s'")]:
+
+ value = params[key]
+ cmd.append([value, template % (value)])
+
+ execute(cmd)
+
+ # Get the new state
+ (_, post_state, _) = module.run_command(ufw_bin + ' status verbose')
+ (_, post_rules, _) = module.run_command("grep '^### tuple' /lib/ufw/user*.rules")
+ changed = (pre_state != post_state) or (pre_rules != post_rules)
+
+ return module.exit_json(changed=changed, commands=cmds, msg=post_state.rstrip())
+
+# import module snippets
+from ansible.module_utils.basic import *
+
+main()
diff --git a/lib/ansible/modules/extras/system/zfs.py b/lib/ansible/modules/extras/system/zfs.py
new file mode 100644
index 0000000000..0d79569d77
--- /dev/null
+++ b/lib/ansible/modules/extras/system/zfs.py
@@ -0,0 +1,259 @@
+#!/usr/bin/python
+# -*- coding: utf-8 -*-
+
+# (c) 2013, Johan Wiren <johan.wiren.se@gmail.com>
+#
+# This file is part of Ansible
+#
+# Ansible is free software: you can redistribute it and/or modify
+# it under the terms of the GNU General Public License as published by
+# the Free Software Foundation, either version 3 of the License, or
+# (at your option) any later version.
+#
+# Ansible is distributed in the hope that it will be useful,
+# but WITHOUT ANY WARRANTY; without even the implied warranty of
+# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+# GNU General Public License for more details.
+#
+# You should have received a copy of the GNU General Public License
+# along with Ansible. If not, see <http://www.gnu.org/licenses/>.
+#
+
+DOCUMENTATION = '''
+---
+module: zfs
+short_description: Manage zfs
+description:
+ - Manages ZFS file systems, volumes, clones and snapshots.
+version_added: "1.1"
+options:
+ name:
+ description:
+ - File system, snapshot or volume name, e.g. C(rpool/myfs).
+ required: true
+ state:
+ description:
+ - Whether to create (C(present)), or remove (C(absent)) a
+ file system, snapshot or volume. All parents/children
+ will be created/destroyed as needed to reach the desired state.
+ choices: ['present', 'absent']
+ required: true
+ origin:
+ description:
+ - Snapshot from which to create a clone
+ default: null
+ required: false
+ key_value:
+ description:
+ - The C(zfs) module takes key=value pairs for zfs properties to be set. See the zfs(8) man page for more information.
+ default: null
+ required: false
+
+author: "Johan Wiren (@johanwiren)"
+'''
+
+EXAMPLES = '''
+# Create a new file system called myfs in pool rpool with the setuid property turned off
+- zfs: name=rpool/myfs state=present setuid=off
+
+# Create a new volume called myvol in pool rpool.
+- zfs: name=rpool/myvol state=present volsize=10M
+
+# Create a snapshot of rpool/myfs file system.
+- zfs: name=rpool/myfs@mysnapshot state=present
+
+# Create a new file system called myfs2 with snapdir enabled
+- zfs: name=rpool/myfs2 state=present snapdir=enabled
+
+# Create a new file system by cloning a snapshot
+- zfs: name=rpool/cloned_fs state=present origin=rpool/myfs@mysnapshot
+
+# Destroy a filesystem
+- zfs: name=rpool/myfs state=absent
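+
+# Destroy a snapshot (a sketch; the snapshot name is illustrative)
+- zfs: name=rpool/myfs@mysnapshot state=absent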
+'''
+
+
+import os
+
+
+class Zfs(object):
+
+ def __init__(self, module, name, properties):
+ self.module = module
+ self.name = name
+ self.properties = properties
+ self.changed = False
+ self.zfs_cmd = module.get_bin_path('zfs', True)
+ self.zpool_cmd = module.get_bin_path('zpool', True)
+ self.pool = name.split('/')[0]
+ self.is_solaris = os.uname()[0] == 'SunOS'
+ self.is_openzfs = self.check_openzfs()
+ self.enhanced_sharing = self.check_enhanced_sharing()
+
+ def check_openzfs(self):
+ cmd = [self.zpool_cmd]
+ cmd.extend(['get', 'version'])
+ cmd.append(self.pool)
+ (rc, out, err) = self.module.run_command(cmd, check_rc=True)
+ version = out.splitlines()[-1].split()[2]
+ if version == '-':
+ return True
+ if int(version) == 5000:
+ return True
+ return False
+
+ def check_enhanced_sharing(self):
+ if self.is_solaris and not self.is_openzfs:
+ cmd = [self.zpool_cmd]
+ cmd.extend(['get', 'version'])
+ cmd.append(self.pool)
+ (rc, out, err) = self.module.run_command(cmd, check_rc=True)
+ version = out.splitlines()[-1].split()[2]
+ if int(version) >= 34:
+ return True
+ return False
+
+ def exists(self):
+ cmd = [self.zfs_cmd, 'list', '-t', 'all', self.name]
+ (rc, out, err) = self.module.run_command(' '.join(cmd))
+ if rc == 0:
+ return True
+ else:
+ return False
+
+ def create(self):
+ if self.module.check_mode:
+ self.changed = True
+ return
+ properties = self.properties
+ volsize = properties.pop('volsize', None)
+ volblocksize = properties.pop('volblocksize', None)
+ origin = properties.pop('origin', None)
+ cmd = [self.zfs_cmd]
+
+ if "@" in self.name:
+ action = 'snapshot'
+ elif origin:
+ action = 'clone'
+ else:
+ action = 'create'
+
+ cmd.append(action)
+
+ if action in ['create', 'clone']:
+ cmd += ['-p']
+
+ if volsize:
+ cmd += ['-V', volsize]
+ if volblocksize:
+ cmd += ['-b', volblocksize]
+ if properties:
+ for prop, value in properties.iteritems():
+ cmd += ['-o', '%s="%s"' % (prop, value)]
+ if origin:
+ cmd.append(origin)
+ cmd.append(self.name)
+ (rc, out, err) = self.module.run_command(' '.join(cmd))
+ if rc == 0:
+ self.changed = True
+ else:
+ self.module.fail_json(msg=err)
+
+ def destroy(self):
+ if self.module.check_mode:
+ self.changed = True
+ return
+ cmd = [self.zfs_cmd, 'destroy', '-R', self.name]
+ (rc, out, err) = self.module.run_command(' '.join(cmd))
+ if rc == 0:
+ self.changed = True
+ else:
+ self.module.fail_json(msg=err)
+
+ def set_property(self, prop, value):
+ if self.module.check_mode:
+ self.changed = True
+ return
+ cmd = [self.zfs_cmd, 'set', prop + '=' + str(value), self.name]
+ (rc, out, err) = self.module.run_command(cmd)
+ if rc == 0:
+ self.changed = True
+ else:
+ self.module.fail_json(msg=err)
+
+ def set_properties_if_changed(self):
+ current_properties = self.get_current_properties()
+ for prop, value in self.properties.iteritems():
+ if current_properties.get(prop, None) != value:
+ self.set_property(prop, value)
+
+ def get_current_properties(self):
+ cmd = [self.zfs_cmd, 'get', '-H']
+ if self.enhanced_sharing:
+ cmd += ['-e']
+ cmd += ['all', self.name]
+ rc, out, err = self.module.run_command(" ".join(cmd))
+ properties = dict()
+ for prop, value, source in [l.split('\t')[1:4] for l in out.splitlines()]:
+ if source == 'local':
+ properties[prop] = value
+ # Add alias for enhanced sharing properties
+ if self.enhanced_sharing:
+ properties['sharenfs'] = properties.get('share.nfs', None)
+ properties['sharesmb'] = properties.get('share.smb', None)
+ return properties
+
+
+def main():
+
+ module = AnsibleModule(
+ argument_spec = dict(
+ name = dict(type='str', required=True),
+ state = dict(type='str', required=True, choices=['present', 'absent']),
+ # No longer used. Kept here to not interfere with zfs properties
+ createparent = dict(type='bool', required=False)
+ ),
+ supports_check_mode=True,
+ check_invalid_arguments=False
+ )
+
+ state = module.params.pop('state')
+ name = module.params.pop('name')
+
+ # Get all valid zfs-properties
+ properties = dict()
+ for prop, value in module.params.iteritems():
+ # All freestyle params are zfs properties
+ if prop not in module.argument_spec:
+ # Reverse the boolification of freestyle zfs properties
+ if type(value) == bool:
+ if value is True:
+ properties[prop] = 'on'
+ else:
+ properties[prop] = 'off'
+ else:
+ properties[prop] = value
+
+ result = {}
+ result['name'] = name
+ result['state'] = state
+
+ zfs = Zfs(module, name, properties)
+
+ if state == 'present':
+ if zfs.exists():
+ zfs.set_properties_if_changed()
+ else:
+ zfs.create()
+
+ elif state == 'absent':
+ if zfs.exists():
+ zfs.destroy()
+
+ result.update(zfs.properties)
+ result['changed'] = zfs.changed
+ module.exit_json(**result)
+
+# import module snippets
+from ansible.module_utils.basic import *
+main()
diff --git a/lib/ansible/modules/extras/test/integrations/group_vars/all.yml b/lib/ansible/modules/extras/test/integrations/group_vars/all.yml
new file mode 100644
index 0000000000..8a3ccba716
--- /dev/null
+++ b/lib/ansible/modules/extras/test/integrations/group_vars/all.yml
@@ -0,0 +1 @@
+test_subnet_id: 'subnet-123456789'
diff --git a/lib/ansible/modules/extras/test/integrations/roles/ec2_vpc_nat_gateway/tasks/main.yml b/lib/ansible/modules/extras/test/integrations/roles/ec2_vpc_nat_gateway/tasks/main.yml
new file mode 100644
index 0000000000..f5ad5f50fc
--- /dev/null
+++ b/lib/ansible/modules/extras/test/integrations/roles/ec2_vpc_nat_gateway/tasks/main.yml
@@ -0,0 +1,76 @@
+- name: Launch a NAT Gateway and allocate a new EIP.
+ ec2_vpc_nat_gateway:
+ region: us-west-2
+ state: present
+ subnet_id: "{{ test_subnet_id }}"
+ wait: yes
+ wait_timeout: 600
+ register: nat
+
+- debug:
+ var: nat
+- fail:
+ msg: "Failed to create"
+ when: '"{{ nat["changed"] }}" != "True"'
+
+- name: Launch a new gateway only if one does not exist already in this subnet.
+ ec2_vpc_nat_gateway:
+ if_exist_do_not_create: yes
+ region: us-west-2
+ state: present
+ subnet_id: "{{ test_subnet_id }}"
+ wait: yes
+ wait_timeout: 600
+ register: nat_idempotent
+
+- debug:
+ var: nat_idempotent
+- fail:
+ msg: "Failed to be idempotent"
+ when: '"{{ nat_idempotent["changed"] }}" == "True"'
+
+- name: Launch a NAT Gateway and allocate a new EIP even if one already exists in the subnet.
+ ec2_vpc_nat_gateway:
+ region: us-west-2
+ state: present
+ subnet_id: "{{ test_subnet_id }}"
+ wait: yes
+ wait_timeout: 600
+ register: new_nat
+
+- debug:
+ var: new_nat
+- fail:
+ msg: "Failed to create"
+ when: '"{{ new_nat["changed"] }}" != "True"'
+
+- name: Launch a NAT Gateway with an allocation id; this call is idempotent and will not create anything.
+ ec2_vpc_nat_gateway:
+ allocation_id: eipalloc-1234567
+ region: us-west-2
+ state: present
+ subnet_id: "{{ test_subnet_id }}"
+ wait: yes
+ wait_timeout: 600
+ register: nat_with_eipalloc
+
+- debug:
+ var: nat_with_eipalloc
+- fail:
+ msg: 'Failed to be idempotent.'
+ when: '"{{ nat_with_eipalloc["changed"] }}" == "True"'
+
+- name: Delete the first NAT gateway and do not wait for it to finish
+ ec2_vpc_nat_gateway:
+ region: us-west-2
+ nat_gateway_id: "{{ nat.nat_gateway_id }}"
+ state: absent
+
+- name: Delete the nat_with_eipalloc gateway and release the EIP
+ ec2_vpc_nat_gateway:
+ region: us-west-2
+ nat_gateway_id: "{{ new_nat.nat_gateway_id }}"
+ release_eip: yes
+ state: absent
+ wait: yes
+ wait_timeout: 600
diff --git a/lib/ansible/modules/extras/test/integrations/site.yml b/lib/ansible/modules/extras/test/integrations/site.yml
new file mode 100644
index 0000000000..62416726eb
--- /dev/null
+++ b/lib/ansible/modules/extras/test/integrations/site.yml
@@ -0,0 +1,3 @@
+- hosts: 127.0.0.1
+ roles:
+ - { role: ec2_vpc_nat_gateway }
diff --git a/lib/ansible/modules/extras/test/unit/cloud/amazon/test_ec2_vpc_nat_gateway.py b/lib/ansible/modules/extras/test/unit/cloud/amazon/test_ec2_vpc_nat_gateway.py
new file mode 100644
index 0000000000..1b75c88a14
--- /dev/null
+++ b/lib/ansible/modules/extras/test/unit/cloud/amazon/test_ec2_vpc_nat_gateway.py
@@ -0,0 +1,486 @@
+#!/usr/bin/python
+
+import boto3
+import unittest
+
+from collections import namedtuple
+from ansible.parsing.dataloader import DataLoader
+from ansible.vars import VariableManager
+from ansible.inventory import Inventory
+from ansible.playbook.play import Play
+from ansible.executor.task_queue_manager import TaskQueueManager
+
+import cloud.amazon.ec2_vpc_nat_gateway as ng
+
+Options = (
+ namedtuple(
+ 'Options', [
+ 'connection', 'module_path', 'forks', 'become', 'become_method',
+ 'become_user', 'remote_user', 'private_key_file', 'ssh_common_args',
+ 'sftp_extra_args', 'scp_extra_args', 'ssh_extra_args', 'verbosity',
+ 'check'
+ ]
+ )
+)
+# initialize needed objects
+variable_manager = VariableManager()
+loader = DataLoader()
+options = (
+ Options(
+ connection='local',
+ module_path='cloud/amazon',
+ forks=1, become=None, become_method=None, become_user=None, check=True,
+ remote_user=None, private_key_file=None, ssh_common_args=None,
+ sftp_extra_args=None, scp_extra_args=None, ssh_extra_args=None,
+ verbosity=3
+ )
+)
+passwords = dict(vault_pass='')
+
+aws_region = 'us-west-2'
+
+# create inventory and pass to var manager
+inventory = Inventory(loader=loader, variable_manager=variable_manager, host_list='localhost')
+variable_manager.set_inventory(inventory)
+
+def run(play):
+ tqm = None
+ results = None
+ try:
+ tqm = TaskQueueManager(
+ inventory=inventory,
+ variable_manager=variable_manager,
+ loader=loader,
+ options=options,
+ passwords=passwords,
+ stdout_callback='default',
+ )
+ results = tqm.run(play)
+ finally:
+ if tqm is not None:
+ tqm.cleanup()
+ return tqm, results
+
+class AnsibleVpcNatGatewayTasks(unittest.TestCase):
+
+ def test_create_gateway_using_allocation_id(self):
+ play_source = dict(
+ name = "Create new nat gateway with eip allocation-id",
+ hosts = 'localhost',
+ gather_facts = 'no',
+ tasks = [
+ dict(
+ action=dict(
+ module='ec2_vpc_nat_gateway',
+ args=dict(
+ subnet_id='subnet-12345678',
+ allocation_id='eipalloc-12345678',
+ wait='yes',
+ region=aws_region,
+ )
+ ),
+ register='nat_gateway',
+ ),
+ dict(
+ action=dict(
+ module='debug',
+ args=dict(
+ msg='{{nat_gateway}}'
+ )
+ )
+ )
+ ]
+ )
+ play = Play().load(play_source, variable_manager=variable_manager, loader=loader)
+ tqm, results = run(play)
+ self.assertEqual(tqm._stats.ok['localhost'], 2)
+ self.assertEqual(tqm._stats.changed['localhost'], 1)
+
+ def test_create_gateway_using_allocation_id_idempotent(self):
+ play_source = dict(
+ name = "Create new nat gateway with eip allocation-id",
+ hosts = 'localhost',
+ gather_facts = 'no',
+ tasks = [
+ dict(
+ action=dict(
+ module='ec2_vpc_nat_gateway',
+ args=dict(
+ subnet_id='subnet-123456789',
+ allocation_id='eipalloc-1234567',
+ wait='yes',
+ region=aws_region,
+ )
+ ),
+ register='nat_gateway',
+ ),
+ dict(
+ action=dict(
+ module='debug',
+ args=dict(
+ msg='{{nat_gateway}}'
+ )
+ )
+ )
+ ]
+ )
+ play = Play().load(play_source, variable_manager=variable_manager, loader=loader)
+ tqm, results = run(play)
+ self.assertEqual(tqm._stats.ok['localhost'], 2)
+ self.assertNotIn('localhost', tqm._stats.changed)
+
+ def test_create_gateway_using_eip_address(self):
+ play_source = dict(
+ name = "Create new nat gateway with eip address",
+ hosts = 'localhost',
+ gather_facts = 'no',
+ tasks = [
+ dict(
+ action=dict(
+ module='ec2_vpc_nat_gateway',
+ args=dict(
+ subnet_id='subnet-12345678',
+ eip_address='55.55.55.55',
+ wait='yes',
+ region=aws_region,
+ )
+ ),
+ register='nat_gateway',
+ ),
+ dict(
+ action=dict(
+ module='debug',
+ args=dict(
+ msg='{{nat_gateway}}'
+ )
+ )
+ )
+ ]
+ )
+ play = Play().load(play_source, variable_manager=variable_manager, loader=loader)
+ tqm, results = run(play)
+ self.assertEqual(tqm._stats.ok['localhost'], 2)
+ self.assertEqual(tqm._stats.changed['localhost'], 1)
+
+ def test_create_gateway_using_eip_address_idempotent(self):
+ play_source = dict(
+ name = "Create new nat gateway with eip address",
+ hosts = 'localhost',
+ gather_facts = 'no',
+ tasks = [
+ dict(
+ action=dict(
+ module='ec2_vpc_nat_gateway',
+ args=dict(
+ subnet_id='subnet-123456789',
+ eip_address='55.55.55.55',
+ wait='yes',
+ region=aws_region,
+ )
+ ),
+ register='nat_gateway',
+ ),
+ dict(
+ action=dict(
+ module='debug',
+ args=dict(
+ msg='{{nat_gateway}}'
+ )
+ )
+ )
+ ]
+ )
+ play = Play().load(play_source, variable_manager=variable_manager, loader=loader)
+ tqm, results = run(play)
+ self.assertEqual(tqm._stats.ok['localhost'], 2)
+ self.assertNotIn('localhost', tqm._stats.changed)
+
+ def test_create_gateway_in_subnet_only_if_one_does_not_exist_already(self):
+ play_source = dict(
+ name = "Create new nat gateway only if one does not exist already",
+ hosts = 'localhost',
+ gather_facts = 'no',
+ tasks = [
+ dict(
+ action=dict(
+ module='ec2_vpc_nat_gateway',
+ args=dict(
+ if_exist_do_not_create='yes',
+ subnet_id='subnet-123456789',
+ wait='yes',
+ region=aws_region,
+ )
+ ),
+ register='nat_gateway',
+ ),
+ dict(
+ action=dict(
+ module='debug',
+ args=dict(
+ msg='{{nat_gateway}}'
+ )
+ )
+ )
+ ]
+ )
+ play = Play().load(play_source, variable_manager=variable_manager, loader=loader)
+ tqm, results = run(play)
+ self.assertEqual(tqm._stats.ok['localhost'], 2)
+ self.assertNotIn('localhost', tqm._stats.changed)
+
+ def test_delete_gateway(self):
+ play_source = dict(
+ name = "Delete Nat Gateway",
+ hosts = 'localhost',
+ gather_facts = 'no',
+ tasks = [
+ dict(
+ action=dict(
+ module='ec2_vpc_nat_gateway',
+ args=dict(
+ nat_gateway_id='nat-123456789',
+ state='absent',
+ wait='yes',
+ region=aws_region,
+ )
+ ),
+ register='nat_gateway',
+ ),
+ dict(
+ action=dict(
+ module='debug',
+ args=dict(
+ msg='{{nat_gateway}}'
+ )
+ )
+ )
+ ]
+ )
+ play = Play().load(play_source, variable_manager=variable_manager, loader=loader)
+ tqm, results = run(play)
+ self.assertEqual(tqm._stats.ok['localhost'], 2)
+ self.assertIn('localhost', tqm._stats.changed)
+
+class AnsibleEc2VpcNatGatewayFunctions(unittest.TestCase):
+
+ def test_convert_to_lower(self):
+ example = ng.DRY_RUN_GATEWAY_UNCONVERTED
+ converted_example = ng.convert_to_lower(example[0])
+ keys = converted_example.keys()
+ keys.sort()
+ for i in range(len(keys)):
+ if i == 0:
+ self.assertEqual(keys[i], 'create_time')
+ if i == 1:
+ self.assertEqual(keys[i], 'nat_gateway_addresses')
+ gw_addresses_keys = converted_example[keys[i]][0].keys()
+ gw_addresses_keys.sort()
+ for j in range(len(gw_addresses_keys)):
+ if j == 0:
+ self.assertEqual(gw_addresses_keys[j], 'allocation_id')
+ if j == 1:
+ self.assertEqual(gw_addresses_keys[j], 'network_interface_id')
+ if j == 2:
+ self.assertEqual(gw_addresses_keys[j], 'private_ip')
+ if j == 3:
+ self.assertEqual(gw_addresses_keys[j], 'public_ip')
+ if i == 2:
+ self.assertEqual(keys[i], 'nat_gateway_id')
+ if i == 3:
+ self.assertEqual(keys[i], 'state')
+ if i == 4:
+ self.assertEqual(keys[i], 'subnet_id')
+ if i == 5:
+ self.assertEqual(keys[i], 'vpc_id')
+
+ def test_get_nat_gateways(self):
+ client = boto3.client('ec2', region_name=aws_region)
+ success, err_msg, stream = (
+ ng.get_nat_gateways(client, 'subnet-123456789', check_mode=True)
+ )
+ should_return = ng.DRY_RUN_GATEWAYS
+ self.assertTrue(success)
+ self.assertEqual(stream, should_return)
+
+ def test_get_nat_gateways_no_gateways_found(self):
+ client = boto3.client('ec2', region_name=aws_region)
+ success, err_msg, stream = (
+ ng.get_nat_gateways(client, 'subnet-1234567', check_mode=True)
+ )
+ self.assertTrue(success)
+ self.assertEqual(stream, [])
+
+ def test_wait_for_status(self):
+ client = boto3.client('ec2', region_name=aws_region)
+ success, err_msg, gws = (
+ ng.wait_for_status(
+ client, 5, 'nat-123456789', 'available', check_mode=True
+ )
+ )
+ should_return = ng.DRY_RUN_GATEWAYS[0]
+ self.assertTrue(success)
+ self.assertEqual(gws, should_return)
+
+ def test_wait_for_status_to_timeout(self):
+ client = boto3.client('ec2', region_name=aws_region)
+ success, err_msg, gws = (
+ ng.wait_for_status(
+ client, 2, 'nat-12345678', 'available', check_mode=True
+ )
+ )
+ self.assertFalse(success)
+ self.assertEqual(gws, {})
+
+ def test_gateway_in_subnet_exists_with_allocation_id(self):
+ client = boto3.client('ec2', region_name=aws_region)
+ gws, err_msg = (
+ ng.gateway_in_subnet_exists(
+ client, 'subnet-123456789', 'eipalloc-1234567', check_mode=True
+ )
+ )
+ should_return = ng.DRY_RUN_GATEWAYS
+ self.assertEqual(gws, should_return)
+
+ def test_gateway_in_subnet_exists_with_allocation_id_does_not_exist(self):
+ client = boto3.client('ec2', region_name=aws_region)
+ gws, err_msg = (
+ ng.gateway_in_subnet_exists(
+ client, 'subnet-123456789', 'eipalloc-123', check_mode=True
+ )
+ )
+ should_return = list()
+ self.assertEqual(gws, should_return)
+
+ def test_gateway_in_subnet_exists_without_allocation_id(self):
+ client = boto3.client('ec2', region_name=aws_region)
+ gws, err_msg = (
+ ng.gateway_in_subnet_exists(
+ client, 'subnet-123456789', check_mode=True
+ )
+ )
+ should_return = ng.DRY_RUN_GATEWAYS
+ self.assertEqual(gws, should_return)
+
+ def test_get_eip_allocation_id_by_address(self):
+ client = boto3.client('ec2', region_name=aws_region)
+ allocation_id, _ = (
+ ng.get_eip_allocation_id_by_address(
+ client, '55.55.55.55', check_mode=True
+ )
+ )
+ should_return = 'eipalloc-1234567'
+ self.assertEqual(allocation_id, should_return)
+
+ def test_get_eip_allocation_id_by_address_does_not_exist(self):
+ client = boto3.client('ec2', region_name=aws_region)
+ allocation_id, err_msg = (
+ ng.get_eip_allocation_id_by_address(
+ client, '52.52.52.52', check_mode=True
+ )
+ )
+ self.assertEqual(err_msg, 'EIP 52.52.52.52 does not exist')
+ self.assertIsNone(allocation_id)
+
+ def test_allocate_eip_address(self):
+ client = boto3.client('ec2', region_name=aws_region)
+ success, err_msg, eip_id = (
+ ng.allocate_eip_address(
+ client, check_mode=True
+ )
+ )
+ self.assertTrue(success)
+
+ def test_release_address(self):
+ client = boto3.client('ec2', region_name=aws_region)
+ success, _ = (
+ ng.release_address(
+ client, 'eipalloc-1234567', check_mode=True
+ )
+ )
+ self.assertTrue(success)
+
+ def test_create(self):
+ client = boto3.client('ec2', region_name=aws_region)
+ success, changed, err_msg, results = (
+ ng.create(
+ client, 'subnet-123456', 'eipalloc-1234567', check_mode=True
+ )
+ )
+ self.assertTrue(success)
+ self.assertTrue(changed)
+
+ def test_pre_create(self):
+ client = boto3.client('ec2', region_name=aws_region)
+ success, changed, err_msg, results = (
+ ng.pre_create(
+ client, 'subnet-123456', check_mode=True
+ )
+ )
+ self.assertTrue(success)
+ self.assertTrue(changed)
+
+ def test_pre_create_idemptotent_with_allocation_id(self):
+ client = boto3.client('ec2', region_name=aws_region)
+ success, changed, err_msg, results = (
+ ng.pre_create(
+ client, 'subnet-123456789', allocation_id='eipalloc-1234567', check_mode=True
+ )
+ )
+ self.assertTrue(success)
+ self.assertFalse(changed)
+
+ def test_pre_create_idemptotent_with_eip_address(self):
+ client = boto3.client('ec2', region_name=aws_region)
+ success, changed, err_msg, results = (
+ ng.pre_create(
+ client, 'subnet-123456789', eip_address='55.55.55.55', check_mode=True
+ )
+ )
+ self.assertTrue(success)
+ self.assertFalse(changed)
+
+ def test_pre_create_idemptotent_if_exist_do_not_create(self):
+ client = boto3.client('ec2', region_name=aws_region)
+ success, changed, err_msg, results = (
+ ng.pre_create(
+ client, 'subnet-123456789', if_exist_do_not_create=True, check_mode=True
+ )
+ )
+ self.assertTrue(success)
+ self.assertFalse(changed)
+
+ def test_delete(self):
+ client = boto3.client('ec2', region_name=aws_region)
+ success, changed, err_msg, _ = (
+ ng.remove(
+ client, 'nat-123456789', check_mode=True
+ )
+ )
+ self.assertTrue(success)
+ self.assertTrue(changed)
+
+ def test_delete_and_release_ip(self):
+ client = boto3.client('ec2', region_name=aws_region)
+ success, changed, err_msg, _ = (
+ ng.remove(
+ client, 'nat-123456789', release_eip=True, check_mode=True
+ )
+ )
+ self.assertTrue(success)
+ self.assertTrue(changed)
+
+ def test_delete_if_does_not_exist(self):
+ client = boto3.client('ec2', region_name=aws_region)
+ success, changed, err_msg, _ = (
+ ng.remove(
+ client, 'nat-12345', check_mode=True
+ )
+ )
+ self.assertFalse(success)
+ self.assertFalse(changed)
+
+def main():
+ unittest.main()
+
+if __name__ == '__main__':
+ main()
diff --git a/lib/ansible/modules/extras/test/unit/cloud/amazon/test_kinesis_stream.py b/lib/ansible/modules/extras/test/unit/cloud/amazon/test_kinesis_stream.py
new file mode 100644
index 0000000000..280ec5e2de
--- /dev/null
+++ b/lib/ansible/modules/extras/test/unit/cloud/amazon/test_kinesis_stream.py
@@ -0,0 +1,285 @@
+#!/usr/bin/python
+
+import boto3
+import unittest
+
+import cloud.amazon.kinesis_stream as kinesis_stream
+
+aws_region = 'us-west-2'
+
+
+class AnsibleKinesisStreamFunctions(unittest.TestCase):
+
+ def test_convert_to_lower(self):
+ example = {
+ 'HasMoreShards': True,
+ 'RetentionPeriodHours': 24,
+ 'StreamName': 'test',
+ 'StreamARN': 'arn:aws:kinesis:east-side:123456789:stream/test',
+ 'StreamStatus': 'ACTIVE'
+ }
+ converted_example = kinesis_stream.convert_to_lower(example)
+ keys = converted_example.keys()
+ keys.sort()
+ for i in range(len(keys)):
+ if i == 0:
+ self.assertEqual(keys[i], 'has_more_shards')
+ if i == 1:
+ self.assertEqual(keys[i], 'retention_period_hours')
+ if i == 2:
+ self.assertEqual(keys[i], 'stream_arn')
+ if i == 3:
+ self.assertEqual(keys[i], 'stream_name')
+ if i == 4:
+ self.assertEqual(keys[i], 'stream_status')
+
+ def test_make_tags_in_aws_format(self):
+ example = {
+ 'env': 'development'
+ }
+ should_return = [
+ {
+ 'Key': 'env',
+ 'Value': 'development'
+ }
+ ]
+ aws_tags = kinesis_stream.make_tags_in_aws_format(example)
+ self.assertEqual(aws_tags, should_return)
+
+ def test_make_tags_in_proper_format(self):
+ example = [
+ {
+ 'Key': 'env',
+ 'Value': 'development'
+ },
+ {
+ 'Key': 'service',
+ 'Value': 'web'
+ }
+ ]
+ should_return = {
+ 'env': 'development',
+ 'service': 'web'
+ }
+ proper_tags = kinesis_stream.make_tags_in_proper_format(example)
+ self.assertEqual(proper_tags, should_return)
+
+ def test_recreate_tags_from_list(self):
+ example = [('environment', 'development'), ('service', 'web')]
+ should_return = [
+ {
+ 'Key': 'environment',
+ 'Value': 'development'
+ },
+ {
+ 'Key': 'service',
+ 'Value': 'web'
+ }
+ ]
+ aws_tags = kinesis_stream.recreate_tags_from_list(example)
+ self.assertEqual(aws_tags, should_return)
+
+ def test_get_tags(self):
+ client = boto3.client('kinesis', region_name=aws_region)
+ success, err_msg, tags = kinesis_stream.get_tags(client, 'test', True)
+ self.assertTrue(success)
+ should_return = [
+ {
+ 'Key': 'DryRunMode',
+ 'Value': 'true'
+ }
+ ]
+ self.assertEqual(tags, should_return)
+
+ def test_find_stream(self):
+ client = boto3.client('kinesis', region_name=aws_region)
+ success, err_msg, stream = (
+ kinesis_stream.find_stream(client, 'test', check_mode=True)
+ )
+ should_return = {
+ 'HasMoreShards': True,
+ 'RetentionPeriodHours': 24,
+ 'StreamName': 'test',
+ 'StreamARN': 'arn:aws:kinesis:east-side:123456789:stream/test',
+ 'StreamStatus': 'ACTIVE'
+ }
+ self.assertTrue(success)
+ self.assertEqual(stream, should_return)
+
+ def test_wait_for_status(self):
+ client = boto3.client('kinesis', region_name=aws_region)
+ success, err_msg, stream = (
+ kinesis_stream.wait_for_status(
+ client, 'test', 'ACTIVE', check_mode=True
+ )
+ )
+ should_return = {
+ 'HasMoreShards': True,
+ 'RetentionPeriodHours': 24,
+ 'StreamName': 'test',
+ 'StreamARN': 'arn:aws:kinesis:east-side:123456789:stream/test',
+ 'StreamStatus': 'ACTIVE'
+ }
+ self.assertTrue(success)
+ self.assertEqual(stream, should_return)
+
+ def test_tags_action_create(self):
+ client = boto3.client('kinesis', region_name=aws_region)
+ tags = {
+ 'env': 'development',
+ 'service': 'web'
+ }
+ success, err_msg = (
+ kinesis_stream.tags_action(
+ client, 'test', tags, 'create', check_mode=True
+ )
+ )
+ self.assertTrue(success)
+
+ def test_tags_action_delete(self):
+ client = boto3.client('kinesis', region_name=aws_region)
+ tags = {
+ 'env': 'development',
+ 'service': 'web'
+ }
+ success, err_msg = (
+ kinesis_stream.tags_action(
+ client, 'test', tags, 'delete', check_mode=True
+ )
+ )
+ self.assertTrue(success)
+
+ def test_tags_action_invalid(self):
+ client = boto3.client('kinesis', region_name=aws_region)
+ tags = {
+ 'env': 'development',
+ 'service': 'web'
+ }
+ success, err_msg = (
+ kinesis_stream.tags_action(
+ client, 'test', tags, 'append', check_mode=True
+ )
+ )
+ self.assertFalse(success)
+
+ def test_update_tags(self):
+ client = boto3.client('kinesis', region_name=aws_region)
+ tags = {
+ 'env': 'development',
+ 'service': 'web'
+ }
+ success, err_msg = (
+ kinesis_stream.update_tags(
+ client, 'test', tags, check_mode=True
+ )
+ )
+ self.assertTrue(success)
+
+ def test_stream_action_create(self):
+ client = boto3.client('kinesis', region_name=aws_region)
+ success, err_msg = (
+ kinesis_stream.stream_action(
+ client, 'test', 10, 'create', check_mode=True
+ )
+ )
+ self.assertTrue(success)
+
+ def test_stream_action_delete(self):
+ client = boto3.client('kinesis', region_name=aws_region)
+ success, err_msg = (
+ kinesis_stream.stream_action(
+ client, 'test', 10, 'delete', check_mode=True
+ )
+ )
+ self.assertTrue(success)
+
+ def test_stream_action_invalid(self):
+ client = boto3.client('kinesis', region_name=aws_region)
+ success, err_msg = (
+ kinesis_stream.stream_action(
+ client, 'test', 10, 'append', check_mode=True
+ )
+ )
+ self.assertFalse(success)
+
+ def test_retention_action_increase(self):
+ client = boto3.client('kinesis', region_name=aws_region)
+ success, err_msg = (
+ kinesis_stream.retention_action(
+ client, 'test', 48, 'increase', check_mode=True
+ )
+ )
+ self.assertTrue(success)
+
+ def test_retention_action_decrease(self):
+ client = boto3.client('kinesis', region_name=aws_region)
+ success, err_msg = (
+ kinesis_stream.retention_action(
+ client, 'test', 24, 'decrease', check_mode=True
+ )
+ )
+ self.assertTrue(success)
+
+ def test_retention_action_invalid(self):
+ client = boto3.client('kinesis', region_name=aws_region)
+ success, err_msg = (
+ kinesis_stream.retention_action(
+ client, 'test', 24, 'create', check_mode=True
+ )
+ )
+ self.assertFalse(success)
+
+ def test_update(self):
+ client = boto3.client('kinesis', region_name=aws_region)
+ current_stream = {
+ 'HasMoreShards': True,
+ 'RetentionPeriodHours': 24,
+ 'StreamName': 'test',
+ 'StreamARN': 'arn:aws:kinesis:east-side:123456789:stream/test',
+ 'StreamStatus': 'ACTIVE'
+ }
+ tags = {
+ 'env': 'development',
+ 'service': 'web'
+ }
+ success, changed, err_msg = (
+ kinesis_stream.update(
+ client, current_stream, 'test', retention_period=48,
+ tags=tags, check_mode=True
+ )
+ )
+ self.assertTrue(success)
+ self.assertTrue(changed)
+ self.assertEqual(err_msg, 'Kinesis Stream test updated successfully.')
+
+ def test_create_stream(self):
+ client = boto3.client('kinesis', region_name=aws_region)
+ tags = {
+ 'env': 'development',
+ 'service': 'web'
+ }
+ success, changed, err_msg, results = (
+ kinesis_stream.create_stream(
+ client, 'test', number_of_shards=10, retention_period=48,
+ tags=tags, check_mode=True
+ )
+ )
+ should_return = {
+ 'has_more_shards': True,
+ 'retention_period_hours': 24,
+ 'stream_name': 'test',
+ 'stream_arn': 'arn:aws:kinesis:east-side:123456789:stream/test',
+ 'stream_status': 'ACTIVE',
+ 'tags': tags,
+ }
+ self.assertTrue(success)
+ self.assertTrue(changed)
+ self.assertEqual(results, should_return)
+ self.assertEqual(err_msg, 'Kinesis Stream test updated successfully.')
+
+
+def main():
+ unittest.main()
+
+if __name__ == '__main__':
+ main()
diff --git a/lib/ansible/modules/extras/test/utils/shippable/ci.sh b/lib/ansible/modules/extras/test/utils/shippable/ci.sh
new file mode 100755
index 0000000000..5c0f847e66
--- /dev/null
+++ b/lib/ansible/modules/extras/test/utils/shippable/ci.sh
@@ -0,0 +1,7 @@
+#!/bin/bash -eux
+
+set -o pipefail
+
+source_root=$(python -c "from os import path; print(path.abspath(path.join(path.dirname('$0'), '../../..')))")
+
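+# "${TEST}" selects which sibling script to run (for example TEST=sanity
+# would run sanity.sh); gawk prefixes each line of output with a timestamp.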
+"${source_root}/test/utils/shippable/${TEST}.sh" 2>&1 | gawk '{ print strftime("%Y-%m-%d %H:%M:%S"), $0; fflush(); }'
diff --git a/lib/ansible/modules/extras/test/utils/shippable/docs-requirements.txt b/lib/ansible/modules/extras/test/utils/shippable/docs-requirements.txt
new file mode 100644
index 0000000000..4e859bb8c7
--- /dev/null
+++ b/lib/ansible/modules/extras/test/utils/shippable/docs-requirements.txt
@@ -0,0 +1,2 @@
+jinja2
+pyyaml
diff --git a/lib/ansible/modules/extras/test/utils/shippable/docs.sh b/lib/ansible/modules/extras/test/utils/shippable/docs.sh
new file mode 100755
index 0000000000..2858f87c99
--- /dev/null
+++ b/lib/ansible/modules/extras/test/utils/shippable/docs.sh
@@ -0,0 +1,62 @@
+#!/bin/bash -eux
+
+set -o pipefail
+
+ansible_repo_url="https://github.com/ansible/ansible.git"
+
+build_dir="${SHIPPABLE_BUILD_DIR}"
+repo="${REPO_NAME}"
+
+case "${repo}" in
+ "ansible-modules-core")
+ this_module_group="core"
+ other_module_group="extras"
+ ;;
+ "ansible-modules-extras")
+ this_module_group="extras"
+ other_module_group="core"
+ ;;
+ *)
+ echo "Unsupported repo name: ${repo}"
+ exit 1
+ ;;
+esac
+
+modules_tmp_dir="${build_dir}.tmp"
+this_modules_dir="${build_dir}/lib/ansible/modules/${this_module_group}"
+other_modules_dir="${build_dir}/lib/ansible/modules/${other_module_group}"
+
+cd /
+mv "${build_dir}" "${modules_tmp_dir}"
+git clone "${ansible_repo_url}" "${build_dir}"
+cd "${build_dir}"
+rmdir "${this_modules_dir}"
+mv "${modules_tmp_dir}" "${this_modules_dir}"
+mv "${this_modules_dir}/shippable" "${build_dir}"
+git submodule init "${other_modules_dir}"
+git submodule sync "${other_modules_dir}"
+git submodule update "${other_modules_dir}"
+
+pip install -r lib/ansible/modules/${this_module_group}/test/utils/shippable/docs-requirements.txt --upgrade
+pip list
+
+source hacking/env-setup
+
+docs_status=0
+
+PAGER=/bin/cat \
+ ANSIBLE_DEPRECATION_WARNINGS=false \
+ bin/ansible-doc -l \
+ 2>/tmp/ansible-doc.err || docs_status=$?
+
+if [ -s /tmp/ansible-doc.err ]; then
+ # report warnings as errors
+ echo "Output from 'ansible-doc -l' on stderr is considered an error:"
+ cat /tmp/ansible-doc.err
+ exit 1
+fi
+
+if [ "${docs_status}" -ne 0 ]; then
+ echo "Running 'ansible-doc -l' failed with no output on stderr and exit code: ${docs_status}"
+ exit 1
+fi
diff --git a/lib/ansible/modules/extras/test/utils/shippable/integration.sh b/lib/ansible/modules/extras/test/utils/shippable/integration.sh
new file mode 100755
index 0000000000..cf10e681bf
--- /dev/null
+++ b/lib/ansible/modules/extras/test/utils/shippable/integration.sh
@@ -0,0 +1,55 @@
+#!/bin/bash -eux
+
+set -o pipefail
+
+ansible_repo_url="https://github.com/ansible/ansible.git"
+
+is_pr="${IS_PULL_REQUEST}"
+build_dir="${SHIPPABLE_BUILD_DIR}"
+repo="${REPO_NAME}"
+
+if [ "${is_pr}" != "true" ]; then
+ echo "Module integration tests are only supported on pull requests."
+ exit 0
+fi
+
+case "${repo}" in
+ "ansible-modules-core")
+ this_module_group="core"
+ other_module_group="extras"
+ ;;
+ "ansible-modules-extras")
+ this_module_group="extras"
+ other_module_group="core"
+ ;;
+ *)
+ echo "Unsupported repo name: ${repo}"
+ exit 1
+ ;;
+esac
+
+modules_tmp_dir="${build_dir}.tmp"
+this_modules_dir="${build_dir}/lib/ansible/modules/${this_module_group}"
+other_modules_dir="${build_dir}/lib/ansible/modules/${other_module_group}"
+
+cd /
+mv "${build_dir}" "${modules_tmp_dir}"
+git clone "${ansible_repo_url}" "${build_dir}"
+cd "${build_dir}"
+rmdir "${this_modules_dir}"
+mv "${modules_tmp_dir}" "${this_modules_dir}"
+mv "${this_modules_dir}/shippable" "${build_dir}"
+git submodule init "${other_modules_dir}"
+git submodule sync "${other_modules_dir}"
+git submodule update "${other_modules_dir}"
+
+pip install -r test/utils/shippable/modules/generate-tests-requirements.txt --upgrade
+pip list
+
+source hacking/env-setup
+
+test/utils/shippable/modules/generate-tests "${this_module_group}" --verbose --output /tmp/integration.sh >/dev/null
+
+if [ -f /tmp/integration.sh ]; then
+ /bin/bash -eux /tmp/integration.sh
+fi
diff --git a/lib/ansible/modules/extras/test/utils/shippable/sanity-skip-python24.txt b/lib/ansible/modules/extras/test/utils/shippable/sanity-skip-python24.txt
new file mode 100644
index 0000000000..5e3e5afa8d
--- /dev/null
+++ b/lib/ansible/modules/extras/test/utils/shippable/sanity-skip-python24.txt
@@ -0,0 +1,14 @@
+/cloud/
+/clustering/consul.*.py
+/clustering/znode.py
+/database/influxdb/
+/database/mssql/
+/monitoring/zabbix.*.py
+/network/f5/
+/notification/pushbullet.py
+/packaging/language/maven_artifact.py
+/packaging/os/dnf.py
+/packaging/os/layman.py
+/remote_management/ipmi/
+/univention/
+/web_infrastructure/letsencrypt.py
diff --git a/lib/ansible/modules/extras/test/utils/shippable/sanity-skip-python3.txt b/lib/ansible/modules/extras/test/utils/shippable/sanity-skip-python3.txt
new file mode 100644
index 0000000000..94524fa872
--- /dev/null
+++ b/lib/ansible/modules/extras/test/utils/shippable/sanity-skip-python3.txt
@@ -0,0 +1,85 @@
+/cloud/amazon/cloudtrail.py
+/cloud/amazon/dynamodb_table.py
+/cloud/amazon/ec2_ami_copy.py
+/cloud/amazon/ec2_customer_gateway.py
+/cloud/amazon/ec2_eni.py
+/cloud/amazon/ec2_eni_facts.py
+/cloud/amazon/ec2_remote_facts.py
+/cloud/amazon/ec2_snapshot_facts.py
+/cloud/amazon/ec2_vol_facts.py
+/cloud/amazon/ec2_vpc_igw.py
+/cloud/amazon/ec2_vpc_nacl.py
+/cloud/amazon/ec2_vpc_net_facts.py
+/cloud/amazon/ec2_vpc_route_table.py
+/cloud/amazon/ec2_vpc_route_table_facts.py
+/cloud/amazon/ec2_vpc_subnet.py
+/cloud/amazon/ec2_vpc_subnet_facts.py
+/cloud/amazon/ecs_cluster.py
+/cloud/amazon/ecs_service.py
+/cloud/amazon/ecs_service_facts.py
+/cloud/amazon/ecs_task.py
+/cloud/amazon/ecs_taskdefinition.py
+/cloud/amazon/route53_facts.py
+/cloud/amazon/route53_health_check.py
+/cloud/amazon/route53_zone.py
+/cloud/amazon/s3_lifecycle.py
+/cloud/amazon/s3_logging.py
+/cloud/amazon/sns_topic.py
+/cloud/amazon/sqs_queue.py
+/cloud/amazon/sts_assume_role.py
+/cloud/amazon/sts_session_token.py
+/cloud/centurylink/clc_aa_policy.py
+/cloud/centurylink/clc_group.py
+/cloud/centurylink/clc_publicip.py
+/cloud/google/gce_img.py
+/cloud/google/gce_tag.py
+/cloud/misc/ovirt.py
+/cloud/misc/proxmox.py
+/cloud/misc/proxmox_template.py
+/cloud/misc/virt.py
+/cloud/misc/virt_net.py
+/cloud/misc/virt_pool.py
+/cloud/profitbricks/profitbricks.py
+/cloud/profitbricks/profitbricks_volume.py
+/cloud/rackspace/rax_clb_ssl.py
+/cloud/xenserver_facts.py
+/clustering/consul.py
+/clustering/consul_acl.py
+/clustering/consul_kv.py
+/clustering/consul_session.py
+/commands/expect.py
+/database/misc/mongodb_parameter.py
+/database/misc/mongodb_user.py
+/database/misc/redis.py
+/database/mysql/mysql_replication.py
+/database/postgresql/postgresql_ext.py
+/database/postgresql/postgresql_lang.py
+/database/vertica/vertica_configuration.py
+/database/vertica/vertica_facts.py
+/database/vertica/vertica_role.py
+/database/vertica/vertica_schema.py
+/database/vertica/vertica_user.py
+/monitoring/bigpanda.py
+/monitoring/boundary_meter.py
+/monitoring/circonus_annotation.py
+/monitoring/datadog_monitor.py
+/monitoring/rollbar_deployment.py
+/monitoring/sensu_check.py
+/monitoring/stackdriver.py
+/monitoring/zabbix_group.py
+/monitoring/zabbix_host.py
+/monitoring/zabbix_hostmacro.py
+/monitoring/zabbix_screen.py
+/network/citrix/netscaler.py
+/network/cloudflare_dns.py
+/network/dnsimple.py
+/network/dnsmadeeasy.py
+/network/f5/bigip_gtm_virtual_server.py
+/network/f5/bigip_gtm_wide_ip.py
+/network/nmcli.py
+/network/openvswitch_bridge.py
+/network/openvswitch_port.py
+/notification/irc.py
+/notification/jabber.py
+/notification/mail.py
+/notification/mqtt.py
diff --git a/lib/ansible/modules/extras/test/utils/shippable/sanity-test-python24.txt b/lib/ansible/modules/extras/test/utils/shippable/sanity-test-python24.txt
new file mode 100644
index 0000000000..e69de29bb2
--- /dev/null
+++ b/lib/ansible/modules/extras/test/utils/shippable/sanity-test-python24.txt
diff --git a/lib/ansible/modules/extras/test/utils/shippable/sanity.sh b/lib/ansible/modules/extras/test/utils/shippable/sanity.sh
new file mode 100755
index 0000000000..d9234cf052
--- /dev/null
+++ b/lib/ansible/modules/extras/test/utils/shippable/sanity.sh
@@ -0,0 +1,30 @@
+#!/bin/bash -eux
+
+source_root=$(python -c "from os import path; print(path.abspath(path.join(path.dirname('$0'), '../../..')))")
+
+install_deps="${INSTALL_DEPS:-}"
+
+cd "${source_root}"
+
+if [ "${install_deps}" != "" ]; then
+ add-apt-repository ppa:fkrull/deadsnakes && apt-get update -qq && apt-get install python2.4 -qq
+
+ apt-add-repository 'deb http://archive.ubuntu.com/ubuntu trusty-backports universe'
+ apt-get update -qq
+ apt-get install shellcheck
+
+ pip install git+https://github.com/ansible/ansible.git@devel#egg=ansible
+ pip install git+https://github.com/sivel/ansible-testing.git#egg=ansible_testing
+fi
+
+python2.4 -m compileall -fq -i "test/utils/shippable/sanity-test-python24.txt"
+python2.4 -m compileall -fq -x "($(printf %s "$(< "test/utils/shippable/sanity-skip-python24.txt")" | tr '\n' '|'))" .
+python2.6 -m compileall -fq .
+python2.7 -m compileall -fq .
+python3.5 -m compileall -fq . -x "($(printf %s "$(< "test/utils/shippable/sanity-skip-python3.txt")" | tr '\n' '|'))"
+
+ANSIBLE_DEPRECATION_WARNINGS=false \
+ ansible-validate-modules --exclude '/utilities/|/shippable(/|$)' .
+
+shellcheck \
+ test/utils/shippable/*.sh
diff --git a/lib/ansible/modules/extras/univention/__init__.py b/lib/ansible/modules/extras/univention/__init__.py
new file mode 100644
index 0000000000..e69de29bb2
--- /dev/null
+++ b/lib/ansible/modules/extras/univention/__init__.py
diff --git a/lib/ansible/modules/extras/univention/udm_dns_record.py b/lib/ansible/modules/extras/univention/udm_dns_record.py
new file mode 100644
index 0000000000..dab1e13438
--- /dev/null
+++ b/lib/ansible/modules/extras/univention/udm_dns_record.py
@@ -0,0 +1,182 @@
+#!/usr/bin/python
+# -*- coding: UTF-8 -*-
+
+# Copyright (c) 2016, Adfinis SyGroup AG
+# Tobias Rueetschi <tobias.ruetschi@adfinis-sygroup.ch>
+#
+# This file is part of Ansible
+#
+# Ansible is free software: you can redistribute it and/or modify
+# it under the terms of the GNU General Public License as published by
+# the Free Software Foundation, either version 3 of the License, or
+# (at your option) any later version.
+#
+# Ansible is distributed in the hope that it will be useful,
+# but WITHOUT ANY WARRANTY; without even the implied warranty of
+# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+# GNU General Public License for more details.
+#
+# You should have received a copy of the GNU General Public License
+# along with Ansible. If not, see <http://www.gnu.org/licenses/>.
+#
+
+
+from ansible.module_utils.basic import AnsibleModule
+from ansible.module_utils.univention_umc import (
+ umc_module_for_add,
+ umc_module_for_edit,
+ ldap_search,
+ base_dn,
+ config,
+ uldap,
+)
+from univention.admin.handlers.dns import (
+ forward_zone,
+ reverse_zone,
+)
+
+
+DOCUMENTATION = '''
+---
+module: udm_dns_record
+version_added: "2.2"
+author: "Tobias Rueetschi (@2-B)"
+short_description: Manage dns entries on a univention corporate server
+description:
+ - "This module allows to manage dns records on a univention corporate server (UCS).
+ It uses the python API of the UCS to create a new object or edit it."
+requirements:
+ - Python >= 2.6
+options:
+ state:
+ required: false
+ default: "present"
+ choices: [ present, absent ]
+ description:
+ - Whether the dns record is present or not.
+ name:
+ required: true
+ description:
+ - "Name of the record, this is also the DNS record. E.g. www for
+ www.example.com."
+ zone:
+ required: true
+ description:
+ - Corresponding DNS zone for this record, e.g. example.com.
+ type:
+ required: true
+ choices: [ host_record, alias, ptr_record, srv_record, txt_record ]
+ description:
+            - "Define the record type. C(host_record) is an A or AAAA record,
+               C(alias) is a CNAME, C(ptr_record) is a PTR record, C(srv_record)
+               is an SRV record and C(txt_record) is a TXT record."
+ data:
+ required: false
+        default: {}
+        description:
+            - "Additional data for this record, e.g. C({'a': '192.0.2.1'}).
+               Required if C(state=present)."
+'''
+
+
+EXAMPLES = '''
+# Create a DNS record on a UCS
+- udm_dns_record: name=www
+    zone=example.com
+    type=host_record
+    data="{'a': '192.0.2.1'}"
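+
+# A hedged sketch (the data key follows UDM reverse-zone property naming,
+# which is assumed here): create a PTR record in an existing reverse zone.
+- udm_dns_record: name=1
+    zone=2.0.192.in-addr.arpa
+    type=ptr_record
+    data="{'ptr_record': 'www.example.com.'}"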
+'''
+
+
+RETURN = '''# '''
+
+
+def main():
+ module = AnsibleModule(
+ argument_spec = dict(
+ type = dict(required=True,
+ type='str'),
+ zone = dict(required=True,
+ type='str'),
+ name = dict(required=True,
+ type='str'),
+            data = dict(default={},
+ type='dict'),
+ state = dict(default='present',
+ choices=['present', 'absent'],
+ type='str')
+ ),
+ supports_check_mode=True,
+ required_if = ([
+ ('state', 'present', ['data'])
+ ])
+ )
+ type = module.params['type']
+ zone = module.params['zone']
+ name = module.params['name']
+ data = module.params['data']
+ state = module.params['state']
+    changed = False
+    diff = None
+
+ obj = list(ldap_search(
+ '(&(objectClass=dNSZone)(zoneName={})(relativeDomainName={}))'.format(zone, name),
+ attr=['dNSZone']
+ ))
+
+ exists = bool(len(obj))
+ container = 'zoneName={},cn=dns,{}'.format(zone, base_dn())
+ dn = 'relativeDomainName={},{}'.format(name, container)
+
+ if state == 'present':
+ try:
+ if not exists:
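+                # The record needs its DNS zone object as superordinate:
+                # look it up among the forward zones first, then fall back
+                # to the reverse zones.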
+ so = forward_zone.lookup(
+ config(),
+ uldap(),
+ '(zone={})'.format(zone),
+ scope='domain',
+ ) or reverse_zone.lookup(
+ config(),
+ uldap(),
+ '(zone={})'.format(zone),
+ scope='domain',
+ )
+ obj = umc_module_for_add('dns/{}'.format(type), container, superordinate=so[0])
+ else:
+ obj = umc_module_for_edit('dns/{}'.format(type), dn)
+ obj['name'] = name
+ for k, v in data.items():
+ obj[k] = v
+ diff = obj.diff()
+            changed = diff != []
+ if not module.check_mode:
+ if not exists:
+ obj.create()
+ else:
+ obj.modify()
+        except Exception as e:
+ module.fail_json(
+ msg='Creating/editing dns entry {} in {} failed: {}'.format(name, container, e)
+ )
+
+ if state == 'absent' and exists:
+ try:
+ obj = umc_module_for_edit('dns/{}'.format(type), dn)
+ if not module.check_mode:
+ obj.remove()
+ changed = True
+        except Exception as e:
+ module.fail_json(
+ msg='Removing dns entry {} in {} failed: {}'.format(name, container, e)
+ )
+
+ module.exit_json(
+ changed=changed,
+ name=name,
+ diff=diff,
+ container=container
+ )
+
+
+if __name__ == '__main__':
+ main()
diff --git a/lib/ansible/modules/extras/univention/udm_dns_zone.py b/lib/ansible/modules/extras/univention/udm_dns_zone.py
new file mode 100644
index 0000000000..baf844b546
--- /dev/null
+++ b/lib/ansible/modules/extras/univention/udm_dns_zone.py
@@ -0,0 +1,240 @@
+#!/usr/bin/python
+# -*- coding: UTF-8 -*-
+
+# Copyright (c) 2016, Adfinis SyGroup AG
+# Tobias Rueetschi <tobias.ruetschi@adfinis-sygroup.ch>
+#
+# This file is part of Ansible
+#
+# Ansible is free software: you can redistribute it and/or modify
+# it under the terms of the GNU General Public License as published by
+# the Free Software Foundation, either version 3 of the License, or
+# (at your option) any later version.
+#
+# Ansible is distributed in the hope that it will be useful,
+# but WITHOUT ANY WARRANTY; without even the implied warranty of
+# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+# GNU General Public License for more details.
+#
+# You should have received a copy of the GNU General Public License
+# along with Ansible. If not, see <http://www.gnu.org/licenses/>.
+#
+
+
+from ansible.module_utils.basic import AnsibleModule
+from ansible.module_utils.univention_umc import (
+ umc_module_for_add,
+ umc_module_for_edit,
+ ldap_search,
+ base_dn,
+)
+
+
+DOCUMENTATION = '''
+---
+module: udm_dns_zone
+version_added: "2.2"
+author: "Tobias Rueetschi (@2-B)"
+short_description: Manage DNS zones on a Univention Corporate Server
+description:
+    - "This module allows you to manage DNS zones on a Univention Corporate Server (UCS).
+       It uses the Python API of the UCS to create a new object or edit an existing one."
+requirements:
+ - Python >= 2.6
+options:
+ state:
+ required: false
+ default: "present"
+ choices: [ present, absent ]
+ description:
+            - Whether the DNS zone is present or not.
+ type:
+ required: true
+ choices: [ forward_zone, reverse_zone ]
+ description:
+ - Define if the zone is a forward or reverse DNS zone.
+ zone:
+ required: true
+ description:
+ - DNS zone name, e.g. C(example.com).
+ nameserver:
+ required: false
+ description:
+ - List of appropriate name servers. Required if C(state=present).
+ interfaces:
+ required: false
+ description:
+            - List of interface IP addresses on which the server should
+              respond for this zone. Required if C(state=present).
+
+ refresh:
+ required: false
+ default: 3600
+ description:
+ - Interval before the zone should be refreshed.
+ retry:
+ required: false
+ default: 1800
+ description:
+ - Interval that should elapse before a failed refresh should be retried.
+ expire:
+ required: false
+ default: 604800
+ description:
+ - Specifies the upper limit on the time interval that can elapse before the zone is no longer authoritative.
+ ttl:
+ required: false
+ default: 600
+ description:
+ - Minimum TTL field that should be exported with any RR from this zone.
+
+ contact:
+ required: false
+ default: ''
+ description:
+ - Contact person in the SOA record.
+ mx:
+ required: false
+ default: []
+ description:
+            - List of MX servers. (Must be declared as A or AAAA records.)
+'''
+
+
+EXAMPLES = '''
+# Create a DNS zone on a UCS
+- udm_dns_zone: zone=example.com
+ type=forward_zone
+ nameserver=['ucs.example.com']
+ interfaces=['192.0.2.1']
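+
+# Remove the zone again (a minimal sketch):
+- udm_dns_zone: zone=example.com
+    type=forward_zone
+    state=absent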
+'''
+
+
+RETURN = '''# '''
+
+
+def convert_time(time):
+ """Convert a time in seconds into the biggest unit"""
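+    # e.g. convert_time(7200) -> ('2', 'hours'); the SOA-related properties
+    # below (refresh, retry, expire, ttl) are set with such (value, unit) pairs.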
+ units = [
+        (24 * 60 * 60, 'days'),
+        (60 * 60, 'hours'),
+        (60, 'minutes'),
+        (1, 'seconds'),
+ ]
+
+ if time == 0:
+ return ('0', 'seconds')
+ for unit in units:
+ if time >= unit[0]:
+ return ('{}'.format(time // unit[0]), unit[1])
+
+
+def main():
+ module = AnsibleModule(
+ argument_spec = dict(
+ type = dict(required=True,
+ type='str'),
+ zone = dict(required=True,
+ aliases=['name'],
+ type='str'),
+ nameserver = dict(default=[],
+ type='list'),
+ interfaces = dict(default=[],
+ type='list'),
+ refresh = dict(default=3600,
+ type='int'),
+ retry = dict(default=1800,
+ type='int'),
+ expire = dict(default=604800,
+ type='int'),
+ ttl = dict(default=600,
+ type='int'),
+ contact = dict(default='',
+ type='str'),
+ mx = dict(default=[],
+ type='list'),
+ state = dict(default='present',
+ choices=['present', 'absent'],
+ type='str')
+ ),
+ supports_check_mode=True,
+ required_if = ([
+ ('state', 'present', ['nameserver', 'interfaces'])
+ ])
+ )
+ type = module.params['type']
+ zone = module.params['zone']
+ nameserver = module.params['nameserver']
+ interfaces = module.params['interfaces']
+ refresh = module.params['refresh']
+ retry = module.params['retry']
+ expire = module.params['expire']
+ ttl = module.params['ttl']
+ contact = module.params['contact']
+ mx = module.params['mx']
+ state = module.params['state']
+    changed = False
+    diff = None
+
+ obj = list(ldap_search(
+ '(&(objectClass=dNSZone)(zoneName={}))'.format(zone),
+ attr=['dNSZone']
+ ))
+
+ exists = bool(len(obj))
+ container = 'cn=dns,{}'.format(base_dn())
+ dn = 'zoneName={},{}'.format(zone, container)
+ if contact == '':
+ contact = 'root@{}.'.format(zone)
+
+ if state == 'present':
+ try:
+ if not exists:
+ obj = umc_module_for_add('dns/{}'.format(type), container)
+ else:
+ obj = umc_module_for_edit('dns/{}'.format(type), dn)
+ obj['zone'] = zone
+ obj['nameserver'] = nameserver
+ obj['a'] = interfaces
+ obj['refresh'] = convert_time(refresh)
+ obj['retry'] = convert_time(retry)
+ obj['expire'] = convert_time(expire)
+ obj['ttl'] = convert_time(ttl)
+ obj['contact'] = contact
+ obj['mx'] = mx
+ diff = obj.diff()
+ if exists:
+ for k in obj.keys():
+ if obj.hasChanged(k):
+ changed = True
+ else:
+ changed = True
+ if not module.check_mode:
+ if not exists:
+ obj.create()
+ elif changed:
+ obj.modify()
+ except Exception as e:
+ module.fail_json(
+ msg='Creating/editing dns zone {} failed: {}'.format(zone, e)
+ )
+
+ if state == 'absent' and exists:
+ try:
+ obj = umc_module_for_edit('dns/{}'.format(type), dn)
+ if not module.check_mode:
+ obj.remove()
+ changed = True
+ except Exception as e:
+ module.fail_json(
+ msg='Removing dns zone {} failed: {}'.format(zone, e)
+ )
+
+ module.exit_json(
+ changed=changed,
+ diff=diff,
+ zone=zone
+ )
+
+
+if __name__ == '__main__':
+ main()
diff --git a/lib/ansible/modules/extras/univention/udm_group.py b/lib/ansible/modules/extras/univention/udm_group.py
new file mode 100644
index 0000000000..588c765524
--- /dev/null
+++ b/lib/ansible/modules/extras/univention/udm_group.py
@@ -0,0 +1,176 @@
+#!/usr/bin/python
+# -*- coding: UTF-8 -*-
+
+# Copyright (c) 2016, Adfinis SyGroup AG
+# Tobias Rueetschi <tobias.ruetschi@adfinis-sygroup.ch>
+#
+# This file is part of Ansible
+#
+# Ansible is free software: you can redistribute it and/or modify
+# it under the terms of the GNU General Public License as published by
+# the Free Software Foundation, either version 3 of the License, or
+# (at your option) any later version.
+#
+# Ansible is distributed in the hope that it will be useful,
+# but WITHOUT ANY WARRANTY; without even the implied warranty of
+# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+# GNU General Public License for more details.
+#
+# You should have received a copy of the GNU General Public License
+# along with Ansible. If not, see <http://www.gnu.org/licenses/>.
+#
+
+
+from ansible.module_utils.basic import AnsibleModule
+from ansible.module_utils.univention_umc import (
+ umc_module_for_add,
+ umc_module_for_edit,
+ ldap_search,
+ base_dn,
+)
+
+
+DOCUMENTATION = '''
+---
+module: udm_group
+version_added: "2.2"
+author: "Tobias Rueetschi (@2-B)"
+short_description: Manage POSIX groups on a Univention Corporate Server
+description:
+    - "This module allows you to manage user groups on a Univention Corporate Server (UCS).
+       It uses the Python API of the UCS to create a new object or edit an existing one."
+requirements:
+ - Python >= 2.6
+options:
+ state:
+ required: false
+ default: "present"
+ choices: [ present, absent ]
+ description:
+ - Whether the group is present or not.
+ name:
+ required: true
+ description:
+ - Name of the posix group.
+ description:
+ required: false
+ description:
+ - Group description.
+ position:
+ required: false
+ description:
+            - Define the whole LDAP position of the group, e.g.
+ C(cn=g123m-1A,cn=classes,cn=schueler,cn=groups,ou=schule,dc=example,dc=com).
+ ou:
+ required: false
+ description:
+ - LDAP OU, e.g. school for LDAP OU C(ou=school,dc=example,dc=com).
+ subpath:
+ required: false
+ description:
+ - Subpath inside the OU, e.g. C(cn=classes,cn=students,cn=groups).
+'''
+
+
+EXAMPLES = '''
+# Create a POSIX group
+- udm_group: name=g123m-1A
+
+# Create a POSIX group with the exact DN
+# C(cn=g123m-1A,cn=classes,cn=students,cn=groups,ou=school,dc=school,dc=example,dc=com)
+- udm_group: name=g123m-1A
+ subpath='cn=classes,cn=students,cn=groups'
+ ou=school
+# or
+- udm_group: name=g123m-1A
+ position='cn=classes,cn=students,cn=groups,ou=school,dc=school,dc=example,dc=com'
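+
+# Remove the group again (a minimal sketch):
+- udm_group: name=g123m-1A
+    state=absent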
+'''
+
+
+RETURN = '''# '''
+
+
+def main():
+ module = AnsibleModule(
+ argument_spec = dict(
+ name = dict(required=True,
+ type='str'),
+ description = dict(default=None,
+ type='str'),
+ position = dict(default='',
+ type='str'),
+ ou = dict(default='',
+ type='str'),
+ subpath = dict(default='cn=groups',
+ type='str'),
+ state = dict(default='present',
+ choices=['present', 'absent'],
+ type='str')
+ ),
+ supports_check_mode=True
+ )
+ name = module.params['name']
+ description = module.params['description']
+ position = module.params['position']
+ ou = module.params['ou']
+ subpath = module.params['subpath']
+ state = module.params['state']
+    changed = False
+    diff = None
+
+ groups = list(ldap_search(
+ '(&(objectClass=posixGroup)(cn={}))'.format(name),
+ attr=['cn']
+ ))
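+    # The target container is either the explicit 'position', or is assembled
+    # from the optional subpath and ou parts plus the LDAP base DN.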
+ if position != '':
+ container = position
+ else:
+ if ou != '':
+ ou = 'ou={},'.format(ou)
+ if subpath != '':
+ subpath = '{},'.format(subpath)
+ container = '{}{}{}'.format(subpath, ou, base_dn())
+ group_dn = 'cn={},{}'.format(name, container)
+
+ exists = bool(len(groups))
+
+ if state == 'present':
+ try:
+ if not exists:
+ grp = umc_module_for_add('groups/group', container)
+ else:
+ grp = umc_module_for_edit('groups/group', group_dn)
+ grp['name'] = name
+ grp['description'] = description
+ diff = grp.diff()
+            changed = diff != []
+ if not module.check_mode:
+ if not exists:
+ grp.create()
+ else:
+ grp.modify()
+        except Exception as e:
+            module.fail_json(
+                msg="Creating/editing group {} in {} failed: {}".format(name, container, e)
+            )
+
+ if state == 'absent' and exists:
+ try:
+ grp = umc_module_for_edit('groups/group', group_dn)
+ if not module.check_mode:
+ grp.remove()
+ changed = True
+        except Exception as e:
+            module.fail_json(
+                msg="Removing group {} failed: {}".format(name, e)
+            )
+
+ module.exit_json(
+ changed=changed,
+ name=name,
+ diff=diff,
+ container=container
+ )
+
+
+if __name__ == '__main__':
+ main()
diff --git a/lib/ansible/modules/extras/univention/udm_share.py b/lib/ansible/modules/extras/univention/udm_share.py
new file mode 100644
index 0000000000..fa8639958e
--- /dev/null
+++ b/lib/ansible/modules/extras/univention/udm_share.py
@@ -0,0 +1,617 @@
+#!/usr/bin/python
+# -*- coding: UTF-8 -*-
+
+# Copyright (c) 2016, Adfinis SyGroup AG
+# Tobias Rueetschi <tobias.ruetschi@adfinis-sygroup.ch>
+#
+# This file is part of Ansible
+#
+# Ansible is free software: you can redistribute it and/or modify
+# it under the terms of the GNU General Public License as published by
+# the Free Software Foundation, either version 3 of the License, or
+# (at your option) any later version.
+#
+# Ansible is distributed in the hope that it will be useful,
+# but WITHOUT ANY WARRANTY; without even the implied warranty of
+# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+# GNU General Public License for more details.
+#
+# You should have received a copy of the GNU General Public License
+# along with Ansible. If not, see <http://www.gnu.org/licenses/>.
+#
+
+
+from ansible.module_utils.basic import AnsibleModule
+from ansible.module_utils.univention_umc import (
+ umc_module_for_add,
+ umc_module_for_edit,
+ ldap_search,
+ base_dn,
+)
+
+
+DOCUMENTATION = '''
+---
+module: udm_share
+version_added: "2.2"
+author: "Tobias Rueetschi (@2-B)"
+short_description: Manage Samba shares on a Univention Corporate Server
+description:
+    - "This module allows you to manage Samba shares on a Univention Corporate
+       Server (UCS).
+       It uses the Python API of the UCS to create a new object or edit an existing one."
+requirements:
+ - Python >= 2.6
+options:
+ state:
+ required: false
+ default: "present"
+ choices: [ present, absent ]
+ description:
+ - Whether the share is present or not.
+ name:
+ required: true
+ description:
+            - Name of the share.
+ host:
+ required: false
+ default: None
+ description:
+ - Host FQDN (server which provides the share), e.g. C({{
+ ansible_fqdn }}). Required if C(state=present).
+ path:
+ required: false
+ default: None
+ description:
+ - Directory on the providing server, e.g. C(/home). Required if C(state=present).
+ samba_name:
+ required: false
+ default: None
+ description:
+ - Windows name. Required if C(state=present).
+ aliases: [ sambaName ]
+ ou:
+ required: true
+ description:
+ - Organisational unit, inside the LDAP Base DN.
+ owner:
+ required: false
+ default: 0
+ description:
+ - Directory owner of the share's root directory.
+ group:
+ required: false
+ default: '0'
+ description:
+ - Directory owner group of the share's root directory.
+ directorymode:
+ required: false
+ default: '00755'
+ description:
+ - Permissions for the share's root directory.
+ root_squash:
+ required: false
+ default: '1'
+ choices: [ '0', '1' ]
+ description:
+ - Modify user ID for root user (root squashing).
+ subtree_checking:
+ required: false
+ default: '1'
+ choices: [ '0', '1' ]
+ description:
+ - Subtree checking.
+ sync:
+ required: false
+ default: 'sync'
+ description:
+ - NFS synchronisation.
+ writeable:
+ required: false
+ default: '1'
+ choices: [ '0', '1' ]
+ description:
+ - NFS write access.
+ samba_block_size:
+ required: false
+ default: None
+ description:
+ - Blocking size.
+ aliases: [ sambaBlockSize ]
+ samba_blocking_locks:
+ required: false
+ default: '1'
+ choices: [ '0', '1' ]
+ description:
+ - Blocking locks.
+ aliases: [ sambaBlockingLocks ]
+ samba_browseable:
+ required: false
+ default: '1'
+ choices: [ '0', '1' ]
+ description:
+ - Show in Windows network environment.
+ aliases: [ sambaBrowseable ]
+ samba_create_mode:
+ required: false
+ default: '0744'
+ description:
+ - File mode.
+ aliases: [ sambaCreateMode ]
+ samba_csc_policy:
+ required: false
+ default: 'manual'
+ description:
+ - Client-side caching policy.
+ aliases: [ sambaCscPolicy ]
+ samba_custom_settings:
+ required: false
+ default: []
+ description:
+ - Option name in smb.conf and its value.
+ aliases: [ sambaCustomSettings ]
+ samba_directory_mode:
+ required: false
+ default: '0755'
+ description:
+ - Directory mode.
+ aliases: [ sambaDirectoryMode ]
+ samba_directory_security_mode:
+ required: false
+ default: '0777'
+ description:
+ - Directory security mode.
+ aliases: [ sambaDirectorySecurityMode ]
+ samba_dos_filemode:
+ required: false
+ default: '0'
+ choices: [ '0', '1' ]
+ description:
+ - Users with write access may modify permissions.
+ aliases: [ sambaDosFilemode ]
+ samba_fake_oplocks:
+ required: false
+ default: '0'
+ choices: [ '0', '1' ]
+ description:
+ - Fake oplocks.
+ aliases: [ sambaFakeOplocks ]
+ samba_force_create_mode:
+ required: false
+ default: '0'
+ choices: [ '0', '1' ]
+ description:
+ - Force file mode.
+ aliases: [ sambaForceCreateMode ]
+ samba_force_directory_mode:
+ required: false
+ default: '0'
+ choices: [ '0', '1' ]
+ description:
+ - Force directory mode.
+ aliases: [ sambaForceDirectoryMode ]
+ samba_force_directory_security_mode:
+ required: false
+ default: '0'
+ choices: [ '0', '1' ]
+ description:
+ - Force directory security mode.
+ aliases: [ sambaForceDirectorySecurityMode ]
+ samba_force_group:
+ required: false
+ default: None
+ description:
+ - Force group.
+ aliases: [ sambaForceGroup ]
+ samba_force_security_mode:
+ required: false
+ default: '0'
+ choices: [ '0', '1' ]
+ description:
+ - Force security mode.
+ aliases: [ sambaForceSecurityMode ]
+ samba_force_user:
+ required: false
+ default: None
+ description:
+ - Force user.
+ aliases: [ sambaForceUser ]
+ samba_hide_files:
+ required: false
+ default: None
+ description:
+ - Hide files.
+ aliases: [ sambaHideFiles ]
+ samba_hide_unreadable:
+ required: false
+ default: '0'
+ choices: [ '0', '1' ]
+ description:
+ - Hide unreadable files/directories.
+ aliases: [ sambaHideUnreadable ]
+ samba_hosts_allow:
+ required: false
+ default: []
+ description:
+ - Allowed host/network.
+ aliases: [ sambaHostsAllow ]
+ samba_hosts_deny:
+ required: false
+ default: []
+ description:
+ - Denied host/network.
+ aliases: [ sambaHostsDeny ]
+ samba_inherit_acls:
+ required: false
+ default: '1'
+ choices: [ '0', '1' ]
+ description:
+ - Inherit ACLs.
+ aliases: [ sambaInheritAcls ]
+ samba_inherit_owner:
+ required: false
+ default: '0'
+ choices: [ '0', '1' ]
+ description:
+ - Create files/directories with the owner of the parent directory.
+ aliases: [ sambaInheritOwner ]
+ samba_inherit_permissions:
+ required: false
+ default: '0'
+ choices: [ '0', '1' ]
+ description:
+ - Create files/directories with permissions of the parent directory.
+ aliases: [ sambaInheritPermissions ]
+ samba_invalid_users:
+ required: false
+ default: None
+ description:
+ - Invalid users or groups.
+ aliases: [ sambaInvalidUsers ]
+ samba_level_2_oplocks:
+ required: false
+ default: '1'
+ choices: [ '0', '1' ]
+ description:
+ - Level 2 oplocks.
+ aliases: [ sambaLevel2Oplocks ]
+ samba_locking:
+ required: false
+ default: '1'
+ choices: [ '0', '1' ]
+ description:
+ - Locking.
+ aliases: [ sambaLocking ]
+ samba_msdfs_root:
+ required: false
+ default: '0'
+ choices: [ '0', '1' ]
+ description:
+ - MSDFS root.
+ aliases: [ sambaMSDFSRoot ]
+ samba_nt_acl_support:
+ required: false
+ default: '1'
+ choices: [ '0', '1' ]
+ description:
+ - NT ACL support.
+ aliases: [ sambaNtAclSupport ]
+ samba_oplocks:
+ required: false
+ default: '1'
+ choices: [ '0', '1' ]
+ description:
+ - Oplocks.
+ aliases: [ sambaOplocks ]
+ samba_postexec:
+ required: false
+ default: None
+ description:
+ - Postexec script.
+ aliases: [ sambaPostexec ]
+ samba_preexec:
+ required: false
+ default: None
+ description:
+ - Preexec script.
+ aliases: [ sambaPreexec ]
+ samba_public:
+ required: false
+ default: '0'
+ choices: [ '0', '1' ]
+ description:
+ - Allow anonymous read-only access with a guest user.
+ aliases: [ sambaPublic ]
+ samba_security_mode:
+ required: false
+ default: '0777'
+ description:
+ - Security mode.
+ aliases: [ sambaSecurityMode ]
+ samba_strict_locking:
+ required: false
+ default: 'Auto'
+ description:
+ - Strict locking.
+ aliases: [ sambaStrictLocking ]
+ samba_vfs_objects:
+ required: false
+ default: None
+ description:
+ - VFS objects.
+ aliases: [ sambaVFSObjects ]
+ samba_valid_users:
+ required: false
+ default: None
+ description:
+ - Valid users or groups.
+ aliases: [ sambaValidUsers ]
+ samba_write_list:
+ required: false
+ default: None
+ description:
+ - Restrict write access to these users/groups.
+ aliases: [ sambaWriteList ]
+ samba_writeable:
+ required: false
+ default: '1'
+ choices: [ '0', '1' ]
+ description:
+ - Samba write access.
+ aliases: [ sambaWriteable ]
+ nfs_hosts:
+ required: false
+ default: []
+ description:
+ - Only allow access for this host, IP address or network.
+ nfs_custom_settings:
+ required: false
+ default: []
+ description:
+ - Option name in exports file.
+ aliases: [ nfsCustomSettings ]
+'''
+
+
+EXAMPLES = '''
+# Create a share named home on the server ucs.example.com with the path /home.
+- udm_share: name=home
+    path=/home
+    host=ucs.example.com
+    sambaName=Home
+    ou=school
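+
+# A hedged sketch (values illustrative): an NFS/Samba share restricted to one
+# subnet and hidden from the Windows network environment.
+- udm_share: name=backup
+    path=/srv/backup
+    host=ucs.example.com
+    sambaName=Backup
+    sambaBrowseable=0
+    nfs_hosts=['192.0.2.0/24']
+    ou=school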
+'''
+
+
+RETURN = '''# '''
+
+
+def main():
+ module = AnsibleModule(
+ argument_spec = dict(
+ name = dict(required=True,
+ type='str'),
+ ou = dict(required=True,
+ type='str'),
+ owner = dict(type='str',
+ default='0'),
+ group = dict(type='str',
+ default='0'),
+ path = dict(type='path',
+ default=None),
+ directorymode = dict(type='str',
+ default='00755'),
+ host = dict(type='str',
+ default=None),
+ root_squash = dict(type='bool',
+ default=True),
+ subtree_checking = dict(type='bool',
+ default=True),
+ sync = dict(type='str',
+ default='sync'),
+ writeable = dict(type='bool',
+ default=True),
+ sambaBlockSize = dict(type='str',
+ aliases=['samba_block_size'],
+ default=None),
+ sambaBlockingLocks = dict(type='bool',
+ aliases=['samba_blocking_locks'],
+ default=True),
+ sambaBrowseable = dict(type='bool',
+                              aliases=['samba_browseable'],
+ default=True),
+ sambaCreateMode = dict(type='str',
+ aliases=['samba_create_mode'],
+ default='0744'),
+ sambaCscPolicy = dict(type='str',
+ aliases=['samba_csc_policy'],
+ default='manual'),
+ sambaCustomSettings = dict(type='list',
+ aliases=['samba_custom_settings'],
+ default=[]),
+ sambaDirectoryMode = dict(type='str',
+ aliases=['samba_directory_mode'],
+ default='0755'),
+ sambaDirectorySecurityMode = dict(type='str',
+ aliases=['samba_directory_security_mode'],
+ default='0777'),
+ sambaDosFilemode = dict(type='bool',
+ aliases=['samba_dos_filemode'],
+ default=False),
+ sambaFakeOplocks = dict(type='bool',
+ aliases=['samba_fake_oplocks'],
+ default=False),
+ sambaForceCreateMode = dict(type='bool',
+ aliases=['samba_force_create_mode'],
+ default=False),
+ sambaForceDirectoryMode = dict(type='bool',
+ aliases=['samba_force_directory_mode'],
+ default=False),
+ sambaForceDirectorySecurityMode = dict(type='bool',
+ aliases=['samba_force_directory_security_mode'],
+ default=False),
+ sambaForceGroup = dict(type='str',
+ aliases=['samba_force_group'],
+ default=None),
+ sambaForceSecurityMode = dict(type='bool',
+ aliases=['samba_force_security_mode'],
+ default=False),
+ sambaForceUser = dict(type='str',
+ aliases=['samba_force_user'],
+ default=None),
+ sambaHideFiles = dict(type='str',
+ aliases=['samba_hide_files'],
+ default=None),
+ sambaHideUnreadable = dict(type='bool',
+ aliases=['samba_hide_unreadable'],
+ default=False),
+ sambaHostsAllow = dict(type='list',
+ aliases=['samba_hosts_allow'],
+ default=[]),
+ sambaHostsDeny = dict(type='list',
+ aliases=['samba_hosts_deny'],
+ default=[]),
+ sambaInheritAcls = dict(type='bool',
+ aliases=['samba_inherit_acls'],
+ default=True),
+ sambaInheritOwner = dict(type='bool',
+ aliases=['samba_inherit_owner'],
+ default=False),
+ sambaInheritPermissions = dict(type='bool',
+ aliases=['samba_inherit_permissions'],
+ default=False),
+ sambaInvalidUsers = dict(type='str',
+ aliases=['samba_invalid_users'],
+ default=None),
+ sambaLevel2Oplocks = dict(type='bool',
+ aliases=['samba_level_2_oplocks'],
+ default=True),
+ sambaLocking = dict(type='bool',
+ aliases=['samba_locking'],
+ default=True),
+ sambaMSDFSRoot = dict(type='bool',
+ aliases=['samba_msdfs_root'],
+ default=False),
+ sambaName = dict(type='str',
+ aliases=['samba_name'],
+ default=None),
+ sambaNtAclSupport = dict(type='bool',
+ aliases=['samba_nt_acl_support'],
+ default=True),
+ sambaOplocks = dict(type='bool',
+ aliases=['samba_oplocks'],
+ default=True),
+ sambaPostexec = dict(type='str',
+ aliases=['samba_postexec'],
+ default=None),
+ sambaPreexec = dict(type='str',
+ aliases=['samba_preexec'],
+ default=None),
+ sambaPublic = dict(type='bool',
+ aliases=['samba_public'],
+ default=False),
+ sambaSecurityMode = dict(type='str',
+ aliases=['samba_security_mode'],
+ default='0777'),
+ sambaStrictLocking = dict(type='str',
+ aliases=['samba_strict_locking'],
+ default='Auto'),
+ sambaVFSObjects = dict(type='str',
+ aliases=['samba_vfs_objects'],
+ default=None),
+ sambaValidUsers = dict(type='str',
+ aliases=['samba_valid_users'],
+ default=None),
+ sambaWriteList = dict(type='str',
+ aliases=['samba_write_list'],
+ default=None),
+ sambaWriteable = dict(type='bool',
+ aliases=['samba_writeable'],
+ default=True),
+ nfs_hosts = dict(type='list',
+ default=[]),
+ nfsCustomSettings = dict(type='list',
+ aliases=['nfs_custom_settings'],
+ default=[]),
+ state = dict(default='present',
+ choices=['present', 'absent'],
+ type='str')
+ ),
+ supports_check_mode=True,
+ required_if = ([
+ ('state', 'present', ['path', 'host', 'sambaName'])
+ ])
+ )
+ name = module.params['name']
+ state = module.params['state']
+    changed = False
+    diff = None
+
+ obj = list(ldap_search(
+ '(&(objectClass=univentionShare)(cn={}))'.format(name),
+ attr=['cn']
+ ))
+
+ exists = bool(len(obj))
+ container = 'cn=shares,ou={},{}'.format(module.params['ou'], base_dn())
+ dn = 'cn={},{}'.format(name, container)
+
+ if state == 'present':
+ try:
+ if not exists:
+ obj = umc_module_for_add('shares/share', container)
+ else:
+ obj = umc_module_for_edit('shares/share', dn)
+
+ module.params['printablename'] = '{} ({})'.format(name, module.params['host'])
+ for k in obj.keys():
+ if module.params[k] is True:
+ module.params[k] = '1'
+ elif module.params[k] is False:
+ module.params[k] = '0'
+ obj[k] = module.params[k]
+
+ diff = obj.diff()
+ if exists:
+ for k in obj.keys():
+ if obj.hasChanged(k):
+ changed = True
+ else:
+ changed = True
+ if not module.check_mode:
+ if not exists:
+ obj.create()
+ elif changed:
+ obj.modify()
+        except Exception as err:
+ module.fail_json(
+ msg='Creating/editing share {} in {} failed: {}'.format(
+ name,
+ container,
+ err,
+ )
+ )
+
+ if state == 'absent' and exists:
+ try:
+ obj = umc_module_for_edit('shares/share', dn)
+ if not module.check_mode:
+ obj.remove()
+ changed = True
+        except Exception as err:
+ module.fail_json(
+ msg='Removing share {} in {} failed: {}'.format(
+ name,
+ container,
+ err,
+ )
+ )
+
+ module.exit_json(
+ changed=changed,
+ name=name,
+ diff=diff,
+ container=container
+ )
+
+
+if __name__ == '__main__':
+ main()
diff --git a/lib/ansible/modules/extras/univention/udm_user.py b/lib/ansible/modules/extras/univention/udm_user.py
new file mode 100644
index 0000000000..2eed02a2c0
--- /dev/null
+++ b/lib/ansible/modules/extras/univention/udm_user.py
@@ -0,0 +1,591 @@
+#!/usr/bin/python
+# -*- coding: UTF-8 -*-
+
+# Copyright (c) 2016, Adfinis SyGroup AG
+# Tobias Rueetschi <tobias.ruetschi@adfinis-sygroup.ch>
+#
+# This file is part of Ansible
+#
+# Ansible is free software: you can redistribute it and/or modify
+# it under the terms of the GNU General Public License as published by
+# the Free Software Foundation, either version 3 of the License, or
+# (at your option) any later version.
+#
+# Ansible is distributed in the hope that it will be useful,
+# but WITHOUT ANY WARRANTY; without even the implied warranty of
+# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+# GNU General Public License for more details.
+#
+# You should have received a copy of the GNU General Public License
+# along with Ansible. If not, see <http://www.gnu.org/licenses/>.
+#
+
+
+from datetime import date
+import crypt
+from ansible.module_utils.basic import AnsibleModule
+from ansible.module_utils.univention_umc import (
+ umc_module_for_add,
+ umc_module_for_edit,
+ ldap_search,
+ base_dn,
+)
+from dateutil.relativedelta import relativedelta
+
+
+DOCUMENTATION = '''
+---
+module: udm_user
+version_added: "2.2"
+author: "Tobias Rueetschi (@2-B)"
+short_description: Manage POSIX users on a Univention Corporate Server
+description:
+    - "This module allows you to manage POSIX users on a Univention Corporate
+       Server (UCS).
+       It uses the Python API of the UCS to create a new object or edit an existing one."
+requirements:
+ - Python >= 2.6
+options:
+ state:
+ required: false
+ default: "present"
+ choices: [ present, absent ]
+ description:
+ - Whether the user is present or not.
+ username:
+ required: true
+ description:
+ - User name
+ aliases: ['name']
+ firstname:
+ required: false
+ description:
+ - First name. Required if C(state=present).
+ lastname:
+ required: false
+ description:
+ - Last name. Required if C(state=present).
+ password:
+ required: false
+ default: None
+ description:
+ - Password. Required if C(state=present).
+ birthday:
+ required: false
+ default: None
+ description:
+ - Birthday
+ city:
+ required: false
+ default: None
+ description:
+            - City of the user's business address.
+ country:
+ required: false
+ default: None
+            - Country of the user's business address.
+ - Country of users business address.
+ department_number:
+ required: false
+ default: None
+ description:
+            - Department number of the user's business address.
+ aliases: [ departmentNumber ]
+ description:
+ required: false
+ default: None
+ description:
+ - Description (not gecos)
+ display_name:
+ required: false
+ default: None
+ description:
+ - Display name (not gecos)
+ aliases: [ displayName ]
+ email:
+ required: false
+ default: ['']
+ description:
+ - A list of e-mail addresses.
+ employee_number:
+ required: false
+ default: None
+ description:
+ - Employee number
+ aliases: [ employeeNumber ]
+ employee_type:
+ required: false
+ default: None
+ description:
+ - Employee type
+ aliases: [ employeeType ]
+ gecos:
+ required: false
+ default: None
+ description:
+ - GECOS
+ groups:
+ required: false
+ default: []
+ description:
+ - "POSIX groups, the LDAP DNs of the groups will be found with the
+ LDAP filter for each group as $GROUP:
+ C((&(objectClass=posixGroup)(cn=$GROUP)))."
+ home_share:
+ required: false
+ default: None
+ description:
+            - "Home NFS share. Must be an LDAP DN, e.g.
+ C(cn=home,cn=shares,ou=school,dc=example,dc=com)."
+ aliases: [ homeShare ]
+ home_share_path:
+ required: false
+ default: None
+ description:
+ - Path to home NFS share, inside the homeShare.
+ aliases: [ homeSharePath ]
+ home_telephone_number:
+ required: false
+ default: []
+ description:
+ - List of private telephone numbers.
+ aliases: [ homeTelephoneNumber ]
+ homedrive:
+ required: false
+ default: None
+ description:
+ - Windows home drive, e.g. C("H:").
+ mail_alternative_address:
+ required: false
+ default: []
+ description:
+ - List of alternative e-mail addresses.
+ aliases: [ mailAlternativeAddress ]
+ mail_home_server:
+ required: false
+ default: None
+ description:
+ - FQDN of mail server
+ aliases: [ mailHomeServer ]
+ mail_primary_address:
+ required: false
+ default: None
+ description:
+ - Primary e-mail address
+ aliases: [ mailPrimaryAddress ]
+ mobile_telephone_number:
+ required: false
+ default: []
+ description:
+ - Mobile phone number
+ aliases: [ mobileTelephoneNumber ]
+ organisation:
+ required: false
+ default: None
+ description:
+ - Organisation
+ override_pw_history:
+ required: false
+ default: False
+ description:
+ - Override password history
+ aliases: [ overridePWHistory ]
+ override_pw_length:
+ required: false
+ default: False
+ description:
+ - Override password check
+ aliases: [ overridePWLength ]
+ pager_telephonenumber:
+ required: false
+ default: []
+ description:
+ - List of pager telephone numbers.
+ aliases: [ pagerTelephonenumber ]
+ phone:
+ required: false
+ default: []
+ description:
+ - List of telephone numbers.
+ postcode:
+ required: false
+ default: None
+ description:
+            - Postal code of the user's business address.
+ primary_group:
+ required: false
+ default: cn=Domain Users,cn=groups,$LDAP_BASE_DN
+ description:
+ - Primary group. This must be the group LDAP DN.
+ aliases: [ primaryGroup ]
+ profilepath:
+ required: false
+ default: None
+ description:
+ - Windows profile directory
+ pwd_change_next_login:
+ required: false
+ default: None
+ choices: [ '0', '1' ]
+ description:
+ - Change password on next login.
+ aliases: [ pwdChangeNextLogin ]
+ room_number:
+ required: false
+ default: None
+ description:
+            - Room number of the user's business address.
+ aliases: [ roomNumber ]
+ samba_privileges:
+ required: false
+ default: []
+ description:
+ - "Samba privilege, like allow printer administration, do domain
+ join."
+ aliases: [ sambaPrivileges ]
+ samba_user_workstations:
+ required: false
+ default: []
+ description:
+ - Allow the authentication only on this Microsoft Windows host.
+ aliases: [ sambaUserWorkstations ]
+ sambahome:
+ required: false
+ default: None
+ description:
+ - Windows home path, e.g. C('\\\\$FQDN\\$USERNAME').
+ scriptpath:
+ required: false
+ default: None
+ description:
+ - Windows logon script.
+ secretary:
+ required: false
+ default: []
+ description:
+ - A list of superiors as LDAP DNs.
+ serviceprovider:
+ required: false
+ default: ['']
+ description:
+ - Enable user for the following service providers.
+ shell:
+ required: false
+ default: '/bin/bash'
+ description:
+ - Login shell
+ street:
+ required: false
+ default: None
+ description:
+            - Street of the user's business address.
+ title:
+ required: false
+ default: None
+ description:
+ - Title, e.g. C(Prof.).
+ unixhome:
+ required: false
+ default: '/home/$USERNAME'
+ description:
+ - Unix home directory
+ userexpiry:
+ required: false
+ default: Today + 1 year
+ description:
+ - Account expiry date, e.g. C(1999-12-31).
+ position:
+ required: false
+ default: ''
+ description:
+ - "Define the whole position of users object inside the LDAP tree,
+ e.g. C(cn=employee,cn=users,ou=school,dc=example,dc=com)."
+ ou:
+ required: false
+ default: ''
+ description:
+ - "Organizational Unit inside the LDAP Base DN, e.g. C(school) for
+ LDAP OU C(ou=school,dc=example,dc=com)."
+ subpath:
+ required: false
+ default: 'cn=users'
+ description:
+ - "LDAP subpath inside the organizational unit, e.g.
+ C(cn=teachers,cn=users) for LDAP container
+ C(cn=teachers,cn=users,dc=example,dc=com)."
+'''
+
+
+EXAMPLES = '''
+# Create a user on a UCS
+- udm_user: name=FooBar
+ password=secure_password
+ firstname=Foo
+ lastname=Bar
+
+# Create a user with the DN
+# C(uid=foo,cn=teachers,cn=users,ou=school,dc=school,dc=example,dc=com)
+- udm_user: name=foo
+ password=secure_password
+ firstname=Foo
+ lastname=Bar
+ ou=school
+ subpath='cn=teachers,cn=users'
+# or define the position
+- udm_user: name=foo
+ password=secure_password
+ firstname=Foo
+ lastname=Bar
+ position='cn=teachers,cn=users,ou=school,dc=school,dc=example,dc=com'
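+
+# Remove the user again (a minimal sketch):
+- udm_user: name=foo
+    state=absent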
+'''
+
+
+RETURN = '''# '''
+
+
+def main():
+ expiry = date.strftime(date.today() + relativedelta(years=1), "%Y-%m-%d")
+ module = AnsibleModule(
+ argument_spec = dict(
+ birthday = dict(default=None,
+ type='str'),
+ city = dict(default=None,
+ type='str'),
+ country = dict(default=None,
+ type='str'),
+ department_number = dict(default=None,
+ type='str',
+ aliases=['departmentNumber']),
+ description = dict(default=None,
+ type='str'),
+ display_name = dict(default=None,
+ type='str',
+ aliases=['displayName']),
+ email = dict(default=[''],
+ type='list'),
+ employee_number = dict(default=None,
+ type='str',
+ aliases=['employeeNumber']),
+ employee_type = dict(default=None,
+ type='str',
+ aliases=['employeeType']),
+ firstname = dict(default=None,
+ type='str'),
+ gecos = dict(default=None,
+ type='str'),
+ groups = dict(default=[],
+ type='list'),
+ home_share = dict(default=None,
+ type='str',
+ aliases=['homeShare']),
+ home_share_path = dict(default=None,
+ type='str',
+ aliases=['homeSharePath']),
+ home_telephone_number = dict(default=[],
+ type='list',
+ aliases=['homeTelephoneNumber']),
+ homedrive = dict(default=None,
+ type='str'),
+ lastname = dict(default=None,
+ type='str'),
+ mail_alternative_address= dict(default=[],
+ type='list',
+ aliases=['mailAlternativeAddress']),
+ mail_home_server = dict(default=None,
+ type='str',
+ aliases=['mailHomeServer']),
+ mail_primary_address = dict(default=None,
+ type='str',
+ aliases=['mailPrimaryAddress']),
+ mobile_telephone_number = dict(default=[],
+ type='list',
+ aliases=['mobileTelephoneNumber']),
+ organisation = dict(default=None,
+ type='str'),
+ overridePWHistory = dict(default=False,
+ type='bool',
+ aliases=['override_pw_history']),
+ overridePWLength = dict(default=False,
+ type='bool',
+ aliases=['override_pw_length']),
+ pager_telephonenumber = dict(default=[],
+ type='list',
+ aliases=['pagerTelephonenumber']),
+ password = dict(default=None,
+ type='str',
+ no_log=True),
+ phone = dict(default=[],
+ type='list'),
+ postcode = dict(default=None,
+ type='str'),
+ primary_group = dict(default=None,
+ type='str',
+ aliases=['primaryGroup']),
+ profilepath = dict(default=None,
+ type='str'),
+ pwd_change_next_login = dict(default=None,
+ type='str',
+ choices=['0', '1'],
+ aliases=['pwdChangeNextLogin']),
+ room_number = dict(default=None,
+ type='str',
+ aliases=['roomNumber']),
+ samba_privileges = dict(default=[],
+ type='list',
+ aliases=['sambaPrivileges']),
+ samba_user_workstations = dict(default=[],
+ type='list',
+ aliases=['sambaUserWorkstations']),
+ sambahome = dict(default=None,
+ type='str'),
+ scriptpath = dict(default=None,
+ type='str'),
+ secretary = dict(default=[],
+ type='list'),
+ serviceprovider = dict(default=[''],
+ type='list'),
+ shell = dict(default='/bin/bash',
+ type='str'),
+ street = dict(default=None,
+ type='str'),
+ title = dict(default=None,
+ type='str'),
+ unixhome = dict(default=None,
+ type='str'),
+ userexpiry = dict(default=expiry,
+ type='str'),
+ username = dict(required=True,
+ aliases=['name'],
+ type='str'),
+ position = dict(default='',
+ type='str'),
+ ou = dict(default='',
+ type='str'),
+ subpath = dict(default='cn=users',
+ type='str'),
+ state = dict(default='present',
+ choices=['present', 'absent'],
+ type='str')
+ ),
+ supports_check_mode=True,
+ required_if = ([
+ ('state', 'present', ['firstname', 'lastname', 'password'])
+ ])
+ )
+ username = module.params['username']
+ position = module.params['position']
+ ou = module.params['ou']
+ subpath = module.params['subpath']
+ state = module.params['state']
+    changed = False
+    diff = None
+
+ users = list(ldap_search(
+ '(&(objectClass=posixAccount)(uid={}))'.format(username),
+ attr=['uid']
+ ))
+ if position != '':
+ container = position
+ else:
+ if ou != '':
+ ou = 'ou={},'.format(ou)
+ if subpath != '':
+ subpath = '{},'.format(subpath)
+ container = '{}{}{}'.format(subpath, ou, base_dn())
+ user_dn = 'uid={},{}'.format(username, container)
+
+ exists = bool(len(users))
+
+ if state == 'present':
+ try:
+ if not exists:
+ obj = umc_module_for_add('users/user', container)
+ else:
+ obj = umc_module_for_edit('users/user', user_dn)
+
+ if module.params['displayName'] is None:
+ module.params['displayName'] = '{} {}'.format(
+ module.params['firstname'],
+ module.params['lastname']
+ )
+ if module.params['unixhome'] is None:
+ module.params['unixhome'] = '/home/{}'.format(
+ module.params['username']
+ )
+ for k in obj.keys():
+ if (k != 'password' and
+ k != 'groups' and
+ k != 'overridePWHistory' and
+ k in module.params and
+ module.params[k] is not None):
+ obj[k] = module.params[k]
+ # handle some special values
+ obj['e-mail'] = module.params['email']
+ password = module.params['password']
+ if obj['password'] is None:
+ obj['password'] = password
+ else:
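+                # The stored value looks like "{crypt}<salt+hash>"; strip the
+                # scheme prefix and re-crypt the new password against it, so
+                # the password is only updated when it actually changed.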
+ old_password = obj['password'].split('}', 2)[1]
+ if crypt.crypt(password, old_password) != old_password:
+ obj['overridePWHistory'] = module.params['overridePWHistory']
+ obj['overridePWLength'] = module.params['overridePWLength']
+ obj['password'] = password
+
+ diff = obj.diff()
+ if exists:
+ for k in obj.keys():
+ if obj.hasChanged(k):
+ changed = True
+ else:
+ changed = True
+ if not module.check_mode:
+ if not exists:
+ obj.create()
+ elif changed:
+ obj.modify()
+        except Exception as e:
+            module.fail_json(
+                msg="Creating/editing user {} in {} failed: {}".format(
+                    username,
+                    container,
+                    e
+                )
+            )
+ try:
+ groups = module.params['groups']
+ if groups:
+            group_filter = '(&(objectClass=posixGroup)(|(cn={})))'.format(
+                ')(cn='.join(groups)
+            )
+            group_dns = list(ldap_search(group_filter, attr=['dn']))
+ for dn in group_dns:
+ grp = umc_module_for_edit('groups/group', dn[0])
+ if user_dn not in grp['users']:
+ grp['users'].append(user_dn)
+ if not module.check_mode:
+ grp.modify()
+ changed = True
+        except Exception as e:
+            module.fail_json(
+                msg="Adding groups to user {} failed: {}".format(username, e)
+            )
+
+ if state == 'absent' and exists:
+ try:
+ obj = umc_module_for_edit('users/user', user_dn)
+ if not module.check_mode:
+ obj.remove()
+ changed = True
+        except Exception as e:
+            module.fail_json(
+                msg="Removing user {} failed: {}".format(username, e)
+            )
+
+ module.exit_json(
+ changed=changed,
+ username=username,
+ diff=diff,
+ container=container
+ )
+
+
+if __name__ == '__main__':
+ main()
diff --git a/lib/ansible/modules/extras/web_infrastructure/__init__.py b/lib/ansible/modules/extras/web_infrastructure/__init__.py
new file mode 100644
index 0000000000..e69de29bb2
--- /dev/null
+++ b/lib/ansible/modules/extras/web_infrastructure/__init__.py
diff --git a/lib/ansible/modules/extras/web_infrastructure/apache2_mod_proxy.py b/lib/ansible/modules/extras/web_infrastructure/apache2_mod_proxy.py
new file mode 100644
index 0000000000..0117c118bb
--- /dev/null
+++ b/lib/ansible/modules/extras/web_infrastructure/apache2_mod_proxy.py
@@ -0,0 +1,429 @@
+#!/usr/bin/python
+# -*- coding: utf-8 -*-
+
+# (c) 2016, Olivier Boukili <boukili.olivier@gmail.com>
+#
+# This file is part of Ansible.
+#
+# Ansible is free software: you can redistribute it and/or modify
+# it under the terms of the GNU General Public License as published by
+# the Free Software Foundation, either version 3 of the License, or
+# (at your option) any later version.
+#
+# Ansible is distributed in the hope that it will be useful,
+# but WITHOUT ANY WARRANTY; without even the implied warranty of
+# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+# GNU General Public License for more details.
+#
+# You should have received a copy of the GNU General Public License
+# along with Ansible. If not, see <http://www.gnu.org/licenses/>.
+
+DOCUMENTATION = '''
+---
+module: apache2_mod_proxy
+version_added: "2.2"
+short_description: Set and/or get members' attributes of an Apache httpd 2.4 mod_proxy balancer pool
+description:
+ - Set and/or get members' attributes of an Apache httpd 2.4 mod_proxy balancer
+ pool, using HTTP POST and GET requests. The httpd mod_proxy balancer-member
+ status page has to be enabled and accessible, as this module relies on parsing
+    this page. This module supports Ansible check mode and requires the
+    BeautifulSoup Python module.
+options:
+ balancer_url_suffix:
+ default: /balancer-manager/
+ description:
+      - Suffix of the balancer pool URL required to access the balancer pool
+ status page (e.g. balancer_vhost[:port]/balancer_url_suffix).
+ required: false
+ balancer_vhost:
+ default: None
+ description:
+ - (ipv4|ipv6|fqdn):port of the Apache httpd 2.4 mod_proxy balancer pool.
+ required: true
+ member_host:
+ default: None
+ description:
+ - (ipv4|ipv6|fqdn) of the balancer member to get or to set attributes to.
+ Port number is autodetected and should not be specified here.
+ If undefined, apache2_mod_proxy module will return a members list of
+ dictionaries of all the current balancer pool members' attributes.
+ required: false
+ state:
+ default: None
+ description:
+      - Desired state of the member host. The states C(absent)/C(disabled),
+        C(drained), C(hot_standby) and C(ignore_errors) can be invoked
+        simultaneously by separating them with a comma (e.g. state=drained,ignore_errors).
+ required: false
+ choices: ["present", "absent", "enabled", "disabled", "drained", "hot_standby", "ignore_errors"]
+ tls:
+ default: false
+ description:
+ - Use https to access balancer management page.
+ choices: ["true", "false"]
+ validate_certs:
+ default: true
+ description:
+ - Validate ssl/tls certificates.
+ choices: ["true", "false"]
+'''
+
+EXAMPLES = '''
+# Get all current balancer pool members' attributes:
+- apache2_mod_proxy: balancer_vhost=10.0.0.2
+
+# Get a specific member's attributes:
+- apache2_mod_proxy: balancer_vhost=myws.mydomain.org balancer_url_suffix="/lb/" member_host=node1.myws.mydomain.org
+
+# Enable all balancer pool members:
+- apache2_mod_proxy: balancer_vhost="{{ myloadbalancer_host }}"
+ register: result
+- apache2_mod_proxy: balancer_vhost="{{ myloadbalancer_host }}" member_host="{{ item.host }}" state=present
+ with_items: "{{ result.members }}"
+
+# Gracefully disable a member from a loadbalancer node:
+- apache2_mod_proxy: balancer_vhost="{{ vhost_host }}" member_host="{{ member.host }}" state=drained delegate_to=myloadbalancernode
+- wait_for: host="{{ member.host }}" port={{ member.port }} state=drained delegate_to=myloadbalancernode
+- apache2_mod_proxy: balancer_vhost="{{ vhost_host }}" member_host="{{ member.host }}" state=absent delegate_to=myloadbalancernode
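+
+# A hedged sketch: the same operations over https, skipping certificate
+# validation (assumes the balancer-manager vhost is TLS-enabled):
+- apache2_mod_proxy: balancer_vhost=lb.example.org tls=true validate_certs=false member_host=node1.example.org state=hot_standby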
+'''
+
+RETURN = '''
+member:
+ description: specific balancer member information dictionary, returned when apache2_mod_proxy module is invoked with member_host parameter.
+ type: dict
+ returned: success
+ sample:
+ {"attributes":
+ {"Busy": "0",
+ "Elected": "42",
+ "Factor": "1",
+ "From": "136K",
+ "Load": "0",
+ "Route": null,
+ "RouteRedir": null,
+ "Set": "0",
+ "Status": "Init Ok ",
+ "To": " 47K",
+ "Worker URL": null
+ },
+ "balancer_url": "http://10.10.0.2/balancer-manager/",
+ "host": "10.10.0.20",
+ "management_url": "http://10.10.0.2/lb/?b=mywsbalancer&w=http://10.10.0.20:8080/ws&nonce=8925436c-79c6-4841-8936-e7d13b79239b",
+ "path": "/ws",
+ "port": 8080,
+ "protocol": "http",
+ "status": {
+ "disabled": false,
+ "drained": false,
+ "hot_standby": false,
+ "ignore_errors": false
+ }
+ }
+members:
+ description: list of member (defined above) dictionaries, returned when apache2_mod_proxy is invoked with no member_host and state args.
+ returned: success
+ type: list
+ sample:
+ [{"attributes": {
+ "Busy": "0",
+ "Elected": "42",
+ "Factor": "1",
+ "From": "136K",
+ "Load": "0",
+ "Route": null,
+ "RouteRedir": null,
+ "Set": "0",
+ "Status": "Init Ok ",
+ "To": " 47K",
+ "Worker URL": null
+ },
+ "balancer_url": "http://10.10.0.2/balancer-manager/",
+ "host": "10.10.0.20",
+ "management_url": "http://10.10.0.2/lb/?b=mywsbalancer&w=http://10.10.0.20:8080/ws&nonce=8925436c-79c6-4841-8936-e7d13b79239b",
+ "path": "/ws",
+ "port": 8080,
+ "protocol": "http",
+ "status": {
+ "disabled": false,
+ "drained": false,
+ "hot_standby": false,
+ "ignore_errors": false
+ }
+ },
+ {"attributes": {
+ "Busy": "0",
+ "Elected": "42",
+ "Factor": "1",
+ "From": "136K",
+ "Load": "0",
+ "Route": null,
+ "RouteRedir": null,
+ "Set": "0",
+ "Status": "Init Ok ",
+ "To": " 47K",
+ "Worker URL": null
+ },
+ "balancer_url": "http://10.10.0.2/balancer-manager/",
+ "host": "10.10.0.21",
+ "management_url": "http://10.10.0.2/lb/?b=mywsbalancer&w=http://10.10.0.21:8080/ws&nonce=8925436c-79c6-4841-8936-e7d13b79239b",
+ "path": "/ws",
+ "port": 8080,
+ "protocol": "http",
+ "status": {
+ "disabled": false,
+ "drained": false,
+ "hot_standby": false,
+ "ignore_errors": false}
+ }
+ ]
+'''
+
+import re
+
+try:
+ from BeautifulSoup import BeautifulSoup
+except ImportError:
+ HAS_BEAUTIFULSOUP = False
+else:
+ HAS_BEAUTIFULSOUP = True
+
+# balancer member attributes extraction regexp:
+EXPRESSION = r"(b=([\w\.\-]+)&w=(https?|ajp|wss?|ftp|[sf]cgi)://([\w\.\-]+):?(\d*)([/\w\.\-]*)&?[\w\-\=]*)"
+# Apache2 server version extraction regexp:
+APACHE_VERSION_EXPRESSION = r"Server Version: Apache/([\d.]+) \(([\w]+)\)"
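+# For a management url like ".../balancer-manager?b=mybalancer&w=http://10.0.0.20:8080/ws&nonce=...",
+# EXPRESSION captures: group 2 the balancer name, 3 the scheme, 4 the host,
+# 5 the port and 6 the path; group 1 is the whole "b=...&w=..." query string.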
+
+def regexp_extraction(string, _regexp, groups=1):
+ """ Returns the capture group (default=1) specified in the regexp, applied to the string """
+ regexp_search = re.search(string=str(string), pattern=str(_regexp))
+ if regexp_search:
+ if regexp_search.group(groups) != '':
+ return str(regexp_search.group(groups))
+ return None
+
+class BalancerMember(object):
+ """ Apache 2.4 mod_proxy LB balancer member.
+ attributes:
+ read-only:
+ host -> member host (string),
+ management_url -> member management url (string),
+ protocol -> member protocol (string)
+ port -> member port (string),
+ path -> member location (string),
+ balancer_url -> url of this member's parent balancer (string),
+ attributes -> whole member attributes (dictionary)
+ module -> ansible module instance (AnsibleModule object).
+ writable:
+ status -> status of the member (dictionary)
+ """
+
+ def __init__(self, management_url, balancer_url, module):
+ self.host = regexp_extraction(management_url, str(EXPRESSION), 4)
+ self.management_url = str(management_url)
+ self.protocol = regexp_extraction(management_url, EXPRESSION, 3)
+ self.port = regexp_extraction(management_url, EXPRESSION, 5)
+ self.path = regexp_extraction(management_url, EXPRESSION, 6)
+ self.balancer_url = str(balancer_url)
+ self.module = module
+
+ def get_member_attributes(self):
+ """ Returns a dictionary of a balancer member's attributes."""
+
+ balancer_member_page = fetch_url(self.module, self.management_url)
+
+ try:
+ assert balancer_member_page[1]['status'] == 200
+ except AssertionError:
+            self.module.fail_json(msg="Could not get balancer_member_page, check for connectivity! " + str(balancer_member_page[1]))
+ else:
+ try:
+ soup = BeautifulSoup(balancer_member_page[0])
+ except TypeError:
+                self.module.fail_json(msg="Cannot parse balancer_member_page HTML! " + str(balancer_member_page[0]))
+ else:
+ subsoup = soup.findAll('table')[1].findAll('tr')
+ keys = subsoup[0].findAll('th')
+ for valuesset in subsoup[1::1]:
+ if re.search(pattern=self.host, string=str(valuesset)):
+ values = valuesset.findAll('td')
+ return dict((keys[x].string, values[x].string) for x in range(0, len(keys)))
+
+ def get_member_status(self):
+ """ Returns a dictionary of a balancer member's status attributes."""
+ status_mapping = {'disabled':'Dis',
+ 'drained':'Drn',
+ 'hot_standby':'Stby',
+ 'ignore_errors':'Ign'}
+ status = {}
+ actual_status = str(self.attributes['Status'])
+ for mode in status_mapping.keys():
+ if re.search(pattern=status_mapping[mode], string=actual_status):
+ status[mode] = True
+ else:
+ status[mode] = False
+ return status
+
+ def set_member_status(self, values):
+ """ Sets a balancer member's status attributes amongst pre-mapped values."""
+ values_mapping = {'disabled':'&w_status_D',
+ 'drained':'&w_status_N',
+ 'hot_standby':'&w_status_H',
+ 'ignore_errors':'&w_status_I'}
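+
+        # Re-POST the member's own "b=...&w=..." query string (group 1 of
+        # EXPRESSION) with one w_status_<X>=0/1 field per flag; this mirrors
+        # what the balancer-manager form submits.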
+
+ request_body = regexp_extraction(self.management_url, EXPRESSION, 1)
+ for k in values_mapping.keys():
+ if values[str(k)]:
+ request_body = request_body + str(values_mapping[k]) + '=1'
+ else:
+ request_body = request_body + str(values_mapping[k]) + '=0'
+
+ response = fetch_url(self.module, self.management_url, data=str(request_body))
+ try:
+ assert response[1]['status'] == 200
+ except AssertionError:
+            self.module.fail_json(msg="Could not set the member status! " + self.host + " " + str(response[1]['status']))
+
+ attributes = property(get_member_attributes)
+ status = property(get_member_status, set_member_status)
+
+
+class Balancer(object):
+ """ Apache httpd 2.4 mod_proxy balancer object"""
+ def __init__(self, host, suffix, module, members=None, tls=False):
+        if tls:
+            scheme = 'https://'
+        else:
+            scheme = 'http://'
+        self.base_url = scheme + str(host)
+        self.url = scheme + str(host) + str(suffix)
+ self.module = module
+ self.page = self.fetch_balancer_page()
+ if members is None:
+ self._members = []
+
+ def fetch_balancer_page(self):
+ """ Returns the balancer management html page as a string for later parsing."""
+ page = fetch_url(self.module, str(self.url))
+ try:
+ assert page[1]['status'] == 200
+ except AssertionError:
+ self.module.fail_json(msg="Could not get balancer page! HTTP status response: " + str(page[1]['status']))
+ else:
+ content = page[0].read()
+ apache_version = regexp_extraction(content, APACHE_VERSION_EXPRESSION, 1)
+ if not re.search(pattern=r"2\.4\.[\d]*", string=apache_version):
+ self.module.fail_json(msg="This module only acts on an Apache2 2.4+ instance, current Apache2 version: " + str(apache_version))
+ return content
+
+ def get_balancer_members(self):
+ """ Returns members of the balancer as a generator object for later iteration."""
+ try:
+ soup = BeautifulSoup(self.page)
+ except TypeError:
+ self.module.fail_json(msg="Cannot parse balancer page HTML! " + str(self.page))
+ else:
+ for element in soup.findAll('a')[1::1]:
+ balancer_member_suffix = str(element.get('href'))
+ try:
+                    assert balancer_member_suffix != ''
+ except AssertionError:
+ self.module.fail_json(msg="Argument 'balancer_member_suffix' is empty!")
+ else:
+ yield BalancerMember(str(self.base_url + balancer_member_suffix), str(self.url), self.module)
+
+ members = property(get_balancer_members)
+
+def main():
+ """ Initiates module."""
+ module = AnsibleModule(
+ argument_spec=dict(
+            balancer_vhost=dict(required=True, type='str'),
+ balancer_url_suffix=dict(default="/balancer-manager/", type='str'),
+ member_host=dict(type='str'),
+ state=dict(type='str'),
+ tls=dict(default=False, type='bool'),
+ validate_certs=dict(default=True, type='bool')
+ ),
+ supports_check_mode=True
+ )
+
+    if not HAS_BEAUTIFULSOUP:
+ module.fail_json(msg="python module 'BeautifulSoup' is required!")
+
+    if module.params['state'] is not None:
+ states = module.params['state'].split(',')
+ if (len(states) > 1) and (("present" in states) or ("enabled" in states)):
+ module.fail_json(msg="state present/enabled is mutually exclusive with other states!")
+ else:
+ for _state in states:
+ if _state not in ['present', 'absent', 'enabled', 'disabled', 'drained', 'hot_standby', 'ignore_errors']:
+ module.fail_json(msg="State can only take values amongst 'present', 'absent', 'enabled', 'disabled', 'drained', 'hot_standby', 'ignore_errors'.")
+ else:
+ states = ['None']
+
+ mybalancer = Balancer(module.params['balancer_vhost'],
+ module.params['balancer_url_suffix'],
+ module=module,
+ tls=module.params['tls'])
+
+ if module.params['member_host'] is None:
+ json_output_list = []
+ for member in mybalancer.members:
+ json_output_list.append({
+ "host": member.host,
+ "status": member.status,
+ "protocol": member.protocol,
+ "port": member.port,
+ "path": member.path,
+ "attributes": member.attributes,
+ "management_url": member.management_url,
+ "balancer_url": member.balancer_url
+ })
+ module.exit_json(
+ changed=False,
+ members=json_output_list
+ )
+ else:
+ changed = False
+ member_exists = False
+ member_status = {'disabled': False, 'drained': False, 'hot_standby': False, 'ignore_errors':False}
+ for mode in member_status.keys():
+ for state in states:
+ if mode == state:
+ member_status[mode] = True
+ elif mode == 'disabled' and state == 'absent':
+ member_status[mode] = True
+
+ for member in mybalancer.members:
+ if str(member.host) == str(module.params['member_host']):
+ member_exists = True
+ if module.params['state'] is not None:
+ member_status_before = member.status
+ if not module.check_mode:
+ member_status_after = member.status = member_status
+ else:
+ member_status_after = member_status
+ if member_status_before != member_status_after:
+ changed = True
+ json_output = {
+ "host": member.host,
+ "status": member.status,
+ "protocol": member.protocol,
+ "port": member.port,
+ "path": member.path,
+ "attributes": member.attributes,
+ "management_url": member.management_url,
+ "balancer_url": member.balancer_url
+ }
+ if member_exists:
+ module.exit_json(
+ changed=changed,
+ member=json_output
+ )
+ else:
+ module.fail_json(msg=str(module.params['member_host']) + ' is not a member of the balancer ' + str(module.params['balancer_vhost']) + '!')
+
+from ansible.module_utils.basic import AnsibleModule
+from ansible.module_utils.urls import fetch_url
+if __name__ == '__main__':
+ main()
diff --git a/lib/ansible/modules/extras/web_infrastructure/deploy_helper.py b/lib/ansible/modules/extras/web_infrastructure/deploy_helper.py
new file mode 100644
index 0000000000..b956e38d26
--- /dev/null
+++ b/lib/ansible/modules/extras/web_infrastructure/deploy_helper.py
@@ -0,0 +1,478 @@
+#!/usr/bin/python
+# -*- coding: utf-8 -*-
+
+# (c) 2014, Jasper N. Brouwer <jasper@nerdsweide.nl>
+# (c) 2014, Ramon de la Fuente <ramon@delafuente.nl>
+#
+# This file is part of Ansible
+#
+# Ansible is free software: you can redistribute it and/or modify
+# it under the terms of the GNU General Public License as published by
+# the Free Software Foundation, either version 3 of the License, or
+# (at your option) any later version.
+#
+# Ansible is distributed in the hope that it will be useful,
+# but WITHOUT ANY WARRANTY; without even the implied warranty of
+# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+# GNU General Public License for more details.
+#
+# You should have received a copy of the GNU General Public License
+# along with Ansible. If not, see <http://www.gnu.org/licenses/>.
+
+DOCUMENTATION = '''
+---
+module: deploy_helper
+version_added: "2.0"
+author: "Ramon de la Fuente (@ramondelafuente)"
+short_description: Manages some of the steps common in deploying projects.
+description:
+ - The Deploy Helper manages some of the steps common in deploying software.
+ It creates a folder structure, manages a symlink for the current release
+ and cleans up old releases.
+ - "Running it with the C(state=query) or C(state=present) will return the C(deploy_helper) fact.
+ C(project_path), whatever you set in the path parameter,
+ C(current_path), the path to the symlink that points to the active release,
+ C(releases_path), the path to the folder to keep releases in,
+ C(shared_path), the path to the folder to keep shared resources in,
+ C(unfinished_filename), the file to check for to recognize unfinished builds,
+ C(previous_release), the release the 'current' symlink is pointing to,
+ C(previous_release_path), the full path to the 'current' symlink target,
+ C(new_release), either the 'release' parameter or a generated timestamp,
+ C(new_release_path), the path to the new release folder (not created by the module)."
+
+options:
+ path:
+ required: True
+ aliases: ['dest']
+ description:
+ - the root path of the project. Alias I(dest).
+ Returned in the C(deploy_helper.project_path) fact.
+
+ state:
+ required: False
+ choices: [ present, finalize, absent, clean, query ]
+ default: present
+ description:
+ - the state of the project.
+ C(query) will only gather facts,
+ C(present) will create the project I(root) folder, and in it the I(releases) and I(shared) folders,
+ C(finalize) will remove the unfinished_filename file, create a symlink to the newly
+ deployed release and optionally clean old releases,
+ C(clean) will remove failed & old releases,
+ C(absent) will remove the project folder (synonymous with the M(file) module with C(state=absent))
+
+ release:
+ required: False
+ default: None
+ description:
+ - the release version that is being deployed. Defaults to a timestamp format %Y%m%d%H%M%S (i.e. '20141119223359').
+ This parameter is optional during C(state=present), but needs to be set explicitly for C(state=finalize).
+ You can use the generated fact C(release={{ deploy_helper.new_release }}).
+
+ releases_path:
+ required: False
+ default: releases
+ description:
+ - the name of the folder that will hold the releases. This can be relative to C(path) or absolute.
+ Returned in the C(deploy_helper.releases_path) fact.
+
+ shared_path:
+ required: False
+ default: shared
+ description:
+ - the name of the folder that will hold the shared resources. This can be relative to C(path) or absolute.
+ If this is set to an empty string, no shared folder will be created.
+ Returned in the C(deploy_helper.shared_path) fact.
+
+ current_path:
+ required: False
+ default: current
+ description:
+ - the name of the symlink that is created when the deploy is finalized. Used in C(finalize) and C(clean).
+ Returned in the C(deploy_helper.current_path) fact.
+
+ unfinished_filename:
+ required: False
+ default: DEPLOY_UNFINISHED
+ description:
+ - the name of the file that indicates a deploy has not finished. All folders in the releases_path that
+ contain this file will be deleted on C(state=finalize) with clean=True, or C(state=clean). This file is
+ automatically deleted from the I(new_release_path) during C(state=finalize).
+
+ clean:
+ required: False
+ default: True
+ description:
+ - Whether to run the clean procedure in case of C(state=finalize).
+
+ keep_releases:
+ required: False
+ default: 5
+ description:
+ - the number of old releases to keep when cleaning. Used in C(finalize) and C(clean). Any unfinished builds
+ will be deleted first, so only correct releases will count. The current version will not count.
+
+notes:
+ - Facts are only returned for C(state=query) and C(state=present). If you use both, you should pass any overridden
+ parameters to both calls, otherwise the second call will overwrite the facts of the first one.
+ - When using C(state=clean), the releases are ordered by I(creation date). You should be able to switch to a
+ new naming strategy without problems.
+ - Because of the default behaviour of generating the I(new_release) fact, this module will not be idempotent
+ unless you pass your own release name with C(release). Due to the nature of deploying software, this should not
+ be much of a problem.
+'''
+
+EXAMPLES = '''
+
+# General explanation, starting with an example folder structure for a project:
+
+root:
+ releases:
+ - 20140415234508
+ - 20140415235146
+ - 20140416082818
+
+ shared:
+ - sessions
+ - uploads
+
+ current: -> releases/20140416082818
+
+
+The 'releases' folder holds all the available releases. A release is a complete build of the application being
+deployed. This can be a clone of a repository for example, or a sync of a local folder on your filesystem.
+Having timestamped folders is one way of having distinct releases, but you could choose your own strategy like
+git tags or commit hashes.
+
+During a deploy, a new folder should be created in the releases folder and any build steps required should be
+performed. Once the new build is ready, the deploy procedure is 'finalized' by replacing the 'current' symlink
+with a link to this build.
+
+The 'shared' folder holds any resource that is shared between releases. Examples of this are web-server
+session files, or files uploaded by users of your application. It's quite common to have symlinks from a release
+folder pointing to a shared/subfolder, and creating these links would be automated as part of the build steps.
+
+The 'current' symlink points to one of the releases. Probably the latest one, unless a deploy is in progress.
+The web-server's root for the project will go through this symlink, so the 'downtime' when switching to a new
+release is reduced to the time it takes to switch the link.
+
+To distinguish between successful builds and unfinished ones, a file can be placed in the folder of the release
+that is currently in progress. The existence of this file will mark it as unfinished, and allow an automated
+procedure to remove it during cleanup.
+
+
+# Typical usage:
+- name: Initialize the deploy root and gather facts
+ deploy_helper: path=/path/to/root
+- name: Clone the project to the new release folder
+ git: repo=git://foosball.example.org/path/to/repo.git dest={{ deploy_helper.new_release_path }} version=v1.1.1
+- name: Add an unfinished file, to allow cleanup on successful finalize
+ file: path={{ deploy_helper.new_release_path }}/{{ deploy_helper.unfinished_filename }} state=touch
+- name: Perform some build steps, like running your dependency manager for example
+ composer: command=install working_dir={{ deploy_helper.new_release_path }}
+- name: Create some folders in the shared folder
+ file: path='{{ deploy_helper.shared_path }}/{{ item }}' state=directory
+ with_items: ['sessions', 'uploads']
+- name: Add symlinks from the new release to the shared folder
+ file: path='{{ deploy_helper.new_release_path }}/{{ item.path }}'
+ src='{{ deploy_helper.shared_path }}/{{ item.src }}'
+ state=link
+ with_items:
+ - { path: "app/sessions", src: "sessions" }
+ - { path: "web/uploads", src: "uploads" }
+- name: Finalize the deploy, removing the unfinished file and switching the symlink
+ deploy_helper: path=/path/to/root release={{ deploy_helper.new_release }} state=finalize
+
+# Retrieving facts before running a deploy
+- name: Run 'state=query' to gather facts without changing anything
+ deploy_helper: path=/path/to/root state=query
+# Remember to set the 'release' parameter when you actually call 'state=present' later
+- name: Initialize the deploy root
+ deploy_helper: path=/path/to/root release={{ deploy_helper.new_release }} state=present
+
+# all paths can be absolute or relative (to the 'path' parameter)
+- deploy_helper: path=/path/to/root
+ releases_path=/var/www/project/releases
+ shared_path=/var/www/shared
+ current_path=/var/www/active
+
+# Using your own naming strategy for releases (a version tag in this case):
+- deploy_helper: path=/path/to/root release=v1.1.1 state=present
+- deploy_helper: path=/path/to/root release={{ deploy_helper.new_release }} state=finalize
+
+# Using a different unfinished_filename:
+- deploy_helper: path=/path/to/root
+ unfinished_filename=README.md
+ release={{ deploy_helper.new_release }}
+ state=finalize
+
+# Postponing the cleanup of older builds:
+- deploy_helper: path=/path/to/root release={{ deploy_helper.new_release }} state=finalize clean=False
+- deploy_helper: path=/path/to/root state=clean
+# Or running the cleanup ahead of the new deploy
+- deploy_helper: path=/path/to/root state=clean
+- deploy_helper: path=/path/to/root state=present
+
+# Keeping more old releases:
+- deploy_helper: path=/path/to/root release={{ deploy_helper.new_release }} state=finalize keep_releases=10
+# Or, if you use 'clean=false' on finalize:
+- deploy_helper: path=/path/to/root state=clean keep_releases=10
+
+# Removing the entire project root folder
+- deploy_helper: path=/path/to/root state=absent
+
+# Debugging the facts returned by the module
+- deploy_helper: path=/path/to/root
+- debug: var=deploy_helper
+
+'''
+# import module snippets
+from ansible.module_utils.basic import *
+from ansible.module_utils.pycompat24 import get_exception
+
+class DeployHelper(object):
+
+ def __init__(self, module):
+ module.params['path'] = os.path.expanduser(module.params['path'])
+
+ self.module = module
+ self.file_args = module.load_file_common_arguments(module.params)
+
+ self.clean = module.params['clean']
+ self.current_path = module.params['current_path']
+ self.keep_releases = module.params['keep_releases']
+ self.path = module.params['path']
+ self.release = module.params['release']
+ self.releases_path = module.params['releases_path']
+ self.shared_path = module.params['shared_path']
+ self.state = module.params['state']
+ self.unfinished_filename = module.params['unfinished_filename']
+
+ def gather_facts(self):
+ current_path = os.path.join(self.path, self.current_path)
+ releases_path = os.path.join(self.path, self.releases_path)
+ if self.shared_path:
+ shared_path = os.path.join(self.path, self.shared_path)
+ else:
+ shared_path = None
+
+ previous_release, previous_release_path = self._get_last_release(current_path)
+
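+ # For query/present a timestamp release name is generated on the fly,
+ # which is why finalize requires an explicit 'release' parameter.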
+ if not self.release and (self.state == 'query' or self.state == 'present'):
+ self.release = time.strftime("%Y%m%d%H%M%S")
+
+ new_release_path = os.path.join(releases_path, self.release)
+
+ return {
+ 'project_path': self.path,
+ 'current_path': current_path,
+ 'releases_path': releases_path,
+ 'shared_path': shared_path,
+ 'previous_release': previous_release,
+ 'previous_release_path': previous_release_path,
+ 'new_release': self.release,
+ 'new_release_path': new_release_path,
+ 'unfinished_filename': self.unfinished_filename
+ }
+
+ def delete_path(self, path):
+ if not os.path.lexists(path):
+ return False
+
+ if not os.path.isdir(path):
+ self.module.fail_json(msg="%s exists but is not a directory" % path)
+
+ if not self.module.check_mode:
+ try:
+ shutil.rmtree(path, ignore_errors=False)
+ except Exception:
+ e = get_exception()
+ self.module.fail_json(msg="rmtree failed: %s" % str(e))
+
+ return True
+
+ def create_path(self, path):
+ changed = False
+
+ if not os.path.lexists(path):
+ changed = True
+ if not self.module.check_mode:
+ os.makedirs(path)
+
+ elif not os.path.isdir(path):
+ self.module.fail_json(msg="%s exists but is not a directory" % path)
+
+ changed += self.module.set_directory_attributes_if_different(self._get_file_args(path), changed)
+
+ return changed
+
+ def check_link(self, path):
+ if os.path.lexists(path):
+ if not os.path.islink(path):
+ self.module.fail_json(msg="%s exists but is not a symbolic link" % path)
+
+ def create_link(self, source, link_name):
+ changed = False
+
+ if os.path.islink(link_name):
+ norm_link = os.path.normpath(os.path.realpath(link_name))
+ norm_source = os.path.normpath(os.path.realpath(source))
+ if norm_link == norm_source:
+ changed = False
+ else:
+ changed = True
+ if not self.module.check_mode:
+ if not os.path.lexists(source):
+ self.module.fail_json(msg="the symlink target %s doesn't exists" % source)
+ tmp_link_name = link_name + '.' + self.unfinished_filename
+ if os.path.islink(tmp_link_name):
+ os.unlink(tmp_link_name)
+ os.symlink(source, tmp_link_name)
+ os.rename(tmp_link_name, link_name)
+ else:
+ changed = True
+ if not self.module.check_mode:
+ os.symlink(source, link_name)
+
+ return changed
+
+ def remove_unfinished_file(self, new_release_path):
+ changed = False
+ unfinished_file_path = os.path.join(new_release_path, self.unfinished_filename)
+ if os.path.lexists(unfinished_file_path):
+ changed = True
+ if not self.module.check_mode:
+ os.remove(unfinished_file_path)
+
+ return changed
+
+ def remove_unfinished_builds(self, releases_path):
+ changes = 0
+
+ for release in os.listdir(releases_path):
+ if os.path.isfile(os.path.join(releases_path, release, self.unfinished_filename)):
+ if self.module.check_mode:
+ changes += 1
+ else:
+ changes += self.delete_path(os.path.join(releases_path, release))
+
+ return changes
+
+ def remove_unfinished_link(self, path):
+ changed = False
+
+ tmp_link_name = os.path.join(path, self.release + '.' + self.unfinished_filename)
+ if not self.module.check_mode and os.path.exists(tmp_link_name):
+ changed = True
+ os.remove(tmp_link_name)
+
+ return changed
+
+ def cleanup(self, releases_path, reserve_version):
+ changes = 0
+
+ if os.path.lexists(releases_path):
+ releases = [f for f in os.listdir(releases_path) if os.path.isdir(os.path.join(releases_path, f))]
+ try:
+ releases.remove(reserve_version)
+ except ValueError:
+ pass
+
+ if not self.module.check_mode:
+ releases.sort(key=lambda x: os.path.getctime(os.path.join(releases_path, x)), reverse=True)
+ for release in releases[self.keep_releases:]:
+ changes += self.delete_path(os.path.join(releases_path, release))
+ elif len(releases) > self.keep_releases:
+ changes += (len(releases) - self.keep_releases)
+
+ return changes
+
+ def _get_file_args(self, path):
+ file_args = self.file_args.copy()
+ file_args['path'] = path
+ return file_args
+
+ def _get_last_release(self, current_path):
+ previous_release = None
+ previous_release_path = None
+
+ if os.path.lexists(current_path):
+ previous_release_path = os.path.realpath(current_path)
+ previous_release = os.path.basename(previous_release_path)
+
+ return previous_release, previous_release_path
+
+def main():
+
+ module = AnsibleModule(
+ argument_spec = dict(
+ path = dict(aliases=['dest'], required=True, type='str'),
+ release = dict(required=False, type='str', default=None),
+ releases_path = dict(required=False, type='str', default='releases'),
+ shared_path = dict(required=False, type='str', default='shared'),
+ current_path = dict(required=False, type='str', default='current'),
+ keep_releases = dict(required=False, type='int', default=5),
+ clean = dict(required=False, type='bool', default=True),
+ unfinished_filename = dict(required=False, type='str', default='DEPLOY_UNFINISHED'),
+ state = dict(required=False, choices=['present', 'absent', 'clean', 'finalize', 'query'], default='present')
+ ),
+ add_file_common_args = True,
+ supports_check_mode = True
+ )
+
+ deploy_helper = DeployHelper(module)
+ facts = deploy_helper.gather_facts()
+
+ result = {
+ 'state': deploy_helper.state
+ }
+
+ changes = 0
+
+ if deploy_helper.state == 'query':
+ result['ansible_facts'] = { 'deploy_helper': facts }
+
+ elif deploy_helper.state == 'present':
+ deploy_helper.check_link(facts['current_path'])
+ changes += deploy_helper.create_path(facts['project_path'])
+ changes += deploy_helper.create_path(facts['releases_path'])
+ if deploy_helper.shared_path:
+ changes += deploy_helper.create_path(facts['shared_path'])
+
+ result['ansible_facts'] = { 'deploy_helper': facts }
+
+ elif deploy_helper.state == 'finalize':
+ if not deploy_helper.release:
+ module.fail_json(msg="'release' is a required parameter for state=finalize (try the 'deploy_helper.new_release' fact)")
+ if deploy_helper.keep_releases <= 0:
+ module.fail_json(msg="'keep_releases' should be at least 1")
+
+ changes += deploy_helper.remove_unfinished_file(facts['new_release_path'])
+ changes += deploy_helper.create_link(facts['new_release_path'], facts['current_path'])
+ if deploy_helper.clean:
+ changes += deploy_helper.remove_unfinished_link(facts['project_path'])
+ changes += deploy_helper.remove_unfinished_builds(facts['releases_path'])
+ changes += deploy_helper.cleanup(facts['releases_path'], facts['new_release'])
+
+ elif deploy_helper.state == 'clean':
+ changes += deploy_helper.remove_unfinished_link(facts['project_path'])
+ changes += deploy_helper.remove_unfinished_builds(facts['releases_path'])
+ changes += deploy_helper.cleanup(facts['releases_path'], facts['new_release'])
+
+ elif deploy_helper.state == 'absent':
+ # destroy the facts
+ result['ansible_facts'] = { 'deploy_helper': [] }
+ changes += deploy_helper.delete_path(facts['project_path'])
+
+ if changes > 0:
+ result['changed'] = True
+ else:
+ result['changed'] = False
+
+ module.exit_json(**result)
+
+
+if __name__ == '__main__':
+ main()
diff --git a/lib/ansible/modules/extras/web_infrastructure/ejabberd_user.py b/lib/ansible/modules/extras/web_infrastructure/ejabberd_user.py
new file mode 100644
index 0000000000..e89918a248
--- /dev/null
+++ b/lib/ansible/modules/extras/web_infrastructure/ejabberd_user.py
@@ -0,0 +1,219 @@
+#!/usr/bin/python
+# -*- coding: utf-8 -*-
+#
+# Copyright (C) 2013, Peter Sprygada <sprygada@gmail.com>
+#
+# This program is free software: you can redistribute it and/or modify
+# it under the terms of the GNU General Public License as published by
+# the Free Software Foundation, either version 3 of the License, or
+# (at your option) any later version.
+#
+# This program is distributed in the hope that it will be useful,
+# but WITHOUT ANY WARRANTY; without even the implied warranty of
+# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+# GNU General Public License for more details.
+#
+# You should have received a copy of the GNU General Public License
+# along with this program. If not, see <http://www.gnu.org/licenses/>.
+#
+DOCUMENTATION = '''
+---
+module: ejabberd_user
+version_added: "1.5"
+author: "Peter Sprygada (@privateip)"
+short_description: Manages users for ejabberd servers
+requirements:
+ - ejabberd with mod_admin_extra
+description:
+ - This module provides user management for ejabberd servers
+options:
+ username:
+ description:
+ - the name of the user to manage
+ required: true
+ host:
+ description:
+ - the ejabberd host associated with this username
+ required: true
+ password:
+ description:
+ - the password to assign to the username
+ required: false
+ logging:
+ description:
+ - enables or disables the local syslog facility for this module
+ required: false
+ default: false
+ choices: [ 'true', 'false', 'yes', 'no' ]
+ state:
+ description:
+ - describe the desired state of the user to be managed
+ required: false
+ default: 'present'
+ choices: [ 'present', 'absent' ]
+notes:
+ - Password parameter is required for state == present only
+ - Passwords must be stored in clear text for this release
+ - The ejabberd configuration file must include mod_admin_extra as a module.
+'''
+EXAMPLES = '''
+Example playbook entries using the ejabberd_user module to manage users state.
+
+ tasks:
+
+ - name: create a user if it does not exist
+ action: ejabberd_user username=test host=server password=password
+
+ - name: delete a user if it exists
+ action: ejabberd_user username=test host=server state=absent
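+
+ # A hypothetical variation: with the default state=present, the password
+ # of an existing user is updated in place when it differs.
+ - name: update the password for an existing user
+ action: ejabberd_user username=test host=server password=newpassword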
+'''
+import syslog
+from ansible.module_utils.pycompat24 import get_exception
+from ansible.module_utils.basic import *
+
+class EjabberdUserException(Exception):
+ """ Base exeption for EjabberdUser class object """
+ pass
+
+class EjabberdUser(object):
+ """ This object represents a user resource for an ejabberd server. The
+ object manages user creation and deletion using ejabberdctl. The following
+ commands are currently supported:
+ * ejabberdctl register
+ * ejabberdctl unregister
+ * ejabberdctl change_password
+ * ejabberdctl check_password
+ * ejabberdctl check_account
+ """
+
+ def __init__(self, module):
+ self.module = module
+ self.logging = module.params.get('logging')
+ self.state = module.params.get('state')
+ self.host = module.params.get('host')
+ self.user = module.params.get('username')
+ self.pwd = module.params.get('password')
+
+ @property
+ def changed(self):
+ """ This method will check the current user and see if the password has
+ changed. It will return True if the user does not match the supplied
+ credentials and False if it does not
+ """
+ try:
+ options = [self.user, self.host, self.pwd]
+ (rc, out, err) = self.run_command('check_password', options)
+ except EjabberdUserException:
+ e = get_exception()
+ (rc, out, err) = (1, None, "required attribute(s) missing")
+ return rc
+
+ @property
+ def exists(self):
+ """ This method will check to see if the supplied username exists for
+ host specified. If the user exists True is returned, otherwise False
+ is returned
+ """
+ try:
+ options = [self.user, self.host]
+ (rc, out, err) = self.run_command('check_account', options)
+ except EjabberdUserException:
+ e = get_exception()
+ (rc, out, err) = (1, None, "required attribute(s) missing")
+ return not bool(int(rc))
+
+ def log(self, entry):
+ """ This method will log information to the local syslog facility """
+ if self.logging:
+ syslog.openlog('ansible-%s' % self.module._name)
+ syslog.syslog(syslog.LOG_NOTICE, entry)
+
+ def run_command(self, cmd, options):
+ """ This method will run the any command specified and return the
+ returns using the Ansible common module
+ """
+ if not all(options):
+ raise EjabberdUserException
+
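+ # Build the full command line, e.g. 'ejabberdctl check_account <user> <host>'.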
+ cmd = 'ejabberdctl %s ' % cmd
+ cmd += " ".join(options)
+ self.log('command: %s' % cmd)
+ return self.module.run_command(cmd.split())
+
+ def update(self):
+ """ The update method will update the credentials for the user provided
+ """
+ try:
+ options = [self.user, self.host, self.pwd]
+ (rc, out, err) = self.run_command('change_password', options)
+ except EjabberdUserException:
+ e = get_exception()
+ (rc, out, err) = (1, None, "required attribute(s) missing")
+ return (rc, out, err)
+
+ def create(self):
+ """ The create method will create a new user on the host with the
+ password provided
+ """
+ try:
+ options = [self.user, self.host, self.pwd]
+ (rc, out, err) = self.run_command('register', options)
+ except EjabberdUserException:
+ e = get_exception()
+ (rc, out, err) = (1, None, "required attribute(s) missing")
+ return (rc, out, err)
+
+ def delete(self):
+ """ The delete method will delete the user from the host
+ """
+ try:
+ options = [self.user, self.host]
+ (rc, out, err) = self.run_command('unregister', options)
+ except EjabberdUserException:
+ e = get_exception()
+ (rc, out, err) = (1, None, "required attribute(s) missing")
+ return (rc, out, err)
+
+def main():
+ module = AnsibleModule(
+ argument_spec = dict(
+ host=dict(required=True, type='str'),
+ username=dict(required=True, type='str'),
+ password=dict(default=None, type='str', no_log=True),
+ state=dict(default='present', choices=['present', 'absent']),
+ logging=dict(default=False, type='bool')
+ ),
+ supports_check_mode = True
+ )
+
+ obj = EjabberdUser(module)
+
+ rc = None
+ result = dict()
+
+ if obj.state == 'absent':
+ if obj.exists:
+ if module.check_mode:
+ module.exit_json(changed=True)
+ (rc, out, err) = obj.delete()
+ if rc != 0:
+ module.fail_json(msg=err, rc=rc)
+
+ elif obj.state == 'present':
+ if not obj.exists:
+ if module.check_mode:
+ module.exit_json(changed=True)
+ (rc, out, err) = obj.create()
+ elif obj.changed:
+ if module.check_mode:
+ module.exit_json(changed=True)
+ (rc, out, err) = obj.update()
+ if rc is not None and rc != 0:
+ module.fail_json(msg=err, rc=rc)
+
+ result['changed'] = rc is not None
+
+ module.exit_json(**result)
+
+
+if __name__ == '__main__':
+ main()
diff --git a/lib/ansible/modules/extras/web_infrastructure/jboss.py b/lib/ansible/modules/extras/web_infrastructure/jboss.py
new file mode 100644
index 0000000000..9ec67b7c7b
--- /dev/null
+++ b/lib/ansible/modules/extras/web_infrastructure/jboss.py
@@ -0,0 +1,140 @@
+#!/usr/bin/python
+# -*- coding: utf-8 -*-
+
+# (c) 2013, Jeroen Hoekx <jeroen.hoekx@dsquare.be>
+#
+# This file is part of Ansible
+#
+# Ansible is free software: you can redistribute it and/or modify
+# it under the terms of the GNU General Public License as published by
+# the Free Software Foundation, either version 3 of the License, or
+# (at your option) any later version.
+#
+# Ansible is distributed in the hope that it will be useful,
+# but WITHOUT ANY WARRANTY; without even the implied warranty of
+# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+# GNU General Public License for more details.
+#
+# You should have received a copy of the GNU General Public License
+# along with Ansible. If not, see <http://www.gnu.org/licenses/>.
+
+DOCUMENTATION = """
+module: jboss
+version_added: "1.4"
+short_description: deploy applications to JBoss
+description:
+ - Deploy applications to JBoss standalone using the filesystem
+options:
+ deployment:
+ required: true
+ description:
+ - The name of the deployment
+ src:
+ required: false
+ description:
+ - The remote path of the application ear or war to deploy
+ deploy_path:
+ required: false
+ default: /var/lib/jbossas/standalone/deployments
+ description:
+ - The location in the filesystem where the deployment scanner listens
+ state:
+ required: false
+ choices: [ present, absent ]
+ default: "present"
+ description:
+ - Whether the application should be deployed or undeployed
+notes:
+ - "The JBoss standalone deployment-scanner has to be enabled in standalone.xml"
+ - "Ensure no identically named application is deployed through the JBoss CLI"
+author: "Jeroen Hoekx (@jhoekx)"
+"""
+
+EXAMPLES = """
+# Deploy a hello world application
+- jboss: src=/tmp/hello-1.0-SNAPSHOT.war deployment=hello.war state=present
+# Update the hello world application
+- jboss: src=/tmp/hello-1.1-SNAPSHOT.war deployment=hello.war state=present
+# Undeploy the hello world application
+- jboss: deployment=hello.war state=absent
+"""
+
+import os
+import shutil
+import time
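+
+# The JBoss deployment scanner signals state through marker files placed next
+# to the artifact: '<name>.deployed' on success, '<name>.failed' on error and
+# '<name>.undeployed' once an application has been removed. The helpers below
+# simply test for those markers.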
+
+def is_deployed(deploy_path, deployment):
+ return os.path.exists(os.path.join(deploy_path, "%s.deployed" % deployment))
+
+def is_undeployed(deploy_path, deployment):
+ return os.path.exists(os.path.join(deploy_path, "%s.undeployed" % deployment))
+
+def is_failed(deploy_path, deployment):
+ return os.path.exists(os.path.join(deploy_path, "%s.failed" % deployment))
+
+def main():
+ module = AnsibleModule(
+ argument_spec = dict(
+ src=dict(),
+ deployment=dict(required=True),
+ deploy_path=dict(default='/var/lib/jbossas/standalone/deployments'),
+ state=dict(choices=['absent', 'present'], default='present'),
+ ),
+ )
+
+ changed = False
+
+ src = module.params['src']
+ deployment = module.params['deployment']
+ deploy_path = module.params['deploy_path']
+ state = module.params['state']
+
+ if state == 'present' and not src:
+ module.fail_json(msg="Argument 'src' required.")
+
+ if not os.path.exists(deploy_path):
+ module.fail_json(msg="deploy_path does not exist.")
+
+ deployed = is_deployed(deploy_path, deployment)
+
+ if state == 'present' and not deployed:
+ if not os.path.exists(src):
+ module.fail_json(msg='Source file %s does not exist.'%(src))
+ if is_failed(deploy_path, deployment):
+ ### Clean up old failed deployment
+ os.remove(os.path.join(deploy_path, "%s.failed"%(deployment)))
+
+ shutil.copyfile(src, os.path.join(deploy_path, deployment))
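+ # Poll the scanner's marker files until the deployment succeeds; bail out as soon as a .failed marker shows up.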
+ while not deployed:
+ deployed = is_deployed(deploy_path, deployment)
+ if is_failed(deploy_path, deployment):
+ module.fail_json(msg='Deploying %s failed.'%(deployment))
+ time.sleep(1)
+ changed = True
+
+ if state == 'present' and deployed:
+ if module.sha1(src) != module.sha1(os.path.join(deploy_path, deployment)):
+ os.remove(os.path.join(deploy_path, "%s.deployed"%(deployment)))
+ shutil.copyfile(src, os.path.join(deploy_path, deployment))
+ deployed = False
+ while not deployed:
+ deployed = is_deployed(deploy_path, deployment)
+ if is_failed(deploy_path, deployment):
+ module.fail_json(msg='Deploying %s failed.'%(deployment))
+ time.sleep(1)
+ changed = True
+
+ if state == 'absent' and deployed:
+ os.remove(os.path.join(deploy_path, "%s.deployed"%(deployment)))
+ while deployed:
+ deployed = not is_undeployed(deploy_path, deployment)
+ if is_failed(deploy_path, deployment):
+ module.fail_json(msg='Undeploying %s failed.'%(deployment))
+ time.sleep(1)
+ changed = True
+
+ module.exit_json(changed=changed)
+
+# import module snippets
+from ansible.module_utils.basic import *
+if __name__ == '__main__':
+ main()
diff --git a/lib/ansible/modules/extras/web_infrastructure/jenkins_job.py b/lib/ansible/modules/extras/web_infrastructure/jenkins_job.py
new file mode 100644
index 0000000000..af5a28c3e9
--- /dev/null
+++ b/lib/ansible/modules/extras/web_infrastructure/jenkins_job.py
@@ -0,0 +1,358 @@
+#!/usr/bin/python
+#
+# This is a free software: you can redistribute it and/or modify
+# it under the terms of the GNU General Public License as published by
+# the Free Software Foundation, either version 3 of the License, or
+# (at your option) any later version.
+#
+# This Ansible library is distributed in the hope that it will be useful,
+# but WITHOUT ANY WARRANTY; without even the implied warranty of
+# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+# GNU General Public License for more details.
+#
+# You should have received a copy of the GNU General Public License
+# along with this library. If not, see <http://www.gnu.org/licenses/>.
+
+DOCUMENTATION = '''
+---
+module: jenkins_job
+short_description: Manage jenkins jobs
+description:
+ - Manage Jenkins jobs by using Jenkins REST API.
+requirements:
+ - "python-jenkins >= 0.4.12"
+ - "lxml >= 3.3.3"
+version_added: "2.2"
+author: "Sergio Millan Rodriguez (@sermilrod)"
+options:
+ config:
+ description:
+ - config in XML format.
+ - Required if job does not yet exist.
+ - Mutually exclusive with C(enabled).
+ - Considered if C(state=present).
+ required: false
+ enabled:
+ description:
+ - Whether the job should be enabled or disabled.
+ - Mutually exclusive with C(config).
+ - Considered if C(state=present).
+ required: false
+ name:
+ description:
+ - Name of the Jenkins job.
+ required: true
+ password:
+ description:
+ - Password to authenticate with the Jenkins server.
+ required: false
+ state:
+ description:
+ - Attribute that specifies if the job has to be created or deleted.
+ required: false
+ default: present
+ choices: ['present', 'absent']
+ token:
+ description:
+ - API token used to authenticate alternatively to password.
+ required: false
+ url:
+ description:
+ - Url where the Jenkins server is accessible.
+ required: false
+ default: http://localhost:8080
+ user:
+ description:
+ - User to authenticate with the Jenkins server.
+ required: false
+'''
+
+EXAMPLES = '''
+# Create a jenkins job using basic authentication
+- jenkins_job:
+ config: "{{ lookup('file', 'templates/test.xml') }}"
+ name: test
+ password: admin
+ url: "http://localhost:8080"
+ user: admin
+
+# Create a jenkins job using the token
+- jenkins_job:
+ config: "{{ lookup('template', 'templates/test.xml.j2') }}"
+ name: test
+ token: asdfasfasfasdfasdfadfasfasdfasdfc
+ url: "http://localhost:8080"
+ user: admin
+
+# Delete a jenkins job using basic authentication
+- jenkins_job:
+ name: test
+ password: admin
+ state: absent
+ url: "http://localhost:8080"
+ user: admin
+
+# Delete a jenkins job using the token
+- jenkins_job:
+ name: test
+ token: asdfasfasfasdfasdfadfasfasdfasdfc
+ state: absent
+ url: "http://localhost:8080"
+ user: admin
+
+# Disable a jenkins job using basic authentication
+- jenkins_job:
+ name: test
+ password: admin
+ enabled: false
+ url: "http://localhost:8080"
+ user: admin
+
+# Disable a jenkins job using the token
+- jenkins_job:
+ name: test
+ token: asdfasfasfasdfasdfadfasfasdfasdfc
+ enabled: false
+ url: "http://localhost:8080"
+ user: admin
+'''
+
+RETURN = '''
+---
+name:
+ description: Name of the jenkins job.
+ returned: success
+ type: string
+ sample: test-job
+state:
+ description: State of the jenkins job.
+ returned: success
+ type: string
+ sample: present
+enabled:
+ description: Whether the jenkins job is enabled or not.
+ returned: success
+ type: bool
+ sample: true
+user:
+ description: User used for authentication.
+ returned: success
+ type: string
+ sample: admin
+url:
+ description: Url to connect to the Jenkins server.
+ returned: success
+ type: string
+ sample: https://jenkins.mydomain.com
+'''
+
+try:
+ import jenkins
+ python_jenkins_installed = True
+except ImportError:
+ python_jenkins_installed = False
+
+try:
+ from lxml import etree as ET
+ python_lxml_installed = True
+except ImportError:
+ python_lxml_installed = False
+
+class JenkinsJob:
+ def __init__(self, module):
+ self.module = module
+
+ self.config = module.params.get('config')
+ self.name = module.params.get('name')
+ self.password = module.params.get('password')
+ self.state = module.params.get('state')
+ self.enabled = module.params.get('enabled')
+ self.token = module.params.get('token')
+ self.user = module.params.get('user')
+ self.jenkins_url = module.params.get('url')
+ self.server = self.get_jenkins_connection()
+
+ self.result = {
+ 'changed': False,
+ 'url': self.jenkins_url,
+ 'name': self.name,
+ 'user': self.user,
+ 'state': self.state,
+ 'diff': {
+ 'before': "",
+ 'after': ""
+ }
+ }
+
+ def get_jenkins_connection(self):
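+ # Choose the auth mode from the supplied credentials: password wins over
+ # token, and a bare user or fully anonymous access are also accepted.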
+ try:
+ if (self.user and self.password):
+ return jenkins.Jenkins(self.jenkins_url, self.user, self.password)
+ elif (self.user and self.token):
+ return jenkins.Jenkins(self.jenkins_url, self.user, self.token)
+ elif (self.user and not (self.password or self.token)):
+ return jenkins.Jenkins(self.jenkins_url, self.user)
+ else:
+ return jenkins.Jenkins(self.jenkins_url)
+ except Exception:
+ e = get_exception()
+ self.module.fail_json(msg='Unable to connect to Jenkins server, %s' % str(e))
+
+ def get_job_status(self):
+ try:
+ return self.server.get_job_info(self.name)['color'].encode('utf-8')
+ except Exception:
+ e = get_exception()
+ self.module.fail_json(msg='Unable to fetch job information, %s' % str(e))
+
+ def job_exists(self):
+ try:
+ return bool(self.server.job_exists(self.name))
+ except Exception:
+ e = get_exception()
+ self.module.fail_json(msg='Unable to validate if job exists, %s for %s' % (str(e), self.jenkins_url))
+
+ def get_config(self):
+ return job_config_to_string(self.config)
+
+ def get_current_config(self):
+ return job_config_to_string(self.server.get_job_config(self.name).encode('utf-8'))
+
+ def has_config_changed(self):
+ # config is optional, if not provided we keep the current config as is
+ if self.config is None:
+ return False
+
+ config_file = self.get_config()
+ machine_file = self.get_current_config()
+
+ self.result['diff']['after'] = config_file
+ self.result['diff']['before'] = machine_file
+
+ return machine_file != config_file
+
+ def present_job(self):
+ if self.config is None and self.enabled is None:
+ self.module.fail_json(msg='one of the following params is required on state=present: config,enabled')
+
+ if not self.job_exists():
+ self.create_job()
+ else:
+ self.update_job()
+
+ def has_state_changed(self, status):
+ # Keep in current state if enabled arg_spec is not given
+ if self.enabled is None:
+ return False
+
+ return ((self.enabled is False and status != "disabled") or
+ (self.enabled is True and status == "disabled"))
+
+ def switch_state(self):
+ if self.enabled is False:
+ self.server.disable_job(self.name)
+ else:
+ self.server.enable_job(self.name)
+
+ def update_job(self):
+ try:
+ status = self.get_job_status()
+
+ # Handle job config
+ if self.has_config_changed():
+ self.result['changed'] = True
+ if not self.module.check_mode:
+ self.server.reconfig_job(self.name, self.get_config())
+
+ # Handle job disable/enable
+ elif self.has_state_changed(status):
+ self.result['changed'] = True
+ if not self.module.check_mode:
+ self.switch_state()
+
+ except Exception:
+ e = get_exception()
+ self.module.fail_json(msg='Unable to reconfigure job, %s for %s' % (str(e), self.jenkins_url))
+
+ def create_job(self):
+ if self.config is None:
+ self.module.fail_json(msg='missing required param: config')
+
+ self.result['changed'] = True
+ try:
+ config_file = self.get_config()
+ self.result['diff']['after'] = config_file
+ if not self.module.check_mode:
+ self.server.create_job(self.name, config_file)
+ except Exception:
+ e = get_exception()
+ self.module.fail_json(msg='Unable to create job, %s for %s' % (str(e), self.jenkins_url))
+
+ def absent_job(self):
+ if self.job_exists():
+ self.result['changed'] = True
+ self.result['diff']['before'] = self.get_current_config()
+ if not self.module.check_mode:
+ try:
+ self.server.delete_job(self.name)
+ except Exception:
+ e = get_exception()
+ self.module.fail_json(msg='Unable to delete job, %s for %s' % (str(e), self.jenkins_url))
+
+ def get_result(self):
+ result = self.result
+ if self.job_exists():
+ result['enabled'] = self.get_job_status() != "disabled"
+ else:
+ result['enabled'] = None
+ return result
+
+def test_dependencies(module):
+ if not python_jenkins_installed:
+ module.fail_json(msg="python-jenkins required for this module. "\
+ "see http://python-jenkins.readthedocs.io/en/latest/install.html")
+
+ if not python_lxml_installed:
+ module.fail_json(msg="lxml required for this module. "\
+ "see http://lxml.de/installation.html")
+
+def job_config_to_string(xml_str):
+ return ET.tostring(ET.fromstring(xml_str))
+
+def main():
+ module = AnsibleModule(
+ argument_spec = dict(
+ config = dict(required=False),
+ name = dict(required=True),
+ password = dict(required=False, no_log=True),
+ state = dict(required=False, choices=['present', 'absent'], default="present"),
+ enabled = dict(required=False, type='bool'),
+ token = dict(required=False, no_log=True),
+ url = dict(required=False, default="http://localhost:8080"),
+ user = dict(required=False)
+ ),
+ mutually_exclusive = [
+ ['password', 'token'],
+ ['config', 'enabled'],
+ ],
+ supports_check_mode=True,
+ )
+
+ test_dependencies(module)
+ jenkins_job = JenkinsJob(module)
+
+ if module.params.get('state') == "present":
+ jenkins_job.present_job()
+ else:
+ jenkins_job.absent_job()
+
+ result = jenkins_job.get_result()
+ module.exit_json(**result)
+
+
+from ansible.module_utils.basic import *
+from ansible.module_utils.pycompat24 import get_exception
+if __name__ == '__main__':
+ main()
diff --git a/lib/ansible/modules/extras/web_infrastructure/jenkins_plugin.py b/lib/ansible/modules/extras/web_infrastructure/jenkins_plugin.py
new file mode 100644
index 0000000000..266d7f49c7
--- /dev/null
+++ b/lib/ansible/modules/extras/web_infrastructure/jenkins_plugin.py
@@ -0,0 +1,830 @@
+#!/usr/bin/python
+# encoding: utf-8
+
+# (c) 2016, Jiri Tyr <jiri.tyr@gmail.com>
+#
+# This file is part of Ansible
+#
+# Ansible is free software: you can redistribute it and/or modify
+# it under the terms of the GNU General Public License as published by
+# the Free Software Foundation, either version 3 of the License, or
+# (at your option) any later version.
+#
+# Ansible is distributed in the hope that it will be useful,
+# but WITHOUT ANY WARRANTY; without even the implied warranty of
+# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+# GNU General Public License for more details.
+#
+# You should have received a copy of the GNU General Public License
+# along with Ansible. If not, see <http://www.gnu.org/licenses/>.
+
+
+from ansible.module_utils.basic import AnsibleModule
+from ansible.module_utils.pycompat24 import get_exception
+from ansible.module_utils.urls import fetch_url
+from ansible.module_utils.urls import url_argument_spec
+import base64
+import hashlib
+import json
+import os
+import tempfile
+import time
+import urllib
+
+
+DOCUMENTATION = '''
+---
+module: jenkins_plugin
+author: Jiri Tyr (@jtyr)
+version_added: '2.2'
+short_description: Add or remove Jenkins plugin
+description:
+ - Ansible module which helps to manage Jenkins plugins.
+
+options:
+ group:
+ required: false
+ default: jenkins
+ description:
+ - Name of the Jenkins group on the OS.
+ jenkins_home:
+ required: false
+ default: /var/lib/jenkins
+ description:
+ - Home directory of the Jenkins user.
+ mode:
+ required: false
+ default: '0644'
+ description:
+ - File mode applied on versioned plugins.
+ name:
+ required: true
+ description:
+ - Plugin name.
+ owner:
+ required: false
+ default: jenkins
+ description:
+ - Name of the Jenkins user on the OS.
+ params:
+ required: false
+ default: null
+ description:
+ - Option used to allow the user to overwrite any of the other options. To
+ remove an option, set the value of the option to C(null).
+ state:
+ required: false
+ choices: [absent, present, pinned, unpinned, enabled, disabled, latest]
+ default: present
+ description:
+ - Desired plugin state.
+ - If C(latest) is set, the check for a new version will be performed
+ every time. This is suitable for keeping the plugin up-to-date.
+ timeout:
+ required: false
+ default: 30
+ description:
+ - Server connection timeout in secs.
+ updates_expiration:
+ required: false
+ default: 86400
+ description:
+ - Number of seconds after which a new copy of the I(update-center.json)
+ file is downloaded. This is used to avoid the need to download the
+ plugin to calculate its checksum when C(latest) is specified.
+ - Set it to C(0) if no cache file should be used. In that case, the
+ plugin file will always be downloaded to calculate its checksum when
+ C(latest) is specified.
+ updates_url:
+ required: false
+ default: https://updates.jenkins-ci.org
+ description:
+ - URL of the Update Centre.
+ - Used as the base URL to download the plugins and the
+ I(update-center.json) JSON file.
+ url:
+ required: false
+ default: http://localhost:8080
+ description:
+ - URL of the Jenkins server.
+ version:
+ required: false
+ default: null
+ description:
+ - Plugin version number.
+ - If this option is specified, all plugin dependencies must be installed
+ manually.
+ - It might take longer to verify that the correct version is installed.
+ This is especially true if a specific version number is specified.
+ with_dependencies:
+ required: false
+ choices: ['yes', 'no']
+ default: 'yes'
+ description:
+ - Defines whether to install plugin dependencies.
+
+notes:
+ - Plugin installation should be run under root or the same user which owns
+ the plugin files on the disk. Only if the plugin is not installed yet and
+ no version is specified, the API installation is performed which requires
+ only the Web UI credentials.
+ - It's necessary to notify the handler or call the I(service) module to
+ restart the Jenkins service after a new plugin was installed.
+ - Pinning works only if the plugin is installed and the Jenkins service was
+ successfully restarted after the plugin installation.
+ - It is not possible to run the module remotely by changing the I(url)
+ parameter to point to the Jenkins server. The module must be used on the
+ host where Jenkins runs as it needs direct access to the plugin files.
+'''
+
+EXAMPLES = '''
+- name: Install plugin
+ jenkins_plugin:
+ name: build-pipeline-plugin
+
+- name: Install plugin without its dependencies
+ jenkins_plugin:
+ name: build-pipeline-plugin
+ with_dependencies: no
+
+- name: Make sure the plugin is always up-to-date
+ jenkins_plugin:
+ name: token-macro
+ state: latest
+
+- name: Install specific version of the plugin
+ jenkins_plugin:
+ name: token-macro
+ version: 1.15
+
+- name: Pin the plugin
+ jenkins_plugin:
+ name: token-macro
+ state: pinned
+
+- name: Unpin the plugin
+ jenkins_plugin:
+ name: token-macro
+ state: unpinned
+
+- name: Enable the plugin
+ jenkins_plugin:
+ name: token-macro
+ state: enabled
+
+- name: Disable the plugin
+ jenkins_plugin:
+ name: token-macro
+ state: disabled
+
+- name: Uninstall plugin
+ jenkins_plugin:
+ name: build-pipeline-plugin
+ state: absent
+
+#
+# Example of how to use the params
+#
+# Define a variable and specify all default parameters you want to use across
+# all jenkins_plugin calls:
+#
+# my_jenkins_params:
+# url_username: admin
+# url_password: p4ssw0rd
+# url: http://localhost:8888
+#
+- name: Install plugin
+ jenkins_plugin:
+ name: build-pipeline-plugin
+ params: "{{ my_jenkins_params }}"
+
+#
+# Example of a Play which handles Jenkins restarts during the state changes
+#
+- name: Jenkins Master play
+ hosts: jenkins-master
+ vars:
+ my_jenkins_plugins:
+ token-macro:
+ enabled: yes
+ build-pipeline-plugin:
+ version: 1.4.9
+ pinned: no
+ enabled: yes
+ tasks:
+ - name: Install plugins without a specific version
+ jenkins_plugin:
+ name: "{{ item.key }}"
+ register: my_jenkins_plugin_unversioned
+ when: >
+ 'version' not in item.value
+ with_dict: my_jenkins_plugins
+
+ - name: Install plugins with a specific version
+ jenkins_plugin:
+ name: "{{ item.key }}"
+ version: "{{ item.value['version'] }}"
+ register: my_jenkins_plugin_versioned
+ when: >
+ 'version' in item.value
+ with_dict: my_jenkins_plugins
+
+ - name: Initiate the fact
+ set_fact:
+ jenkins_restart_required: no
+
+ - name: Check if restart is required by any of the versioned plugins
+ set_fact:
+ jenkins_restart_required: yes
+ when: item.changed
+ with_items: my_jenkins_plugin_versioned.results
+
+ - name: Check if restart is required by any of the unversioned plugins
+ set_fact:
+ jenkins_restart_required: yes
+ when: item.changed
+ with_items: my_jenkins_plugin_unversioned.results
+
+ - name: Restart Jenkins if required
+ service:
+ name: jenkins
+ state: restarted
+ when: jenkins_restart_required
+
+ # Requires python-httplib2 to be installed on the guest
+ - name: Wait for Jenkins to start up
+ uri:
+ url: http://localhost:8080
+ status_code: 200
+ timeout: 5
+ register: jenkins_service_status
+ # Keep trying for 5 mins in 5 sec intervals
+ retries: 60
+ delay: 5
+ until: >
+ 'status' in jenkins_service_status and
+ jenkins_service_status['status'] == 200
+ when: jenkins_restart_required
+
+ - name: Reset the fact
+ set_fact:
+ jenkins_restart_required: no
+ when: jenkins_restart_required
+
+ - name: Plugin pinning
+ jenkins_plugin:
+ name: "{{ item.key }}"
+ state: "{{ 'pinned' if item.value['pinned'] else 'unpinned'}}"
+ when: >
+ 'pinned' in item.value
+ with_dict: my_jenkins_plugins
+
+ - name: Plugin enabling
+ jenkins_plugin:
+ name: "{{ item.key }}"
+ state: "{{ 'enabled' if item.value['enabled'] else 'disabled'}}"
+ when: >
+ 'enabled' in item.value
+ with_dict: my_jenkins_plugins
+'''
+
+RETURN = '''
+plugin:
+ description: plugin name
+ returned: success
+ type: string
+ sample: build-pipeline-plugin
+state:
+ description: state of the target, after execution
+ returned: success
+ type: string
+ sample: "present"
+'''
+
+
+class JenkinsPlugin(object):
+ def __init__(self, module):
+ # To be able to call fail_json
+ self.module = module
+
+ # Shortcuts for the params
+ self.params = self.module.params
+ self.url = self.params['url']
+ self.timeout = self.params['timeout']
+
+ # Crumb
+ self.crumb = {}
+
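+ # When Jenkins has CSRF protection enabled, every POST must carry the
+ # crumb issued by the crumbIssuer endpoint, so fetch it up front.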
+ if self._csrf_enabled():
+ self.crumb = self._get_crumb()
+
+ # Get list of installed plugins
+ self._get_installed_plugins()
+
+ def _csrf_enabled(self):
+ csrf_data = self._get_json_data(
+ "%s/%s" % (self.url, "api/json"), 'CSRF')
+
+ return csrf_data["useCrumbs"]
+
+ def _get_json_data(self, url, what, **kwargs):
+ # Get the JSON data
+ r = self._get_url_data(url, what, **kwargs)
+
+ # Parse the JSON data
+ try:
+ json_data = json.load(r)
+ except Exception:
+ e = get_exception()
+ self.module.fail_json(
+ msg="Cannot parse %s JSON data." % what,
+ details=e.message)
+
+ return json_data
+
+ def _get_url_data(
+ self, url, what=None, msg_status=None, msg_exception=None,
+ **kwargs):
+ # Compose default messages
+ if msg_status is None:
+ msg_status = "Cannot get %s" % what
+
+ if msg_exception is None:
+ msg_exception = "Retrieval of %s failed." % what
+
+ # Get the URL data
+ try:
+ response, info = fetch_url(
+ self.module, url, timeout=self.timeout, **kwargs)
+
+ if info['status'] != 200:
+ self.module.fail_json(msg=msg_status, details=info['msg'])
+ except Exception:
+ e = get_exception()
+ self.module.fail_json(msg=msg_exception, details=e.message)
+
+ return response
+
+ def _get_crumb(self):
+ crumb_data = self._get_json_data(
+ "%s/%s" % (self.url, "crumbIssuer/api/json"), 'Crumb')
+
+ if 'crumbRequestField' in crumb_data and 'crumb' in crumb_data:
+ ret = {
+ crumb_data['crumbRequestField']: crumb_data['crumb']
+ }
+ else:
+ self.module.fail_json(
+ msg="Required fields not found in the Crum response.",
+ details=crumb_data)
+
+ return ret
+
+ def _get_installed_plugins(self):
+ plugins_data = self._get_json_data(
+ "%s/%s" % (self.url, "pluginManager/api/json?depth=1"),
+ 'list of plugins')
+
+ # Check if we got valid data
+ if 'plugins' not in plugins_data:
+ self.module.fail_json(msg="No valid plugin data found.")
+
+ # Determine whether the requested plugin is installed, pinned and enabled
+ self.is_installed = False
+ self.is_pinned = False
+ self.is_enabled = False
+
+ for p in plugins_data['plugins']:
+ if p['shortName'] == self.params['name']:
+ self.is_installed = True
+
+ if p['pinned']:
+ self.is_pinned = True
+
+ if p['enabled']:
+ self.is_enabled = True
+
+ break
+
+ def install(self):
+ changed = False
+ plugin_file = (
+ '%s/plugins/%s.jpi' % (
+ self.params['jenkins_home'],
+ self.params['name']))
+
+ if not self.is_installed and self.params['version'] is None:
+ if not self.module.check_mode:
+ # Install the plugin (with dependencies)
+ install_script = (
+ 'd = Jenkins.instance.updateCenter.getPlugin("%s")'
+ '.deploy(); d.get();' % self.params['name'])
+
+ if self.params['with_dependencies']:
+ install_script = (
+ 'Jenkins.instance.updateCenter.getPlugin("%s")'
+ '.getNeededDependencies().each{it.deploy()}; %s' % (
+ self.params['name'], install_script))
+
+ script_data = {
+ 'script': install_script
+ }
+ script_data.update(self.crumb)
+ data = urllib.urlencode(script_data)
+
+ # Send the installation request
+ r = self._get_url_data(
+ "%s/scriptText" % self.url,
+ msg_status="Cannot install plugin.",
+ msg_exception="Plugin installation has failed.",
+ data=data)
+
+ changed = True
+ else:
+ # Check if the plugin directory exists
+ if not os.path.isdir(self.params['jenkins_home']):
+ self.module.fail_json(
+ msg="Jenkins home directory doesn't exist.")
+
+ md5sum_old = None
+ if os.path.isfile(plugin_file):
+ # Make the checksum of the currently installed plugin
+ md5sum_old = hashlib.md5(
+ open(plugin_file, 'rb').read()).hexdigest()
+
+ if self.params['version'] in [None, 'latest']:
+ # Take latest version
+ plugin_url = (
+ "%s/latest/%s.hpi" % (
+ self.params['updates_url'],
+ self.params['name']))
+ else:
+ # Take specific version
+ plugin_url = (
+ "{0}/download/plugins/"
+ "{1}/{2}/{1}.hpi".format(
+ self.params['updates_url'],
+ self.params['name'],
+ self.params['version']))
+
+ if (
+ self.params['updates_expiration'] == 0 or
+ self.params['version'] not in [None, 'latest'] or
+ md5sum_old is None):
+
+ # Download the plugin file directly
+ r = self._download_plugin(plugin_url)
+
+ # Write downloaded plugin into file if checksums don't match
+ if md5sum_old is None:
+ # No previously installed plugin
+ if not self.module.check_mode:
+ self._write_file(plugin_file, r)
+
+ changed = True
+ else:
+ # Get data for the MD5
+ data = r.read()
+
+ # Make new checksum
+ md5sum_new = hashlib.md5(data).hexdigest()
+
+ # If the checksum is different from the currently installed
+ # plugin, store the new plugin
+ if md5sum_old != md5sum_new:
+ if not self.module.check_mode:
+ self._write_file(plugin_file, data)
+
+ changed = True
+ else:
+ # Check for update from the updates JSON file
+ plugin_data = self._download_updates()
+
+ try:
+ sha1_old = hashlib.sha1(open(plugin_file, 'rb').read())
+ except Exception:
+ e = get_exception()
+ self.module.fail_json(
+ msg="Cannot calculate SHA1 of the old plugin.",
+ details=e.message)
+
+ sha1sum_old = base64.b64encode(sha1_old.digest())
+
+ # If the latest version changed, download it
+ if sha1sum_old != plugin_data['sha1']:
+ if not self.module.check_mode:
+ r = self._download_plugin(plugin_url)
+ self._write_file(plugin_file, r)
+
+ changed = True
+
+ # Change file attributes if needed
+ if os.path.isfile(plugin_file):
+ params = {
+ 'dest': plugin_file
+ }
+ params.update(self.params)
+ file_args = self.module.load_file_common_arguments(params)
+
+ if not self.module.check_mode:
+ # Not sure how to run this in the check mode
+ changed = self.module.set_fs_attributes_if_different(
+ file_args, changed)
+ else:
+ # See the comment above
+ changed = True
+
+ return changed
+
+ def _download_updates(self):
+ updates_filename = 'jenkins-plugin-cache.json'
+ updates_dir = os.path.expanduser('~/.ansible/tmp')
+ updates_file = "%s/%s" % (updates_dir, updates_filename)
+ download_updates = True
+
+ # Check if we need to download new updates file
+ if os.path.isfile(updates_file):
+ # Get timestamp when the file was changed last time
+ ts_file = os.stat(updates_file).st_mtime
+ ts_now = time.time()
+
+ if ts_now - ts_file < self.params['updates_expiration']:
+ download_updates = False
+
+ updates_file_orig = updates_file
+
+ # Download the updates file if needed
+ if download_updates:
+ url = "%s/update-center.json" % self.params['updates_url']
+
+ # Get the data
+ r = self._get_url_data(
+ url,
+ msg_status="Remote updates not found.",
+ msg_exception="Updates download failed.")
+
+ # Write the updates file
+ fd_tmp, updates_file = tempfile.mkstemp()
+ os.close(fd_tmp)
+
+ try:
+ fd = open(updates_file, 'wb')
+ except IOError:
+ e = get_exception()
+ self.module.fail_json(
+ msg="Cannot open the tmp updates file %s." % updates_file,
+ details=str(e))
+
+ fd.write(r.read())
+
+ try:
+ fd.close()
+ except IOError:
+ e = get_exception()
+ self.module.fail_json(
+ msg="Cannot close the tmp updates file %s." % updates_file,
+ details=str(e))
+
+ # Open the updates file
+ try:
+ f = open(updates_file)
+ except IOError:
+ e = get_exception()
+ self.module.fail_json(
+ msg="Cannot open temporal updates file.",
+ details=str(e))
+
+ i = 0
+ for line in f:
+            # Read only the second line: update-center.json is wrapped in a
+            # JSONP callback and the JSON payload sits on line two
+ if i == 1:
+ try:
+ data = json.loads(line)
+ except Exception:
+ e = get_exception()
+ self.module.fail_json(
+ msg="Cannot load JSON data from the tmp updates file.",
+ details=e.message)
+
+ break
+
+ i += 1
+
+ # Move the updates file to the right place if we could read it
+ if download_updates:
+ # Make sure the destination directory exists
+ if not os.path.isdir(updates_dir):
+ try:
+ os.makedirs(updates_dir, int('0700', 8))
+ except OSError:
+ e = get_exception()
+ self.module.fail_json(
+ msg="Cannot create temporal directory.",
+ details=e.message)
+
+ self.module.atomic_move(updates_file, updates_file_orig)
+
+ # Check if we have the plugin data available
+ if 'plugins' not in data or self.params['name'] not in data['plugins']:
+ self.module.fail_json(
+ msg="Cannot find plugin data in the updates file.")
+
+ return data['plugins'][self.params['name']]
+
+ def _download_plugin(self, plugin_url):
+ # Download the plugin
+ r = self._get_url_data(
+ plugin_url,
+ msg_status="Plugin not found.",
+ msg_exception="Plugin download failed.")
+
+ return r
+
+ def _write_file(self, f, data):
+ # Store the plugin into a temp file and then move it
+        # tempfile.mkstemp() returns a (fd, path) tuple
+        fd_tmp, tmp_f = tempfile.mkstemp()
+        os.close(fd_tmp)
+
+ try:
+ fd = open(tmp_f, 'wb')
+ except IOError:
+ e = get_exception()
+ self.module.fail_json(
+                msg='Cannot open the temporary plugin file %s.' % tmp_f,
+ details=str(e))
+
+ if isinstance(data, str):
+ d = data
+ else:
+ d = data.read()
+
+ fd.write(d)
+
+ try:
+ fd.close()
+ except IOError:
+ e = get_exception()
+ self.module.fail_json(
+                msg='Cannot close the temporary plugin file %s.' % tmp_f,
+ details=str(e))
+
+ # Move the file onto the right place
+ self.module.atomic_move(tmp_f, f)
+
+ def uninstall(self):
+ changed = False
+
+ # Perform the action
+ if self.is_installed:
+ if not self.module.check_mode:
+ self._pm_query('doUninstall', 'Uninstallation')
+
+ changed = True
+
+ return changed
+
+ def pin(self):
+ return self._pinning('pin')
+
+ def unpin(self):
+ return self._pinning('unpin')
+
+ def _pinning(self, action):
+ changed = False
+
+ # Check if the plugin is pinned/unpinned
+ if (
+ action == 'pin' and not self.is_pinned or
+ action == 'unpin' and self.is_pinned):
+
+ # Perform the action
+ if not self.module.check_mode:
+ self._pm_query(action, "%sning" % action.capitalize())
+
+ changed = True
+
+ return changed
+
+ def enable(self):
+ return self._enabling('enable')
+
+ def disable(self):
+ return self._enabling('disable')
+
+ def _enabling(self, action):
+ changed = False
+
+ # Check if the plugin is pinned/unpinned
+ if (
+ action == 'enable' and not self.is_enabled or
+ action == 'disable' and self.is_enabled):
+
+ # Perform the action
+ if not self.module.check_mode:
+ self._pm_query(
+ "make%sd" % action.capitalize(),
+ "%sing" % action[:-1].capitalize())
+
+ changed = True
+
+ return changed
+
+ def _pm_query(self, action, msg):
+ url = "%s/pluginManager/plugin/%s/%s" % (
+ self.params['url'], self.params['name'], action)
+ data = urllib.urlencode(self.crumb)
+
+ # Send the request
+ self._get_url_data(
+ url,
+ msg_status="Plugin not found. %s" % url,
+ msg_exception="%s has failed." % msg,
+ data=data)
+
+
+def main():
+ # Module arguments
+ argument_spec = url_argument_spec()
+ argument_spec.update(
+ group=dict(default='jenkins'),
+ jenkins_home=dict(default='/var/lib/jenkins'),
+ mode=dict(default='0644', type='raw'),
+ name=dict(required=True),
+ owner=dict(default='jenkins'),
+ params=dict(type='dict'),
+ state=dict(
+ choices=[
+ 'present',
+ 'absent',
+ 'pinned',
+ 'unpinned',
+ 'enabled',
+ 'disabled',
+ 'latest'],
+ default='present'),
+ timeout=dict(default=30, type="int"),
+ updates_expiration=dict(default=86400, type="int"),
+ updates_url=dict(default='https://updates.jenkins-ci.org'),
+ url=dict(default='http://localhost:8080'),
+ url_password=dict(no_log=True),
+ version=dict(),
+ with_dependencies=dict(default=True, type='bool'),
+ )
+ # Module settings
+ module = AnsibleModule(
+ argument_spec=argument_spec,
+ add_file_common_args=True,
+ supports_check_mode=True,
+ )
+
+ # Update module parameters by user's parameters if defined
+ if 'params' in module.params and isinstance(module.params['params'], dict):
+ module.params.update(module.params['params'])
+ # Remove the params
+ module.params.pop('params', None)
+
+ # Force basic authentication
+ module.params['force_basic_auth'] = True
+
+ # Convert timeout to float
+ try:
+ module.params['timeout'] = float(module.params['timeout'])
+ except ValueError:
+ e = get_exception()
+ module.fail_json(
+ msg='Cannot convert %s to float.' % module.params['timeout'],
+ details=str(e))
+
+ # Set version to latest if state is latest
+ if module.params['state'] == 'latest':
+ module.params['state'] = 'present'
+ module.params['version'] = 'latest'
+
+ # Create some shortcuts
+ name = module.params['name']
+ state = module.params['state']
+
+ # Initial change state of the task
+ changed = False
+
+ # Instantiate the JenkinsPlugin object
+ jp = JenkinsPlugin(module)
+
+ # Perform action depending on the requested state
+ if state == 'present':
+ changed = jp.install()
+ elif state == 'absent':
+ changed = jp.uninstall()
+ elif state == 'pinned':
+ changed = jp.pin()
+ elif state == 'unpinned':
+ changed = jp.unpin()
+ elif state == 'enabled':
+ changed = jp.enable()
+ elif state == 'disabled':
+ changed = jp.disable()
+
+ # Print status of the change
+ module.exit_json(changed=changed, plugin=name, state=state)
+
+
+if __name__ == '__main__':
+ main()
diff --git a/lib/ansible/modules/extras/web_infrastructure/jira.py b/lib/ansible/modules/extras/web_infrastructure/jira.py
new file mode 100755
index 0000000000..0053e0a32c
--- /dev/null
+++ b/lib/ansible/modules/extras/web_infrastructure/jira.py
@@ -0,0 +1,359 @@
+#!/usr/bin/python
+# -*- coding: utf-8 -*-
+
+# (c) 2014, Steve Smith <ssmith@atlassian.com>
+# Atlassian open-source approval reference OSR-76.
+#
+# This file is part of Ansible.
+#
+# Ansible is free software: you can redistribute it and/or modify
+# it under the terms of the GNU General Public License as published by
+# the Free Software Foundation, either version 3 of the License, or
+# (at your option) any later version.
+#
+# Ansible is distributed in the hope that it will be useful,
+# but WITHOUT ANY WARRANTY; without even the implied warranty of
+# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+# GNU General Public License for more details.
+#
+# You should have received a copy of the GNU General Public License
+# along with Ansible. If not, see <http://www.gnu.org/licenses/>.
+#
+
+DOCUMENTATION = """
+module: jira
+version_added: "1.6"
+short_description: create and modify issues in a JIRA instance
+description:
+ - Create and modify issues in a JIRA instance.
+
+options:
+ uri:
+ required: true
+ description:
+ - Base URI for the JIRA instance
+
+ operation:
+ required: true
+ aliases: [ command ]
+ choices: [ create, comment, edit, fetch, transition ]
+ description:
+ - The operation to perform.
+
+ username:
+ required: true
+ description:
+      - The username to log in with.
+
+ password:
+ required: true
+ description:
+      - The password to log in with.
+
+ project:
+ aliases: [ prj ]
+ required: false
+ description:
+ - The project for this operation. Required for issue creation.
+
+ summary:
+ required: false
+ description:
+ - The issue summary, where appropriate.
+
+ description:
+ required: false
+ description:
+ - The issue description, where appropriate.
+
+ issuetype:
+ required: false
+ description:
+ - The issue type, for issue creation.
+
+ issue:
+ required: false
+ description:
+ - An existing issue key to operate on.
+
+ comment:
+ required: false
+ description:
+ - The comment text to add.
+
+ status:
+ required: false
+ description:
+ - The desired status; only relevant for the transition operation.
+
+ assignee:
+ required: false
+ description:
+      - Sets the assignee on create or transition operations. Note that not all transitions allow this.
+
+ fields:
+ required: false
+ description:
+ - This is a free-form data structure that can contain arbitrary data. This is passed directly to the JIRA REST API (possibly after merging with other required data, as when passed to create). See examples for more information, and the JIRA REST API for the structure required for various fields.
+
+notes:
+ - "Currently this only works with basic-auth."
+
+author: "Steve Smith (@tarka)"
+"""
+
+EXAMPLES = """
+# Create a new issue and add a comment to it:
+- name: Create an issue
+ jira: uri={{server}} username={{user}} password={{pass}}
+ project=ANS operation=create
+ summary="Example Issue" description="Created using Ansible" issuetype=Task
+ register: issue
+
+- name: Comment on issue
+ jira: uri={{server}} username={{user}} password={{pass}}
+ issue={{issue.meta.key}} operation=comment
+ comment="A comment added by Ansible"
+
+# Assign an existing issue using edit
+- name: Assign an issue using free-form fields
+ jira: uri={{server}} username={{user}} password={{pass}}
+ issue={{issue.meta.key}} operation=edit
+ assignee=ssmith
+
+# Create an issue with an existing assignee
+- name: Create an assigned issue
+ jira: uri={{server}} username={{user}} password={{pass}}
+ project=ANS operation=create
+ summary="Assigned issue" description="Created and assigned using Ansible"
+ issuetype=Task assignee=ssmith
+
+# Edit an issue using free-form fields
+- name: Set the labels on an issue using free-form fields
+ jira: uri={{server}} username={{user}} password={{pass}}
+ issue={{issue.meta.key}} operation=edit
+ args: { fields: {labels: ["autocreated", "ansible"]}}
+
+- name: Set the labels on an issue, YAML version
+ jira: uri={{server}} username={{user}} password={{pass}}
+ issue={{issue.meta.key}} operation=edit
+ args:
+ fields:
+ labels:
+ - "autocreated"
+ - "ansible"
+ - "yaml"
+
+# Retrieve metadata for an issue and use it to create an account
+- name: Get an issue
+ jira: uri={{server}} username={{user}} password={{pass}}
+ project=ANS operation=fetch issue="ANS-63"
+ register: issue
+
+- name: Create a unix account for the reporter
+ sudo: true
+ user: name="{{issue.meta.fields.creator.name}}" comment="{{issue.meta.fields.creator.displayName}}"
+
+# Transition an issue by target status
+- name: Close the issue
+ jira: uri={{server}} username={{user}} password={{pass}}
+ issue={{issue.meta.key}} operation=transition status="Done"
+"""
+
+try:
+ import json
+except ImportError:
+ try:
+ import simplejson as json
+ except ImportError:
+ # Let snippet from module_utils/basic.py return a proper error in this case
+ pass
+
+import base64
+
+from ansible.module_utils.basic import *
+from ansible.module_utils.urls import *
+from ansible.module_utils.pycompat24 import get_exception
+
+def request(url, user, passwd, data=None, method=None):
+ if data:
+ data = json.dumps(data)
+
+ # NOTE: fetch_url uses a password manager, which follows the
+ # standard request-then-challenge basic-auth semantics. However as
+ # JIRA allows some unauthorised operations it doesn't necessarily
+ # send the challenge, so the request occurs as the anonymous user,
+ # resulting in unexpected results. To work around this we manually
+ # inject the basic-auth header up-front to ensure that JIRA treats
+ # the requests as authorized for this user.
+ auth = base64.encodestring('%s:%s' % (user, passwd)).replace('\n', '')
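+    # e.g. the credentials user:pass produce the header
+    # "Authorization: Basic dXNlcjpwYXNz"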
+ response, info = fetch_url(module, url, data=data, method=method,
+ headers={'Content-Type':'application/json',
+ 'Authorization':"Basic %s" % auth})
+
+ if info['status'] not in (200, 201, 204):
+ module.fail_json(msg=info['msg'])
+
+ body = response.read()
+
+ if body:
+ return json.loads(body)
+ else:
+ return {}
+
+def post(url, user, passwd, data):
+ return request(url, user, passwd, data=data, method='POST')
+
+def put(url, user, passwd, data):
+ return request(url, user, passwd, data=data, method='PUT')
+
+def get(url, user, passwd):
+ return request(url, user, passwd)
+
+
+def create(restbase, user, passwd, params):
+ createfields = {
+ 'project': { 'key': params['project'] },
+ 'summary': params['summary'],
+ 'description': params['description'],
+ 'issuetype': { 'name': params['issuetype'] }}
+
+ # Merge in any additional or overridden fields
+ if params['fields']:
+ createfields.update(params['fields'])
+
+ data = {'fields': createfields}
+
+ url = restbase + '/issue/'
+
+ ret = post(url, user, passwd, data)
+
+ return ret
+
+
+def comment(restbase, user, passwd, params):
+ data = {
+ 'body': params['comment']
+ }
+
+ url = restbase + '/issue/' + params['issue'] + '/comment'
+
+ ret = post(url, user, passwd, data)
+
+ return ret
+
+
+def edit(restbase, user, passwd, params):
+ data = {
+ 'fields': params['fields']
+ }
+
+ url = restbase + '/issue/' + params['issue']
+
+ ret = put(url, user, passwd, data)
+
+ return ret
+
+
+def fetch(restbase, user, passwd, params):
+ url = restbase + '/issue/' + params['issue']
+ ret = get(url, user, passwd)
+ return ret
+
+
+def transition(restbase, user, passwd, params):
+ # Find the transition id
+ turl = restbase + '/issue/' + params['issue'] + "/transitions"
+ tmeta = get(turl, user, passwd)
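+    # tmeta is the JIRA transitions listing, shaped like (assumed):
+    #     {"transitions": [{"id": "2", "name": "Close Issue", ...}, ...]}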
+
+ target = params['status']
+ tid = None
+ for t in tmeta['transitions']:
+ if t['name'] == target:
+ tid = t['id']
+ break
+
+ if not tid:
+ raise ValueError("Failed find valid transition for '%s'" % target)
+
+ # Perform it
+ url = restbase + '/issue/' + params['issue'] + "/transitions"
+ data = { 'transition': { "id" : tid },
+ 'fields': params['fields']}
+
+ ret = post(url, user, passwd, data)
+
+ return ret
+
+
+# Some parameters are required depending on the operation:
+OP_REQUIRED = dict(create=['project', 'issuetype', 'summary', 'description'],
+ comment=['issue', 'comment'],
+                   edit=['issue'],
+                   fetch=['issue'],
+                   transition=['issue', 'status'])
+
+def main():
+
+ global module
+ module = AnsibleModule(
+ argument_spec=dict(
+ uri=dict(required=True),
+ operation=dict(choices=['create', 'comment', 'edit', 'fetch', 'transition'],
+ aliases=['command'], required=True),
+ username=dict(required=True),
+ password=dict(required=True),
+ project=dict(),
+ summary=dict(),
+ description=dict(),
+ issuetype=dict(),
+ issue=dict(aliases=['ticket']),
+ comment=dict(),
+ status=dict(),
+ assignee=dict(),
+ fields=dict(default={})
+ ),
+ supports_check_mode=False
+ )
+
+ op = module.params['operation']
+
+ # Check we have the necessary per-operation parameters
+ missing = []
+ for parm in OP_REQUIRED[op]:
+ if not module.params[parm]:
+ missing.append(parm)
+ if missing:
+ module.fail_json(msg="Operation %s require the following missing parameters: %s" % (op, ",".join(missing)))
+
+ # Handle rest of parameters
+ uri = module.params['uri']
+ user = module.params['username']
+ passwd = module.params['password']
+ if module.params['assignee']:
+ module.params['fields']['assignee'] = { 'name': module.params['assignee'] }
+
+ if not uri.endswith('/'):
+ uri = uri+'/'
+ restbase = uri + 'rest/api/2'
+
+ # Dispatch
+ try:
+
+        # Look up the function corresponding to this operation. This is
+        # safe because AnsibleModule rejects any operation outside the
+        # declared choices.
+ thismod = sys.modules[__name__]
+ method = getattr(thismod, op)
+
+ ret = method(restbase, user, passwd, module.params)
+
+ except Exception:
+ e = get_exception()
+ return module.fail_json(msg=e.message)
+
+
+ module.exit_json(changed=True, meta=ret)
+
+
+
+if __name__ == '__main__':
+    main()
diff --git a/lib/ansible/modules/extras/web_infrastructure/letsencrypt.py b/lib/ansible/modules/extras/web_infrastructure/letsencrypt.py
new file mode 100644
index 0000000000..a43014a8ab
--- /dev/null
+++ b/lib/ansible/modules/extras/web_infrastructure/letsencrypt.py
@@ -0,0 +1,795 @@
+#!/usr/bin/python
+# -*- coding: utf-8 -*-
+
+# (c) 2016 Michael Gruener <michael.gruener@chaosmoon.net>
+#
+# This file is part of Ansible
+#
+# Ansible is free software: you can redistribute it and/or modify
+# it under the terms of the GNU General Public License as published by
+# the Free Software Foundation, either version 3 of the License, or
+# (at your option) any later version.
+#
+# Ansible is distributed in the hope that it will be useful,
+# but WITHOUT ANY WARRANTY; without even the implied warranty of
+# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+# GNU General Public License for more details.
+#
+# You should have received a copy of the GNU General Public License
+# along with Ansible. If not, see <http://www.gnu.org/licenses/>.
+
+import binascii
+import copy
+import textwrap
+import datetime
+
+DOCUMENTATION = '''
+---
+module: letsencrypt
+author: "Michael Gruener (@mgruener)"
+version_added: "2.2"
+short_description: Create SSL certificates with Let's Encrypt
+description:
+ - "Create and renew SSL certificates with Let's Encrypt. Let’s Encrypt is a
+ free, automated, and open certificate authority (CA), run for the
+ public’s benefit. For details see U(https://letsencrypt.org). The current
+ implementation supports the http-01, tls-sni-02 and dns-01 challenges."
+ - "To use this module, it has to be executed at least twice. Either as two
+ different tasks in the same run or during multiple runs."
+ - "Between these two tasks you have to fulfill the required steps for the
+     chosen challenge by whatever means necessary. For http-01 that means
+ creating the necessary challenge file on the destination webserver. For
+ dns-01 the necessary dns record has to be created. tls-sni-02 requires
+ you to create a SSL certificate with the appropriate subjectAlternativeNames.
+ It is I(not) the responsibility of this module to perform these steps."
+ - "For details on how to fulfill these challenges, you might have to read through
+ U(https://tools.ietf.org/html/draft-ietf-acme-acme-02#section-7)"
+ - "Although the defaults are choosen so that the module can be used with
+ the Let's Encrypt CA, the module can be used with any service using the ACME
+ protocol."
+requirements:
+ - "python >= 2.6"
+ - openssl
+options:
+ account_key:
+ description:
+ - "File containing the the Let's Encrypt account RSA key."
+ - "Can be created with C(openssl rsa ...)."
+ required: true
+ account_email:
+ description:
+ - "The email address associated with this account."
+ - "It will be used for certificate expiration warnings."
+ required: false
+ default: null
+ acme_directory:
+ description:
+ - "The ACME directory to use. This is the entry point URL to access
+ CA server API."
+ - "For safety reasons the default is set to the Let's Encrypt staging server.
+ This will create technically correct, but untrusted certificates."
+ required: false
+ default: https://acme-staging.api.letsencrypt.org/directory
+ agreement:
+ description:
+ - "URI to a terms of service document you agree to when using the
+ ACME service at C(acme_directory)."
+ required: false
+ default: 'https://letsencrypt.org/documents/LE-SA-v1.1.1-August-1-2016.pdf'
+ challenge:
+ description: The challenge to be performed.
+ required: false
+ choices: [ 'http-01', 'dns-01', 'tls-sni-02']
+ default: 'http-01'
+ csr:
+ description:
+ - "File containing the CSR for the new certificate."
+ - "Can be created with C(openssl csr ...)."
+ - "The CSR may contain multiple Subject Alternate Names, but each one
+ will lead to an individual challenge that must be fulfilled for the
+ CSR to be signed."
+ required: true
+    aliases: ['src']
+ data:
+ description:
+ - "The data to validate ongoing challenges."
+ - "The value that must be used here will be provided by a previous use
+ of this module."
+ required: false
+ default: null
+ dest:
+ description: The destination file for the certificate.
+ required: true
+    aliases: ['cert']
+ remaining_days:
+ description:
+ - "The number of days the certificate must have left being valid before it
+ will be renewed."
+ required: false
+ default: 10
+'''
+
+EXAMPLES = '''
+- letsencrypt:
+ account_key: /etc/pki/cert/private/account.key
+ csr: /etc/pki/cert/csr/sample.com.csr
+ dest: /etc/httpd/ssl/sample.com.crt
+ register: sample_com_challenge
+
+# perform the necessary steps to fulfill the challenge
+# for example:
+#
+# - copy:
+#       dest: /var/www/html/{{ sample_com_challenge['challenge_data']['sample.com']['http-01']['resource'] }}
+#       content: "{{ sample_com_challenge['challenge_data']['sample.com']['http-01']['resource_value'] }}"
+# when: sample_com_challenge|changed
+
+- letsencrypt:
+ account_key: /etc/pki/cert/private/account.key
+ csr: /etc/pki/cert/csr/sample.com.csr
+ dest: /etc/httpd/ssl/sample.com.crt
+ data: "{{ sample_com_challenge }}"
+'''
+
+RETURN = '''
+cert_days:
+ description: the number of days the certificate remains valid.
+ returned: success
+challenge_data:
+ description: per domain / challenge type challenge data
+ returned: changed
+ type: dictionary
+ contains:
+ resource:
+ description: the challenge resource that must be created for validation
+ returned: changed
+ type: string
+ sample: .well-known/acme-challenge/evaGxfADs6pSRb2LAv9IZf17Dt3juxGJ-PCt92wr-oA
+ resource_value:
+ description: the value the resource has to produce for the validation
+ returned: changed
+ type: string
+ sample: IlirfxKKXA...17Dt3juxGJ-PCt92wr-oA
+authorizations:
+ description: ACME authorization data.
+ returned: changed
+ type: list
+ contains:
+ authorization:
+ description: ACME authorization object. See https://tools.ietf.org/html/draft-ietf-acme-acme-02#section-6.1.2
+ returned: success
+ type: dict
+'''
+
+def nopad_b64(data):
+ return base64.urlsafe_b64encode(data).decode('utf8').replace("=", "")
+
+def simple_get(module,url):
+ resp, info = fetch_url(module, url, method='GET')
+
+    result = None
+    # make sure 'content' is always defined, even if the read fails
+    content = None
+    try:
+        content = resp.read()
+    except AttributeError:
+        if info['body']:
+            content = info['body']
+
+ if content:
+ if info['content-type'].startswith('application/json'):
+ try:
+ result = module.from_json(content.decode('utf8'))
+ except ValueError:
+ module.fail_json(msg="Failed to parse the ACME response: {0} {1}".format(url,content))
+ else:
+ result = content
+
+ if info['status'] >= 400:
+ module.fail_json(msg="ACME request failed: CODE: {0} RESULT:{1}".format(info['status'],result))
+ return result
+
+def get_cert_days(module,cert_file):
+ '''
+ Return the days the certificate in cert_file remains valid and -1
+ if the file was not found.
+ '''
+ if not os.path.exists(cert_file):
+ return -1
+
+ openssl_bin = module.get_bin_path('openssl', True)
+ openssl_cert_cmd = [openssl_bin, "x509", "-in", cert_file, "-noout", "-text"]
+ _, out, _ = module.run_command(openssl_cert_cmd,check_rc=True)
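+    # The regex below matches the validity line of the `openssl x509 -text`
+    # output, e.g. (assumed format): "Not After : Oct  3 13:19:01 2017 GMT"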
+ try:
+ not_after_str = re.search(r"\s+Not After\s*:\s+(.*)",out.decode('utf8')).group(1)
+ not_after = datetime.datetime.fromtimestamp(time.mktime(time.strptime(not_after_str,'%b %d %H:%M:%S %Y %Z')))
+    except AttributeError:
+        module.fail_json(msg="No 'Not After' date found in {0}".format(cert_file))
+    except ValueError:
+        module.fail_json(msg="Failed to parse the 'Not After' date of {0}".format(cert_file))
+ now = datetime.datetime.utcnow()
+ return (not_after - now).days
+
+# function source: network/basics/uri.py
+def write_file(module, dest, content):
+ '''
+ Write content to destination file dest, only if the content
+ has changed.
+ '''
+ changed = False
+ # create a tempfile with some test content
+ _, tmpsrc = tempfile.mkstemp()
+ f = open(tmpsrc, 'wb')
+ try:
+ f.write(content)
+ except Exception as err:
+ os.remove(tmpsrc)
+ module.fail_json(msg="failed to create temporary content file: %s" % str(err))
+ f.close()
+ checksum_src = None
+ checksum_dest = None
+ # raise an error if there is no tmpsrc file
+ if not os.path.exists(tmpsrc):
+ os.remove(tmpsrc)
+ module.fail_json(msg="Source %s does not exist" % (tmpsrc))
+ if not os.access(tmpsrc, os.R_OK):
+ os.remove(tmpsrc)
+ module.fail_json( msg="Source %s not readable" % (tmpsrc))
+ checksum_src = module.sha1(tmpsrc)
+ # check if there is no dest file
+ if os.path.exists(dest):
+ # raise an error if copy has no permission on dest
+ if not os.access(dest, os.W_OK):
+ os.remove(tmpsrc)
+ module.fail_json(msg="Destination %s not writable" % (dest))
+ if not os.access(dest, os.R_OK):
+ os.remove(tmpsrc)
+ module.fail_json(msg="Destination %s not readable" % (dest))
+ checksum_dest = module.sha1(dest)
+ else:
+ if not os.access(os.path.dirname(dest), os.W_OK):
+ os.remove(tmpsrc)
+ module.fail_json(msg="Destination dir %s not writable" % (os.path.dirname(dest)))
+ if checksum_src != checksum_dest:
+ try:
+ shutil.copyfile(tmpsrc, dest)
+ changed = True
+ except Exception as err:
+ os.remove(tmpsrc)
+ module.fail_json(msg="failed to copy %s to %s: %s" % (tmpsrc, dest, str(err)))
+ os.remove(tmpsrc)
+ return changed
+
+class ACMEDirectory(object):
+ '''
+ The ACME server directory. Gives access to the available resources
+ and the Replay-Nonce for a given uri. This only works for
+ uris that permit GET requests (so normally not the ones that
+ require authentication).
+ https://tools.ietf.org/html/draft-ietf-acme-acme-02#section-6.2
+ '''
+ def __init__(self, module):
+ self.module = module
+ self.directory_root = module.params['acme_directory']
+
+ self.directory = simple_get(self.module,self.directory_root)
+
+ def __getitem__(self, key): return self.directory[key]
+
+ def get_nonce(self,resource=None):
+ url = self.directory_root
+ if resource is not None:
+ url = resource
+ _, info = fetch_url(self.module, url, method='HEAD')
+ if info['status'] != 200:
+ self.module.fail_json(msg="Failed to get replay-nonce, got status {0}".format(info['status']))
+ return info['replay-nonce']
+
+class ACMEAccount(object):
+ '''
+ ACME account object. Handles the authorized communication with the
+    ACME server. Provides access to account-bound information such as
+    the currently active authorizations and valid certificates
+ '''
+ def __init__(self,module):
+ self.module = module
+ self.agreement = module.params['agreement']
+ self.key = module.params['account_key']
+ self.email = module.params['account_email']
+ self.data = module.params['data']
+ self.directory = ACMEDirectory(module)
+ self.uri = None
+ self.changed = False
+
+ self._authz_list_uri = None
+ self._certs_list_uri = None
+
+ if not os.path.exists(self.key):
+ module.fail_json(msg="Account key %s not found" % (self.key))
+
+ self._openssl_bin = module.get_bin_path('openssl', True)
+
+ pub_hex, pub_exp = self._parse_account_key(self.key)
+ self.jws_header = {
+ "alg": "RS256",
+ "jwk": {
+ "e": nopad_b64(binascii.unhexlify(pub_exp.encode("utf-8"))),
+ "kty": "RSA",
+ "n": nopad_b64(binascii.unhexlify(re.sub(r"(\s|:)", "", pub_hex).encode("utf-8"))),
+ },
+ }
+ self.init_account()
+
+ def get_keyauthorization(self,token):
+ '''
+ Returns the key authorization for the given token
+ https://tools.ietf.org/html/draft-ietf-acme-acme-02#section-7.1
+ '''
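+        # The result is "<token>.<account-key-thumbprint>", e.g. (token
+        # taken from the RETURN sample above):
+        #     "evaGxfADs6pSRb2LAv9IZf17Dt3juxGJ-PCt92wr-oA.<thumbprint>"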
+ accountkey_json = json.dumps(self.jws_header['jwk'], sort_keys=True, separators=(',', ':'))
+ thumbprint = nopad_b64(hashlib.sha256(accountkey_json.encode('utf8')).digest())
+ return "{0}.{1}".format(token, thumbprint)
+
+ def _parse_account_key(self,key):
+ '''
+ Parses an RSA key file in PEM format and returns the modulus
+ and public exponent of the key
+ '''
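+        # Relies on the text output of `openssl rsa -noout -text`, which
+        # looks like this (assumed format):
+        #     modulus:
+        #         00:a1:b2:c3:...
+        #     publicExponent: 65537 (0x10001)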
+ openssl_keydump_cmd = [self._openssl_bin, "rsa", "-in", key, "-noout", "-text"]
+ _, out, _ = self.module.run_command(openssl_keydump_cmd,check_rc=True)
+
+ pub_hex, pub_exp = re.search(
+ r"modulus:\n\s+00:([a-f0-9\:\s]+?)\npublicExponent: ([0-9]+)",
+ out.decode('utf8'), re.MULTILINE|re.DOTALL).groups()
+ pub_exp = "{0:x}".format(int(pub_exp))
+ if len(pub_exp) % 2:
+ pub_exp = "0{0}".format(pub_exp)
+
+ return pub_hex, pub_exp
+
+ def send_signed_request(self, url, payload):
+ '''
+ Sends a JWS signed HTTP POST request to the ACME server and returns
+ the response as dictionary
+ https://tools.ietf.org/html/draft-ietf-acme-acme-02#section-5.2
+ '''
+ protected = copy.deepcopy(self.jws_header)
+ protected["nonce"] = self.directory.get_nonce()
+
+ try:
+ payload64 = nopad_b64(self.module.jsonify(payload).encode('utf8'))
+ protected64 = nopad_b64(self.module.jsonify(protected).encode('utf8'))
+ except Exception as e:
+ self.module.fail_json(msg="Failed to encode payload / headers as JSON: {0}".format(e))
+
+ openssl_sign_cmd = [self._openssl_bin, "dgst", "-sha256", "-sign", self.key]
+ sign_payload = "{0}.{1}".format(protected64, payload64).encode('utf8')
+ _, out, _ = self.module.run_command(openssl_sign_cmd,data=sign_payload,check_rc=True, binary_data=True)
+
+ data = self.module.jsonify({
+ "header": self.jws_header,
+ "protected": protected64,
+ "payload": payload64,
+ "signature": nopad_b64(out),
+ })
+
+ resp, info = fetch_url(self.module, url, data=data, method='POST')
+        result = None
+        # make sure 'content' is always defined, even if the read fails
+        content = None
+        try:
+            content = resp.read()
+        except AttributeError:
+            if info['body']:
+                content = info['body']
+
+ if content:
+ if info['content-type'].startswith('application/json'):
+ try:
+ result = self.module.from_json(content.decode('utf8'))
+ except ValueError:
+ self.module.fail_json(msg="Failed to parse the ACME response: {0} {1}".format(url,content))
+ else:
+ result = content
+
+ return result,info
+
+ def _new_reg(self,contact=[]):
+ '''
+ Registers a new ACME account. Returns True if the account was
+        created and False if it already existed (i.e. it was not newly
+ created)
+ https://tools.ietf.org/html/draft-ietf-acme-acme-02#section-6.3
+ '''
+ if self.uri is not None:
+ return True
+
+ new_reg = {
+ 'resource': 'new-reg',
+ 'agreement': self.agreement,
+ 'contact': contact
+ }
+
+ result, info = self.send_signed_request(self.directory['new-reg'], new_reg)
+ if 'location' in info:
+ self.uri = info['location']
+
+ if info['status'] in [200,201]:
+ # Account did not exist
+ self.changed = True
+ return True
+ elif info['status'] == 409:
+ # Account did exist
+ return False
+ else:
+ self.module.fail_json(msg="Error registering: {0} {1}".format(info['status'], result))
+
+ def init_account(self):
+ '''
+ Create or update an account on the ACME server. As the only way
+ (without knowing an account URI) to test if an account exists
+ is to try and create one with the provided account key, this
+ method will always result in an account being present (except
+        in error situations). If the account already exists, it will
+ update the contact information.
+ https://tools.ietf.org/html/draft-ietf-acme-acme-02#section-6.3
+ '''
+
+ contact = []
+ if self.email:
+ contact.append('mailto:' + self.email)
+
+ # if this is not a new registration (e.g. existing account)
+ if not self._new_reg(contact):
+ # pre-existing account, get account data...
+ result, _ = self.send_signed_request(self.uri, {'resource':'reg'})
+
+ # XXX: letsencrypt/boulder#1435
+ if 'authorizations' in result:
+ self._authz_list_uri = result['authorizations']
+ if 'certificates' in result:
+ self._certs_list_uri = result['certificates']
+
+ # ...and check if update is necessary
+ do_update = False
+ if 'contact' in result:
+ if cmp(contact,result['contact']) != 0:
+ do_update = True
+ elif len(contact) > 0:
+ do_update = True
+
+ if do_update:
+ upd_reg = result
+ upd_reg['contact'] = contact
+ result, _ = self.send_signed_request(self.uri, upd_reg)
+ self.changed = True
+
+ def get_authorizations(self):
+ '''
+ Return a list of currently active authorizations
+ https://tools.ietf.org/html/draft-ietf-acme-acme-02#section-6.4
+ '''
+ authz_list = {'authorizations': []}
+ if self._authz_list_uri is None:
+ # XXX: letsencrypt/boulder#1435
+ # Workaround, retrieve the known authorization urls
+ # from the data attribute
+ # It is also a way to limit the queried authorizations, which
+ # might become relevant at some point
+ if (self.data is not None) and ('authorizations' in self.data):
+ for auth in self.data['authorizations']:
+ authz_list['authorizations'].append(auth['uri'])
+ else:
+ return []
+ else:
+ # TODO: need to handle pagination
+ authz_list = simple_get(self.module, self._authz_list_uri)
+
+ authz = []
+ for auth_uri in authz_list['authorizations']:
+ auth = simple_get(self.module,auth_uri)
+ auth['uri'] = auth_uri
+ authz.append(auth)
+
+ return authz
+
+class ACMEClient(object):
+ '''
+ ACME client class. Uses an ACME account object and a CSR to
+ start and validate ACME challenges and download the respective
+ certificates.
+ '''
+ def __init__(self,module):
+ self.module = module
+ self.challenge = module.params['challenge']
+ self.csr = module.params['csr']
+ self.dest = module.params['dest']
+ self.account = ACMEAccount(module)
+ self.directory = self.account.directory
+ self.authorizations = self.account.get_authorizations()
+ self.cert_days = -1
+ self.changed = self.account.changed
+
+ if not os.path.exists(self.csr):
+ module.fail_json(msg="CSR %s not found" % (self.csr))
+
+ self._openssl_bin = module.get_bin_path('openssl', True)
+ self.domains = self._get_csr_domains()
+
+ def _get_csr_domains(self):
+ '''
+ Parse the CSR and return the list of requested domains
+ '''
+ openssl_csr_cmd = [self._openssl_bin, "req", "-in", self.csr, "-noout", "-text"]
+ _, out, _ = self.module.run_command(openssl_csr_cmd,check_rc=True)
+
+ domains = set([])
+ common_name = re.search(r"Subject:.*? CN=([^\s,;/]+)", out.decode('utf8'))
+ if common_name is not None:
+ domains.add(common_name.group(1))
+ subject_alt_names = re.search(r"X509v3 Subject Alternative Name: \n +([^\n]+)\n", out.decode('utf8'), re.MULTILINE|re.DOTALL)
+ if subject_alt_names is not None:
+ for san in subject_alt_names.group(1).split(", "):
+ if san.startswith("DNS:"):
+ domains.add(san[4:])
+ return domains
+
+
+ def _get_domain_auth(self,domain):
+ '''
+ Get the status string of the first authorization for the given domain.
+ Return None if no active authorization for the given domain was found.
+ '''
+ if self.authorizations is None:
+ return None
+
+ for auth in self.authorizations:
+ if (auth['identifier']['type'] == 'dns') and (auth['identifier']['value'] == domain):
+ return auth
+ return None
+
+ def _add_or_update_auth(self,auth):
+ '''
+        Add or update the given authorization in the global authorizations list.
+ Return True if the auth was updated/added and False if no change was
+ necessary.
+ '''
+ for index,cur_auth in enumerate(self.authorizations):
+ if (cur_auth['uri'] == auth['uri']):
+ # does the auth parameter contain updated data?
+ if cmp(cur_auth,auth) != 0:
+ # yes, update our current authorization list
+ self.authorizations[index] = auth
+ return True
+ else:
+ return False
+ # this is a new authorization, add it to the list of current
+ # authorizations
+ self.authorizations.append(auth)
+ return True
+
+ def _new_authz(self,domain):
+ '''
+ Create a new authorization for the given domain.
+ Return the authorization object of the new authorization
+ https://tools.ietf.org/html/draft-ietf-acme-acme-02#section-6.4
+ '''
+ if self.account.uri is None:
+ return
+
+ new_authz = {
+ "resource": "new-authz",
+ "identifier": {"type": "dns", "value": domain},
+ }
+
+ result, info = self.account.send_signed_request(self.directory['new-authz'], new_authz)
+ if info['status'] not in [200,201]:
+ self.module.fail_json(msg="Error requesting challenges: CODE: {0} RESULT: {1}".format(info['status'], result))
+ else:
+ result['uri'] = info['location']
+ return result
+
+ def _get_challenge_data(self,auth):
+ '''
+ Returns a dict with the data for all proposed (and supported) challenges
+ of the given authorization.
+ '''
+
+ data = {}
+ # no need to choose a specific challenge here as this module
+ # is not responsible for fulfilling the challenges. Calculate
+ # and return the required information for each challenge.
+ for challenge in auth['challenges']:
+ type = challenge['type']
+ token = re.sub(r"[^A-Za-z0-9_\-]", "_", challenge['token'])
+ keyauthorization = self.account.get_keyauthorization(token)
+
+            # NOTE: tls-sni-01 is deliberately not supported: it is too
+            # complex to be useful, and tls-sni-02 is an alternative as
+            # soon as it is implemented server-side
+ if type == 'http-01':
+ # https://tools.ietf.org/html/draft-ietf-acme-acme-02#section-7.2
+ resource = '.well-known/acme-challenge/' + token
+ value = keyauthorization
+ elif type == 'tls-sni-02':
+ # https://tools.ietf.org/html/draft-ietf-acme-acme-02#section-7.3
+ token_digest = hashlib.sha256(token.encode('utf8')).hexdigest()
+ ka_digest = hashlib.sha256(keyauthorization.encode('utf8')).hexdigest()
+ len_token_digest = len(token_digest)
+ len_ka_digest = len(ka_digest)
+ resource = 'subjectAlternativeNames'
+ value = [
+ "{0}.{1}.token.acme.invalid".format(token_digest[:len_token_digest/2],token_digest[len_token_digest/2:]),
+ "{0}.{1}.ka.acme.invalid".format(ka_digest[:len_ka_digest/2],ka_digest[len_ka_digest/2:]),
+ ]
+ elif type == 'dns-01':
+ # https://tools.ietf.org/html/draft-ietf-acme-acme-02#section-7.4
+ resource = '_acme-challenge'
+                value = nopad_b64(hashlib.sha256(keyauthorization.encode('utf8')).digest())
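+                # The playbook is then expected to publish this value as a
+                # DNS TXT record, e.g. (hypothetical zone):
+                #     _acme-challenge.example.com. 300 IN TXT "<resource_value>"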
+ else:
+ continue
+
+ data[type] = { 'resource': resource, 'resource_value': value }
+ return data
+
+ def _validate_challenges(self,auth):
+ '''
+ Validate the authorization provided in the auth dict. Returns True
+        when the validation was successful and False when it was not.
+ '''
+ for challenge in auth['challenges']:
+ if self.challenge != challenge['type']:
+ continue
+
+ uri = challenge['uri']
+ token = re.sub(r"[^A-Za-z0-9_\-]", "_", challenge['token'])
+ keyauthorization = self.account.get_keyauthorization(token)
+
+ challenge_response = {
+ "resource": "challenge",
+ "keyAuthorization": keyauthorization,
+ }
+ result, info = self.account.send_signed_request(uri, challenge_response)
+ if info['status'] not in [200,202]:
+ self.module.fail_json(msg="Error validating challenge: CODE: {0} RESULT: {1}".format(info['status'], result))
+
+ status = ''
+
+ while status not in ['valid','invalid','revoked']:
+ result = simple_get(self.module,auth['uri'])
+ result['uri'] = auth['uri']
+ if self._add_or_update_auth(result):
+ self.changed = True
+ # draft-ietf-acme-acme-02
+ # "status (required, string): ...
+ # If this field is missing, then the default value is "pending"."
+ if 'status' not in result:
+ status = 'pending'
+ else:
+ status = result['status']
+ time.sleep(2)
+
+ if status == 'invalid':
+ error_details = ''
+ # multiple challenges could have failed at this point, gather error
+ # details for all of them before failing
+ for challenge in result['challenges']:
+ if challenge['status'] == 'invalid':
+ error_details += ' CHALLENGE: {0}'.format(challenge['type'])
+ if 'error' in challenge:
+ error_details += ' DETAILS: {0};'.format(challenge['error']['detail'])
+ else:
+ error_details += ';'
+ self.module.fail_json(msg="Authorization for {0} returned invalid: {1}".format(result['identifier']['value'],error_details))
+
+ return status == 'valid'
+
+ def _new_cert(self):
+ '''
+ Create a new certificate based on the csr.
+ Return the certificate object as dict
+ https://tools.ietf.org/html/draft-ietf-acme-acme-02#section-6.5
+ '''
+ openssl_csr_cmd = [self._openssl_bin, "req", "-in", self.csr, "-outform", "DER"]
+ _, out, _ = self.module.run_command(openssl_csr_cmd,check_rc=True)
+
+ new_cert = {
+ "resource": "new-cert",
+ "csr": nopad_b64(out),
+ }
+ result, info = self.account.send_signed_request(self.directory['new-cert'], new_cert)
+ if info['status'] not in [200,201]:
+ self.module.fail_json(msg="Error new cert: CODE: {0} RESULT: {1}".format(info['status'], result))
+ else:
+ return {'cert': result, 'uri': info['location']}
+
+ def _der_to_pem(self,der_cert):
+ '''
+ Convert the DER format certificate in der_cert to a PEM format
+ certificate and return it.
+ '''
+ return """-----BEGIN CERTIFICATE-----\n{0}\n-----END CERTIFICATE-----\n""".format(
+ "\n".join(textwrap.wrap(base64.b64encode(der_cert).decode('utf8'), 64)))
+
+ def do_challenges(self):
+ '''
+ Create new authorizations for all domains of the CSR and return
+        the challenge details for the chosen challenge type.
+ '''
+ data = {}
+ for domain in self.domains:
+ auth = self._get_domain_auth(domain)
+ if auth is None:
+ new_auth = self._new_authz(domain)
+ self._add_or_update_auth(new_auth)
+ data[domain] = self._get_challenge_data(new_auth)
+ self.changed = True
+ elif (auth['status'] == 'pending') or ('status' not in auth):
+ # draft-ietf-acme-acme-02
+ # "status (required, string): ...
+ # If this field is missing, then the default value is "pending"."
+ self._validate_challenges(auth)
+                # _validate_challenges updates the global authorization dict,
+ # so get the current version of the authorization we are working
+ # on to retrieve the challenge data
+ data[domain] = self._get_challenge_data(self._get_domain_auth(domain))
+
+ return data
+
+ def get_certificate(self):
+ '''
+ Request a new certificate and write it to the destination file.
+ Only do this if a destination file was provided and if all authorizations
+        for the domains of the CSR are valid. No return value.
+ '''
+ if self.dest is None:
+ return
+
+ for domain in self.domains:
+ auth = self._get_domain_auth(domain)
+ if auth is None or ('status' not in auth) or (auth['status'] != 'valid'):
+ return
+
+ cert = self._new_cert()
+ if cert['cert'] is not None:
+ pem_cert = self._der_to_pem(cert['cert'])
+ if write_file(self.module,self.dest,pem_cert):
+ self.cert_days = get_cert_days(self.module,self.dest)
+ self.changed = True
+
+def main():
+ module = AnsibleModule(
+ argument_spec = dict(
+ account_key = dict(required=True, type='path'),
+ account_email = dict(required=False, default=None, type='str'),
+ acme_directory = dict(required=False, default='https://acme-staging.api.letsencrypt.org/directory', type='str'),
+ agreement = dict(required=False, default='https://letsencrypt.org/documents/LE-SA-v1.1.1-August-1-2016.pdf', type='str'),
+ challenge = dict(required=False, default='http-01', choices=['http-01', 'dns-01', 'tls-sni-02'], type='str'),
+ csr = dict(required=True, aliases=['src'], type='path'),
+ data = dict(required=False, no_log=True, default=None, type='dict'),
+ dest = dict(required=True, aliases=['cert'], type='path'),
+ remaining_days = dict(required=False, default=10, type='int'),
+ ),
+ supports_check_mode = True,
+ )
+
+ cert_days = get_cert_days(module,module.params['dest'])
+ if cert_days < module.params['remaining_days']:
+ # If checkmode is active, base the changed state solely on the status
+ # of the certificate file as all other actions (accessing an account, checking
+ # the authorization status...) would lead to potential changes of the current
+ # state
+ if module.check_mode:
+ module.exit_json(changed=True,authorizations={},
+ challenge_data={},cert_days=cert_days)
+ else:
+ client = ACMEClient(module)
+ client.cert_days = cert_days
+ data = client.do_challenges()
+ client.get_certificate()
+ module.exit_json(changed=client.changed,authorizations=client.authorizations,
+ challenge_data=data,cert_days=client.cert_days)
+ else:
+ module.exit_json(changed=False,cert_days=cert_days)
+
+# import module snippets
+from ansible.module_utils.basic import *
+from ansible.module_utils.urls import *
+
+if __name__ == '__main__':
+ main()
diff --git a/lib/ansible/modules/extras/web_infrastructure/taiga_issue.py b/lib/ansible/modules/extras/web_infrastructure/taiga_issue.py
new file mode 100644
index 0000000000..e58c6c0270
--- /dev/null
+++ b/lib/ansible/modules/extras/web_infrastructure/taiga_issue.py
@@ -0,0 +1,313 @@
+#!/usr/bin/python
+# -*- coding: utf-8 -*-
+
+# (c) 2015, Alejandro Guirao <lekumberri@gmail.com>
+#
+# This file is part of Ansible.
+#
+# Ansible is free software: you can redistribute it and/or modify
+# it under the terms of the GNU General Public License as published by
+# the Free Software Foundation, either version 3 of the License, or
+# (at your option) any later version.
+#
+# Ansible is distributed in the hope that it will be useful,
+# but WITHOUT ANY WARRANTY; without even the implied warranty of
+# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+# GNU General Public License for more details.
+#
+# You should have received a copy of the GNU General Public License
+# along with Ansible. If not, see <http://www.gnu.org/licenses/>.
+
+DOCUMENTATION = '''
+---
+module: taiga_issue
+short_description: Creates/deletes an issue in a Taiga Project Management Platform
+description:
+ - Creates/deletes an issue in a Taiga Project Management Platform (U(https://taiga.io)).
+ - An issue is identified by the combination of project, issue subject and issue type.
+ - This module implements the creation or deletion of issues (not the update).
+version_added: "2.0"
+options:
+ taiga_host:
+ description:
+ - The hostname of the Taiga instance.
+ required: False
+ default: https://api.taiga.io
+ project:
+ description:
+ - Name of the project containing the issue. Must exist previously.
+ required: True
+ subject:
+ description:
+ - The issue subject.
+ required: True
+ issue_type:
+ description:
+ - The issue type. Must exist previously.
+ required: True
+ priority:
+ description:
+ - The issue priority. Must exist previously.
+ required: False
+ default: Normal
+ status:
+ description:
+ - The issue status. Must exist previously.
+ required: False
+ default: New
+ severity:
+ description:
+ - The issue severity. Must exist previously.
+ required: False
+ default: Normal
+ description:
+ description:
+ - The issue description.
+ required: False
+ default: ""
+ attachment:
+ description:
+ - Path to a file to be attached to the issue.
+ required: False
+ default: None
+ attachment_description:
+ description:
+ - A string describing the file to be attached to the issue.
+ required: False
+ default: ""
+ tags:
+ description:
+      - A list of tags to be assigned to the issue.
+ required: False
+ default: []
+ state:
+ description:
+ - Whether the issue should be present or not.
+ required: False
+ choices: ["present", "absent"]
+ default: present
+author: Alejandro Guirao (@lekum)
+requirements: [python-taiga]
+notes:
+- The authentication is achieved either by the environment variable TAIGA_TOKEN or by the pair of environment variables TAIGA_USERNAME and TAIGA_PASSWORD
+'''
+
+EXAMPLES = '''
+# Create an issue in my hosted Taiga environment and attach an error log
+- taiga_issue:
+ taiga_host: https://mytaigahost.example.com
+ project: myproject
+ subject: An error has been found
+ issue_type: Bug
+ priority: High
+ status: New
+ severity: Important
+ description: An error has been found. Please check the attached error log for details.
+ attachment: /path/to/error.log
+ attachment_description: Error log file
+ tags:
+ - Error
+ - Needs manual check
+ state: present
+
+# Deletes the previously created issue
+- taiga_issue:
+ taiga_host: https://mytaigahost.example.com
+ project: myproject
+ subject: An error has been found
+ issue_type: Bug
+ state: absent
+'''
+
+RETURN = '''# '''
+from os import getenv
+from os.path import isfile
+
+try:
+ from taiga import TaigaAPI
+ from taiga.exceptions import TaigaException
+ TAIGA_MODULE_IMPORTED=True
+except ImportError:
+ TAIGA_MODULE_IMPORTED=False
+
+def manage_issue(module, taiga_host, project_name, issue_subject, issue_priority,
+ issue_status, issue_type, issue_severity, issue_description,
+ issue_attachment, issue_attachment_description,
+ issue_tags, state, check_mode=False):
+ """
+    Method that creates/deletes an issue depending on whether it exists and on the desired state
+
+ The credentials should be passed via environment variables:
+ - TAIGA_TOKEN
+ - TAIGA_USERNAME and TAIGA_PASSWORD
+
+ Returns a tuple with these elements:
+ - A boolean representing the success of the operation
+ - A descriptive message
+ - A dict with the issue attributes, in case of issue creation, otherwise empty dict
+ """
+
+ changed = False
+
+ try:
+ token = getenv('TAIGA_TOKEN')
+ if token:
+ api = TaigaAPI(host=taiga_host, token=token)
+ else:
+ api = TaigaAPI(host=taiga_host)
+ username = getenv('TAIGA_USERNAME')
+ password = getenv('TAIGA_PASSWORD')
+            # both username and password are required when no token is set
+            if not all([username, password]):
+ return (False, changed, "Missing credentials", {})
+ api.auth(username=username, password=password)
+
+ user_id = api.me().id
+ project_list = filter(lambda x: x.name == project_name, api.projects.list(member=user_id))
+ if len(project_list) != 1:
+ return (False, changed, "Unable to find project %s" % project_name, {})
+ project = project_list[0]
+ project_id = project.id
+
+ priority_list = filter(lambda x: x.name == issue_priority, api.priorities.list(project=project_id))
+ if len(priority_list) != 1:
+ return (False, changed, "Unable to find issue priority %s for project %s" % (issue_priority, project_name), {})
+ priority_id = priority_list[0].id
+
+ status_list = filter(lambda x: x.name == issue_status, api.issue_statuses.list(project=project_id))
+ if len(status_list) != 1:
+ return (False, changed, "Unable to find issue status %s for project %s" % (issue_status, project_name), {})
+ status_id = status_list[0].id
+
+ type_list = filter(lambda x: x.name == issue_type, project.list_issue_types())
+ if len(type_list) != 1:
+ return (False, changed, "Unable to find issue type %s for project %s" % (issue_type, project_name), {})
+ type_id = type_list[0].id
+
+ severity_list = filter(lambda x: x.name == issue_severity, project.list_severities())
+ if len(severity_list) != 1:
+ return (False, changed, "Unable to find severity %s for project %s" % (issue_severity, project_name), {})
+ severity_id = severity_list[0].id
+
+ issue = {
+ "project": project_name,
+ "subject": issue_subject,
+ "priority": issue_priority,
+ "status": issue_status,
+ "type": issue_type,
+ "severity": issue_severity,
+ "description": issue_description,
+ "tags": issue_tags,
+ }
+
+ # An issue is identified by the project_name, the issue_subject and the issue_type
+ matching_issue_list = filter(lambda x: x.subject == issue_subject and x.type == type_id, project.list_issues())
+ matching_issue_list_len = len(matching_issue_list)
+
+ if matching_issue_list_len == 0:
+ # The issue does not exist in the project
+ if state == "present":
+ # This implies a change
+ changed = True
+ if not check_mode:
+ # Create the issue
+ new_issue = project.add_issue(issue_subject, priority_id, status_id, type_id, severity_id, tags=issue_tags, description=issue_description)
+ if issue_attachment:
+ new_issue.attach(issue_attachment, description=issue_attachment_description)
+ issue["attachment"] = issue_attachment
+ issue["attachment_description"] = issue_attachment_description
+ return (True, changed, "Issue created", issue)
+
+ else:
+                # The issue does not exist; nothing to do
+ return (True, changed, "Issue does not exist", {})
+
+ elif matching_issue_list_len == 1:
+ # The issue exists in the project
+ if state == "absent":
+ # This implies a change
+ changed = True
+ if not check_mode:
+ # Delete the issue
+ matching_issue_list[0].delete()
+ return (True, changed, "Issue deleted", {})
+
+ else:
+ # Do nothing
+ return (True, changed, "Issue already exists", {})
+
+ else:
+ # More than 1 matching issue
+ return (False, changed, "More than one issue with subject %s in project %s" % (issue_subject, project_name), {})
+
+ except TaigaException:
+ msg = "An exception happened: %s" % sys.exc_info()[1]
+ return (False, changed, msg, {})
+
+def main():
+ module = AnsibleModule(
+ argument_spec=dict(
+ taiga_host=dict(required=False, default="https://api.taiga.io"),
+ project=dict(required=True),
+ subject=dict(required=True),
+ issue_type=dict(required=True),
+ priority=dict(required=False, default="Normal"),
+ status=dict(required=False, default="New"),
+ severity=dict(required=False, default="Normal"),
+ description=dict(required=False, default=""),
+ attachment=dict(required=False, default=None),
+ attachment_description=dict(required=False, default=""),
+ tags=dict(required=False, default=[], type='list'),
+ state=dict(required=False, choices=['present','absent'], default='present'),
+ ),
+ supports_check_mode=True
+ )
+
+ if not TAIGA_MODULE_IMPORTED:
+ msg = "This module needs python-taiga module"
+ module.fail_json(msg=msg)
+
+ taiga_host = module.params['taiga_host']
+ project_name = module.params['project']
+ issue_subject = module.params['subject']
+ issue_priority = module.params['priority']
+ issue_status = module.params['status']
+ issue_type = module.params['issue_type']
+ issue_severity = module.params['severity']
+ issue_description = module.params['description']
+ issue_attachment = module.params['attachment']
+ issue_attachment_description = module.params['attachment_description']
+ if issue_attachment:
+ if not isfile(issue_attachment):
+ msg = "%s is not a file" % issue_attachment
+ module.fail_json(msg=msg)
+ issue_tags = module.params['tags']
+ state = module.params['state']
+
+ return_status, changed, msg, issue_attr_dict = manage_issue(
+ module,
+ taiga_host,
+ project_name,
+ issue_subject,
+ issue_priority,
+ issue_status,
+ issue_type,
+ issue_severity,
+ issue_description,
+ issue_attachment,
+ issue_attachment_description,
+ issue_tags,
+ state,
+ check_mode=module.check_mode
+ )
+ if return_status:
+ if len(issue_attr_dict) > 0:
+ module.exit_json(changed=changed, msg=msg, issue=issue_attr_dict)
+ else:
+ module.exit_json(changed=changed, msg=msg)
+ else:
+ module.fail_json(msg=msg)
+
+
+from ansible.module_utils.basic import *
+if __name__ == '__main__':
+ main()
diff --git a/lib/ansible/modules/extras/windows/__init__.py b/lib/ansible/modules/extras/windows/__init__.py
new file mode 100644
index 0000000000..e69de29bb2
--- /dev/null
+++ b/lib/ansible/modules/extras/windows/__init__.py
diff --git a/lib/ansible/modules/extras/windows/win_acl.ps1 b/lib/ansible/modules/extras/windows/win_acl.ps1
new file mode 100644
index 0000000000..2e20793e1f
--- /dev/null
+++ b/lib/ansible/modules/extras/windows/win_acl.ps1
@@ -0,0 +1,183 @@
+#!powershell
+# This file is part of Ansible
+#
+# Copyright 2015, Phil Schwartz <schwartzmx@gmail.com>
+# Copyright 2015, Trond Hindenes
+# Copyright 2015, Hans-Joachim Kliemeck <git@kliemeck.de>
+#
+# Ansible is free software: you can redistribute it and/or modify
+# it under the terms of the GNU General Public License as published by
+# the Free Software Foundation, either version 3 of the License, or
+# (at your option) any later version.
+#
+# Ansible is distributed in the hope that it will be useful,
+# but WITHOUT ANY WARRANTY; without even the implied warranty of
+# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+# GNU General Public License for more details.
+#
+# You should have received a copy of the GNU General Public License
+# along with Ansible. If not, see <http://www.gnu.org/licenses/>.
+
+# WANT_JSON
+# POWERSHELL_COMMON
+
+# win_acl module (File/Resources Permission Additions/Removal)
+
+
+#Functions
+Function UserSearch
+{
+ Param ([string]$accountName)
+ #Check if there's a realm specified
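+    # Accepted account formats (assumed examples): "DOMAIN\user",
+    # "user@domain.tld", or a bare "user", which is treated as local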
+
+ $searchDomain = $false
+ $searchDomainUPN = $false
+ if ($accountName.Split("\").count -gt 1)
+ {
+ if ($accountName.Split("\")[0] -ne $env:COMPUTERNAME)
+ {
+ $searchDomain = $true
+ $accountName = $accountName.split("\")[1]
+ }
+ }
+ Elseif ($accountName.contains("@"))
+ {
+ $searchDomain = $true
+ $searchDomainUPN = $true
+ }
+ Else
+ {
+ #Default to local user account
+ $accountName = $env:COMPUTERNAME + "\" + $accountName
+ }
+
+ if ($searchDomain -eq $false)
+ {
+        # Do not use Win32_UserAccount, because accounts such as SYSTEM (BUILTIN\SYSTEM or COMPUTERNAME\SYSTEM) would not be listed; Win32_Account lists groups as well
+ $localaccount = get-wmiobject -class "Win32_Account" -namespace "root\CIMV2" -filter "(LocalAccount = True)" | where {$_.Caption -eq $accountName}
+ if ($localaccount)
+ {
+ return $localaccount.SID
+ }
+ }
+ Else
+ {
+ #Search by samaccountname
+ $Searcher = [adsisearcher]""
+
+ If ($searchDomainUPN -eq $false) {
+ $Searcher.Filter = "sAMAccountName=$($accountName)"
+ }
+ Else {
+ $Searcher.Filter = "userPrincipalName=$($accountName)"
+ }
+
+ $result = $Searcher.FindOne()
+ if ($result)
+ {
+ $user = $result.GetDirectoryEntry()
+
+ # get binary SID from AD account
+ $binarySID = $user.ObjectSid.Value
+
+ # convert to string SID
+ return (New-Object System.Security.Principal.SecurityIdentifier($binarySID,0)).Value
+ }
+ }
+}
+
+$params = Parse-Args $args;
+
+$result = New-Object PSObject;
+Set-Attr $result "changed" $false;
+
+$path = Get-Attr $params "path" -failifempty $true
+$user = Get-Attr $params "user" -failifempty $true
+$rights = Get-Attr $params "rights" -failifempty $true
+
+$type = Get-Attr $params "type" -failifempty $true -validateSet "allow","deny" -resultobj $result
+$state = Get-Attr $params "state" "present" -validateSet "present","absent" -resultobj $result
+
+$inherit = Get-Attr $params "inherit" ""
+$propagation = Get-Attr $params "propagation" "None" -validateSet "None","NoPropagateInherit","InheritOnly" -resultobj $result
+
+If (-Not (Test-Path -Path $path)) {
+ Fail-Json $result "$path file or directory does not exist on the host"
+}
+
+# Test that the user/group is resolvable on the local machine
+$sid = UserSearch -AccountName ($user)
+if (!$sid)
+{
+ Fail-Json $result "$user is not a valid user or group on the host machine or domain"
+}
+
+If (Test-Path -Path $path -PathType Leaf) {
+ $inherit = "None"
+}
+ElseIf ($inherit -eq "") {
+ $inherit = "ContainerInherit, ObjectInherit"
+}
+
+Try {
+ $colRights = [System.Security.AccessControl.FileSystemRights]$rights
+ $InheritanceFlag = [System.Security.AccessControl.InheritanceFlags]$inherit
+ $PropagationFlag = [System.Security.AccessControl.PropagationFlags]$propagation
+
+ If ($type -eq "allow") {
+ $objType =[System.Security.AccessControl.AccessControlType]::Allow
+ }
+ Else {
+ $objType =[System.Security.AccessControl.AccessControlType]::Deny
+ }
+
+ $objUser = New-Object System.Security.Principal.SecurityIdentifier($sid)
+ $objACE = New-Object System.Security.AccessControl.FileSystemAccessRule ($objUser, $colRights, $InheritanceFlag, $PropagationFlag, $objType)
+ $objACL = Get-ACL $path
+
+ # Check if the ACE exists already in the objects ACL list
+ $match = $false
+ ForEach($rule in $objACL.Access){
+ $ruleIdentity = $rule.IdentityReference.Translate([System.Security.Principal.SecurityIdentifier])
+ If (($rule.FileSystemRights -eq $objACE.FileSystemRights) -And ($rule.AccessControlType -eq $objACE.AccessControlType) -And ($ruleIdentity -eq $objACE.IdentityReference) -And ($rule.IsInherited -eq $objACE.IsInherited) -And ($rule.InheritanceFlags -eq $objACE.InheritanceFlags) -And ($rule.PropagationFlags -eq $objACE.PropagationFlags)) {
+ $match = $true
+ Break
+ }
+ }
+
+ If ($state -eq "present" -And $match -eq $false) {
+ Try {
+ $objACL.AddAccessRule($objACE)
+ Set-ACL $path $objACL
+ Set-Attr $result "changed" $true;
+ }
+ Catch {
+ Fail-Json $result "an exception occured when adding the specified rule"
+ }
+ }
+ ElseIf ($state -eq "absent" -And $match -eq $true) {
+ Try {
+ $objACL.RemoveAccessRule($objACE)
+ Set-ACL $path $objACL
+ Set-Attr $result "changed" $true;
+ }
+ Catch {
+ Fail-Json $result "an exception occured when removing the specified rule"
+ }
+ }
+ Else {
+ # A rule was attempting to be added but already exists
+ If ($match -eq $true) {
+ Exit-Json $result "the specified rule already exists"
+ }
+ # A rule didn't exist that was trying to be removed
+ Else {
+ Exit-Json $result "the specified rule does not exist"
+ }
+ }
+}
+Catch {
+ Fail-Json $result "an error occured when attempting to $state $rights permission(s) on $path for $user"
+}
+
+Exit-Json $result
diff --git a/lib/ansible/modules/extras/windows/win_acl.py b/lib/ansible/modules/extras/windows/win_acl.py
new file mode 100644
index 0000000000..89ec45c7e0
--- /dev/null
+++ b/lib/ansible/modules/extras/windows/win_acl.py
@@ -0,0 +1,141 @@
+#!/usr/bin/python
+# -*- coding: utf-8 -*-
+
+# Copyright 2015, Phil Schwartz <schwartzmx@gmail.com>
+# Copyright 2015, Trond Hindenes
+# Copyright 2015, Hans-Joachim Kliemeck <git@kliemeck.de>
+#
+# This file is part of Ansible
+#
+# Ansible is free software: you can redistribute it and/or modify
+# it under the terms of the GNU General Public License as published by
+# the Free Software Foundation, either version 3 of the License, or
+# (at your option) any later version.
+#
+# Ansible is distributed in the hope that it will be useful,
+# but WITHOUT ANY WARRANTY; without even the implied warranty of
+# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+# GNU General Public License for more details.
+#
+# You should have received a copy of the GNU General Public License
+# along with Ansible. If not, see <http://www.gnu.org/licenses/>.
+
+# this is a windows documentation stub. actual code lives in the .ps1
+# file of the same name
+
+DOCUMENTATION = '''
+---
+module: win_acl
+version_added: "2.0"
+short_description: Set file/directory permissions for a system user or group.
+description:
+ - Add or remove rights/permissions for a given user or group for the specified src file or folder.
+options:
+ path:
+ description:
+ - File or Directory
+ required: yes
+ user:
+ description:
+ - User or Group whose rights on the src file/folder are to be added or removed
+ required: yes
+ default: none
+ state:
+ description:
+ - Specify whether to add C(present) or remove C(absent) the specified access rule
+ required: no
+ choices:
+ - present
+ - absent
+ default: present
+ type:
+ description:
+ - Specify whether to allow or deny the rights specified
+ required: yes
+ choices:
+ - allow
+ - deny
+ default: none
+ rights:
+ description:
+ - The rights/permissions that are to be allowed/denied for the specified user or group for the given src file or directory. Can be entered as a comma separated list (Ex. "Modify, Delete, ExecuteFile"). For more information on the choices see MSDN FileSystemRights Enumeration.
+ required: yes
+ choices:
+ - AppendData
+ - ChangePermissions
+ - Delete
+ - DeleteSubdirectoriesAndFiles
+ - ExecuteFile
+ - FullControl
+ - ListDirectory
+ - Modify
+ - Read
+ - ReadAndExecute
+ - ReadAttributes
+ - ReadData
+ - ReadExtendedAttributes
+ - ReadPermissions
+ - Synchronize
+ - TakeOwnership
+ - Traverse
+ - Write
+ - WriteAttributes
+ - WriteData
+ - WriteExtendedAttributes
+ default: none
+ inherit:
+ description:
+ - Inherit flags on the ACL rules. Can be specified as a comma separated list (Ex. "ContainerInherit, ObjectInherit"). For more information on the choices see MSDN InheritanceFlags Enumeration.
+ required: no
+ choices:
+ - ContainerInherit
+ - ObjectInherit
+ - None
+ default: "None for a leaf file; ContainerInherit, ObjectInherit for a directory"
+ propagation:
+ description:
+ - Propagation flag on the ACL rules. For more information on the choices see MSDN PropagationFlags Enumeration.
+ required: no
+ choices:
+ - None
+ - NoPropagateInherit
+ - InheritOnly
+ default: "None"
+author: Phil Schwartz (@schwartzmx), Trond Hindenes (@trondhindenes), Hans-Joachim Kliemeck (@h0nIg)
+'''
+
+EXAMPLES = '''
+# Restrict write,execute access to User Fed-Phil
+$ ansible -i hosts -m win_acl -a "user=Fed-Phil path=C:\Important\Executable.exe type=deny rights='ExecuteFile,Write'" all
+
+# Playbook example
+# Add access rule to allow IIS_IUSRS FullControl to MySite
+---
+- name: Add IIS_IUSRS allow rights
+  win_acl:
+    path: 'C:\inetpub\wwwroot\MySite'
+    user: 'IIS_IUSRS'
+    rights: 'FullControl'
+    type: 'allow'
+    state: 'present'
+    inherit: 'ContainerInherit, ObjectInherit'
+    propagation: 'None'
+
+# Remove previously added rule for IIS_IUSRS
+- name: Remove FullControl AccessRule for IIS_IUSRS
+  win_acl:
+    path: 'C:\inetpub\wwwroot\MySite'
+    user: 'IIS_IUSRS'
+    rights: 'FullControl'
+    type: 'allow'
+    state: 'absent'
+    inherit: 'ContainerInherit, ObjectInherit'
+    propagation: 'None'
+
+# Deny Intern
+- name: Deny Intern access
+  win_acl:
+    path: 'C:\Administrator\Documents'
+    user: 'Intern'
+    rights: 'Read,Write,Modify,FullControl,Delete'
+    type: 'deny'
+    state: 'present'
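+
+# A sketch of the documented 'propagation' flag; the path and the
+# 'Developers' group are hypothetical values
+- name: Allow Developers to modify children of C:\Projects only
+  win_acl:
+    path: 'C:\Projects'
+    user: 'Developers'
+    rights: 'Modify'
+    type: 'allow'
+    state: 'present'
+    inherit: 'ContainerInherit, ObjectInherit'
+    propagation: 'InheritOnly'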
+'''
diff --git a/lib/ansible/modules/extras/windows/win_acl_inheritance.ps1 b/lib/ansible/modules/extras/windows/win_acl_inheritance.ps1
new file mode 100644
index 0000000000..1933a3a5dd
--- /dev/null
+++ b/lib/ansible/modules/extras/windows/win_acl_inheritance.ps1
@@ -0,0 +1,86 @@
+#!powershell
+# This file is part of Ansible
+#
+# Copyright 2015, Hans-Joachim Kliemeck <git@kliemeck.de>
+#
+# Ansible is free software: you can redistribute it and/or modify
+# it under the terms of the GNU General Public License as published by
+# the Free Software Foundation, either version 3 of the License, or
+# (at your option) any later version.
+#
+# Ansible is distributed in the hope that it will be useful,
+# but WITHOUT ANY WARRANTY; without even the implied warranty of
+# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+# GNU General Public License for more details.
+#
+# You should have received a copy of the GNU General Public License
+# along with Ansible. If not, see <http://www.gnu.org/licenses/>.
+
+# WANT_JSON
+# POWERSHELL_COMMON
+
+
+$params = Parse-Args $args;
+
+$result = New-Object PSObject;
+Set-Attr $result "changed" $false;
+
+$path = Get-Attr $params "path" -failifempty $true
+$state = Get-Attr $params "state" "absent" -validateSet "present","absent" -resultobj $result
+$reorganize = Get-Attr $params "reorganize" "no" -validateSet "no","yes" -resultobj $result
+$reorganize = $reorganize | ConvertTo-Bool
+
+If (-Not (Test-Path -Path $path)) {
+ Fail-Json $result "$path file or directory does not exist on the host"
+}
+
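+# state=present re-enables inheritance (optionally dropping explicit ACEs
+# that duplicate inherited ones); state=absent disables inheritance
+# (optionally preserving inherited ACEs as explicit copies first)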
+Try {
+ $objACL = Get-ACL $path
+ $inheritanceEnabled = !$objACL.AreAccessRulesProtected
+
+ If (($state -eq "present") -And !$inheritanceEnabled) {
+ # second parameter is ignored if first=$False
+ $objACL.SetAccessRuleProtection($False, $False)
+
+ If ($reorganize) {
+ # it won't work without an intermediate save; the state would otherwise stay the same
+ Set-ACL $path $objACL
+ $objACL = Get-ACL $path
+
+ # drop explicit ACEs that are duplicated by inherited ACEs
+ ForEach($inheritedRule in $objACL.Access) {
+ If (!$inheritedRule.IsInherited) {
+ Continue
+ }
+
+ ForEach($explicitRule in $objACL.Access) {
+ If ($explicitRule.IsInherited) {
+ Continue
+ }
+
+ If (($inheritedRule.FileSystemRights -eq $explicitRule.FileSystemRights) -And ($inheritedRule.AccessControlType -eq $explicitRule.AccessControlType) -And ($inheritedRule.IdentityReference -eq $explicitRule.IdentityReference) -And ($inheritedRule.InheritanceFlags -eq $explicitRule.InheritanceFlags) -And ($inheritedRule.PropagationFlags -eq $explicitRule.PropagationFlags)) {
+ $objACL.RemoveAccessRule($explicitRule)
+ }
+ }
+ }
+ }
+
+ Set-ACL $path $objACL
+ Set-Attr $result "changed" $true;
+ }
+ Elseif (($state -eq "absent") -And $inheritanceEnabled) {
+ If ($reorganize) {
+ $objACL.SetAccessRuleProtection($True, $True)
+ } Else {
+ $objACL.SetAccessRuleProtection($True, $False)
+ }
+
+ Set-ACL $path $objACL
+ Set-Attr $result "changed" $true;
+ }
+}
+Catch {
+ Fail-Json $result "an error occured when attempting to disable inheritance"
+}
+
+Exit-Json $result
diff --git a/lib/ansible/modules/extras/windows/win_acl_inheritance.py b/lib/ansible/modules/extras/windows/win_acl_inheritance.py
new file mode 100644
index 0000000000..a4bb90a47b
--- /dev/null
+++ b/lib/ansible/modules/extras/windows/win_acl_inheritance.py
@@ -0,0 +1,79 @@
+#!/usr/bin/python
+# -*- coding: utf-8 -*-
+
+# Copyright 2015, Hans-Joachim Kliemeck <git@kliemeck.de>
+#
+# This file is part of Ansible
+#
+# Ansible is free software: you can redistribute it and/or modify
+# it under the terms of the GNU General Public License as published by
+# the Free Software Foundation, either version 3 of the License, or
+# (at your option) any later version.
+#
+# Ansible is distributed in the hope that it will be useful,
+# but WITHOUT ANY WARRANTY; without even the implied warranty of
+# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+# GNU General Public License for more details.
+#
+# You should have received a copy of the GNU General Public License
+# along with Ansible. If not, see <http://www.gnu.org/licenses/>.
+
+# this is a windows documentation stub. actual code lives in the .ps1
+# file of the same name
+
+DOCUMENTATION = '''
+---
+module: win_acl_inheritance
+version_added: "2.1"
+short_description: Change ACL inheritance
+description:
+ - Change ACL (Access Control List) inheritance and optionally copy inherited ACEs (Access Control Entries) to dedicated ACEs, or vice versa.
+options:
+ path:
+ description:
+ - Path to be used for changing inheritance
+ required: true
+ state:
+ description:
+ - Specify whether to enable I(present) or disable I(absent) ACL inheritance
+ required: false
+ choices:
+ - present
+ - absent
+ default: absent
+ reorganize:
+ description:
+ - For C(state) = I(absent), indicates if the inherited ACEs should be copied from the parent directory. This is necessary (in combination with removal) for a simple ACL instead of using multiple deny ACEs.
+ - For C(state) = I(present), indicates if the inherited ACEs should be deduplicated compared to the parent directory. This removes complexity from the ACL structure.
+ required: false
+ choices:
+ - no
+ - yes
+ default: no
+author: Hans-Joachim Kliemeck (@h0nIg)
+'''
+
+EXAMPLES = '''
+# Playbook example
+---
+- name: Disable inherited ACEs
+  win_acl_inheritance:
+    path: 'C:\\apache\\'
+    state: absent
+
+- name: Disable and copy inherited ACEs
+  win_acl_inheritance:
+    path: 'C:\\apache\\'
+    state: absent
+    reorganize: yes
+
+- name: Enable and remove dedicated ACEs
+  win_acl_inheritance:
+    path: 'C:\\apache\\'
+    state: present
+    reorganize: yes
+'''
+
+RETURN = '''
+
+''' \ No newline at end of file
diff --git a/lib/ansible/modules/extras/windows/win_chocolatey.ps1 b/lib/ansible/modules/extras/windows/win_chocolatey.ps1
new file mode 100644
index 0000000000..3bb6a1f0dc
--- /dev/null
+++ b/lib/ansible/modules/extras/windows/win_chocolatey.ps1
@@ -0,0 +1,371 @@
+#!powershell
+# This file is part of Ansible
+#
+# Copyright 2014, Trond Hindenes <trond@hindenes.com>
+#
+# Ansible is free software: you can redistribute it and/or modify
+# it under the terms of the GNU General Public License as published by
+# the Free Software Foundation, either version 3 of the License, or
+# (at your option) any later version.
+#
+# Ansible is distributed in the hope that it will be useful,
+# but WITHOUT ANY WARRANTY; without even the implied warranty of
+# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+# GNU General Public License for more details.
+#
+# You should have received a copy of the GNU General Public License
+# along with Ansible. If not, see <http://www.gnu.org/licenses/>.
+
+
+# WANT_JSON
+# POWERSHELL_COMMON
+
+$params = Parse-Args $args;
+$result = New-Object PSObject;
+Set-Attr $result "changed" $false;
+
+$package = Get-Attr -obj $params -name name -failifempty $true -emptyattributefailmessage "missing required argument: name"
+$force = Get-Attr -obj $params -name force -default "false" | ConvertTo-Bool
+$upgrade = Get-Attr -obj $params -name upgrade -default "false" | ConvertTo-Bool
+$version = Get-Attr -obj $params -name version -default $null
+
+$source = Get-Attr -obj $params -name source -default $null
+if ($source) {$source = $source.Tolower()}
+
+$showlog = Get-Attr -obj $params -name showlog -default "false" | ConvertTo-Bool
+$state = Get-Attr -obj $params -name state -default "present"
+
+$installargs = Get-Attr -obj $params -name install_args -default $null
+$packageparams = Get-Attr -obj $params -name params -default $null
+$allowemptychecksums = Get-Attr -obj $params -name allow_empty_checksums -default "false" | ConvertTo-Bool
+$ignorechecksums = Get-Attr -obj $params -name ignore_checksums -default "false" | ConvertTo-Bool
+$ignoredependencies = Get-Attr -obj $params -name ignore_dependencies -default "false" | ConvertTo-Bool
+
+# as of chocolatey 0.9.10, nonzero success exit codes can be returned
+# see https://github.com/chocolatey/choco/issues/512#issuecomment-214284461
+$successexitcodes = (0,1605,1614,1641,3010)
+
+if ("present","absent" -notcontains $state)
+{
+ Fail-Json $result "state is $state; must be present or absent"
+}
+
+
+Function Chocolatey-Install-Upgrade
+{
+ [CmdletBinding()]
+
+ param()
+
+ $ChocoAlreadyInstalled = get-command choco -ErrorAction 0
+ if ($ChocoAlreadyInstalled -eq $null)
+ {
+ #We need to install chocolatey
+ $install_output = (new-object net.webclient).DownloadString("https://chocolatey.org/install.ps1") | powershell -
+ if ($LASTEXITCODE -ne 0)
+ {
+ Set-Attr $result "choco_bootstrap_output" $install_output
+ Fail-Json $result "Chocolatey bootstrap installation failed."
+ }
+ $result.changed = $true
+ $script:executable = "C:\ProgramData\chocolatey\bin\choco.exe"
+ }
+ else
+ {
+ $script:executable = "choco.exe"
+
+ if ([Version](choco --version) -lt [Version]'0.9.9')
+ {
+ Choco-Upgrade chocolatey
+ }
+ }
+}
+
+
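+# Returns $true when 'choco list --local-only' reports at least one installed
+# package matching $package, $false otherwise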
+Function Choco-IsInstalled
+{
+ [CmdletBinding()]
+
+ param(
+ [Parameter(Mandatory=$true, Position=1)]
+ [string]$package
+ )
+
+ $cmd = "$executable list --local-only $package"
+ $results = invoke-expression $cmd
+
+ if ($LastExitCode -ne 0)
+ {
+ Set-Attr $result "choco_error_cmd" $cmd
+ Set-Attr $result "choco_error_log" "$results"
+
+ Throw "Error checking installation status for $package"
+ }
+
+ If ("$results" -match "$package .* (\d+) packages installed.")
+ {
+ return $matches[1] -gt 0
+ }
+
+ $false
+}
+
+Function Choco-Upgrade
+{
+ [CmdletBinding()]
+
+ param(
+ [Parameter(Mandatory=$true, Position=1)]
+ [string]$package,
+ [Parameter(Mandatory=$false, Position=2)]
+ [string]$version,
+ [Parameter(Mandatory=$false, Position=3)]
+ [string]$source,
+ [Parameter(Mandatory=$false, Position=4)]
+ [bool]$force,
+ [Parameter(Mandatory=$false, Position=5)]
+ [string]$installargs,
+ [Parameter(Mandatory=$false, Position=6)]
+ [string]$packageparams,
+ [Parameter(Mandatory=$false, Position=7)]
+ [bool]$allowemptychecksums,
+ [Parameter(Mandatory=$false, Position=8)]
+ [bool]$ignorechecksums,
+ [Parameter(Mandatory=$false, Position=9)]
+ [bool]$ignoredependencies
+ )
+
+ if (-not (Choco-IsInstalled $package))
+ {
+ throw "$package is not installed, you cannot upgrade"
+ }
+
+ $cmd = "$executable upgrade -dv -y $package"
+
+ if ($version)
+ {
+ $cmd += " -version $version"
+ }
+
+ if ($source)
+ {
+ $cmd += " -source $source"
+ }
+
+ if ($force)
+ {
+ $cmd += " -force"
+ }
+
+ if ($installargs)
+ {
+ $cmd += " -installargs '$installargs'"
+ }
+
+ if ($packageparams)
+ {
+ $cmd += " -params '$packageparams'"
+ }
+
+ if ($allowemptychecksums)
+ {
+ $cmd += " --allow-empty-checksums"
+ }
+
+ if ($ignorechecksums)
+ {
+ $cmd += " --ignore-checksums"
+ }
+
+ if ($ignoredependencies)
+ {
+ $cmd += " -ignoredependencies"
+ }
+
+ $results = invoke-expression $cmd
+
+ if ($LastExitCode -notin $successexitcodes)
+ {
+ Set-Attr $result "choco_error_cmd" $cmd
+ Set-Attr $result "choco_error_log" "$results"
+ Throw "Error installing $package"
+ }
+
+ if ("$results" -match ' upgraded (\d+)/\d+ package\(s\)\. ')
+ {
+ if ($matches[1] -gt 0)
+ {
+ $result.changed = $true
+ }
+ }
+}
+
+Function Choco-Install
+{
+ [CmdletBinding()]
+
+ param(
+ [Parameter(Mandatory=$true, Position=1)]
+ [string]$package,
+ [Parameter(Mandatory=$false, Position=2)]
+ [string]$version,
+ [Parameter(Mandatory=$false, Position=3)]
+ [string]$source,
+ [Parameter(Mandatory=$false, Position=4)]
+ [bool]$force,
+ [Parameter(Mandatory=$false, Position=5)]
+ [bool]$upgrade,
+ [Parameter(Mandatory=$false, Position=6)]
+ [string]$installargs,
+ [Parameter(Mandatory=$false, Position=7)]
+ [string]$packageparams,
+ [Parameter(Mandatory=$false, Position=8)]
+ [bool]$allowemptychecksums,
+ [Parameter(Mandatory=$false, Position=9)]
+ [bool]$ignorechecksums,
+ [Parameter(Mandatory=$false, Position=10)]
+ [bool]$ignoredependencies
+ )
+
+ if (Choco-IsInstalled $package)
+ {
+ if ($upgrade)
+ {
+ Choco-Upgrade -package $package -version $version -source $source -force $force `
+ -installargs $installargs -packageparams $packageparams `
+ -allowemptychecksums $allowemptychecksums -ignorechecksums $ignorechecksums `
+ -ignoredependencies $ignoredependencies
+
+ return
+ }
+
+ if (-not $force)
+ {
+ return
+ }
+ }
+
+ $cmd = "$executable install -dv -y $package"
+
+ if ($version)
+ {
+ $cmd += " -version $version"
+ }
+
+ if ($source)
+ {
+ $cmd += " -source $source"
+ }
+
+ if ($force)
+ {
+ $cmd += " -force"
+ }
+
+ if ($installargs)
+ {
+ $cmd += " -installargs '$installargs'"
+ }
+
+ if ($packageparams)
+ {
+ $cmd += " -params '$packageparams'"
+ }
+
+ if ($allowemptychecksums)
+ {
+ $cmd += " --allow-empty-checksums"
+ }
+
+ if ($ignorechecksums)
+ {
+ $cmd += " --ignore-checksums"
+ }
+
+ if ($ignoredependencies)
+ {
+ $cmd += " -ignoredependencies"
+ }
+
+ $results = invoke-expression $cmd
+
+ if ($LastExitCode -notin $successexitcodes)
+ {
+ Set-Attr $result "choco_error_cmd" $cmd
+ Set-Attr $result "choco_error_log" "$results"
+ Throw "Error installing $package"
+ }
+
+ $result.changed = $true
+}
+
+Function Choco-Uninstall
+{
+ [CmdletBinding()]
+
+ param(
+ [Parameter(Mandatory=$true, Position=1)]
+ [string]$package,
+ [Parameter(Mandatory=$false, Position=2)]
+ [string]$version,
+ [Parameter(Mandatory=$false, Position=3)]
+ [bool]$force
+ )
+
+ if (-not (Choco-IsInstalled $package))
+ {
+ return
+ }
+
+ $cmd = "$executable uninstall -dv -y $package"
+
+ if ($version)
+ {
+ $cmd += " -version $version"
+ }
+
+ if ($force)
+ {
+ $cmd += " -force"
+ }
+
+ if ($packageparams)
+ {
+ $cmd += " -params '$packageparams'"
+ }
+
+ $results = invoke-expression $cmd
+
+ if ($LastExitCode -notin $successexitcodes)
+ {
+ Set-Attr $result "choco_error_cmd" $cmd
+ Set-Attr $result "choco_error_log" "$results"
+ Throw "Error uninstalling $package"
+ }
+
+ $result.changed = $true
+}
+Try
+{
+ Chocolatey-Install-Upgrade
+
+ if ($state -eq "present")
+ {
+ Choco-Install -package $package -version $version -source $source `
+ -force $force -upgrade $upgrade -installargs $installargs `
+ -packageparams $packageparams -allowemptychecksums $allowemptychecksums `
+ -ignorechecksums $ignorechecksums -ignoredependencies $ignoredependencies
+ }
+ else
+ {
+ Choco-Uninstall -package $package -version $version -force $force
+ }
+
+ Exit-Json $result;
+}
+Catch
+{
+ Fail-Json $result $_.Exception.Message
+}
+
+
diff --git a/lib/ansible/modules/extras/windows/win_chocolatey.py b/lib/ansible/modules/extras/windows/win_chocolatey.py
new file mode 100644
index 0000000000..ac80ad9e18
--- /dev/null
+++ b/lib/ansible/modules/extras/windows/win_chocolatey.py
@@ -0,0 +1,116 @@
+#!/usr/bin/python
+# -*- coding: utf-8 -*-
+
+# (c) 2014, Trond Hindenes <trond@hindenes.com>
+#
+# This file is part of Ansible
+#
+# Ansible is free software: you can redistribute it and/or modify
+# it under the terms of the GNU General Public License as published by
+# the Free Software Foundation, either version 3 of the License, or
+# (at your option) any later version.
+#
+# Ansible is distributed in the hope that it will be useful,
+# but WITHOUT ANY WARRANTY; without even the implied warranty of
+# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+# GNU General Public License for more details.
+#
+# You should have received a copy of the GNU General Public License
+# along with Ansible. If not, see <http://www.gnu.org/licenses/>.
+
+# this is a windows documentation stub. actual code lives in the .ps1
+# file of the same name
+
+DOCUMENTATION = '''
+---
+module: win_chocolatey
+version_added: "1.9"
+short_description: Installs packages using chocolatey
+description:
+ - Installs packages using Chocolatey (http://chocolatey.org/). If Chocolatey is missing from the system, the module will install it. List of packages can be found at http://chocolatey.org/packages
+options:
+ name:
+ description:
+ - Name of the package to be installed
+ required: true
+ state:
+ description:
+ - State of the package on the system
+ choices:
+ - present
+ - absent
+ default: present
+ force:
+ description:
+ - Forces the install of a package (even if it already exists). Using C(force) will cause Ansible to always report that a change was made
+ choices:
+ - yes
+ - no
+ default: no
+ upgrade:
+ description:
+ - If the package is already installed, try to upgrade it to the latest version, or to the specified version
+ choices:
+ - yes
+ - no
+ default: no
+ version:
+ description:
+ - Specific version of the package to be installed
+ - Ignored when state == 'absent'
+ source:
+ description:
+ - Specify source rather than using default chocolatey repository
+ install_args:
+ description:
+ - Arguments to pass to the native installer
+ version_added: '2.1'
+ params:
+ description:
+ - Parameters to pass to the package
+ version_added: '2.1'
+ allow_empty_checksums:
+ description:
+ - Allow empty checksums to be used
+ required: false
+ default: false
+ version_added: '2.2'
+ ignore_checksums:
+ description:
+ - Ignore checksums
+ required: false
+ default: false
+ version_added: '2.2'
+ ignore_dependencies:
+ description:
+ - Ignore dependencies, only install/upgrade the package itself
+ default: false
+ version_added: '2.1'
+author: "Trond Hindenes (@trondhindenes), Peter Mounce (@petemounce), Pepe Barbe (@elventear), Adam Keech (@smadam813)"
+'''
+
+# TODO:
+# * Better parsing when a package has dependencies - currently fails
+# * Time each item that is run
+# * Support 'changed' with gems - would require shelling out to `gem list` first and parsing, kinda defeating the point of using chocolatey.
+
+EXAMPLES = '''
+  # Install git
+  win_chocolatey:
+    name: git
+
+  # Install notepadplusplus version 6.6
+  win_chocolatey:
+    name: notepadplusplus.install
+    version: '6.6'
+
+  # Uninstall git
+  win_chocolatey:
+    name: git
+    state: absent
+
+  # Install git from specified repository
+  win_chocolatey:
+    name: git
+    source: https://someserver/api/v2/
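+
+  # A sketch of passing installer arguments and package parameters; the
+  # package name and both argument strings here are hypothetical values
+  win_chocolatey:
+    name: super-app
+    install_args: '/VERYSILENT'
+    params: '/Port:8443'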
+'''
diff --git a/lib/ansible/modules/extras/windows/win_dotnet_ngen.ps1 b/lib/ansible/modules/extras/windows/win_dotnet_ngen.ps1
new file mode 100644
index 0000000000..52b4ebf82d
--- /dev/null
+++ b/lib/ansible/modules/extras/windows/win_dotnet_ngen.ps1
@@ -0,0 +1,69 @@
+#!powershell
+# This file is part of Ansible
+#
+# Copyright 2015, Peter Mounce <public@neverrunwithscissors.com>
+#
+# Ansible is free software: you can redistribute it and/or modify
+# it under the terms of the GNU General Public License as published by
+# the Free Software Foundation, either version 3 of the License, or
+# (at your option) any later version.
+#
+# Ansible is distributed in the hope that it will be useful,
+# but WITHOUT ANY WARRANTY; without even the implied warranty of
+# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+# GNU General Public License for more details.
+#
+# You should have received a copy of the GNU General Public License
+# along with Ansible. If not, see <http://www.gnu.org/licenses/>.
+
+$ErrorActionPreference = "Stop"
+
+# WANT_JSON
+# POWERSHELL_COMMON
+
+$params = Parse-Args $args;
+$result = New-Object PSObject;
+Set-Attr $result "changed" $false;
+
+function Invoke-NGen
+{
+ [CmdletBinding()]
+
+ param
+ (
+ [Parameter(Mandatory=$false, Position=0)] [string] $arity = ""
+ )
+
+ if ($arity -eq $null)
+ {
+ $arity = ""
+ }
+ $cmd = "$($env:windir)\microsoft.net\framework$($arity)\v4.0.30319\ngen.exe"
+ if (test-path $cmd)
+ {
+ $update = Invoke-Expression "$cmd update /force";
+ Set-Attr $result "dotnet_ngen$($arity)_update_exit_code" $lastexitcode
+ Set-Attr $result "dotnet_ngen$($arity)_update_output" $update
+ $eqi = Invoke-Expression "$cmd executequeueditems";
+ Set-Attr $result "dotnet_ngen$($arity)_eqi_exit_code" $lastexitcode
+ Set-Attr $result "dotnet_ngen$($arity)_eqi_output" $eqi
+
+ $result.changed = $true
+ }
+ else
+ {
+ Write-Host "Not found: $cmd"
+ }
+}
+
+Try
+{
+ Invoke-NGen
+ Invoke-NGen -arity "64"
+
+ Exit-Json $result;
+}
+Catch
+{
+ Fail-Json $result $_.Exception.Message
+}
diff --git a/lib/ansible/modules/extras/windows/win_dotnet_ngen.py b/lib/ansible/modules/extras/windows/win_dotnet_ngen.py
new file mode 100644
index 0000000000..75ce9cc138
--- /dev/null
+++ b/lib/ansible/modules/extras/windows/win_dotnet_ngen.py
@@ -0,0 +1,44 @@
+#!/usr/bin/python
+# -*- coding: utf-8 -*-
+
+# (c) 2015, Peter Mounce <public@neverrunwithscissors.com>
+#
+# This file is part of Ansible
+#
+# Ansible is free software: you can redistribute it and/or modify
+# it under the terms of the GNU General Public License as published by
+# the Free Software Foundation, either version 3 of the License, or
+# (at your option) any later version.
+#
+# Ansible is distributed in the hope that it will be useful,
+# but WITHOUT ANY WARRANTY; without even the implied warranty of
+# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+# GNU General Public License for more details.
+#
+# You should have received a copy of the GNU General Public License
+# along with Ansible. If not, see <http://www.gnu.org/licenses/>.
+
+# this is a windows documentation stub. actual code lives in the .ps1
+# file of the same name
+
+DOCUMENTATION = '''
+---
+module: win_dotnet_ngen
+version_added: "2.0"
+short_description: Runs ngen to recompile DLLs after .NET updates
+description:
+ - After .NET framework is installed/updated, Windows will probably want to recompile things to optimise for the host.
+ - This happens via scheduled task, usually at some inopportune time.
+ - This module allows you to run this task on your own schedule, so you incur the CPU hit at some more convenient and controlled time.
+ - "http://blogs.msdn.com/b/dotnet/archive/2013/08/06/wondering-why-mscorsvw-exe-has-high-cpu-usage-you-can-speed-it-up.aspx"
+notes:
+ - there are in fact two scheduled tasks for ngen but they have no triggers so aren't a problem
+ - there's no way to test if they've been completed (?)
+ - the stdout is quite likely to be several megabytes
+author: Peter Mounce
+'''
+
+EXAMPLES = '''
+ # Run ngen tasks
+ win_dotnet_ngen:
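+
+  # A sketch of registering the (potentially very large) output for later
+  # inspection; 'ngen_out' is a hypothetical variable name
+  - name: Recompile .NET assemblies after a framework update
+    win_dotnet_ngen:
+    register: ngen_out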
+'''
diff --git a/lib/ansible/modules/extras/windows/win_environment.ps1 b/lib/ansible/modules/extras/windows/win_environment.ps1
new file mode 100644
index 0000000000..f1acfe1935
--- /dev/null
+++ b/lib/ansible/modules/extras/windows/win_environment.ps1
@@ -0,0 +1,53 @@
+#!powershell
+# This file is part of Ansible
+#
+# Copyright 2015, Jon Hawkesworth (@jhawkesworth) <figs@unity.demon.co.uk>
+#
+# Ansible is free software: you can redistribute it and/or modify
+# it under the terms of the GNU General Public License as published by
+# the Free Software Foundation, either version 3 of the License, or
+# (at your option) any later version.
+#
+# Ansible is distributed in the hope that it will be useful,
+# but WITHOUT ANY WARRANTY; without even the implied warranty of
+# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+# GNU General Public License for more details.
+#
+# You should have received a copy of the GNU General Public License
+# along with Ansible. If not, see <http://www.gnu.org/licenses/>.
+
+# WANT_JSON
+# POWERSHELL_COMMON
+
+$params = Parse-Args $args;
+$state = Get-AnsibleParam -obj $params -name "state" -default "present" -validateSet "present","absent"
+$name = Get-AnsibleParam -obj $params -name "name" -failifempty $true
+$level = Get-AnsibleParam -obj $params -name "level" -validateSet "machine","process","user" -failifempty $true
+$value = Get-AnsibleParam -obj $params -name "value"
+
+If ($level) {
+ $level = $level.ToString().ToLower()
+}
+
+$before_value = [Environment]::GetEnvironmentVariable($name, $level)
+
+$state = $state.ToString().ToLower()
+if ($state -eq "present" ) {
+ [Environment]::SetEnvironmentVariable($name, $value, $level)
+} Elseif ($state -eq "absent") {
+ [Environment]::SetEnvironmentVariable($name, $null, $level)
+}
+
+$after_value = [Environment]::GetEnvironmentVariable($name, $level)
+
+$result = New-Object PSObject;
+Set-Attr $result "changed" $false;
+Set-Attr $result "name" $name;
+Set-Attr $result "before_value" $before_value;
+Set-Attr $result "value" $after_value;
+Set-Attr $result "level" $level;
+if ($before_value -ne $after_value) {
+ Set-Attr $result "changed" $true;
+}
+
+Exit-Json $result;
diff --git a/lib/ansible/modules/extras/windows/win_environment.py b/lib/ansible/modules/extras/windows/win_environment.py
new file mode 100644
index 0000000000..522eff6a8d
--- /dev/null
+++ b/lib/ansible/modules/extras/windows/win_environment.py
@@ -0,0 +1,86 @@
+#!/usr/bin/python
+# -*- coding: utf-8 -*-
+
+# (c) 2015, Jon Hawkesworth (@jhawkesworth) <figs@unity.demon.co.uk>
+#
+# This file is part of Ansible
+#
+# Ansible is free software: you can redistribute it and/or modify
+# it under the terms of the GNU General Public License as published by
+# the Free Software Foundation, either version 3 of the License, or
+# (at your option) any later version.
+#
+# Ansible is distributed in the hope that it will be useful,
+# but WITHOUT ANY WARRANTY; without even the implied warranty of
+# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+# GNU General Public License for more details.
+#
+# You should have received a copy of the GNU General Public License
+# along with Ansible. If not, see <http://www.gnu.org/licenses/>.
+
+# this is a windows documentation stub. actual code lives in the .ps1
+# file of the same name
+
+DOCUMENTATION = '''
+---
+module: win_environment
+version_added: "2.0"
+short_description: Modifies environment variables on windows hosts.
+description:
+ - Uses .net Environment to set or remove environment variables and can set at User, Machine or Process level.
+ - User level environment variables will be set, but not available until the user has logged off and on again.
+options:
+ state:
+ description:
+ - present to ensure environment variable is set, or absent to ensure it is removed
+ required: false
+ default: present
+ choices:
+ - present
+ - absent
+ name:
+ description:
+ - The name of the environment variable
+ required: true
+ default: no default
+ value:
+ description:
+ - The value to store in the environment variable. Can be omitted for state=absent
+ required: false
+ default: no default
+ level:
+ description:
+ - The level at which to set the environment variable.
+ - Use 'machine' to set for all users.
+ - Use 'user' to set for the current user that ansible is connected as.
+ - Use 'process' to set for the current process. Probably not that useful.
+ required: true
+ default: no default
+ choices:
+ - machine
+ - process
+ - user
+author: "Jon Hawkesworth (@jhawkesworth)"
+notes:
+ - This module does not broadcast change events.
+ This means that the minority of windows applications which can have
+ their environment changed without restarting will not be notified and
+ therefore will need restarting to pick up new environment settings.
+ User level environment variables will require the user to log out
+ and in again before they become available.
+'''
+
+EXAMPLES = '''
+  # Set an environment variable for all users
+  win_environment:
+    state: present
+    name: TestVariable
+    value: "Test value"
+    level: machine
+
+  # Remove an environment variable for the current user
+  win_environment:
+    state: absent
+    name: TestVariable
+    level: user
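+
+  # A sketch of the documented 'process' level; 'TEMP_BUILD_FLAG' is a
+  # hypothetical variable name, visible only to the current process
+  win_environment:
+    state: present
+    name: TEMP_BUILD_FLAG
+    value: "1"
+    level: process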
+'''
+
diff --git a/lib/ansible/modules/extras/windows/win_file_version.ps1 b/lib/ansible/modules/extras/windows/win_file_version.ps1
new file mode 100644
index 0000000000..2e2f341c46
--- /dev/null
+++ b/lib/ansible/modules/extras/windows/win_file_version.ps1
@@ -0,0 +1,78 @@
+#!powershell
+
+#this file is part of Ansible
+#Copyright © 2015 Sam Liu <sam.liu@activenetwork.com>
+
+#This program is free software: you can redistribute it and/or modify
+#it under the terms of the GNU General Public License as published by
+#the Free Software Foundation, either version 3 of the License, or
+#(at your option) any later version.
+
+#This program is distributed in the hope that it will be useful,
+#but WITHOUT ANY WARRANTY; without even the implied warranty of
+#MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+#GNU General Public License for more details.
+
+#You should have received a copy of the GNU General Public License
+#along with this program. If not, see <http://www.gnu.org/licenses/>.
+
+# WANT_JSON
+# POWERSHELL_COMMON
+
+$params = Parse-Args $args;
+
+$result = New-Object psobject @{
+ win_file_version = New-Object psobject
+ changed = $false
+}
+
+$path = Get-AnsibleParam $params "path" -failifempty $true -resultobj $result
+
+If (-Not (Test-Path -Path $path -PathType Leaf)){
+ Fail-Json $result "Specfied path $path does exist or is not a file."
+}
+$ext = [System.IO.Path]::GetExtension($path)
+If ( $ext -notin '.exe', '.dll'){
+ Fail-Json $result "Specfied path $path is not a vaild file type; must be DLL or EXE."
+}
+
+Try {
+ $_version_fields = [System.Diagnostics.FileVersionInfo]::GetVersionInfo($path)
+ $file_version = $_version_fields.FileVersion
+ If ($file_version -eq $null){
+ $file_version = ''
+ }
+ $product_version = $_version_fields.ProductVersion
+ If ($product_version -eq $null){
+ $product_version= ''
+ }
+ $file_major_part = $_version_fields.FileMajorPart
+ If ($file_major_part -eq $null){
+ $file_major_part= ''
+ }
+ $file_minor_part = $_version_fields.FileMinorPart
+ If ($file_minor_part -eq $null){
+ $file_minor_part= ''
+ }
+ $file_build_part = $_version_fields.FileBuildPart
+ If ($file_build_part -eq $null){
+ $file_build_part = ''
+ }
+ $file_private_part = $_version_fields.FilePrivatePart
+ If ($file_private_part -eq $null){
+ $file_private_part = ''
+ }
+}
+Catch{
+ Fail-Json $result "Error: $_.Exception.Message"
+}
+
+Set-Attr $result.win_file_version "path" $path.toString()
+Set-Attr $result.win_file_version "file_version" $file_version.toString()
+Set-Attr $result.win_file_version "product_version" $product_version.toString()
+Set-Attr $result.win_file_version "file_major_part" $file_major_part.toString()
+Set-Attr $result.win_file_version "file_minor_part" $file_minor_part.toString()
+Set-Attr $result.win_file_version "file_build_part" $file_build_part.toString()
+Set-Attr $result.win_file_version "file_private_part" $file_private_part.toString()
+Exit-Json $result;
+
diff --git a/lib/ansible/modules/extras/windows/win_file_version.py b/lib/ansible/modules/extras/windows/win_file_version.py
new file mode 100644
index 0000000000..4f2ecc0d61
--- /dev/null
+++ b/lib/ansible/modules/extras/windows/win_file_version.py
@@ -0,0 +1,85 @@
+#!/usr/bin/env python
+# -*- coding: utf-8 -*-
+
+# Get DLL or EXE build version
+# Copyright © 2015 Sam Liu <sam.liu@activenetwork.com>
+
+# This program is free software: you can redistribute it and/or modify
+# it under the terms of the GNU General Public License as published by
+# the Free Software Foundation, either version 3 of the License, or
+# (at your option) any later version.
+
+# This program is distributed in the hope that it will be useful,
+# but WITHOUT ANY WARRANTY; without even the implied warranty of
+# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+# GNU General Public License for more details.
+
+# You should have received a copy of the GNU General Public License
+# along with this program. If not, see <http://www.gnu.org/licenses/>.
+
+DOCUMENTATION = '''
+---
+module: win_file_version
+version_added: "2.1"
+short_description: Get DLL or EXE file build version
+description:
+ - Get DLL or EXE file build version
+ - The C(changed) result is always false; the module only gathers information
+options:
+ path:
+ description:
+ - File to get the version of (provide an absolute path)
+ required: true
+ aliases: []
+author: Sam Liu
+'''
+
+EXAMPLES = '''
+# get C:\Windows\System32\cmd.exe version in playbook
+---
+- name: Get acm instance version
+  win_file_version:
+    path: 'C:\Windows\System32\cmd.exe'
+  register: exe_file_version
+
+- debug: msg="{{exe_file_version}}"
+
+'''
+
+RETURN = """
+win_file_version.path:
+ description: file path
+ returned: always
+ type: string
+
+win_file_version.file_version:
+ description: file version number.
+ returned: no error
+ type: string
+
+win_file_version.product_version:
+ description: the version of the product this file is distributed with.
+ returned: no error
+ type: string
+
+win_file_version.file_major_part:
+ description: the major part of the version number.
+ returned: no error
+ type: string
+
+win_file_version.file_minor_part:
+ description: the minor part of the version number of the file.
+ returned: no error
+ type: string
+
+win_file_version.file_build_part:
+ description: build number of the file.
+ returned: no error
+ type: string
+
+win_file_version.file_private_part:
+ description: file private part number.
+ returned: no error
+ type: string
+
+"""
diff --git a/lib/ansible/modules/extras/windows/win_firewall_rule.ps1 b/lib/ansible/modules/extras/windows/win_firewall_rule.ps1
new file mode 100644
index 0000000000..a63cedec0c
--- /dev/null
+++ b/lib/ansible/modules/extras/windows/win_firewall_rule.ps1
@@ -0,0 +1,362 @@
+#!powershell
+#
+# (c) 2014, Timothy Vandenbrande <timothy.vandenbrande@gmail.com>
+#
+# This file is part of Ansible
+#
+# Ansible is free software: you can redistribute it and/or modify
+# it under the terms of the GNU General Public License as published by
+# the Free Software Foundation, either version 3 of the License, or
+# (at your option) any later version.
+#
+# Ansible is distributed in the hope that it will be useful,
+# but WITHOUT ANY WARRANTY; without even the implied warranty of
+# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+# GNU General Public License for more details.
+#
+# You should have received a copy of the GNU General Public License
+# along with Ansible. If not, see <http://www.gnu.org/licenses/>.
+#
+# WANT_JSON
+# POWERSHELL_COMMON
+
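+# Parse the verbose output of 'netsh advfirewall firewall show rule' into a
+# hashtable and compare it against the requested settings; returns a hashtable
+# with failed/exists/identical/multiple/difference/msg keys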
+function getFirewallRule ($fwsettings) {
+ try {
+
+ #$output = Get-NetFirewallRule -name $($fwsettings.'Rule Name');
+ $rawoutput=@(netsh advfirewall firewall show rule name="$($fwsettings.'Rule Name')" verbose)
+ if (!($rawoutput -eq 'No rules match the specified criteria.')){
+ $rawoutput | Where {$_ -match '^([^:]+):\s*(\S.*)$'} | Foreach -Begin {
+ $FirstRun = $true;
+ $HashProps = @{};
+ } -Process {
+ if (($Matches[1] -eq 'Rule Name') -and (!($FirstRun))) {
+ #$output=New-Object -TypeName PSCustomObject -Property $HashProps;
+ $output=$HashProps;
+ $HashProps = @{};
+ };
+ $HashProps.$($Matches[1]) = $Matches[2];
+ $FirstRun = $false;
+ } -End {
+ #$output=New-Object -TypeName PSCustomObject -Property $HashProps;
+ $output=$HashProps;
+ }
+ }
+ $exists=$false;
+ $correct=$false;
+ $diff=$false;
+ $multi=$false;
+ $difference=@();
+ $msg=@();
+ $msg=@();
+ if ($($output|measure).count -gt 0) {
+ $exists=$true;
+ $msg += @("The rule '" + $fwsettings.'Rule Name' + "' exists.");
+ if ($($output|measure).count -gt 1) {
+ $multi=$true
+ $msg += @("The rule '" + $fwsettings.'Rule Name' + "' has multiple entries.");
+ ForEach($rule in $output.GetEnumerator()) {
+ ForEach($fwsetting in $fwsettings.GetEnumerator()) {
+ if ($rule.$($fwsetting.Key) -ne $fwsettings.$($fwsetting.Key)) {
+ $diff=$true;
+ #$difference+=@($fwsettings.$($fwsetting.Key));
+ $difference+=@("output:$($rule.$($fwsetting.Key)),fwsetting:$($fwsettings.$($fwsetting.Key))");
+ };
+ };
+ if ($diff -eq $false) {
+ $correct=$true
+ };
+ };
+ } else {
+ ForEach($fwsetting in $fwsettings.GetEnumerator()) {
+ if ( $output.$($fwsetting.Key) -ne $fwsettings.$($fwsetting.Key)) {
+
+ if (($fwsetting.Key -eq 'RemoteIP') -and ($output.$($fwsetting.Key) -eq ($fwsettings.$($fwsetting.Key)+'-'+$fwsettings.$($fwsetting.Key)))) {
+ $donothing=$false
+ } elseif (($fwsetting.Key -eq 'DisplayName') -and ($output."Rule Name" -eq $fwsettings.$($fwsetting.Key))) {
+ $donothing=$false
+ } else {
+ $diff=$true;
+ $difference+=@($fwsettings.$($fwsetting.Key));
+ };
+ };
+ };
+ if ($diff -eq $false) {
+ $correct=$true
+ };
+ };
+ if ($correct) {
+ $msg += @("An identical rule exists");
+ } else {
+ $msg += @("The rule exists but has different values");
+ }
+ } else {
+ $msg += @("No rule could be found");
+ };
+ $result = @{
+ failed = $false
+ exists = $exists
+ identical = $correct
+ multiple = $multi
+ difference = $difference
+ msg = $msg
+ }
+ } catch [Exception]{
+ $result = @{
+ failed = $true
+ error = $_.Exception.Message
+ msg = $msg
+ }
+ };
+ return $result
+};
+
+function createFireWallRule ($fwsettings) {
+ $msg=@()
+ $execString="netsh advfirewall firewall add rule"
+
+ ForEach ($fwsetting in $fwsettings.GetEnumerator()) {
+ if ($fwsetting.key -eq 'Direction') {
+ $key='dir'
+ } elseif ($fwsetting.key -eq 'Rule Name') {
+ $key='name'
+ } elseif ($fwsetting.key -eq 'Enabled') {
+ $key='enable'
+ } elseif ($fwsetting.key -eq 'Profiles') {
+ $key='profile'
+ } else {
+ $key=$($fwsetting.key).ToLower()
+ };
+ $execString+=" ";
+ $execString+=$key;
+ $execString+="=";
+ $execString+='"';
+ $execString+=$fwsetting.value;
+ $execString+='"';
+ };
+ try {
+ #$msg+=@($execString);
+ $output=$(Invoke-Expression $execString| ? {$_});
+ $msg+=@("Created firewall rule $name");
+
+ $result=@{
+ failed = $false
+ output=$output
+ changed=$true
+ msg=$msg
+ };
+
+ } catch [Exception]{
+ $msg=@("Failed to create the rule")
+ $result=@{
+ output=$output
+ failed=$true
+ error=$_.Exception.Message
+ msg=$msg
+ };
+ };
+ return $result
+};
+
+function removeFireWallRule ($fwsettings) {
+ $msg=@()
+ try {
+ $rawoutput=@(netsh advfirewall firewall delete rule name="$($fwsettings.'Rule Name')")
+ $rawoutput | Where {$_ -match '^([^:]+):\s*(\S.*)$'} | Foreach -Begin {
+ $FirstRun = $true;
+ $HashProps = @{};
+ } -Process {
+ if (($Matches[1] -eq 'Rule Name') -and (!($FirstRun))) {
+ $output=$HashProps;
+ $HashProps = @{};
+ };
+ $HashProps.$($Matches[1]) = $Matches[2];
+ $FirstRun = $false;
+ } -End {
+ $output=$HashProps;
+ };
+ $msg+=@("Removed the rule")
+ $result=@{
+ failed=$false
+ changed=$true
+ msg=$msg
+ output=$output
+ };
+ } catch [Exception]{
+ $msg+=@("Failed to remove the rule")
+ $result=@{
+ failed=$true
+ error=$_.Exception.Message
+ msg=$msg
+ }
+ };
+ return $result
+}
+
+# Initialise result defaults
+$change=$false;
+$fail=$false;
+$msg=@();
+$fwsettings=@{}
+
+# Parse the arguments
+$params=Parse-Args $args;
+
+$name = Get-AnsibleParam -obj $params -name "name" -failifempty $true
+$direction = Get-AnsibleParam -obj $params -name "direction" -failifempty $true -validateSet "in","out"
+$action = Get-AnsibleParam -obj $params -name "action" -failifempty $true -validateSet "allow","block","bypass"
+$program = Get-AnsibleParam -obj $params -name "program"
+$service = Get-AnsibleParam -obj $params -name "service" -default "any"
+$description = Get-AnsibleParam -obj $params -name "description"
+$enable = ConvertTo-Bool (Get-AnsibleParam -obj $params -name "enable" -default "true")
+$winprofile = Get-AnsibleParam -obj $params -name "profile" -default "any"
+$localip = Get-AnsibleParam -obj $params -name "localip" -default "any"
+$remoteip = Get-AnsibleParam -obj $params -name "remoteip" -default "any"
+$localport = Get-AnsibleParam -obj $params -name "localport" -default "any"
+$remoteport = Get-AnsibleParam -obj $params -name "remoteport" -default "any"
+$protocol = Get-AnsibleParam -obj $params -name "protocol" -default "any"
+
+$state = Get-AnsibleParam -obj $params -name "state" -failifempty $true -validateSet "present","absent"
+$force = ConvertTo-Bool (Get-AnsibleParam -obj $params -name "force" -default "false")
+
+# Check the arguments
+If ($enable -eq $true) {
+ $fwsettings.Add("Enabled", "yes");
+} Else {
+ $fwsettings.Add("Enabled", "no");
+};
+
+$fwsettings.Add("Rule Name", $name)
+#$fwsettings.Add("displayname", $name)
+
+$state = $state.ToString().ToLower()
+If ($state -eq "present"){
+ $fwsettings.Add("Direction", $direction)
+ $fwsettings.Add("Action", $action)
+};
+
+If ($description) {
+ $fwsettings.Add("Description", $description);
+}
+
+If ($program) {
+ $fwsettings.Add("Program", $program);
+}
+
+$fwsettings.Add("LocalIP", $localip);
+$fwsettings.Add("RemoteIP", $remoteip);
+$fwsettings.Add("LocalPort", $localport);
+$fwsettings.Add("RemotePort", $remoteport);
+$fwsettings.Add("Service", $service);
+$fwsettings.Add("Protocol", $protocol);
+$fwsettings.Add("Profiles", $winprofile)
+
+$output=@()
+$capture=getFirewallRule ($fwsettings);
+if ($capture.failed -eq $true) {
+ $msg+=$capture.msg;
+ $result=New-Object psobject @{
+ changed=$false
+ failed=$true
+ error=$capture.error
+ msg=$msg
+ };
+ Exit-Json $result;
+} else {
+ $diff=$capture.difference
+ $msg+=$capture.msg;
+ $identical=$capture.identical;
+ $multiple=$capture.multiple;
+}
+
+
+switch ($state){
+ "present" {
+ if ($capture.exists -eq $false) {
+ $capture=createFireWallRule($fwsettings);
+ $msg+=$capture.msg;
+ $change=$true;
+ if ($capture.failed -eq $true){
+ $result=New-Object psobject @{
+ failed=$capture.failed
+ error=$capture.error
+ output=$capture.output
+ changed=$change
+ msg=$msg
+ difference=$diff
+ fwsettings=$fwsettings
+ };
+ Exit-Json $result;
+ }
+ } elseif ($capture.identical -eq $false) {
+ if ($force -eq $true) {
+ $capture=removeFirewallRule($fwsettings);
+ $msg+=$capture.msg;
+ $change=$true;
+ if ($capture.failed -eq $true){
+ $result=New-Object psobject @{
+ failed=$capture.failed
+ error=$capture.error
+ changed=$change
+ msg=$msg
+ output=$capture.output
+ fwsettings=$fwsettings
+ };
+ Exit-Json $result;
+ }
+ $capture=createFireWallRule($fwsettings);
+ $msg+=$capture.msg;
+ $change=$true;
+ if ($capture.failed -eq $true){
+ $result=New-Object psobject @{
+ failed=$capture.failed
+ error=$capture.error
+ changed=$change
+ msg=$msg
+ difference=$diff
+ fwsettings=$fwsettings
+ };
+ Exit-Json $result;
+ }
+
+ } else {
+ $fail=$true
+ $msg+=@("There was already a rule $name with different values, use force=True to overwrite it");
+ }
+ } elseif ($capture.identical -eq $true) {
+ $msg+=@("Firewall rule $name was already created");
+ };
+ }
+ "absent" {
+ if ($capture.exists -eq $true) {
+ $capture=removeFirewallRule($fwsettings);
+ $msg+=$capture.msg;
+ $change=$true;
+ if ($capture.failed -eq $true){
+ $result=New-Object psobject @{
+ failed=$capture.failed
+ error=$capture.error
+ changed=$change
+ msg=$msg
+ output=$capture.output
+ fwsettings=$fwsettings
+ };
+ Exit-Json $result;
+ }
+ } else {
+ $msg+=@("Firewall rule $name did not exist");
+ };
+ }
+};
+
+
+$result=New-Object psobject @{
+ failed=$fail
+ changed=$change
+ msg=$msg
+ difference=$diff
+ fwsettings=$fwsettings
+};
+
+
+Exit-Json $result;
diff --git a/lib/ansible/modules/extras/windows/win_firewall_rule.py b/lib/ansible/modules/extras/windows/win_firewall_rule.py
new file mode 100644
index 0000000000..3ed0f7e3e7
--- /dev/null
+++ b/lib/ansible/modules/extras/windows/win_firewall_rule.py
@@ -0,0 +1,123 @@
+#!/usr/bin/env python
+
+# (c) 2014, Timothy Vandenbrande <timothy.vandenbrande@gmail.com>
+#
+# This file is part of Ansible
+#
+# Ansible is free software: you can redistribute it and/or modify
+# it under the terms of the GNU General Public License as published by
+# the Free Software Foundation, either version 3 of the License, or
+# (at your option) any later version.
+#
+# Ansible is distributed in the hope that it will be useful,
+# but WITHOUT ANY WARRANTY; without even the implied warranty of
+# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+# GNU General Public License for more details.
+#
+# You should have received a copy of the GNU General Public License
+# along with Ansible. If not, see <http://www.gnu.org/licenses/>.
+
+DOCUMENTATION = '''
+---
+module: win_firewall_rule
+version_added: "2.0"
+author: Timothy Vandenbrande
+short_description: Windows firewall automation
+description:
+ - allows you to create/remove/update firewall rules
+options:
+ enable:
+ description:
+ - is this firewall rule enabled or disabled
+ default: true
+ required: false
+ state:
+ description:
+ - should this rule be added or removed
+ default: "present"
+ required: true
+ choices: ['present', 'absent']
+ name:
+ description:
+ - the rule's name
+ default: null
+ required: true
+ direction:
+ description:
+ - is this rule for inbound or outbound traffic
+ default: null
+ required: true
+ choices: ['in', 'out']
+ action:
+ description:
+ - what to do with the items this rule is for
+ default: null
+ required: true
+ choices: ['allow', 'block', 'bypass']
+ description:
+ description:
+ - description for the firewall rule
+ default: null
+ required: false
+ localip:
+ description:
+ - the local ip address this rule applies to
+ default: 'any'
+ required: false
+ remoteip:
+ description:
+ - the remote ip address/range this rule applies to
+ default: 'any'
+ required: false
+ localport:
+ description:
+ - the local port this rule applies to
+ default: 'any'
+ required: false
+ remoteport:
+ description:
+ - the remote port this rule applies to
+ default: 'any'
+ required: false
+ program:
+ description:
+ - the program this rule applies to
+ default: null
+ required: false
+ service:
+ description:
+ - the service this rule applies to
+ default: 'any'
+ required: false
+ protocol:
+ description:
+ - the protocol this rule applies to
+ default: 'any'
+ required: false
+ profile:
+ description:
+ - the profile this rule applies to, e.g. Domain,Private,Public
+ default: 'any'
+ required: false
+ force:
+ description:
+ - Enforces the change if a rule with different values exists
+ default: false
+ required: false
+
+
+'''
+
+EXAMPLES = '''
+- name: Firewall rule to allow smtp on TCP port 25
+  action: win_firewall_rule
+  args:
+    name: smtp
+    enable: yes
+    state: present
+    localport: 25
+    action: allow
+    direction: In
+    protocol: TCP
+
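+# A sketch of removing the same rule; only documented options are used
+- name: Remove the smtp rule
+  action: win_firewall_rule
+  args:
+    name: smtp
+    state: absent
+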
+'''
diff --git a/lib/ansible/modules/extras/windows/win_iis_virtualdirectory.ps1 b/lib/ansible/modules/extras/windows/win_iis_virtualdirectory.ps1
new file mode 100644
index 0000000000..44854ff09b
--- /dev/null
+++ b/lib/ansible/modules/extras/windows/win_iis_virtualdirectory.ps1
@@ -0,0 +1,132 @@
+#!powershell
+# -*- coding: utf-8 -*-
+
+# (c) 2015, Henrik Wallström <henrik@wallstroms.nu>
+#
+# This file is part of Ansible
+#
+# Ansible is free software: you can redistribute it and/or modify
+# it under the terms of the GNU General Public License as published by
+# the Free Software Foundation, either version 3 of the License, or
+# (at your option) any later version.
+#
+# Ansible is distributed in the hope that it will be useful,
+# but WITHOUT ANY WARRANTY; without even the implied warranty of
+# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+# GNU General Public License for more details.
+#
+# You should have received a copy of the GNU General Public License
+# along with Ansible. If not, see <http://www.gnu.org/licenses/>.
+
+# WANT_JSON
+# POWERSHELL_COMMON
+
+$params = Parse-Args $args;
+
+# Name parameter
+$name = Get-Attr $params "name" $FALSE;
+If ($name -eq $FALSE) {
+ Fail-Json (New-Object psobject) "missing required argument: name";
+}
+
+# Site
+$site = Get-Attr $params "site" $FALSE;
+If ($site -eq $FALSE) {
+ Fail-Json (New-Object psobject) "missing required argument: site";
+}
+
+# Application
+$application = Get-Attr $params "application" $FALSE;
+
+# State parameter
+$state = Get-Attr $params "state" "present";
+If (($state -ne 'present') -and ($state -ne 'absent')) {
+ Fail-Json $result "state is '$state'; must be 'present' or 'absent'"
+}
+
+# Path parameter
+$physical_path = Get-Attr $params "physical_path" $FALSE;
+
+# Ensure WebAdministration module is loaded
+if ((Get-Module "WebAdministration" -ErrorAction SilentlyContinue) -eq $null) {
+ Import-Module WebAdministration
+}
+
+# Result
+$result = New-Object psobject @{
+ directory = New-Object psobject
+ changed = $false
+};
+
+# Construct path
+$directory_path = if($application) {
+ "IIS:\Sites\$($site)\$($application)\$($name)"
+} else {
+ "IIS:\Sites\$($site)\$($name)"
+}
+
+# Directory info
+$directory = if($application) {
+ Get-WebVirtualDirectory -Site $site -Name $name -Application $application
+} else {
+ Get-WebVirtualDirectory -Site $site -Name $name
+}
+
+try {
+ # Add directory
+ If(($state -eq 'present') -and (-not $directory)) {
+ If ($physical_path -eq $FALSE) {
+ Fail-Json (New-Object psobject) "missing required arguments: physical_path"
+ }
+ If (-not (Test-Path $physical_path)) {
+ Fail-Json (New-Object psobject) "specified folder must already exist: physical_path"
+ }
+
+ $directory_parameters = New-Object psobject @{
+ Site = $site
+ Name = $name
+ PhysicalPath = $physical_path
+ };
+
+ If ($application) {
+ $directory_parameters.Application = $application
+ }
+
+ $directory = New-WebVirtualDirectory @directory_parameters -Force
+ $result.changed = $true
+ }
+
+ # Remove directory
+ If ($state -eq 'absent' -and $directory) {
+ Remove-Item $directory_path
+ $result.changed = $true
+ }
+
+ $directory = Get-WebVirtualDirectory -Site $site -Name $name
+ If($directory) {
+
+ # Change Physical Path if needed
+ if($physical_path) {
+ If (-not (Test-Path $physical_path)) {
+ Fail-Json (New-Object psobject) "specified folder must already exist: physical_path"
+ }
+
+ $vdir_folder = Get-Item $directory.PhysicalPath
+ $folder = Get-Item $physical_path
+ If($folder.FullName -ne $vdir_folder.FullName) {
+ Set-ItemProperty $directory_path -name physicalPath -value $physical_path
+ $result.changed = $true
+ }
+ }
+ }
+} catch {
+ Fail-Json $result $_.Exception.Message
+}
+
+# Result
+$directory = Get-WebVirtualDirectory -Site $site -Name $name
+$result.directory = New-Object psobject @{
+ PhysicalPath = $directory.PhysicalPath
+}
+
+Exit-Json $result
diff --git a/lib/ansible/modules/extras/windows/win_iis_virtualdirectory.py b/lib/ansible/modules/extras/windows/win_iis_virtualdirectory.py
new file mode 100644
index 0000000000..66810b8407
--- /dev/null
+++ b/lib/ansible/modules/extras/windows/win_iis_virtualdirectory.py
@@ -0,0 +1,67 @@
+#!/usr/bin/python
+# -*- coding: utf-8 -*-
+
+# (c) 2015, Henrik Wallström <henrik@wallstroms.nu>
+#
+# This file is part of Ansible
+#
+# Ansible is free software: you can redistribute it and/or modify
+# it under the terms of the GNU General Public License as published by
+# the Free Software Foundation, either version 3 of the License, or
+# (at your option) any later version.
+#
+# Ansible is distributed in the hope that it will be useful,
+# but WITHOUT ANY WARRANTY; without even the implied warranty of
+# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+# GNU General Public License for more details.
+#
+# You should have received a copy of the GNU General Public License
+# along with Ansible. If not, see <http://www.gnu.org/licenses/>.
+
+DOCUMENTATION = '''
+---
+module: win_iis_virtualdirectory
+version_added: "2.0"
+short_description: Configures a virtual directory in IIS.
+description:
+ - Creates, removes, and configures a virtual directory in IIS.
+options:
+ name:
+ description:
+ - The name of the virtual directory to create or remove
+ required: true
+ state:
+ description:
+ - Whether to add or remove the specified virtual directory
+ choices:
+ - absent
+ - present
+ required: false
+ default: present
+ site:
+ description:
+ - The site name under which the virtual directory is created or exists.
+ required: true
+ application:
+ description:
+ - The application under which the virtual directory is created or exists.
+ required: false
+ default: null
+ physical_path:
+ description:
+ - The physical path to the folder in which the new virtual directory is created. The specified folder must already exist.
+ required: false
+ default: null
+author: Henrik Wallström
+'''
+
+EXAMPLES = '''
+# This creates a virtual directory if it doesn't exist.
+$ ansible -i hosts -m win_iis_virtualdirectory -a "name='somedirectory' site=somesite state=present physical_path=c:\\virtualdirectory\\some" host
+
+# This removes a virtual directory if it exists.
+$ ansible -i hosts -m win_iis_virtualdirectory -a "name='somedirectory' site=somesite state=absent" host
+
+# This creates a virtual directory on an application if it doesn't exist.
+$ ansible -i hosts -m win_iis_virtualdirectory -a "name='somedirectory' site=somesite application=someapp state=present physical_path=c:\\virtualdirectory\\some" host
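+
+# The first ad-hoc call above, written as a playbook task (a sketch):
+- name: Create somedirectory virtual directory under somesite
+ win_iis_virtualdirectory:
+ name: somedirectory
+ site: somesite
+ state: present
+ physical_path: 'c:\\virtualdirectory\\some'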
+'''
diff --git a/lib/ansible/modules/extras/windows/win_iis_webapplication.ps1 b/lib/ansible/modules/extras/windows/win_iis_webapplication.ps1
new file mode 100644
index 0000000000..e576dd5081
--- /dev/null
+++ b/lib/ansible/modules/extras/windows/win_iis_webapplication.ps1
@@ -0,0 +1,132 @@
+#!powershell
+
+# (c) 2015, Henrik Wallström <henrik@wallstroms.nu>
+#
+# This file is part of Ansible
+#
+# Ansible is free software: you can redistribute it and/or modify
+# it under the terms of the GNU General Public License as published by
+# the Free Software Foundation, either version 3 of the License, or
+# (at your option) any later version.
+#
+# Ansible is distributed in the hope that it will be useful,
+# but WITHOUT ANY WARRANTY; without even the implied warranty of
+# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+# GNU General Public License for more details.
+#
+# You should have received a copy of the GNU General Public License
+# along with Ansible. If not, see <http://www.gnu.org/licenses/>.
+
+# WANT_JSON
+# POWERSHELL_COMMON
+
+$params = Parse-Args $args;
+
+# Name parameter
+$name = Get-Attr $params "name" $FALSE;
+If ($name -eq $FALSE) {
+ Fail-Json (New-Object psobject) "missing required argument: name";
+}
+
+# Site
+$site = Get-Attr $params "site" $FALSE;
+If ($site -eq $FALSE) {
+ Fail-Json (New-Object psobject) "missing required argument: site";
+}
+
+# State parameter
+$state = Get-Attr $params "state" "present";
+$state = $state.ToString().ToLower();
+If (($state -ne 'present') -and ($state -ne 'absent')) {
+ Fail-Json (New-Object psobject) "state is '$state'; must be 'present' or 'absent'"
+}
+
+# Path parameter
+$physical_path = Get-Attr $params "physical_path" $FALSE;
+
+# Application Pool Parameter
+$application_pool = Get-Attr $params "application_pool" $FALSE;
+
+
+# Ensure WebAdministration module is loaded
+if ((Get-Module "WebAdministration" -ErrorAction SilentlyContinue) -eq $null) {
+ Import-Module WebAdministration
+}
+
+# Result
+$result = New-Object psobject @{
+ application = New-Object psobject
+ changed = $false
+};
+
+# Application info
+$application = Get-WebApplication -Site $site -Name $name
+
+try {
+ # Add application
+ If(($state -eq 'present') -and (-not $application)) {
+ If ($physical_path -eq $FALSE) {
+ Fail-Json (New-Object psobject) "missing required argument: physical_path"
+ }
+ If (-not (Test-Path $physical_path)) {
+ Fail-Json (New-Object psobject) "specified folder must already exist: physical_path"
+ }
+
+ $application_parameters = New-Object psobject @{
+ Site = $site
+ Name = $name
+ PhysicalPath = $physical_path
+ };
+
+ If ($application_pool) {
+ $application_parameters.ApplicationPool = $application_pool
+ }
+
+ $application = New-WebApplication @application_parameters -Force
+ $result.changed = $true
+
+ }
+
+ # Remove application
+ if ($state -eq 'absent' -and $application) {
+ $application = Remove-WebApplication -Site $site -Name $name
+ $result.changed = $true
+ }
+
+ $application = Get-WebApplication -Site $site -Name $name
+ If($application) {
+
+ # Change Physical Path if needed
+ if($physical_path) {
+ If (-not (Test-Path $physical_path)) {
+ Fail-Json (New-Object psobject) "specified folder must already exist: physical_path"
+ }
+
+ $app_folder = Get-Item $application.PhysicalPath
+ $folder = Get-Item $physical_path
+ If($folder.FullName -ne $app_folder.FullName) {
+ Set-ItemProperty "IIS:\Sites\$($site)\$($name)" -name physicalPath -value $physical_path
+ $result.changed = $true
+ }
+ }
+
+ # Change Application Pool if needed
+ if($application_pool) {
+ If($application_pool -ne $application.applicationPool) {
+ Set-ItemProperty "IIS:\Sites\$($site)\$($name)" -name applicationPool -value $application_pool
+ $result.changed = $true
+ }
+ }
+ }
+} catch {
+ Fail-Json $result $_.Exception.Message
+}
+
+# Result
+$application = Get-WebApplication -Site $site -Name $name
+$result.application = New-Object psobject @{
+ PhysicalPath = $application.PhysicalPath
+ ApplicationPool = $application.applicationPool
+}
+
+Exit-Json $result
diff --git a/lib/ansible/modules/extras/windows/win_iis_webapplication.py b/lib/ansible/modules/extras/windows/win_iis_webapplication.py
new file mode 100644
index 0000000000..b8ebd08516
--- /dev/null
+++ b/lib/ansible/modules/extras/windows/win_iis_webapplication.py
@@ -0,0 +1,68 @@
+#!/usr/bin/python
+# -*- coding: utf-8 -*-
+
+# (c) 2015, Henrik Wallström <henrik@wallstroms.nu>
+#
+# This file is part of Ansible
+#
+# Ansible is free software: you can redistribute it and/or modify
+# it under the terms of the GNU General Public License as published by
+# the Free Software Foundation, either version 3 of the License, or
+# (at your option) any later version.
+#
+# Ansible is distributed in the hope that it will be useful,
+# but WITHOUT ANY WARRANTY; without even the implied warranty of
+# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+# GNU General Public License for more details.
+#
+# You should have received a copy of the GNU General Public License
+# along with Ansible. If not, see <http://www.gnu.org/licenses/>.
+
+DOCUMENTATION = '''
+---
+module: win_iis_webapplication
+version_added: "2.0"
+short_description: Configures an IIS Web application.
+description:
+ - Creates, removes, and configures an IIS Web application.
+options:
+ name:
+ description:
+ - Name of the Web application.
+ required: true
+ default: null
+ aliases: []
+ site:
+ description:
+ - Name of the site on which the application is created.
+ required: true
+ default: null
+ aliases: []
+ state:
+ description:
+ - State of the web application
+ choices:
+ - present
+ - absent
+ required: false
+ default: null
+ aliases: []
+ physical_path:
+ description:
+ - The physical path on the remote host to use for the new application. The specified folder must already exist.
+ required: false
+ default: null
+ aliases: []
+ application_pool:
+ description:
+ - The application pool in which the new site executes.
+ required: false
+ default: null
+ aliases: []
+author: Henrik Wallström
+'''
+
+EXAMPLES = '''
+$ ansible -i hosts -m win_iis_webapplication -a "name=api site=acme physical_path=c:\\apps\\acme\\api" host
+
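+# The same call as a playbook task (a sketch):
+- name: Create the api application under the acme site
+ win_iis_webapplication:
+ name: api
+ site: acme
+ state: present
+ physical_path: 'c:\\apps\\acme\\api'
+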
+'''
diff --git a/lib/ansible/modules/extras/windows/win_iis_webapppool.ps1 b/lib/ansible/modules/extras/windows/win_iis_webapppool.ps1
new file mode 100644
index 0000000000..4172dc2f33
--- /dev/null
+++ b/lib/ansible/modules/extras/windows/win_iis_webapppool.ps1
@@ -0,0 +1,123 @@
+#!powershell
+
+# (c) 2015, Henrik Wallström <henrik@wallstroms.nu>
+#
+# This file is part of Ansible
+#
+# Ansible is free software: you can redistribute it and/or modify
+# it under the terms of the GNU General Public License as published by
+# the Free Software Foundation, either version 3 of the License, or
+# (at your option) any later version.
+#
+# Ansible is distributed in the hope that it will be useful,
+# but WITHOUT ANY WARRANTY; without even the implied warranty of
+# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+# GNU General Public License for more details.
+#
+# You should have received a copy of the GNU General Public License
+# along with Ansible. If not, see <http://www.gnu.org/licenses/>.
+
+
+# WANT_JSON
+# POWERSHELL_COMMON
+
+$params = Parse-Args $args;
+
+# Name parameter
+$name = Get-Attr $params "name" $FALSE;
+If ($name -eq $FALSE) {
+ Fail-Json (New-Object psobject) "missing required argument: name";
+}
+
+# State parameter
+$state = Get-Attr $params "state" $FALSE;
+$valid_states = ('started', 'restarted', 'stopped', 'absent');
+If (($state -Ne $FALSE) -And ($state -NotIn $valid_states)) {
+ Fail-Json (New-Object psobject) "state is '$state'; must be one of: $($valid_states -join ', ')"
+}
+
+# Attributes parameter - Pipe-separated list of attributes where
+# names and values are separated by a colon (paramA:valueA|paramB:valueB)
+$attributes = @{};
+If (Get-Member -InputObject $params -Name attributes) {
+ $params.attributes -split '\|' | foreach {
+ $key, $value = $_ -split "\:";
+ $attributes.Add($key, $value);
+ }
+}
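+
+# For illustration only (not executed): an attributes string such as
+# "managedRuntimeVersion:v4.0|autoStart:false" parses into the hashtable
+# @{ managedRuntimeVersion = 'v4.0'; autoStart = 'false' }. Values stay
+# strings, which is why the comparison below casts the current value to [String].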
+
+# Ensure WebAdministration module is loaded
+if ((Get-Module "WebAdministration" -ErrorAction SilentlyContinue) -eq $NULL){
+ Import-Module WebAdministration
+}
+
+# Result
+$result = New-Object psobject @{
+ changed = $FALSE
+ attributes = $attributes
+};
+
+# Get pool
+$pool = Get-Item IIS:\AppPools\$name -ErrorAction SilentlyContinue
+
+try {
+ # Add
+ if (-not $pool -and $state -in ('started', 'stopped', 'restarted')) {
+ New-WebAppPool $name
+ $result.changed = $TRUE
+ }
+
+ # Remove
+ if ($pool -and $state -eq 'absent') {
+ Remove-WebAppPool $name
+ $result.changed = $TRUE
+ }
+
+ $pool = Get-Item IIS:\AppPools\$name -ErrorAction SilentlyContinue
+ if($pool) {
+ # Set properties
+ $attributes.GetEnumerator() | foreach {
+ $newParameter = $_;
+ $currentParameter = Get-ItemProperty ("IIS:\AppPools\" + $name) $newParameter.Key
+ if(-not $currentParameter -or ($currentParameter.Value -as [String]) -ne $newParameter.Value) {
+ Set-ItemProperty ("IIS:\AppPools\" + $name) $newParameter.Key $newParameter.Value
+ $result.changed = $TRUE
+ }
+ }
+
+ # Set run state
+ if (($state -eq 'stopped') -and ($pool.State -eq 'Started')) {
+ Stop-WebAppPool -Name $name -ErrorAction Stop
+ $result.changed = $TRUE
+ }
+ if ((($state -eq 'started') -and ($pool.State -eq 'Stopped'))) {
+ Start-WebAppPool -Name $name -ErrorAction Stop
+ $result.changed = $TRUE
+ }
+ if ($state -eq 'restarted') {
+ switch ($pool.State)
+ {
+ 'Stopped' { Start-WebAppPool -Name $name -ErrorAction Stop }
+ default { Restart-WebAppPool -Name $name -ErrorAction Stop }
+ }
+ $result.changed = $TRUE
+ }
+ }
+} catch {
+ Fail-Json $result $_.Exception.Message
+}
+
+# Result
+$pool = Get-Item IIS:\AppPools\$name -ErrorAction SilentlyContinue
+if ($pool)
+{
+ $result.info = @{
+ name = $pool.Name
+ state = $pool.State
+ attributes = New-Object psobject @{}
+ };
+
+ $pool.Attributes | ForEach { $result.info.attributes.Add($_.Name, $_.Value)};
+}
+
+Exit-Json $result \ No newline at end of file
diff --git a/lib/ansible/modules/extras/windows/win_iis_webapppool.py b/lib/ansible/modules/extras/windows/win_iis_webapppool.py
new file mode 100644
index 0000000000..c77c3b04cb
--- /dev/null
+++ b/lib/ansible/modules/extras/windows/win_iis_webapppool.py
@@ -0,0 +1,112 @@
+#!/usr/bin/python
+# -*- coding: utf-8 -*-
+
+# (c) 2015, Henrik Wallström <henrik@wallstroms.nu>
+#
+# This file is part of Ansible
+#
+# Ansible is free software: you can redistribute it and/or modify
+# it under the terms of the GNU General Public License as published by
+# the Free Software Foundation, either version 3 of the License, or
+# (at your option) any later version.
+#
+# Ansible is distributed in the hope that it will be useful,
+# but WITHOUT ANY WARRANTY; without even the implied warranty of
+# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+# GNU General Public License for more details.
+#
+# You should have received a copy of the GNU General Public License
+# along with Ansible. If not, see <http://www.gnu.org/licenses/>.
+
+
+DOCUMENTATION = '''
+---
+module: win_iis_webapppool
+version_added: "2.0"
+short_description: Configures an IIS Web Application Pool.
+description:
+ - Creates, removes, and configures an IIS Web Application Pool.
+options:
+ name:
+ description:
+ - Name of the application pool
+ required: true
+ default: null
+ aliases: []
+ state:
+ description:
+ - State of the application pool
+ choices:
+ - absent
+ - stopped
+ - started
+ - restarted
+ required: false
+ default: null
+ aliases: []
+ attributes:
+ description:
+ - Application Pool attributes from string where attributes are separated by a pipe and attribute name/values by a colon, e.g. "foo:1|bar:2"
+ required: false
+ default: null
+ aliases: []
+author: Henrik Wallström
+'''
+
+EXAMPLES = '''
+# This returns information about an existing application pool
+$ ansible -i inventory -m win_iis_webapppool -a "name='DefaultAppPool'" windows
+host | success >> {
+ "attributes": {},
+ "changed": false,
+ "info": {
+ "attributes": {
+ "CLRConfigFile": "",
+ "applicationPoolSid": "S-1-5-82-3006700770-424185619-1745488364-794895919-4004696415",
+ "autoStart": true,
+ "enable32BitAppOnWin64": false,
+ "enableConfigurationOverride": true,
+ "managedPipelineMode": 0,
+ "managedRuntimeLoader": "webengine4.dll",
+ "managedRuntimeVersion": "v4.0",
+ "name": "DefaultAppPool",
+ "passAnonymousToken": true,
+ "queueLength": 1000,
+ "startMode": 0,
+ "state": 1
+ },
+ "name": "DefaultAppPool",
+ "state": "Started"
+ }
+}
+
+# This creates a new application pool in 'Started' state
+$ ansible -i inventory -m win_iis_webapppool -a "name='AppPool' state=started" windows
+
+# This stops an application pool
+$ ansible -i inventory -m win_iis_webapppool -a "name='AppPool' state=stopped" windows
+
+# This restarts an application pool
+$ ansible -i inventory -m win_iis_webapppool -a "name='AppPool' state=restarted" windows
+
+# This changes application pool attributes without touching its state
+$ ansible -i inventory -m win_iis_webapppool -a "name='AppPool' attributes='managedRuntimeVersion:v4.0|autoStart:false'" windows
+
+# This creates an application pool and sets attributes
+$ ansible -i inventory -m win_iis_webapppool -a "name='AnotherAppPool' state=started attributes='managedRuntimeVersion:v4.0|autoStart:false'" windows
+
+
+# Playbook example
+---
+
+- name: App Pool with .NET 4.0
+ win_iis_webapppool:
+ name: 'AppPool'
+ state: started
+ attributes: managedRuntimeVersion:v4.0
+ register: webapppool
+
+'''
diff --git a/lib/ansible/modules/extras/windows/win_iis_webbinding.ps1 b/lib/ansible/modules/extras/windows/win_iis_webbinding.ps1
new file mode 100644
index 0000000000..dfd9cdb958
--- /dev/null
+++ b/lib/ansible/modules/extras/windows/win_iis_webbinding.ps1
@@ -0,0 +1,131 @@
+#!powershell
+
+# (c) 2015, Henrik Wallström <henrik@wallstroms.nu>
+#
+# This file is part of Ansible
+#
+# Ansible is free software: you can redistribute it and/or modify
+# it under the terms of the GNU General Public License as published by
+# the Free Software Foundation, either version 3 of the License, or
+# (at your option) any later version.
+#
+# Ansible is distributed in the hope that it will be useful,
+# but WITHOUT ANY WARRANTY; without even the implied warranty of
+# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+# GNU General Public License for more details.
+#
+# You should have received a copy of the GNU General Public License
+# along with Ansible. If not, see <http://www.gnu.org/licenses/>.
+
+
+# WANT_JSON
+# POWERSHELL_COMMON
+
+$params = Parse-Args $args;
+
+$name = Get-AnsibleParam $params -name "name" -failifempty $true
+$state = Get-AnsibleParam $params -name "state" -default "present" -validateSet "present","absent"
+$host_header = Get-AnsibleParam $params -name "host_header"
+$protocol = Get-AnsibleParam $params -name "protocol"
+$port = Get-AnsibleParam $params -name "port"
+$ip = Get-AnsibleParam $params -name "ip"
+$certificateHash = Get-AnsibleParam $params -name "certificate_hash" -default $false
+$certificateStoreName = Get-AnsibleParam $params -name "certificate_store_name" -default "MY"
+
+$binding_parameters = New-Object psobject @{
+ Name = $name
+};
+
+If ($host_header) {
+ $binding_parameters.HostHeader = $host_header
+}
+
+If ($protocol) {
+ $binding_parameters.Protocol = $protocol
+}
+
+If ($port) {
+ $binding_parameters.Port = $port
+}
+
+If ($ip) {
+ $binding_parameters.IPAddress = $ip
+}
+
+# Ensure WebAdministration module is loaded
+if ((Get-Module "WebAdministration" -ErrorAction SilentlyContinue) -eq $null){
+ Import-Module WebAdministration
+}
+
+function Create-Binding-Info {
+ return New-Object psobject @{
+ "bindingInformation" = $args[0].bindingInformation
+ "certificateHash" = $args[0].certificateHash
+ "certificateStoreName" = $args[0].certificateStoreName
+ "isDsMapperEnabled" = $args[0].isDsMapperEnabled
+ "protocol" = $args[0].protocol
+ "sslFlags" = $args[0].sslFlags
+ }
+}
+
+# Result
+$result = New-Object psobject @{
+ changed = $false
+ parameters = $binding_parameters
+ matched = @()
+ removed = @()
+ added = @()
+};
+
+# Get bindings matching parameters
+$current_bindings = Get-WebBinding @binding_parameters
+$current_bindings | Foreach {
+ $result.matched += Create-Binding-Info $_
+}
+
+try {
+ # Add
+ if (-not $current_bindings -and $state -eq 'present') {
+ New-WebBinding @binding_parameters -Force
+
+ # Select the certificate, if one was supplied
+ if($certificateHash -ne $FALSE) {
+
+ $ip = $binding_parameters["IPAddress"]
+ if((!$ip) -or ($ip -eq "*")) {
+ $ip = "0.0.0.0"
+ }
+
+ $port = $binding_parameters["Port"]
+ if(!$port) {
+ $port = 443
+ }
+
+ $result.port = $port
+ $result.ip = $ip
+
+ Push-Location IIS:\SslBindings\
+ Get-Item Cert:\LocalMachine\$certificateStoreName\$certificateHash | New-Item "$($ip)!$($port)"
+ Pop-Location
+ }
+
+ $result.added += Create-Binding-Info (Get-WebBinding @binding_parameters)
+ $result.changed = $true
+ }
+
+ # Remove
+ if ($current_bindings -and $state -eq 'absent') {
+ $current_bindings | foreach {
+ Remove-WebBinding -InputObject $_
+ $result.removed += Create-Binding-Info $_
+ }
+ $result.changed = $true
+ }
+
+
+}
+catch {
+ Fail-Json $result $_.Exception.Message
+}
+
+Exit-Json $result
diff --git a/lib/ansible/modules/extras/windows/win_iis_webbinding.py b/lib/ansible/modules/extras/windows/win_iis_webbinding.py
new file mode 100644
index 0000000000..0aa1ee1259
--- /dev/null
+++ b/lib/ansible/modules/extras/windows/win_iis_webbinding.py
@@ -0,0 +1,137 @@
+#!/usr/bin/python
+# -*- coding: utf-8 -*-
+
+# (c) 2015, Henrik Wallström <henrik@wallstroms.nu>
+#
+# This file is part of Ansible
+#
+# Ansible is free software: you can redistribute it and/or modify
+# it under the terms of the GNU General Public License as published by
+# the Free Software Foundation, either version 3 of the License, or
+# (at your option) any later version.
+#
+# Ansible is distributed in the hope that it will be useful,
+# but WITHOUT ANY WARRANTY; without even the implied warranty of
+# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+# GNU General Public License for more details.
+#
+# You should have received a copy of the GNU General Public License
+# along with Ansible. If not, see <http://www.gnu.org/licenses/>.
+
+
+DOCUMENTATION = '''
+---
+module: win_iis_webbinding
+version_added: "2.0"
+short_description: Configures bindings on an IIS Web site.
+description:
+ - Creates, removes, and configures a binding on an existing IIS Web site
+options:
+ name:
+ description:
+ - Name of the web site
+ required: true
+ default: null
+ aliases: []
+ state:
+ description:
+ - State of the binding
+ choices:
+ - present
+ - absent
+ required: false
+ default: null
+ aliases: []
+ port:
+ description:
+ - The port to bind to / use for the new site.
+ required: false
+ default: null
+ aliases: []
+ ip:
+ description:
+ - The IP address to bind to / use for the new site.
+ required: false
+ default: null
+ aliases: []
+ host_header:
+ description:
+ - The host header to bind to / use for the new site.
+ required: false
+ default: null
+ aliases: []
+ protocol:
+ description:
+ - The protocol to be used for the Web binding (usually HTTP, HTTPS, or FTP).
+ required: false
+ default: null
+ aliases: []
+ certificate_hash:
+ description:
+ - Certificate hash for the SSL binding. The certificate hash is the unique identifier for the certificate.
+ required: false
+ default: null
+ aliases: []
+ certificate_store_name:
+ description:
+ - Name of the certificate store where the certificate for the binding is located.
+ required: false
+ default: "My"
+ aliases: []
+author: Henrik Wallström
+'''
+
+EXAMPLES = '''
+# This will return binding information for an existing host
+$ ansible -i vagrant-inventory -m win_iis_webbinding -a "name='Default Web Site'" windows
+host | success >> {
+ "added": [],
+ "changed": false,
+ "matched": [
+ {
+ "bindingInformation": "*:80:",
+ "certificateHash": "",
+ "certificateStoreName": "",
+ "isDsMapperEnabled": false,
+ "protocol": "http",
+ "sslFlags": 0
+ }
+ ],
+ "parameters": {
+ "Name": "Default Web Site"
+ },
+ "removed": []
+}
+
+# This will return the HTTPS binding information for an existing host
+$ ansible -i vagrant-inventory -m win_iis_webbinding -a "name='Default Web Site' protocol=https" windows
+
+# This will add an HTTP binding on port 9090
+$ ansible -i vagrant-inventory -m win_iis_webbinding -a "name='Default Web Site' port=9090 state=present" windows
+
+# This will remove the HTTP binding on port 9090
+$ ansible -i vagrant-inventory -m win_iis_webbinding -a "name='Default Web Site' port=9090 state=absent" windows
+
+# This will add an HTTPS binding
+$ ansible -i vagrant-inventory -m win_iis_webbinding -a "name='Default Web Site' protocol=https state=present" windows
+
+# This will add an HTTPS binding and select the certificate to use
+$ ansible -i vagrant-inventory -m win_iis_webbinding -a "name='Default Web Site' protocol=https certificate_hash=B0D0FA8408FC67B230338FCA584D03792DA73F4C" windows
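+
+# To look up a certificate's hash (thumbprint) on the host, something like the
+# following works (a sketch; substitute the certificate store you actually use):
+# PS> Get-ChildItem Cert:\LocalMachine\My | Select-Object Thumbprint, Subject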
+
+
+# Playbook example
+---
+
+- name: Website http/https bindings
+ win_iis_webbinding:
+ name: "Default Web Site"
+ protocol: https
+ port: 443
+ certificate_hash: "D1A3AF8988FD32D1A3AF8988FD323792DA73F4C"
+ state: present
+ when: monitor_use_https
+
+'''
diff --git a/lib/ansible/modules/extras/windows/win_iis_website.ps1 b/lib/ansible/modules/extras/windows/win_iis_website.ps1
new file mode 100644
index 0000000000..74fc3df302
--- /dev/null
+++ b/lib/ansible/modules/extras/windows/win_iis_website.ps1
@@ -0,0 +1,196 @@
+#!powershell
+
+# (c) 2015, Henrik Wallström <henrik@wallstroms.nu>
+#
+# This file is part of Ansible
+#
+# Ansible is free software: you can redistribute it and/or modify
+# it under the terms of the GNU General Public License as published by
+# the Free Software Foundation, either version 3 of the License, or
+# (at your option) any later version.
+#
+# Ansible is distributed in the hope that it will be useful,
+# but WITHOUT ANY WARRANTY; without even the implied warranty of
+# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+# GNU General Public License for more details.
+#
+# You should have received a copy of the GNU General Public License
+# along with Ansible. If not, see <http://www.gnu.org/licenses/>.
+
+# WANT_JSON
+# POWERSHELL_COMMON
+
+$params = Parse-Args $args;
+
+# Name parameter
+$name = Get-Attr $params "name" $FALSE;
+If ($name -eq $FALSE) {
+ Fail-Json (New-Object psobject) "missing required argument: name";
+}
+
+# State parameter
+$state = Get-Attr $params "state" $FALSE;
+If ($state -ne $FALSE) {
+ $state = $state.ToString().ToLower();
+}
+If (($state -ne $FALSE) -and ($state -ne 'started') -and ($state -ne 'stopped') -and ($state -ne 'restarted') -and ($state -ne 'absent')) {
+ Fail-Json (New-Object psobject) "state is '$state'; must be 'started', 'restarted', 'stopped' or 'absent'"
+}
+
+# Path parameter
+$physical_path = Get-Attr $params "physical_path" $FALSE;
+$site_id = Get-Attr $params "site_id" $FALSE;
+
+# Application Pool Parameter
+$application_pool = Get-Attr $params "application_pool" $FALSE;
+
+# Binding Parameters
+$bind_port = Get-Attr $params "port" $FALSE;
+$bind_ip = Get-Attr $params "ip" $FALSE;
+$bind_hostname = Get-Attr $params "hostname" $FALSE;
+$bind_ssl = Get-Attr $params "ssl" $FALSE;
+
+# Custom site Parameters from string where properties
+# are separated by a pipe and property name/values by a colon.
+# Ex. "foo:1|bar:2"
+$parameters = Get-Attr $params "parameters" $null;
+if($parameters -ne $null) {
+ $parameters = @($parameters -split '\|' | ForEach {
+ return ,($_ -split "\:", 2);
+ })
+}
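+
+# For illustration only (not executed): "logfile.directory:c:\sites\logs|id:3"
+# parses into @(("logfile.directory", "c:\sites\logs"), ("id", "3")); the
+# two-part split keeps any later colons (e.g. in the path) inside the value.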
+
+
+# Ensure WebAdministration module is loaded
+if ((Get-Module "WebAdministration" -ErrorAction SilentlyContinue) -eq $null) {
+ Import-Module WebAdministration
+}
+
+# Result
+$result = New-Object psobject @{
+ site = New-Object psobject
+ changed = $false
+};
+
+# Site info
+$site = Get-Website | Where { $_.Name -eq $name }
+
+Try {
+ # Add site
+ If(($state -ne 'absent') -and (-not $site)) {
+ If ($physical_path -eq $FALSE) {
+ Fail-Json (New-Object psobject) "missing required argument: physical_path"
+ }
+ ElseIf (-not (Test-Path $physical_path)) {
+ Fail-Json (New-Object psobject) "specified folder must already exist: physical_path"
+ }
+
+ $site_parameters = New-Object psobject @{
+ Name = $name
+ PhysicalPath = $physical_path
+ };
+
+ If ($application_pool) {
+ $site_parameters.ApplicationPool = $application_pool
+ }
+
+ If ($site_id) {
+ $site_parameters.ID = $site_id
+ }
+
+ If ($bind_port) {
+ $site_parameters.Port = $bind_port
+ }
+
+ If ($bind_ip) {
+ $site_parameters.IPAddress = $bind_ip
+ }
+
+ If ($bind_hostname) {
+ $site_parameters.HostHeader = $bind_hostname
+ }
+
+ # Fix for error "New-Item : Index was outside the bounds of the array."
+ # This is a bug in the New-WebSite cmdlet. Apparently at least one site must already be configured in IIS, otherwise New-WebSite crashes.
+ # For more details, see http://stackoverflow.com/questions/3573889/ps-c-new-website-blah-throws-index-was-outside-the-bounds-of-the-array
+ $sites_list = get-childitem -Path IIS:\sites
+ if ($sites_list -eq $null) { $site_parameters.ID = 1 }
+
+ $site = New-Website @site_parameters -Force
+ $result.changed = $true
+ }
+
+ # Remove site
+ If ($state -eq 'absent' -and $site) {
+ $site = Remove-Website -Name $name
+ $result.changed = $true
+ }
+
+ $site = Get-Website | Where { $_.Name -eq $name }
+ If($site) {
+ # Change Physical Path if needed
+ if($physical_path) {
+ If (-not (Test-Path $physical_path)) {
+ Fail-Json (New-Object psobject) "specified folder must already exist: physical_path"
+ }
+
+ $folder = Get-Item $physical_path
+ If($folder.FullName -ne $site.PhysicalPath) {
+ Set-ItemProperty "IIS:\Sites\$($site.Name)" -name physicalPath -value $folder.FullName
+ $result.changed = $true
+ }
+ }
+
+ # Change Application Pool if needed
+ if($application_pool) {
+ If($application_pool -ne $site.applicationPool) {
+ Set-ItemProperty "IIS:\Sites\$($site.Name)" -name applicationPool -value $application_pool
+ $result.changed = $true
+ }
+ }
+
+ # Set properties
+ if($parameters) {
+ $parameters | foreach {
+ $parameter_value = Get-ItemProperty "IIS:\Sites\$($site.Name)" $_[0]
+ if((-not $parameter_value) -or ($parameter_value.Value -as [String]) -ne $_[1]) {
+ Set-ItemProperty "IIS:\Sites\$($site.Name)" $_[0] $_[1]
+ $result.changed = $true
+ }
+ }
+ }
+
+ # Set run state
+ if (($state -eq 'stopped') -and ($site.State -eq 'Started'))
+ {
+ Stop-Website -Name $name -ErrorAction Stop
+ $result.changed = $true
+ }
+ if ((($state -eq 'started') -and ($site.State -eq 'Stopped')) -or ($state -eq 'restarted'))
+ {
+ Start-Website -Name $name -ErrorAction Stop
+ $result.changed = $true
+ }
+ }
+}
+Catch
+{
+ Fail-Json (New-Object psobject) $_.Exception.Message
+}
+
+if ($state -ne 'absent')
+{
+ $site = Get-Website | Where { $_.Name -eq $name }
+}
+
+if ($site)
+{
+ $result.site = New-Object psobject @{
+ Name = $site.Name
+ ID = $site.ID
+ State = $site.State
+ PhysicalPath = $site.PhysicalPath
+ ApplicationPool = $site.applicationPool
+ Bindings = @($site.Bindings.Collection | ForEach-Object { $_.BindingInformation })
+ }
+}
+
+Exit-Json $result
diff --git a/lib/ansible/modules/extras/windows/win_iis_website.py b/lib/ansible/modules/extras/windows/win_iis_website.py
new file mode 100644
index 0000000000..b158fb8d8a
--- /dev/null
+++ b/lib/ansible/modules/extras/windows/win_iis_website.py
@@ -0,0 +1,139 @@
+#!/usr/bin/python
+# -*- coding: utf-8 -*-
+
+# (c) 2015, Henrik Wallström <henrik@wallstroms.nu>
+#
+# This file is part of Ansible
+#
+# Ansible is free software: you can redistribute it and/or modify
+# it under the terms of the GNU General Public License as published by
+# the Free Software Foundation, either version 3 of the License, or
+# (at your option) any later version.
+#
+# Ansible is distributed in the hope that it will be useful,
+# but WITHOUT ANY WARRANTY; without even the implied warranty of
+# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+# GNU General Public License for more details.
+#
+# You should have received a copy of the GNU General Public License
+# along with Ansible. If not, see <http://www.gnu.org/licenses/>.
+
+DOCUMENTATION = '''
+---
+module: win_iis_website
+version_added: "2.0"
+short_description: Configures an IIS Web site.
+description:
+ - Creates, removes, and configures an IIS Web site
+options:
+ name:
+ description:
+ - Name of the web site
+ required: true
+ default: null
+ aliases: []
+ site_id:
+ description:
+ - Explicitly set the IIS numeric ID for a site. Note that this value cannot be changed after the website has been created.
+ required: false
+ version_added: "2.1"
+ default: null
+ state:
+ description:
+ - State of the web site
+ choices:
+ - started
+ - restarted
+ - stopped
+ - absent
+ required: false
+ default: null
+ aliases: []
+ physical_path:
+ description:
+ - The physical path on the remote host to use for the new site. The specified folder must already exist.
+ required: false
+ default: null
+ aliases: []
+ application_pool:
+ description:
+ - The application pool in which the new site executes.
+ required: false
+ default: null
+ aliases: []
+ port:
+ description:
+ - The port to bind to / use for the new site.
+ required: false
+ default: null
+ aliases: []
+ ip:
+ description:
+ - The IP address to bind to / use for the new site.
+ required: false
+ default: null
+ aliases: []
+ hostname:
+ description:
+ - The host header to bind to / use for the new site.
+ required: false
+ default: null
+ aliases: []
+ ssl:
+ description:
+ - Enables HTTPS binding on the site.
+ required: false
+ default: null
+ aliases: []
+ parameters:
+ description:
+ - Custom site parameters from string where properties are separated by a pipe and property name/values by a colon, e.g. "foo:1|bar:2"
+ required: false
+ default: null
+ aliases: []
+author: Henrik Wallström
+'''
+
+EXAMPLES = '''
+# This returns information about an existing site
+$ ansible -i vagrant-inventory -m win_iis_website -a "name='Default Web Site'" windows
+host | success >> {
+ "changed": false,
+ "site": {
+ "ApplicationPool": "DefaultAppPool",
+ "Bindings": [
+ "*:80:"
+ ],
+ "ID": 1,
+ "Name": "Default Web Site",
+ "PhysicalPath": "%SystemDrive%\\inetpub\\wwwroot",
+ "State": "Stopped"
+ }
+}
+
+# This stops an existing site.
+$ ansible -i hosts -m win_iis_website -a "name='Default Web Site' state=stopped" host
+
+# This creates a new site.
+$ ansible -i hosts -m win_iis_website -a "name=acme physical_path=c:\\sites\\acme" host
+
+# Change the logfile directory.
+$ ansible -i hosts -m win_iis_website -a "name=acme parameters='logfile.directory:c:\\sites\\logs'" host
+
+
+# Playbook example
+---
+
+- name: Acme IIS site
+ win_iis_website:
+ name: "Acme"
+ state: started
+ port: 80
+ ip: 127.0.0.1
+ hostname: acme.local
+ application_pool: "acme"
+ physical_path: 'c:\\sites\\acme'
+ parameters: 'logfile.directory:c:\\sites\\logs'
+ register: website
+
+'''
diff --git a/lib/ansible/modules/extras/windows/win_nssm.ps1 b/lib/ansible/modules/extras/windows/win_nssm.ps1
new file mode 100644
index 0000000000..2801307f60
--- /dev/null
+++ b/lib/ansible/modules/extras/windows/win_nssm.ps1
@@ -0,0 +1,630 @@
+#!powershell
+# This file is part of Ansible
+#
+# Copyright 2015, George Frank <george@georgefrank.net>
+# Copyright 2015, Adam Keech <akeech@chathamfinancial.com>
+# Copyright 2015, Hans-Joachim Kliemeck <git@kliemeck.de>
+#
+# Ansible is free software: you can redistribute it and/or modify
+# it under the terms of the GNU General Public License as published by
+# the Free Software Foundation, either version 3 of the License, or
+# (at your option) any later version.
+#
+# Ansible is distributed in the hope that it will be useful,
+# but WITHOUT ANY WARRANTY; without even the implied warranty of
+# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+# GNU General Public License for more details.
+#
+# You should have received a copy of the GNU General Public License
+# along with Ansible. If not, see <http://www.gnu.org/licenses/>.
+
+$ErrorActionPreference = "Stop"
+
+# WANT_JSON
+# POWERSHELL_COMMON
+
+$params = Parse-Args $args;
+
+$result = New-Object PSObject;
+Set-Attr $result "changed" $false;
+
+$name = Get-Attr $params "name" -failifempty $true
+$state = Get-Attr $params "state" -default "present" -validateSet "present", "absent", "started", "stopped", "restarted" -resultobj $result
+
+$application = Get-Attr $params "application" -default $null
+$appParameters = Get-Attr $params "app_parameters" -default $null
+$startMode = Get-Attr $params "start_mode" -default "auto" -validateSet "auto", "manual", "disabled" -resultobj $result
+
+$stdoutFile = Get-Attr $params "stdout_file" -default $null
+$stderrFile = Get-Attr $params "stderr_file" -default $null
+$dependencies = Get-Attr $params "dependencies" -default $null
+
+$user = Get-Attr $params "user" -default $null
+$password = Get-Attr $params "password" -default $null
+
+Function Service-Exists
+{
+ [CmdletBinding()]
+ param(
+ [Parameter(Mandatory=$true)]
+ [string]$name
+ )
+
+ return [bool](Get-Service "$name" -ErrorAction SilentlyContinue)
+}
+
+Function Nssm-Remove
+{
+ [CmdletBinding()]
+ param(
+ [Parameter(Mandatory=$true)]
+ [string]$name
+ )
+
+ if (Service-Exists -name $name)
+ {
+ $cmd = "nssm stop ""$name"""
+ $results = invoke-expression $cmd
+
+ $cmd = "nssm remove ""$name"" confirm"
+ $results = invoke-expression $cmd
+
+ if ($LastExitCode -ne 0)
+ {
+ Set-Attr $result "nssm_error_cmd" $cmd
+ Set-Attr $result "nssm_error_log" "$results"
+ Throw "Error removing service ""$name"""
+ }
+
+ $result.changed = $true
+ }
+}
+
+Function Nssm-Install
+{
+ [CmdletBinding()]
+ param(
+ [Parameter(Mandatory=$true)]
+ [string]$name,
+ [Parameter(Mandatory=$true)]
+ [AllowEmptyString()]
+ [string]$application
+ )
+
+ if (!$application)
+ {
+ Throw "Error installing service ""$name"". No application was supplied."
+ }
+
+ if (!(Service-Exists -name $name))
+ {
+ $cmd = "nssm install ""$name"" $application"
+
+ $results = invoke-expression $cmd
+
+ if ($LastExitCode -ne 0)
+ {
+ Set-Attr $result "nssm_error_cmd" $cmd
+ Set-Attr $result "nssm_error_log" "$results"
+ Throw "Error installing service ""$name"""
+ }
+
+ $result.changed = $true
+
+ } else {
+ $cmd = "nssm get ""$name"" Application"
+ $results = invoke-expression $cmd
+
+ if ($LastExitCode -ne 0)
+ {
+ Set-Attr $result "nssm_error_cmd" $cmd
+ Set-Attr $result "nssm_error_log" "$results"
+ Throw "Error installing service ""$name"""
+ }
+
+ if ($results -ne $application)
+ {
+ $cmd = "nssm set ""$name"" Application $application"
+
+ $results = invoke-expression $cmd
+
+ if ($LastExitCode -ne 0)
+ {
+ Set-Attr $result "nssm_error_cmd" $cmd
+ Set-Attr $result "nssm_error_log" "$results"
+ Throw "Error installing service ""$name"""
+ }
+
+ $result.changed = $true
+ }
+ }
+}
+
+Function ParseAppParameters()
+{
+ [CmdletBinding()]
+ param(
+ [Parameter(Mandatory=$true)]
+ [AllowEmptyString()]
+ [string]$appParameters
+ )
+
+ $escapedAppParameters = $appParameters.TrimStart("@").TrimStart("{").TrimEnd("}").Replace("; ","`n").Replace("\","\\")
+
+ return ConvertFrom-StringData -StringData $escapedAppParameters
+}
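+
+# For illustration only (not executed): given a string shaped like a PowerShell
+# hashtable literal, e.g. '@{_="bar"; "-file"="output.bat"}', the trims and
+# replaces above reduce it to one key=value pair per line, which
+# ConvertFrom-StringData then turns into a hashtable.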
+
+
+Function Nssm-Update-AppParameters
+{
+ [CmdletBinding()]
+ param(
+ [Parameter(Mandatory=$true)]
+ [string]$name,
+ [Parameter(Mandatory=$true)]
+ [AllowEmptyString()]
+ [string]$appParameters
+ )
+
+ $cmd = "nssm get ""$name"" AppParameters"
+ $results = invoke-expression $cmd
+
+ if ($LastExitCode -ne 0)
+ {
+ Set-Attr $result "nssm_error_cmd" $cmd
+ Set-Attr $result "nssm_error_log" "$results"
+ Throw "Error updating AppParameters for service ""$name"""
+ }
+
+ $appParamKeys = @()
+ $appParamVals = @()
+ $singleLineParams = ""
+
+ if ($appParameters)
+ {
+ $appParametersHash = ParseAppParameters -appParameters $appParameters
+ $appParametersHash.GetEnumerator() |
+ % {
+ $key = $($_.Name)
+ $val = $($_.Value)
+
+ $appParamKeys += $key
+ $appParamVals += $val
+
+ if ($key -eq "_") {
+ $singleLineParams = "$val " + $singleLineParams
+ } else {
+ $singleLineParams = $singleLineParams + "$key ""$val"""
+ }
+ }
+
+ Set-Attr $result "nssm_app_parameters_parsed" $appParametersHash
+ Set-Attr $result "nssm_app_parameters_keys" $appParamKeys
+ Set-Attr $result "nssm_app_parameters_vals" $appParamVals
+ }
+
+ Set-Attr $result "nssm_app_parameters" $appParameters
+ Set-Attr $result "nssm_single_line_app_parameters" $singleLineParams
+
+ if ($results -ne $singleLineParams)
+ {
+ if ($appParameters)
+ {
+ $cmd = "nssm set ""$name"" AppParameters $singleLineParams"
+ } else {
+ $cmd = "nssm set ""$name"" AppParameters '""""'"
+ }
+ $results = invoke-expression $cmd
+
+ if ($LastExitCode -ne 0)
+ {
+ Set-Attr $result "nssm_error_cmd" $cmd
+ Set-Attr $result "nssm_error_log" "$results"
+ Throw "Error updating AppParameters for service ""$name"""
+ }
+
+ $result.changed = $true
+ }
+}
+
+Function Nssm-Set-Output-Files
+{
+ [CmdletBinding()]
+ param(
+ [Parameter(Mandatory=$true)]
+ [string]$name,
+ [string]$stdout,
+ [string]$stderr
+ )
+
+ $cmd = "nssm get ""$name"" AppStdout"
+ $results = invoke-expression $cmd
+
+ if ($LastExitCode -ne 0)
+ {
+ Set-Attr $result "nssm_error_cmd" $cmd
+ Set-Attr $result "nssm_error_log" "$results"
+ Throw "Error retrieving existing stdout file for service ""$name"""
+ }
+
+ if ($results -ne $stdout)
+ {
+ if (!$stdout)
+ {
+ $cmd = "nssm reset ""$name"" AppStdout"
+ } else {
+ $cmd = "nssm set ""$name"" AppStdout $stdout"
+ }
+
+ $results = invoke-expression $cmd
+
+ if ($LastExitCode -ne 0)
+ {
+ Set-Attr $result "nssm_error_cmd" $cmd
+ Set-Attr $result "nssm_error_log" "$results"
+ Throw "Error setting stdout file for service ""$name"""
+ }
+
+ $result.changed = $true
+ }
+
+ $cmd = "nssm get ""$name"" AppStderr"
+ $results = invoke-expression $cmd
+
+ if ($LastExitCode -ne 0)
+ {
+ Set-Attr $result "nssm_error_cmd" $cmd
+ Set-Attr $result "nssm_error_log" "$results"
+ Throw "Error retrieving existing stderr file for service ""$name"""
+ }
+
+ if ($results -ne $stderr)
+ {
+ if (!$stderr)
+ {
+ $cmd = "nssm reset ""$name"" AppStderr"
+ $results = invoke-expression $cmd
+
+ if ($LastExitCode -ne 0)
+ {
+ Set-Attr $result "nssm_error_cmd" $cmd
+ Set-Attr $result "nssm_error_log" "$results"
+ Throw "Error clearing stderr file setting for service ""$name"""
+ }
+ } else {
+ $cmd = "nssm set ""$name"" AppStderr $stderr"
+ $results = invoke-expression $cmd
+
+ if ($LastExitCode -ne 0)
+ {
+ Set-Attr $result "nssm_error_cmd" $cmd
+ Set-Attr $result "nssm_error_log" "$results"
+ Throw "Error setting stderr file for service ""$name"""
+ }
+ }
+
+ $result.changed = $true
+ }
+
+ ###
+ # Setup file rotation so we don't accidentally consume too much disk
+ ###
+
+ #set files to overwrite
+ $cmd = "nssm set ""$name"" AppStdoutCreationDisposition 2"
+ $results = invoke-expression $cmd
+
+ $cmd = "nssm set ""$name"" AppStderrCreationDisposition 2"
+ $results = invoke-expression $cmd
+
+ #enable file rotation
+ $cmd = "nssm set ""$name"" AppRotateFiles 1"
+ $results = invoke-expression $cmd
+
+ #don't rotate until the service restarts
+ $cmd = "nssm set ""$name"" AppRotateOnline 0"
+ $results = invoke-expression $cmd
+
+ #both of the below conditions must be met before rotation will happen
+ #minimum age before rotating
+ $cmd = "nssm set ""$name"" AppRotateSeconds 86400"
+ $results = invoke-expression $cmd
+
+ #minimum size before rotating
+ $cmd = "nssm set ""$name"" AppRotateBytes 104858"
+ $results = invoke-expression $cmd
+}
+
+Function Nssm-Update-Credentials
+{
+ [CmdletBinding()]
+ param(
+ [Parameter(Mandatory=$true)]
+ [string]$name,
+ [Parameter(Mandatory=$false)]
+ [string]$user,
+ [Parameter(Mandatory=$false)]
+ [string]$password
+ )
+
+ $cmd = "nssm get ""$name"" ObjectName"
+ $results = invoke-expression $cmd
+
+ if ($LastExitCode -ne 0)
+ {
+ Set-Attr $result "nssm_error_cmd" $cmd
+ Set-Attr $result "nssm_error_log" "$results"
+ Throw "Error updating credentials for service ""$name"""
+ }
+
+ if ($user) {
+ if (!$password) {
+ Throw "User without password is informed for service ""$name"""
+ }
+ else {
+ $fullUser = $user
+ If (-Not($user.contains("@")) -And ($user.Split("\").count -eq 1)) {
+ $fullUser = ".\" + $user
+ }
+
+ If ($results -ne $fullUser) {
+ $cmd = "nssm set ""$name"" ObjectName $fullUser $password"
+ $results = invoke-expression $cmd
+
+ if ($LastExitCode -ne 0)
+ {
+ Set-Attr $result "nssm_error_cmd" $cmd
+ Set-Attr $result "nssm_error_log" "$results"
+ Throw "Error updating credentials for service ""$name"""
+ }
+
+ $result.changed = $true
+ }
+ }
+ }
+}
+
+Function Nssm-Update-Dependencies
+{
+ [CmdletBinding()]
+ param(
+ [Parameter(Mandatory=$true)]
+ [string]$name,
+ [Parameter(Mandatory=$false)]
+ [string]$dependencies
+ )
+
+ $cmd = "nssm get ""$name"" DependOnService"
+ $results = invoke-expression $cmd
+
+ if ($LastExitCode -ne 0)
+ {
+ Set-Attr $result "nssm_error_cmd" $cmd
+ Set-Attr $result "nssm_error_log" "$results"
+ Throw "Error updating dependencies for service ""$name"""
+ }
+
+ If (($dependencies) -and ($results.Tolower() -ne $dependencies.Tolower())) {
+ $cmd = "nssm set ""$name"" DependOnService $dependencies"
+ $results = invoke-expression $cmd
+
+ if ($LastExitCode -ne 0)
+ {
+ Set-Attr $result "nssm_error_cmd" $cmd
+ Set-Attr $result "nssm_error_log" "$results"
+ Throw "Error updating dependencies for service ""$name"""
+ }
+
+ $result.changed = $true
+ }
+}
+
+Function Nssm-Update-StartMode
+{
+ [CmdletBinding()]
+ param(
+ [Parameter(Mandatory=$true)]
+ [string]$name,
+ [Parameter(Mandatory=$true)]
+ [string]$mode
+ )
+
+ $cmd = "nssm get ""$name"" Start"
+ $results = invoke-expression $cmd
+
+ if ($LastExitCode -ne 0)
+ {
+ Set-Attr $result "nssm_error_cmd" $cmd
+ Set-Attr $result "nssm_error_log" "$results"
+ Throw "Error updating start mode for service ""$name"""
+ }
+
+ $modes=@{"auto" = "SERVICE_AUTO_START"; "manual" = "SERVICE_DEMAND_START"; "disabled" = "SERVICE_DISABLED"}
+ $mappedMode = $modes.$mode
+ if ($mappedMode -ne $results) {
+ $cmd = "nssm set ""$name"" Start $mappedMode"
+ $results = invoke-expression $cmd
+
+ if ($LastExitCode -ne 0)
+ {
+ Set-Attr $result "nssm_error_cmd" $cmd
+ Set-Attr $result "nssm_error_log" "$results"
+ Throw "Error updating start mode for service ""$name"""
+ }
+
+ $result.changed = $true
+ }
+}
+
+Function Nssm-Get-Status
+{
+ [CmdletBinding()]
+ param(
+ [Parameter(Mandatory=$true)]
+ [string]$name
+ )
+
+ $cmd = "nssm status ""$name"""
+ $results = invoke-expression $cmd
+
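+ # The unary comma wraps $results in a one-element array so multi-line status
+ # output is returned as-is instead of being unrolled by the pipeline.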
+ return ,$results
+}
+
+Function Nssm-Start
+{
+ [CmdletBinding()]
+ param(
+ [Parameter(Mandatory=$true)]
+ [string]$name
+ )
+
+ $currentStatus = Nssm-Get-Status -name $name
+
+ if ($LastExitCode -ne 0)
+ {
+ Set-Attr $result "nssm_error_cmd" $cmd
+ Set-Attr $result "nssm_error_log" "$results"
+ Throw "Error starting service ""$name"""
+ }
+
+ switch ($currentStatus)
+ {
+ "SERVICE_RUNNING" { <# Nothing to do #> }
+ "SERVICE_STOPPED" { Nssm-Start-Service-Command -name $name }
+
+ "SERVICE_CONTINUE_PENDING" { Nssm-Stop-Service-Command -name $name; Nssm-Start-Service-Command -name $name }
+ "SERVICE_PAUSE_PENDING" { Nssm-Stop-Service-Command -name $name; Nssm-Start-Service-Command -name $name }
+ "SERVICE_PAUSED" { Nssm-Stop-Service-Command -name $name; Nssm-Start-Service-Command -name $name }
+ "SERVICE_START_PENDING" { Nssm-Stop-Service-Command -name $name; Nssm-Start-Service-Command -name $name }
+ "SERVICE_STOP_PENDING" { Nssm-Stop-Service-Command -name $name; Nssm-Start-Service-Command -name $name }
+ }
+}
+
+Function Nssm-Start-Service-Command
+{
+ [CmdletBinding()]
+ param(
+ [Parameter(Mandatory=$true)]
+ [string]$name
+ )
+
+ $cmd = "nssm start ""$name"""
+
+ $results = invoke-expression $cmd
+
+ if ($LastExitCode -ne 0)
+ {
+ Set-Attr $result "nssm_error_cmd" $cmd
+ Set-Attr $result "nssm_error_log" "$results"
+ Throw "Error starting service ""$name"""
+ }
+
+ $result.changed = $true
+}
+
+Function Nssm-Stop-Service-Command
+{
+ [CmdletBinding()]
+ param(
+ [Parameter(Mandatory=$true)]
+ [string]$name
+ )
+
+ $cmd = "nssm stop ""$name"""
+
+ $results = invoke-expression $cmd
+
+ if ($LastExitCode -ne 0)
+ {
+ Set-Attr $result "nssm_error_cmd" $cmd
+ Set-Attr $result "nssm_error_log" "$results"
+ Throw "Error stopping service ""$name"""
+ }
+
+ $result.changed = $true
+}
+
+Function Nssm-Stop
+{
+ [CmdletBinding()]
+ param(
+ [Parameter(Mandatory=$true)]
+ [string]$name
+ )
+
+ $currentStatus = Nssm-Get-Status -name $name
+
+ if ($LastExitCode -ne 0)
+ {
+ Set-Attr $result "nssm_error_cmd" $cmd
+ Set-Attr $result "nssm_error_log" "$results"
+ Throw "Error stopping service ""$name"""
+ }
+
+ if ($currentStatus -ne "SERVICE_STOPPED")
+ {
+ $cmd = "nssm stop ""$name"""
+
+ $results = invoke-expression $cmd
+
+ if ($LastExitCode -ne 0)
+ {
+ Set-Attr $result "nssm_error_cmd" $cmd
+ Set-Attr $result "nssm_error_log" "$results"
+ Throw "Error stopping service ""$name"""
+ }
+
+ $result.changed = $true
+ }
+}
+
+Function Nssm-Restart
+{
+ [CmdletBinding()]
+ param(
+ [Parameter(Mandatory=$true)]
+ [string]$name
+ )
+
+ Nssm-Stop-Service-Command -name $name
+ Nssm-Start-Service-Command -name $name
+}
+
+Function NssmProcedure
+{
+ Nssm-Install -name $name -application $application
+ Nssm-Update-AppParameters -name $name -appParameters $appParameters
+ Nssm-Set-Output-Files -name $name -stdout $stdoutFile -stderr $stderrFile
+ Nssm-Update-Dependencies -name $name -dependencies $dependencies
+ Nssm-Update-Credentials -name $name -user $user -password $password
+ Nssm-Update-StartMode -name $name -mode $startMode
+}
+
+Try
+{
+ switch ($state)
+ {
+ "absent" { Nssm-Remove -name $name }
+ "present" {
+ NssmProcedure
+ }
+ "started" {
+ NssmProcedure
+ Nssm-Start -name $name
+ }
+ "stopped" {
+ NssmProcedure
+ Nssm-Stop -name $name
+ }
+ "restarted" {
+ NssmProcedure
+ Nssm-Restart -name $name
+ }
+ }
+
+ Exit-Json $result;
+}
+Catch
+{
+ Fail-Json $result $_.Exception.Message
+}
diff --git a/lib/ansible/modules/extras/windows/win_nssm.py b/lib/ansible/modules/extras/windows/win_nssm.py
new file mode 100644
index 0000000000..c0a4332cc3
--- /dev/null
+++ b/lib/ansible/modules/extras/windows/win_nssm.py
@@ -0,0 +1,174 @@
+#!/usr/bin/python
+# -*- coding: utf-8 -*-
+
+# (c) 2015, Heyo
+#
+# This file is part of Ansible
+#
+# Ansible is free software: you can redistribute it and/or modify
+# it under the terms of the GNU General Public License as published by
+# the Free Software Foundation, either version 3 of the License, or
+# (at your option) any later version.
+#
+# Ansible is distributed in the hope that it will be useful,
+# but WITHOUT ANY WARRANTY; without even the implied warranty of
+# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+# GNU General Public License for more details.
+#
+# You should have received a copy of the GNU General Public License
+# along with Ansible. If not, see <http://www.gnu.org/licenses/>.
+
+# this is a windows documentation stub. actual code lives in the .ps1
+# file of the same name
+
+DOCUMENTATION = '''
+---
+module: win_nssm
+version_added: "2.0"
+short_description: NSSM - the Non-Sucking Service Manager
+description:
+ - nssm is a service helper which doesn't suck. See https://nssm.cc/ for more information.
+requirements:
+ - "nssm >= 2.24.0 # (install via win_chocolatey) win_chocolatey: name=nssm"
+options:
+ name:
+ description:
+ - Name of the service to operate on
+ required: true
+ state:
+ description:
+ - State of the service on the system
+ - Note that NSSM actions like "pause", "continue", "rotate" do not fit the declarative style of Ansible, so these should be implemented via the Ansible command module.
+ required: false
+ choices:
+ - present
+ - started
+ - stopped
+ - restarted
+ - absent
+ default: started
+ application:
+ description:
+ - The application binary to run as a service
+ - "Specify this whenever the service may need to be installed (state: present, started, stopped, restarted)"
+ - "Note that the application name must look like the following, if the directory includes spaces:"
+ - 'nssm install service "c:\\Program Files\\app.exe\\" "C:\\Path with spaces\\"'
+ - "See commit 0b386fc1984ab74ee59b7bed14b7e8f57212c22b in the nssm.git project for more info (https://git.nssm.cc/?p=nssm.git;a=commit;h=0b386fc1984ab74ee59b7bed14b7e8f57212c22b)"
+ required: false
+ default: null
+ stdout_file:
+ description:
+ - Path to receive output
+ required: false
+ default: null
+ stderr_file:
+ description:
+ - Path to receive error output
+ required: false
+ default: null
+ app_parameters:
+ description:
+ - Parameters to be passed to the application when it starts
+ required: false
+ default: null
+ dependencies:
+ description:
+ - Service dependencies that have to be started to trigger startup, separated by commas.
+ required: false
+ default: null
+ user:
+ description:
+ - User to be used for service startup
+ required: false
+ default: null
+ password:
+ description:
+ - Password to be used for service startup
+ required: false
+ default: null
+ start_mode:
+ description:
+ - If C(auto) is selected, the service will start at bootup. C(manual) means that the service will start only when another service needs it. C(disabled) means that the service will stay off, regardless if it is needed or not.
+ required: false
+ default: auto
+ choices:
+ - auto
+ - manual
+ - disabled
+author:
+ - "Adam Keech (@smadam813)"
+ - "George Frank (@georgefrank)"
+ - "Hans-Joachim Kliemeck (@h0nIg)"
+'''
+
+EXAMPLES = '''
+# Install and start the foo service
+- win_nssm:
+ name: foo
+ application: C:\\windows\\foo.exe
+
+# Install and start the foo service with a key-value pair argument
+# This will yield the following command: C:\\windows\\foo.exe bar "true"
+- win_nssm:
+ name: foo
+ application: C:\\windows\\foo.exe
+ app_parameters:
+ bar: true
+
+# Install and start the foo service with a key-value pair argument, where the argument needs to start with a dash
+# This will yield the following command: C:\\windows\\foo.exe -bar "true"
+- win_nssm:
+ name: foo
+ application: C:\\windows\\foo.exe
+ app_parameters:
+ "-bar": true
+
+# Install and start the foo service with a single parameter
+# This will yield the following command: C:\\windows\\foo.exe bar
+- win_nssm:
+ name: foo
+ application: C:\\windows\\foo.exe
+ app_parameters:
+ _: bar
+
+# Install and start the foo service with a mix of single params and key-value pairs
+# This will yield the following command: C:\\windows\\foo.exe bar -file output.bat
+- win_nssm:
+ name: foo
+ application: C:\\windows\\foo.exe
+ app_parameters:
+ _: bar
+ "-file": "output.bat"
+
+# Install and start the foo service, redirecting stdout and stderr to the same file
+- win_nssm:
+ name: foo
+ application: C:\\windows\\foo.exe
+ stdout_file: C:\\windows\\foo.log
+ stderr_file: C:\\windows\\foo.log
+
+# Install and start the foo service, but wait for dependencies tcpip and adf
+- win_nssm:
+ name: foo
+ application: C:\\windows\\foo.exe
+ dependencies: 'adf,tcpip'
+
+# Install and start the foo service with a dedicated user
+- win_nssm:
+ name: foo
+ application: C:\\windows\\foo.exe
+ user: foouser
+ password: secret
+
+# Install the foo service but do not start it automatically
+- win_nssm:
+ name: foo
+ application: C:\\windows\\foo.exe
+ state: present
+ start_mode: manual
+
+# Remove the foo service
+- win_nssm:
+ name: foo
+ state: absent
+'''
diff --git a/lib/ansible/modules/extras/windows/win_owner.ps1 b/lib/ansible/modules/extras/windows/win_owner.ps1
new file mode 100644
index 0000000000..076ab84605
--- /dev/null
+++ b/lib/ansible/modules/extras/windows/win_owner.ps1
@@ -0,0 +1,136 @@
+#!powershell
+# This file is part of Ansible
+#
+# Copyright 2015, Hans-Joachim Kliemeck <git@kliemeck.de>
+#
+# Ansible is free software: you can redistribute it and/or modify
+# it under the terms of the GNU General Public License as published by
+# the Free Software Foundation, either version 3 of the License, or
+# (at your option) any later version.
+#
+# Ansible is distributed in the hope that it will be useful,
+# but WITHOUT ANY WARRANTY; without even the implied warranty of
+# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+# GNU General Public License for more details.
+#
+# You should have received a copy of the GNU General Public License
+# along with Ansible. If not, see <http://www.gnu.org/licenses/>.
+
+# WANT_JSON
+# POWERSHELL_COMMON
+
+#Functions
+Function UserSearch
+{
+ Param ([string]$accountName)
+ #Check if there's a realm specified
+
+ $searchDomain = $false
+ $searchDomainUPN = $false
+ if ($accountName.Split("\").count -gt 1)
+ {
+ if ($accountName.Split("\")[0] -ne $env:COMPUTERNAME)
+ {
+ $searchDomain = $true
+ $accountName = $accountName.split("\")[1]
+ }
+ }
+ Elseif ($accountName.contains("@"))
+ {
+ $searchDomain = $true
+ $searchDomainUPN = $true
+ }
+ Else
+ {
+ #Default to local user account
+ $accountName = $env:COMPUTERNAME + "\" + $accountName
+ }
+
+ if ($searchDomain -eq $false)
+ {
+ # do not use Win32_UserAccount, because e.g. SYSTEM (BUILTIN\SYSTEM or COMPUTERNAME\SYSTEM) will not be listed. On Win32_Account, groups will be listed too
+ $localaccount = get-wmiobject -class "Win32_Account" -namespace "root\CIMV2" -filter "(LocalAccount = True)" | where {$_.Caption -eq $accountName}
+ if ($localaccount)
+ {
+ return $localaccount.SID
+ }
+ }
+ Else
+ {
+ #Search by samaccountname
+ $Searcher = [adsisearcher]""
+
+ If ($searchDomainUPN -eq $false) {
+ $Searcher.Filter = "sAMAccountName=$($accountName)"
+ }
+ Else {
+ $Searcher.Filter = "userPrincipalName=$($accountName)"
+ }
+
+ $result = $Searcher.FindOne()
+ if ($result)
+ {
+ $user = $result.GetDirectoryEntry()
+
+ # get binary SID from AD account
+ $binarySID = $user.ObjectSid.Value
+
+ # convert to string SID
+ return (New-Object System.Security.Principal.SecurityIdentifier($binarySID,0)).Value
+ }
+ }
+}
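+
+# For illustration only (not executed): UserSearch "apache" resolves against local
+# accounts (COMPUTERNAME\apache), while UserSearch "DOMAIN\apache" or
+# "apache@domain.local" queries Active Directory; either way a SID string such as
+# "S-1-5-21-..." is returned, or $null when nothing matches.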
+
+$params = Parse-Args $args;
+
+$result = New-Object PSObject;
+Set-Attr $result "changed" $false;
+
+$path = Get-Attr $params "path" -failifempty $true
+$user = Get-Attr $params "user" -failifempty $true
+$recurse = Get-Attr $params "recurse" "no" -validateSet "no","yes" -resultobj $result
+$recurse = $recurse | ConvertTo-Bool
+
+If (-Not (Test-Path -Path $path)) {
+ Fail-Json $result "$path file or directory does not exist on the host"
+}
+
+# Test that the user/group is resolvable on the local machine
+$sid = UserSearch -AccountName ($user)
+if (!$sid)
+{
+ Fail-Json $result "$user is not a valid user or group on the host machine or domain"
+}
+
+Try {
+ $objUser = New-Object System.Security.Principal.SecurityIdentifier($sid)
+
+ $file = Get-Item -Path $path
+ $acl = Get-Acl $file.FullName
+
+ If ($acl.getOwner([System.Security.Principal.SecurityIdentifier]) -ne $objUser) {
+ $acl.setOwner($objUser)
+ Set-Acl $file.FullName $acl
+
+ Set-Attr $result "changed" $true;
+ }
+
+ If ($recurse) {
+ $files = Get-ChildItem -Path $path -Force -Recurse
+ ForEach($file in $files){
+ $acl = Get-Acl $file.FullName
+
+ If ($acl.getOwner([System.Security.Principal.SecurityIdentifier]) -ne $objUser) {
+ $acl.setOwner($objUser)
+ Set-Acl $file.FullName $acl
+
+ Set-Attr $result "changed" $true;
+ }
+ }
+ }
+}
+Catch {
+ Fail-Json $result "an error occured when attempting to change owner on $path for $user"
+}
+
+Exit-Json $result
diff --git a/lib/ansible/modules/extras/windows/win_owner.py b/lib/ansible/modules/extras/windows/win_owner.py
new file mode 100644
index 0000000000..1b16c1b727
--- /dev/null
+++ b/lib/ansible/modules/extras/windows/win_owner.py
@@ -0,0 +1,69 @@
+#!/usr/bin/python
+# -*- coding: utf-8 -*-
+
+# Copyright 2015, Hans-Joachim Kliemeck <git@kliemeck.de>
+#
+# This file is part of Ansible
+#
+# Ansible is free software: you can redistribute it and/or modify
+# it under the terms of the GNU General Public License as published by
+# the Free Software Foundation, either version 3 of the License, or
+# (at your option) any later version.
+#
+# Ansible is distributed in the hope that it will be useful,
+# but WITHOUT ANY WARRANTY; without even the implied warranty of
+# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+# GNU General Public License for more details.
+#
+# You should have received a copy of the GNU General Public License
+# along with Ansible. If not, see <http://www.gnu.org/licenses/>.
+
+# this is a windows documentation stub. actual code lives in the .ps1
+# file of the same name
+
+DOCUMENTATION = '''
+---
+module: win_owner
+version_added: "2.1"
+short_description: Set owner of files or directories
+description:
+ - Set owner of files or directories
+options:
+ path:
+ description:
+ - Path to the file or directory whose owner will be changed
+ required: true
+ user:
+ description:
+ - Name of the user or group that should become the owner
+ required: true
+ recurse:
+ description:
+ - Indicates if the owner should be changed recursively
+ required: false
+ choices:
+ - no
+ - yes
+ default: no
+author: Hans-Joachim Kliemeck (@h0nIg)
+'''
+
+EXAMPLES = '''
+# Playbook example
+---
+- name: Change owner of Path
+ win_owner:
+ path: 'C:\\apache\\'
+ user: apache
+ recurse: yes
+
+- name: Set the owner of root directory
+ win_owner:
+ path: 'C:\\apache\\'
+ user: SYSTEM
+ recurse: no
+'''
+
+RETURN = '''
+
+'''
\ No newline at end of file
diff --git a/lib/ansible/modules/extras/windows/win_package.ps1 b/lib/ansible/modules/extras/windows/win_package.ps1
new file mode 100644
index 0000000000..544c366086
--- /dev/null
+++ b/lib/ansible/modules/extras/windows/win_package.ps1
@@ -0,0 +1,1326 @@
+#!powershell
+# (c) 2014, Trond Hindenes <trond@hindenes.com>, and others
+#
+# This file is part of Ansible
+#
+# Ansible is free software: you can redistribute it and/or modify
+# it under the terms of the GNU General Public License as published by
+# the Free Software Foundation, either version 3 of the License, or
+# (at your option) any later version.
+#
+# Ansible is distributed in the hope that it will be useful,
+# but WITHOUT ANY WARRANTY; without even the implied warranty of
+# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+# GNU General Public License for more details.
+#
+# You should have received a copy of the GNU General Public License
+# along with Ansible. If not, see <http://www.gnu.org/licenses/>.
+
+# WANT_JSON
+# POWERSHELL_COMMON
+
+#region DSC
+
+data LocalizedData
+{
+ # culture="en-US"
+ # TODO: Support WhatIf
+ ConvertFrom-StringData @'
+InvalidIdentifyingNumber=The specified IdentifyingNumber ({0}) is not a valid Guid
+InvalidPath=The specified Path ({0}) is not in a valid format. Valid formats are local paths, UNC, and HTTP
+InvalidNameOrId=The specified Name ({0}) and IdentifyingNumber ({1}) do not match Name ({2}) and IdentifyingNumber ({3}) in the MSI file
+NeedsMoreInfo=Either Name or ProductId is required
+InvalidBinaryType=The specified Path ({0}) does not appear to specify an EXE or MSI file and as such is not supported
+CouldNotOpenLog=The specified LogPath ({0}) could not be opened
+CouldNotStartProcess=The process {0} could not be started
+UnexpectedReturnCode=The return code {0} was not expected. Configuration is likely not correct
+PathDoesNotExist=The given Path ({0}) could not be found
+CouldNotOpenDestFile=Could not open the file {0} for writing
+CouldNotGetHttpStream=Could not get the {0} stream for file {1}
+ErrorCopyingDataToFile=Encountered error while writing the contents of {0} to {1}
+PackageConfigurationComplete=Package configuration finished
+PackageConfigurationStarting=Package configuration starting
+InstalledPackage=Installed package
+UninstalledPackage=Uninstalled package
+NoChangeRequired=Package found in desired state, no action required
+RemoveExistingLogFile=Remove existing log file
+CreateLogFile=Create log file
+MountSharePath=Mount share to get media
+DownloadHTTPFile=Download the media over HTTP or HTTPS
+StartingProcessMessage=Starting process {0} with arguments {1}
+RemoveDownloadedFile=Remove the downloaded file
+PackageInstalled=Package has been installed
+PackageUninstalled=Package has been uninstalled
+MachineRequiresReboot=The machine requires a reboot
+PackageDoesNotAppearInstalled=The package {0} is not installed
+PackageAppearsInstalled=The package {0} is already installed
+PostValidationError=Package from {0} was installed, but the specified ProductId and/or Name does not match package details
+'@
+}
+
+$Debug = $true
+Function Trace-Message
+{
+ param([string] $Message)
+ if($Debug)
+ {
+ Write-Verbose $Message
+ }
+}
+
+$CacheLocation = "$env:ProgramData\Microsoft\Windows\PowerShell\Configuration\BuiltinProvCache\MSFT_PackageResource"
+
+Function Throw-InvalidArgumentException
+{
+ param(
+ [string] $Message,
+ [string] $ParamName
+ )
+
+ $exception = new-object System.ArgumentException $Message,$ParamName
+ $errorRecord = New-Object System.Management.Automation.ErrorRecord $exception,$ParamName,"InvalidArgument",$null
+ throw $errorRecord
+}
+
+Function Throw-InvalidNameOrIdException
+{
+ param(
+ [string] $Message
+ )
+
+ $exception = new-object System.ArgumentException $Message
+ $errorRecord = New-Object System.Management.Automation.ErrorRecord $exception,"NameOrIdNotInMSI","InvalidArgument",$null
+ throw $errorRecord
+}
+
+Function Throw-TerminatingError
+{
+ param(
+ [string] $Message,
+ [System.Management.Automation.ErrorRecord] $ErrorRecord
+ )
+
+ if ($errorRecord)
+ {
+ $exception = new-object "System.InvalidOperationException" $Message,$ErrorRecord.Exception
+ }
+ Else
+ {
+ $exception = new-object "System.InvalidOperationException" $Message
+ }
+
+ $errorRecord = New-Object System.Management.Automation.ErrorRecord $exception,"MachineStateIncorrect","InvalidOperation",$null
+ throw $errorRecord
+}
+
+Function Get-RegistryValueIgnoreError
+{
+ param
+ (
+ [parameter(Mandatory = $true)]
+ [Microsoft.Win32.RegistryHive]
+ $RegistryHive,
+
+ [parameter(Mandatory = $true)]
+ [System.String]
+ $Key,
+
+ [parameter(Mandatory = $true)]
+ [System.String]
+ $Value,
+
+ [parameter(Mandatory = $true)]
+ [Microsoft.Win32.RegistryView]
+ $RegistryView
+ )
+
+ try
+ {
+ $baseKey = [Microsoft.Win32.RegistryKey]::OpenBaseKey($RegistryHive, $RegistryView)
+ $subKey = $baseKey.OpenSubKey($Key)
+ if($subKey -ne $null)
+ {
+ return $subKey.GetValue($Value)
+ }
+ }
+ catch
+ {
+ $exceptionText = ($_ | Out-String).Trim()
+ Write-Verbose "Exception occured in Get-RegistryValueIgnoreError: $exceptionText"
+ }
+ return $null
+}
+
+Function Validate-StandardArguments
+{
+ param(
+ $Path,
+ $ProductId,
+ $Name
+ )
+
+ Trace-Message "Validate-StandardArguments, Path was $Path"
+ $uri = $null
+ try
+ {
+ $uri = [uri] $Path
+ }
+ catch
+ {
+ Throw-InvalidArgumentException ($LocalizedData.InvalidPath -f $Path) "Path"
+ }
+
+ if(-not @("file", "http", "https") -contains $uri.Scheme)
+ {
+ Trace-Message "The uri scheme was $uri.Scheme"
+ Throw-InvalidArgumentException ($LocalizedData.InvalidPath -f $Path) "Path"
+ }
+
+ $pathExt = [System.IO.Path]::GetExtension($Path)
+ Trace-Message "The path extension was $pathExt"
+ if(-not @(".msi",".exe") -contains $pathExt.ToLower())
+ {
+ Throw-InvalidArgumentException ($LocalizedData.InvalidBinaryType -f $Path) "Path"
+ }
+
+ $identifyingNumber = $null
+ if(-not $Name -and -not $ProductId)
+ {
+ #It's a tossup here which argument to blame, so just pick ProductId to encourage customers to use the most efficient version
+ Throw-InvalidArgumentException ($LocalizedData.NeedsMoreInfo -f $Path) "ProductId"
+ }
+ elseif($ProductId)
+ {
+ try
+ {
+ Trace-Message "Parsing $ProductId as an identifyingNumber"
+ $TestGuid = [system.guid]::NewGuid()
+ #Check to see if the productid is a guid
+ if ([guid]::TryParse($ProductId, [ref]$TestGuid))
+ {
+ $identifyingNumber = "{{{0}}}" -f [Guid]::Parse($ProductId).ToString().ToUpper()
+ Trace-Message "Parsed $ProductId as $identifyingNumber (is guid)"
+ }
+ Else
+ {
+ $identifyingNumber = $ProductId
+ Trace-Message "Parsed $ProductId as $identifyingNumber (is not guid)"
+ }
+
+ Trace-Message "Parsed $ProductId as $identifyingNumber"
+ }
+ catch
+ {
+ Throw-InvalidArgumentException ($LocalizedData.InvalidIdentifyingNumber -f $ProductId) $ProductId
+ }
+ }
+
+ return $uri, $identifyingNumber
+}
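+
+# Sketch of the ProductId normalization above (GUID value is illustrative):
+#   Validate-StandardArguments "C:\setup.msi" "cf2bea3c-26ea-32f8-aa9b-331f7e34ba97" "Foo"
+# returns the parsed [uri] plus "{CF2BEA3C-26EA-32F8-AA9B-331F7E34BA97}"; GUIDs are
+# upper-cased and wrapped in braces so they match uninstall registry key names.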
+
+Function Get-ProductEntry
+{
+ param
+ (
+ [string] $Name,
+ [string] $IdentifyingNumber,
+ [string] $InstalledCheckRegKey,
+ [string] $InstalledCheckRegValueName,
+ [string] $InstalledCheckRegValueData
+ )
+
+ $uninstallKey = "HKLM:\SOFTWARE\Microsoft\Windows\CurrentVersion\Uninstall"
+ $uninstallKeyWow64 = "HKLM:\SOFTWARE\Wow6432Node\Microsoft\Windows\CurrentVersion\Uninstall"
+
+ if($IdentifyingNumber)
+ {
+ $keyLocation = "$uninstallKey\$identifyingNumber"
+ $item = Get-Item $keyLocation -EA SilentlyContinue
+ if(-not $item)
+ {
+ $keyLocation = "$uninstallKeyWow64\$identifyingNumber"
+ $item = Get-Item $keyLocation -EA SilentlyContinue
+ }
+
+ return $item
+ }
+
+ foreach($item in (Get-ChildItem -EA Ignore $uninstallKey, $uninstallKeyWow64))
+ {
+ if($Name -eq (Get-LocalizableRegKeyValue $item "DisplayName"))
+ {
+ return $item
+ }
+ }
+
+ if ($InstalledCheckRegKey -and $InstalledCheckRegValueName -and $InstalledCheckRegValueData)
+ {
+ $installValue = $null
+
+ #if 64bit OS, check 64bit registry view first
+ if ((Get-WmiObject -Class Win32_OperatingSystem -ComputerName "localhost" -ea 0).OSArchitecture -eq '64-bit')
+ {
+ $installValue = Get-RegistryValueIgnoreError LocalMachine "$InstalledCheckRegKey" "$InstalledCheckRegValueName" Registry64
+ }
+
+ if($installValue -eq $null)
+ {
+ $installValue = Get-RegistryValueIgnoreError LocalMachine "$InstalledCheckRegKey" "$InstalledCheckRegValueName" Registry32
+ }
+
+ if($installValue)
+ {
+ if($InstalledCheckRegValueData -and $installValue -eq $InstalledCheckRegValueData)
+ {
+ return @{
+ Installed = $true
+ }
+ }
+ }
+ }
+
+ return $null
+}
+
+function Test-TargetResource
+{
+ param
+ (
+ [ValidateSet("Present", "Absent")]
+ [string] $Ensure = "Present",
+
+ [parameter(Mandatory = $true)]
+ [AllowEmptyString()]
+ [string] $Name,
+
+ [parameter(Mandatory = $true)]
+ [ValidateNotNullOrEmpty()]
+ [string] $Path,
+
+ [parameter(Mandatory = $true)]
+ [AllowEmptyString()]
+ [string] $ProductId,
+
+ [string] $Arguments,
+
+ [pscredential] $Credential,
+
+ [int[]] $ReturnCode,
+
+ [string] $LogPath,
+
+ [pscredential] $RunAsCredential,
+
+ [string] $InstalledCheckRegKey,
+
+ [string] $InstalledCheckRegValueName,
+
+ [string] $InstalledCheckRegValueData
+ )
+
+ $uri, $identifyingNumber = Validate-StandardArguments $Path $ProductId $Name
+ $product = Get-ProductEntry $Name $identifyingNumber $InstalledCheckRegKey $InstalledCheckRegValueName $InstalledCheckRegValueData
+ Trace-Message "Ensure is $Ensure"
+ if($product)
+ {
+ Trace-Message "product found"
+ }
+ else
+ {
+ Trace-Message "product installation cannot be determined"
+ }
+ Trace-Message ("product as boolean is {0}" -f [boolean]$product)
+ $res = ($product -ne $null -and $Ensure -eq "Present") -or ($product -eq $null -and $Ensure -eq "Absent")
+
+ # install registry test overrides the product id test and there is no true product information
+ # when doing a lookup via registry key
+ if ($product -and $InstalledCheckRegKey -and $InstalledCheckRegValueName -and $InstalledCheckRegValueData)
+ {
+ Write-Verbose ($LocalizedData.PackageAppearsInstalled -f $Name)
+ }
+ else
+ {
+ if ($product -ne $null)
+ {
+ $name = Get-LocalizableRegKeyValue $product "DisplayName"
+ Write-Verbose ($LocalizedData.PackageAppearsInstalled -f $name)
+ }
+ else
+ {
+ $displayName = $null
+ if($Name)
+ {
+ $displayName = $Name
+ }
+ else
+ {
+ $displayName = $ProductId
+ }
+
+ Write-Verbose ($LocalizedData.PackageDoesNotAppearInstalled -f $displayName)
+ }
+
+ }
+
+ return $res
+}
+
+function Get-LocalizableRegKeyValue
+{
+ param(
+ [object] $RegKey,
+ [string] $ValueName
+ )
+
+ $res = $RegKey.GetValue("{0}_Localized" -f $ValueName)
+ if(-not $res)
+ {
+ $res = $RegKey.GetValue($ValueName)
+ }
+
+ return $res
+}
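+
+# For example, Get-LocalizableRegKeyValue $productKey "DisplayName" prefers a
+# "DisplayName_Localized" value and falls back to plain "DisplayName".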
+
+function Get-TargetResource
+{
+ param
+ (
+ [parameter(Mandatory = $true)]
+ [AllowEmptyString()]
+ [string] $Name,
+
+ [parameter(Mandatory = $true)]
+ [ValidateNotNullOrEmpty()]
+ [string] $Path,
+
+ [parameter(Mandatory = $true)]
+ [AllowEmptyString()]
+ [string] $ProductId,
+
+ [string] $InstalledCheckRegKey,
+
+ [string] $InstalledCheckRegValueName,
+
+ [string] $InstalledCheckRegValueData
+ )
+
+ #If the user gave the ProductId then we derive $identifyingNumber
+ $uri, $identifyingNumber = Validate-StandardArguments $Path $ProductId $Name
+
+ $localMsi = $uri.IsFile -and -not $uri.IsUnc
+
+ $product = Get-ProductEntry $Name $identifyingNumber $InstalledCheckRegKey $InstalledCheckRegValueName $InstalledCheckRegValueData
+
+ if(-not $product)
+ {
+ return @{
+ Ensure = "Absent"
+ Name = $Name
+ ProductId = $identifyingNumber
+ Installed = $false
+ InstalledCheckRegKey = $InstalledCheckRegKey
+ InstalledCheckRegValueName = $InstalledCheckRegValueName
+ InstalledCheckRegValueData = $InstalledCheckRegValueData
+ }
+ }
+
+ if ($InstalledCheckRegKey -and $InstalledCheckRegValueName -and $InstalledCheckRegValueData)
+ {
+ return @{
+ Ensure = "Present"
+ Name = $Name
+ ProductId = $identifyingNumber
+ Installed = $true
+ InstalledCheckRegKey = $InstalledCheckRegKey
+ InstalledCheckRegValueName = $InstalledCheckRegValueName
+ InstalledCheckRegValueData = $InstalledCheckRegValueData
+ }
+ }
+
+ #$identifyingNumber can still be null here (e.g. remote MSI with Name specified, local EXE)
+ #If the user gave a ProductId just pass it through, otherwise fill it from the product
+ if(-not $identifyingNumber)
+ {
+ $identifyingNumber = Split-Path -Leaf $product.Name
+ }
+
+ $date = $product.GetValue("InstallDate")
+ if($date)
+ {
+ try
+ {
+ $date = "{0:d}" -f [DateTime]::ParseExact($date, "yyyyMMdd",[System.Globalization.CultureInfo]::CurrentCulture).Date
+ }
+ catch
+ {
+ $date = $null
+ }
+ }
+
+ $publisher = Get-LocalizableRegKeyValue $product "Publisher"
+ $size = $product.GetValue("EstimatedSize")
+ if($size)
+ {
+ $size = $size/1024
+ }
+
+ $version = $product.GetValue("DisplayVersion")
+ $description = $product.GetValue("Comments")
+ $name = Get-LocalizableRegKeyValue $product "DisplayName"
+ return @{
+ Ensure = "Present"
+ Name = $name
+ Path = $Path
+ InstalledOn = $date
+ ProductId = $identifyingNumber
+ Size = $size
+ Installed = $true
+ Version = $version
+ PackageDescription = $description
+ Publisher = $publisher
+ }
+}
+
+Function Get-MsiTools
+{
+ if($script:MsiTools)
+ {
+ return $script:MsiTools
+ }
+
+ $sig = @'
+ [DllImport("msi.dll", CharSet = CharSet.Unicode, PreserveSig = true, SetLastError = true, ExactSpelling = true)]
+ private static extern UInt32 MsiOpenPackageW(string szPackagePath, out IntPtr hProduct);
+
+ [DllImport("msi.dll", CharSet = CharSet.Unicode, PreserveSig = true, SetLastError = true, ExactSpelling = true)]
+ private static extern uint MsiCloseHandle(IntPtr hAny);
+
+ [DllImport("msi.dll", CharSet = CharSet.Unicode, PreserveSig = true, SetLastError = true, ExactSpelling = true)]
+ private static extern uint MsiGetPropertyW(IntPtr hAny, string name, StringBuilder buffer, ref int bufferLength);
+
+ private static string GetPackageProperty(string msi, string property)
+ {
+ IntPtr MsiHandle = IntPtr.Zero;
+ try
+ {
+ var res = MsiOpenPackageW(msi, out MsiHandle);
+ if (res != 0)
+ {
+ return null;
+ }
+
+ int length = 256;
+ var buffer = new StringBuilder(length);
+ res = MsiGetPropertyW(MsiHandle, property, buffer, ref length);
+ return buffer.ToString();
+ }
+ finally
+ {
+ if (MsiHandle != IntPtr.Zero)
+ {
+ MsiCloseHandle(MsiHandle);
+ }
+ }
+ }
+ public static string GetProductCode(string msi)
+ {
+ return GetPackageProperty(msi, "ProductCode");
+ }
+
+ public static string GetProductName(string msi)
+ {
+ return GetPackageProperty(msi, "ProductName");
+ }
+'@
+ $script:MsiTools = Add-Type -PassThru -Namespace Microsoft.Windows.DesiredStateConfiguration.PackageResource `
+ -Name MsiTools -Using System.Text -MemberDefinition $sig
+ return $script:MsiTools
+}
+
+
+Function Get-MsiProductEntry
+{
+ param
+ (
+ [string] $Path
+ )
+
+ $fileExtension = [System.IO.Path]::GetExtension($Path).ToLower()
+ if(-not (Test-Path -PathType Leaf $Path) -and ($fileExtension -ne ".msi"))
+ {
+ Throw-TerminatingError ($LocalizedData.PathDoesNotExist -f $Path)
+ }
+
+ $tools = Get-MsiTools
+
+ $pn = $tools::GetProductName($Path)
+
+ $pc = $tools::GetProductCode($Path)
+
+ return $pn,$pc
+}
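+
+# Illustrative usage (the path is hypothetical):
+#   $name, $code = Get-MsiProductEntry -Path "C:\temp\setup.msi"
+# $name holds the MSI ProductName property and $code the "{GUID}" ProductCode,
+# both read through the msi.dll P/Invoke wrappers compiled by Get-MsiTools.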
+
+
+function Set-TargetResource
+{
+ [CmdletBinding(SupportsShouldProcess=$true)]
+ param
+ (
+ [ValidateSet("Present", "Absent")]
+ [string] $Ensure = "Present",
+
+ [parameter(Mandatory = $true)]
+ [AllowEmptyString()]
+ [string] $Name,
+
+ [parameter(Mandatory = $true)]
+ [ValidateNotNullOrEmpty()]
+ [string] $Path,
+
+ [parameter(Mandatory = $true)]
+ [AllowEmptyString()]
+ [string] $ProductId,
+
+ [string] $Arguments,
+
+ [pscredential] $Credential,
+
+ [int[]] $ReturnCode,
+
+ [string] $LogPath,
+
+ [pscredential] $RunAsCredential,
+
+ [string] $InstalledCheckRegKey,
+
+ [string] $InstalledCheckRegValueName,
+
+ [string] $InstalledCheckRegValueData
+ )
+
+ $ErrorActionPreference = "Stop"
+
+ if((Test-TargetResource -Ensure $Ensure -Name $Name -Path $Path -ProductId $ProductId `
+ -InstalledCheckRegKey $InstalledCheckRegKey -InstalledCheckRegValueName $InstalledCheckRegValueName `
+ -InstalledCheckRegValueData $InstalledCheckRegValueData))
+ {
+ return
+ }
+
+ $uri, $identifyingNumber = Validate-StandardArguments $Path $ProductId $Name
+
+ #Path gets overwritten in the download code path. Retain the user's original Path in case the install succeeded
+ #but the named package wasn't present on the system afterward so we can give a better message
+ $OrigPath = $Path
+
+ Write-Verbose $LocalizedData.PackageConfigurationStarting
+ if(-not $ReturnCode)
+ {
+ $ReturnCode = @(0)
+ }
+
+ $logStream = $null
+ $psdrive = $null
+ $downloadedFileName = $null
+ try
+ {
+ $fileExtension = [System.IO.Path]::GetExtension($Path).ToLower()
+ if($LogPath)
+ {
+ try
+ {
+ if($fileExtension -eq ".msi")
+ {
+ #We want to pre-verify the path exists and is writable ahead of time
+ #even in the MSI case, as detecting WHY the MSI log doesn't exist would
+ #be rather problematic for the user
+ if((Test-Path $LogPath) -and $PSCmdlet.ShouldProcess($LocalizedData.RemoveExistingLogFile,$null,$null))
+ {
+ rm $LogPath
+ }
+
+ if($PSCmdlet.ShouldProcess($LocalizedData.CreateLogFile, $null, $null))
+ {
+ New-Item -Type File $LogPath | Out-Null
+ }
+ }
+ elseif($PSCmdlet.ShouldProcess($LocalizedData.CreateLogFile, $null, $null))
+ {
+ $logStream = new-object "System.IO.StreamWriter" $LogPath,$false
+ }
+ }
+ catch
+ {
+ Throw-TerminatingError ($LocalizedData.CouldNotOpenLog -f $LogPath) $_
+ }
+ }
+
+ #Download or mount file as necessary
+ if(-not ($fileExtension -eq ".msi" -and $Ensure -eq "Absent"))
+ {
+ if($uri.IsUnc -and $PSCmdlet.ShouldProcess($LocalizedData.MountSharePath, $null, $null))
+ {
+ $psdriveArgs = @{Name=([guid]::NewGuid());PSProvider="FileSystem";Root=(Split-Path $uri.LocalPath)}
+ if($Credential)
+ {
+ #We need to optionally include these and then splat the hash otherwise
+ #we pass a null for Credential which causes the cmdlet to pop a dialog up
+ $psdriveArgs["Credential"] = $Credential
+ }
+
+ $psdrive = New-PSDrive @psdriveArgs
+ $Path = Join-Path $psdrive.Root (Split-Path -Leaf $uri.LocalPath) #Necessary?
+ }
+ elseif(@("http", "https") -contains $uri.Scheme -and $Ensure -eq "Present" -and $PSCmdlet.ShouldProcess($LocalizedData.DownloadHTTPFile, $null, $null))
+ {
+ $scheme = $uri.Scheme
+ $outStream = $null
+ $responseStream = $null
+
+ try
+ {
+ Trace-Message "Creating cache location"
+
+ if(-not (Test-Path -PathType Container $CacheLocation))
+ {
+ mkdir $CacheLocation | Out-Null
+ }
+
+ $destName = Join-Path $CacheLocation (Split-Path -Leaf $uri.LocalPath)
+
+ Trace-Message "Need to download file from $scheme, destination will be $destName"
+
+ try
+ {
+ Trace-Message "Creating the destination cache file"
+ $outStream = New-Object System.IO.FileStream $destName, "Create"
+ }
+ catch
+ {
+ #Should never happen since we own the cache directory
+ Throw-TerminatingError ($LocalizedData.CouldNotOpenDestFile -f $destName) $_
+ }
+
+ try
+ {
+ Trace-Message "Creating the $scheme stream"
+ $request = [System.Net.WebRequest]::Create($uri)
+ Trace-Message "Setting default credential"
+ $request.Credentials = [System.Net.CredentialCache]::DefaultCredentials
+ if ($scheme -eq "http")
+ {
+ Trace-Message "Setting authentication level"
+ # default value is MutualAuthRequested, which applies to https scheme
+ $request.AuthenticationLevel = [System.Net.Security.AuthenticationLevel]::None
+ }
+ if ($scheme -eq "https")
+ {
+ Trace-Message "Ignoring bad certificates"
+ $request.ServerCertificateValidationCallBack = {$true}
+ }
+ Trace-Message "Getting the $scheme response stream"
+ $responseStream = (([System.Net.HttpWebRequest]$request).GetResponse()).GetResponseStream()
+ }
+ catch
+ {
+ Trace-Message ("Error: " + ($_ | Out-String))
+ Throw-TerminatingError ($LocalizedData.CouldNotGetHttpStream -f $scheme, $Path) $_
+ }
+
+ try
+ {
+ Trace-Message "Copying the $scheme stream bytes to the disk cache"
+ $responseStream.CopyTo($outStream)
+ $responseStream.Flush()
+ $outStream.Flush()
+ }
+ catch
+ {
+ Throw-TerminatingError ($LocalizedData.ErrorCopyingDataToFile -f $Path,$destName) $_
+ }
+ }
+ finally
+ {
+ if($outStream)
+ {
+ $outStream.Close()
+ }
+
+ if($responseStream)
+ {
+ $responseStream.Close()
+ }
+ }
+ Trace-Message "Redirecting package path to cache file location"
+ $Path = $downloadedFileName = $destName
+ }
+ }
+
+ #At this point the Path ought to be valid unless it's an MSI uninstall case
+ if(-not (Test-Path -PathType Leaf $Path) -and -not ($Ensure -eq "Absent" -and $fileExtension -eq ".msi"))
+ {
+ Throw-TerminatingError ($LocalizedData.PathDoesNotExist -f $Path)
+ }
+
+ $startInfo = New-Object System.Diagnostics.ProcessStartInfo
+ $startInfo.UseShellExecute = $false #Necessary for I/O redirection and just generally a good idea
+ $process = New-Object System.Diagnostics.Process
+ $process.StartInfo = $startInfo
+ $errLogPath = $LogPath + ".err" #Concept only, will never touch disk
+ if($fileExtension -eq ".msi")
+ {
+ $startInfo.FileName = "$env:windir\system32\msiexec.exe"
+ if($Ensure -eq "Present")
+ {
+ # check if Msi package contains the ProductName and Code specified
+ <#
+ $pName,$pCode = Get-MsiProductEntry -Path $Path
+
+ if (
+ ( (-not [String]::IsNullOrEmpty($Name)) -and ($pName -ne $Name)) `
+ -or ( (-not [String]::IsNullOrEmpty($identifyingNumber)) -and ($identifyingNumber -ne $pCode))
+ )
+ {
+ Throw-InvalidNameOrIdException ($LocalizedData.InvalidNameOrId -f $Name,$identifyingNumber,$pName,$pCode)
+ }
+ #>
+
+ $startInfo.Arguments = '/i "{0}"' -f $Path
+ }
+ else
+ {
+ $product = Get-ProductEntry $Name $identifyingNumber
+ $id = Split-Path -Leaf $product.Name #We may have used the Name earlier, now we need the actual ID
+ $startInfo.Arguments = ("/x{0}" -f $id)
+ }
+
+ if($LogPath)
+ {
+ $startInfo.Arguments += ' /log "{0}"' -f $LogPath
+ }
+
+ $startInfo.Arguments += " /quiet"
+
+ if($Arguments)
+ {
+ $startInfo.Arguments += " " + $Arguments
+ }
+ }
+ else #EXE
+ {
+ Trace-Message "The binary is an EXE"
+ $startInfo.FileName = $Path
+ $startInfo.Arguments = $Arguments
+ if($LogPath)
+ {
+ Trace-Message "User has requested logging, need to attach event handlers to the process"
+ $startInfo.RedirectStandardError = $true
+ $startInfo.RedirectStandardOutput = $true
+ Register-ObjectEvent -InputObject $process -EventName "OutputDataReceived" -SourceIdentifier $LogPath
+ Register-ObjectEvent -InputObject $process -EventName "ErrorDataReceived" -SourceIdentifier $errLogPath
+ }
+ }
+
+ Trace-Message ("Starting {0} with {1}" -f $startInfo.FileName, $startInfo.Arguments)
+
+ if($PSCmdlet.ShouldProcess(($LocalizedData.StartingProcessMessage -f $startInfo.FileName, $startInfo.Arguments), $null, $null))
+ {
+ try
+ {
+ $exitCode = 0
+
+ if($PSBoundParameters.ContainsKey("RunAsCredential"))
+ {
+ CallPInvoke
+ [Source.NativeMethods]::CreateProcessAsUser("""" + $startInfo.FileName + """ " + $startInfo.Arguments, `
+ $RunAsCredential.GetNetworkCredential().Domain, $RunAsCredential.GetNetworkCredential().UserName, `
+ $RunAsCredential.GetNetworkCredential().Password, [ref] $exitCode)
+ }
+ else
+ {
+ $process.Start() | Out-Null
+
+ if($logStream) #Identical to $fileExtension -eq ".exe" -and $logPath
+ {
+ $process.BeginOutputReadLine();
+ $process.BeginErrorReadLine();
+ }
+
+ $process.WaitForExit()
+
+ if($process)
+ {
+ $exitCode = $process.ExitCode
+ }
+ }
+ }
+ catch
+ {
+ Throw-TerminatingError ($LocalizedData.CouldNotStartProcess -f $Path) $_
+ }
+
+
+ if($logStream)
+ {
+ #We have to re-mux these since they appear to us as different streams
+ #The underlying Win32 APIs would avoid this problem, as would constructing a script
+ #on the fly and executing it, but the former is highly problematic from PowerShell
+ #and the latter doesn't let us get the return code for UI-based EXEs
+ $outputEvents = Get-Event -SourceIdentifier $LogPath
+ $errorEvents = Get-Event -SourceIdentifier $errLogPath
+ $masterEvents = @() + $outputEvents + $errorEvents
+ $masterEvents = $masterEvents | Sort-Object -Property TimeGenerated
+
+ foreach($event in $masterEvents)
+ {
+ $logStream.Write($event.SourceEventArgs.Data);
+ }
+
+ Remove-Event -SourceIdentifier $LogPath
+ Remove-Event -SourceIdentifier $errLogPath
+ }
+
+ if(-not ($ReturnCode -contains $exitCode))
+ {
+ Throw-TerminatingError ($LocalizedData.UnexpectedReturnCode -f $exitCode.ToString())
+ }
+ }
+ }
+ finally
+ {
+ if($psdrive)
+ {
+ Remove-PSDrive -Force $psdrive
+ }
+
+ if($logStream)
+ {
+ $logStream.Dispose()
+ }
+ }
+
+ if($downloadedFileName -and $PSCmdlet.ShouldProcess($LocalizedData.RemoveDownloadedFile, $null, $null))
+ {
+ #This is deliberately not in the Finally block. We want to leave the downloaded file on disk
+ #in the error case as a debugging aid for the user
+ rm $downloadedFileName
+ }
+
+ $operationString = $LocalizedData.PackageUninstalled
+ if($Ensure -eq "Present")
+ {
+ $operationString = $LocalizedData.PackageInstalled
+ }
+
+ # Check if reboot is required, if so notify CA. The MSFT_ServerManagerTasks provider is missing on client SKUs
+ $featureData = invoke-wmimethod -EA Ignore -Name GetServerFeature -namespace root\microsoft\windows\servermanager -Class MSFT_ServerManagerTasks
+ $regData = Get-ItemProperty "HKLM:\SYSTEM\CurrentControlSet\Control\Session Manager" "PendingFileRenameOperations" -EA Ignore
+ if(($featureData -and $featureData.RequiresReboot) -or $regData)
+ {
+ Write-Verbose $LocalizedData.MachineRequiresReboot
+ $global:DSCMachineStatus = 1
+ }
+
+ if($Ensure -eq "Present")
+ {
+ $productEntry = Get-ProductEntry $Name $identifyingNumber $InstalledCheckRegKey $InstalledCheckRegValueName $InstalledCheckRegValueData
+ if(-not $productEntry)
+ {
+ Throw-TerminatingError ($LocalizedData.PostValidationError -f $OrigPath)
+ }
+ }
+
+ Write-Verbose $operationString
+ Write-Verbose $LocalizedData.PackageConfigurationComplete
+}
+
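+# CallPInvoke compiles the C# below so the module can launch the installer under
+# another account. The flow inside Source.NativeMethods.CreateProcessAsUser is:
+# LogonUser (batch logon) -> OpenProcessToken + AdjustTokenPrivileges (enable
+# SeIncreaseQuotaPrivilege) -> DuplicateTokenEx (primary token) ->
+# CreateProcessAsUser -> WaitForSingleObject -> GetExitCodeProcess.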
+function CallPInvoke
+{
+$script:ProgramSource = @"
+using System;
+using System.Collections.Generic;
+using System.Text;
+using System.Security;
+using System.Runtime.InteropServices;
+using System.Diagnostics;
+using System.Security.Principal;
+using System.ComponentModel;
+using System.IO;
+
+namespace Source
+{
+ [SuppressUnmanagedCodeSecurity]
+ public static class NativeMethods
+ {
+ //The following structs and enums are used by the various Win32 APIs used in the code below
+
+ [StructLayout(LayoutKind.Sequential)]
+ public struct STARTUPINFO
+ {
+ public Int32 cb;
+ public string lpReserved;
+ public string lpDesktop;
+ public string lpTitle;
+ public Int32 dwX;
+ public Int32 dwY;
+ public Int32 dwXSize;
+ public Int32 dwXCountChars;
+ public Int32 dwYCountChars;
+ public Int32 dwFillAttribute;
+ public Int32 dwFlags;
+ public Int16 wShowWindow;
+ public Int16 cbReserved2;
+ public IntPtr lpReserved2;
+ public IntPtr hStdInput;
+ public IntPtr hStdOutput;
+ public IntPtr hStdError;
+ }
+
+ [StructLayout(LayoutKind.Sequential)]
+ public struct PROCESS_INFORMATION
+ {
+ public IntPtr hProcess;
+ public IntPtr hThread;
+ public Int32 dwProcessID;
+ public Int32 dwThreadID;
+ }
+
+ [Flags]
+ public enum LogonType
+ {
+ LOGON32_LOGON_INTERACTIVE = 2,
+ LOGON32_LOGON_NETWORK = 3,
+ LOGON32_LOGON_BATCH = 4,
+ LOGON32_LOGON_SERVICE = 5,
+ LOGON32_LOGON_UNLOCK = 7,
+ LOGON32_LOGON_NETWORK_CLEARTEXT = 8,
+ LOGON32_LOGON_NEW_CREDENTIALS = 9
+ }
+
+ [Flags]
+ public enum LogonProvider
+ {
+ LOGON32_PROVIDER_DEFAULT = 0,
+ LOGON32_PROVIDER_WINNT35,
+ LOGON32_PROVIDER_WINNT40,
+ LOGON32_PROVIDER_WINNT50
+ }
+ [StructLayout(LayoutKind.Sequential)]
+ public struct SECURITY_ATTRIBUTES
+ {
+ public Int32 Length;
+ public IntPtr lpSecurityDescriptor;
+ public bool bInheritHandle;
+ }
+
+ public enum SECURITY_IMPERSONATION_LEVEL
+ {
+ SecurityAnonymous,
+ SecurityIdentification,
+ SecurityImpersonation,
+ SecurityDelegation
+ }
+
+ public enum TOKEN_TYPE
+ {
+ TokenPrimary = 1,
+ TokenImpersonation
+ }
+
+ [StructLayout(LayoutKind.Sequential, Pack = 1)]
+ internal struct TokPriv1Luid
+ {
+ public int Count;
+ public long Luid;
+ public int Attr;
+ }
+
+ public const int GENERIC_ALL_ACCESS = 0x10000000;
+ public const int CREATE_NO_WINDOW = 0x08000000;
+ internal const int SE_PRIVILEGE_ENABLED = 0x00000002;
+ internal const int TOKEN_QUERY = 0x00000008;
+ internal const int TOKEN_ADJUST_PRIVILEGES = 0x00000020;
+ internal const string SE_INCREASE_QUOTA = "SeIncreaseQuotaPrivilege";
+
+ [DllImport("kernel32.dll",
+ EntryPoint = "CloseHandle", SetLastError = true,
+ CharSet = CharSet.Auto, CallingConvention = CallingConvention.StdCall)]
+ public static extern bool CloseHandle(IntPtr handle);
+
+ [DllImport("advapi32.dll",
+ EntryPoint = "CreateProcessAsUser", SetLastError = true,
+ CharSet = CharSet.Ansi, CallingConvention = CallingConvention.StdCall)]
+ public static extern bool CreateProcessAsUser(
+ IntPtr hToken,
+ string lpApplicationName,
+ string lpCommandLine,
+ ref SECURITY_ATTRIBUTES lpProcessAttributes,
+ ref SECURITY_ATTRIBUTES lpThreadAttributes,
+ bool bInheritHandle,
+ Int32 dwCreationFlags,
+ IntPtr lpEnvironment,
+ string lpCurrentDirectory,
+ ref STARTUPINFO lpStartupInfo,
+ ref PROCESS_INFORMATION lpProcessInformation
+ );
+
+ [DllImport("advapi32.dll", EntryPoint = "DuplicateTokenEx")]
+ public static extern bool DuplicateTokenEx(
+ IntPtr hExistingToken,
+ Int32 dwDesiredAccess,
+ ref SECURITY_ATTRIBUTES lpThreadAttributes,
+ Int32 ImpersonationLevel,
+ Int32 dwTokenType,
+ ref IntPtr phNewToken
+ );
+
+ [DllImport("advapi32.dll", CharSet = CharSet.Unicode, SetLastError = true)]
+ public static extern Boolean LogonUser(
+ String lpszUserName,
+ String lpszDomain,
+ String lpszPassword,
+ LogonType dwLogonType,
+ LogonProvider dwLogonProvider,
+ out IntPtr phToken
+ );
+
+ [DllImport("advapi32.dll", ExactSpelling = true, SetLastError = true)]
+ internal static extern bool AdjustTokenPrivileges(
+ IntPtr htok,
+ bool disall,
+ ref TokPriv1Luid newst,
+ int len,
+ IntPtr prev,
+ IntPtr relen
+ );
+
+ [DllImport("kernel32.dll", ExactSpelling = true)]
+ internal static extern IntPtr GetCurrentProcess();
+
+ [DllImport("advapi32.dll", ExactSpelling = true, SetLastError = true)]
+ internal static extern bool OpenProcessToken(
+ IntPtr h,
+ int acc,
+ ref IntPtr phtok
+ );
+
+ [DllImport("kernel32.dll", ExactSpelling = true)]
+ internal static extern int WaitForSingleObject(
+ IntPtr h,
+ int milliseconds
+ );
+
+ [DllImport("kernel32.dll", ExactSpelling = true)]
+ internal static extern bool GetExitCodeProcess(
+ IntPtr h,
+ out int exitcode
+ );
+
+ [DllImport("advapi32.dll", SetLastError = true)]
+ internal static extern bool LookupPrivilegeValue(
+ string host,
+ string name,
+ ref long pluid
+ );
+
+ public static void CreateProcessAsUser(string strCommand, string strDomain, string strName, string strPassword, ref int ExitCode )
+ {
+ var hToken = IntPtr.Zero;
+ var hDupedToken = IntPtr.Zero;
+ TokPriv1Luid tp;
+ var pi = new PROCESS_INFORMATION();
+ var sa = new SECURITY_ATTRIBUTES();
+ sa.Length = Marshal.SizeOf(sa);
+ Boolean bResult = false;
+ try
+ {
+ bResult = LogonUser(
+ strName,
+ strDomain,
+ strPassword,
+ LogonType.LOGON32_LOGON_BATCH,
+ LogonProvider.LOGON32_PROVIDER_DEFAULT,
+ out hToken
+ );
+ if (!bResult)
+ {
+ throw new Win32Exception("Logon error #" + Marshal.GetLastWin32Error().ToString());
+ }
+ IntPtr hproc = GetCurrentProcess();
+ IntPtr htok = IntPtr.Zero;
+ bResult = OpenProcessToken(
+ hproc,
+ TOKEN_ADJUST_PRIVILEGES | TOKEN_QUERY,
+ ref htok
+ );
+ if(!bResult)
+ {
+ throw new Win32Exception("Open process token error #" + Marshal.GetLastWin32Error().ToString());
+ }
+ tp.Count = 1;
+ tp.Luid = 0;
+ tp.Attr = SE_PRIVILEGE_ENABLED;
+ bResult = LookupPrivilegeValue(
+ null,
+ SE_INCREASE_QUOTA,
+ ref tp.Luid
+ );
+ if(!bResult)
+ {
+ throw new Win32Exception("Lookup privilege error #" + Marshal.GetLastWin32Error().ToString());
+ }
+ bResult = AdjustTokenPrivileges(
+ htok,
+ false,
+ ref tp,
+ 0,
+ IntPtr.Zero,
+ IntPtr.Zero
+ );
+ if(!bResult)
+ {
+ throw new Win32Exception("Token elevation error #" + Marshal.GetLastWin32Error().ToString());
+ }
+
+ bResult = DuplicateTokenEx(
+ hToken,
+ GENERIC_ALL_ACCESS,
+ ref sa,
+ (int)SECURITY_IMPERSONATION_LEVEL.SecurityIdentification,
+ (int)TOKEN_TYPE.TokenPrimary,
+ ref hDupedToken
+ );
+ if(!bResult)
+ {
+ throw new Win32Exception("Duplicate Token error #" + Marshal.GetLastWin32Error().ToString());
+ }
+ var si = new STARTUPINFO();
+ si.cb = Marshal.SizeOf(si);
+ si.lpDesktop = "";
+ bResult = CreateProcessAsUser(
+ hDupedToken,
+ null,
+ strCommand,
+ ref sa,
+ ref sa,
+ false,
+ 0,
+ IntPtr.Zero,
+ null,
+ ref si,
+ ref pi
+ );
+ if(!bResult)
+ {
+ throw new Win32Exception("Create process as user error #" + Marshal.GetLastWin32Error().ToString());
+ }
+
+ int status = WaitForSingleObject(pi.hProcess, -1);
+ if(status == -1)
+ {
+ throw new Win32Exception("Wait during create process failed user error #" + Marshal.GetLastWin32Error().ToString());
+ }
+
+ bResult = GetExitCodeProcess(pi.hProcess, out ExitCode);
+ if(!bResult)
+ {
+ throw new Win32Exception("Retrieving status error #" + Marshal.GetLastWin32Error().ToString());
+ }
+ }
+ finally
+ {
+ if (pi.hThread != IntPtr.Zero)
+ {
+ CloseHandle(pi.hThread);
+ }
+ if (pi.hProcess != IntPtr.Zero)
+ {
+ CloseHandle(pi.hProcess);
+ }
+ if (hDupedToken != IntPtr.Zero)
+ {
+ CloseHandle(hDupedToken);
+ }
+ }
+ }
+ }
+}
+
+"@
+ Add-Type -TypeDefinition $ProgramSource -ReferencedAssemblies "System.ServiceProcess"
+}
+
+#endregion
+
+
+$params = Parse-Args $args;
+$result = New-Object psobject;
+Set-Attr $result "changed" $false;
+
+$path = Get-Attr -obj $params -name path -failifempty $true -resultobj $result
+$name = Get-Attr -obj $params -name name -default $path
+$productid = Get-Attr -obj $params -name productid
+if ($productid -eq $null)
+{
+ #Alias added for backwards compat.
+ $productid = Get-Attr -obj $params -name product_id -failifempty $true -resultobj $result
+}
+$arguments = Get-Attr -obj $params -name arguments
+$ensure = Get-Attr -obj $params -name state -default "present"
+if ($ensure -eq $null)
+{
+ $ensure = Get-Attr -obj $params -name ensure -default "present"
+}
+$username = Get-Attr -obj $params -name user_name
+$password = Get-Attr -obj $params -name user_password
+$return_code = Get-Attr -obj $params -name expected_return_code -default 0
+
+#Construct the DSC param hashtable
+$dscparams = @{
+ name=$name
+ path=$path
+ productid = $productid
+ arguments = $arguments
+ ensure = $ensure
+ returncode = $return_code
+}
+
+if (($username -ne $null) -and ($password -ne $null))
+{
+ #Add network credential to the list
+ $secpassword = $password | ConvertTo-SecureString -AsPlainText -Force
+ $credential = New-Object pscredential -ArgumentList $username, $secpassword
+ $dscparams.add("Credential",$credential)
+}
+
+#Always return the name
+set-attr -obj $result -name "name" -value $name
+
+$testdscresult = Test-TargetResource @dscparams
+if ($testdscresult -eq $true)
+{
+ Exit-Json -obj $result
+}
+Else
+{
+ try
+ {
+ set-TargetResource @dscparams
+ }
+ catch
+ {
+ $errormsg = $_
+ Fail-Json -obj $result -message $errormsg.ToString()
+ }
+
+ #Check if DSC thinks the computer needs a reboot:
+ if ((get-variable DSCMachinestatus -Scope Global -ea 0) -and ($global:DSCMachineStatus -eq 1))
+ {
+ Set-Attr $result "restart_required" $true
+ }
+
+ #Set-TargetResource did its job. We can assume a change has happened
+ Set-Attr $result "changed" $true
+ Exit-Json -obj $result
+
+}
+
diff --git a/lib/ansible/modules/extras/windows/win_package.py b/lib/ansible/modules/extras/windows/win_package.py
new file mode 100644
index 0000000000..e8a91176c3
--- /dev/null
+++ b/lib/ansible/modules/extras/windows/win_package.py
@@ -0,0 +1,95 @@
+#!/usr/bin/python
+# -*- coding: utf-8 -*-
+
+# (c) 2014, Trond Hindenes <trond@hindenes.com>, and others
+#
+# This file is part of Ansible
+#
+# Ansible is free software: you can redistribute it and/or modify
+# it under the terms of the GNU General Public License as published by
+# the Free Software Foundation, either version 3 of the License, or
+# (at your option) any later version.
+#
+# Ansible is distributed in the hope that it will be useful,
+# but WITHOUT ANY WARRANTY; without even the implied warranty of
+# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+# GNU General Public License for more details.
+#
+# You should have received a copy of the GNU General Public License
+# along with Ansible. If not, see <http://www.gnu.org/licenses/>.
+
+# this is a windows documentation stub. actual code lives in the .ps1
+# file of the same name
+
+DOCUMENTATION = '''
+---
+module: win_package
+version_added: "1.7"
+author: Trond Hindenes
+short_description: Installs or uninstalls an installable package, either from the local file system or a URL
+description:
+ - Installs or uninstalls a package.
+ - 'Optionally uses a product_id to check if the package needs installing. You can find product ids for installed programs in the windows registry either in C(HKLM:Software\\Microsoft\\Windows\\CurrentVersion\\Uninstall) or for 32 bit programs C(HKLM:Software\\Wow6432Node\\Microsoft\\Windows\\CurrentVersion\\Uninstall)'
+options:
+ path:
+ description:
+ - Location of the package to be installed (either on file system, network share or url)
+ required: true
+ name:
+ description:
+ - Name of the package, if name isn't specified the path will be used for log messages
+ required: false
+ default: null
+ product_id:
+ description:
+ - product id of the installed package (used for checking if already installed)
+ - You can find product ids for installed programs in the windows registry either in C(HKLM:Software\\Microsoft\\Windows\\CurrentVersion\\Uninstall) or for 32 bit programs C(HKLM:Software\\Wow6432Node\\Microsoft\\Windows\\CurrentVersion\\Uninstall)
+ required: true
+ aliases: [productid]
+ arguments:
+ description:
+ - Any arguments the installer needs
+ default: null
+ required: false
+ state:
+ description:
+ - Install or Uninstall
+ choices:
+ - present
+ - absent
+ default: present
+ required: false
+ aliases: [ensure]
+ user_name:
+ description:
+ - Username of an account with access to the package if it's located on a file share. Only needed if the winrm user doesn't have access to the package. Also specify user_password for this to function properly.
+ default: null
+ required: false
+ user_password:
+ description:
+ - Password of an account with access to the package if it's located on a file share. Only needed if the winrm user doesn't have access to the package. Also specify user_name for this to function properly.
+ default: null
+ required: false
+'''
+
+EXAMPLES = '''
+# Playbook example
+- name: Install the vc thingy
+ win_package:
+ name: Microsoft Visual C thingy
+ path: "http://download.microsoft.com/download/1/6/B/16B06F60-3B20-4FF2-B699-5E9B7962F9AE/VSU_4/vcredist_x64.exe"
+ product_id: "{CF2BEA3C-26EA-32F8-AA9B-331F7E34BA97}"
+ arguments: "/install /passive /norestart"
+
+# Install/uninstall an msi-based package
+- name: Install msi-based package (Remote Desktop Connection Manager)
+ win_package:
+ path: "https://download.microsoft.com/download/A/F/0/AF0071F3-B198-4A35-AA90-C68D103BDCCF/rdcman.msi"
+ product_id: "{0240359E-6A4C-4884-9E94-B397A02D893C}"
+- name: Uninstall msi-based package
+ win_package:
+ path: "https://download.microsoft.com/download/A/F/0/AF0071F3-B198-4A35-AA90-C68D103BDCCF/rdcman.msi"
+ product_id: "{0240359E-6A4C-4884-9E94-B397A02D893C}"
+ state: absent
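+
+# Install from a file share using an account that can read it. The share,
+# product_id and variable names below are illustrative placeholders.
+- name: Install msi-based package from a network share
+ win_package:
+ path: '\\fileserver\software\example-installer.msi'
+ product_id: '{00000000-0000-0000-0000-000000000000}'
+ user_name: DOMAIN\installer
+ user_password: '{{ installer_password }}'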
+'''
+
diff --git a/lib/ansible/modules/extras/windows/win_regedit.ps1 b/lib/ansible/modules/extras/windows/win_regedit.ps1
new file mode 100644
index 0000000000..723a6c7b23
--- /dev/null
+++ b/lib/ansible/modules/extras/windows/win_regedit.ps1
@@ -0,0 +1,237 @@
+#!powershell
+# This file is part of Ansible
+#
+# (c) 2015, Adam Keech <akeech@chathamfinancial.com>, Josh Ludwig <jludwig@chathamfinancial.com>
+#
+# Ansible is free software: you can redistribute it and/or modify
+# it under the terms of the GNU General Public License as published by
+# the Free Software Foundation, either version 3 of the License, or
+# (at your option) any later version.
+#
+# Ansible is distributed in the hope that it will be useful,
+# but WITHOUT ANY WARRANTY; without even the implied warranty of
+# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+# GNU General Public License for more details.
+#
+# You should have received a copy of the GNU General Public License
+# along with Ansible. If not, see <http://www.gnu.org/licenses/>.
+
+$ErrorActionPreference = "Stop"
+
+# WANT_JSON
+# POWERSHELL_COMMON
+
+New-PSDrive -PSProvider registry -Root HKEY_CLASSES_ROOT -Name HKCR -ErrorAction SilentlyContinue
+New-PSDrive -PSProvider registry -Root HKEY_USERS -Name HKU -ErrorAction SilentlyContinue
+New-PSDrive -PSProvider registry -Root HKEY_CURRENT_CONFIG -Name HCCC -ErrorAction SilentlyContinue
+
+$params = Parse-Args $args;
+$result = New-Object PSObject;
+Set-Attr $result "changed" $false;
+Set-Attr $result "data_changed" $false;
+Set-Attr $result "data_type_changed" $false;
+
+$registryKey = Get-Attr -obj $params -name "key" -failifempty $true
+$registryValue = Get-Attr -obj $params -name "value" -default $null
+$state = Get-Attr -obj $params -name "state" -validateSet "present","absent" -default "present"
+$registryData = Get-Attr -obj $params -name "data" -default $null
+$registryDataType = Get-Attr -obj $params -name "datatype" -validateSet "binary","dword","expandstring","multistring","string","qword" -default "string"
+
+If ($state -eq "present" -and $registryData -eq $null -and $registryValue -ne $null)
+{
+ Fail-Json $result "missing required argument: data"
+}
+
+# check the registry key is in powershell ps-drive format: HKLM, HKCU, HKU, HKCR, HCCC
+If (-not ($registryKey -match "^H[KC][CLU][MURC]{0,1}:\\"))
+{
+ Fail-Json $result "key: $registryKey is not a valid powershell path, see module documentation for examples."
+}
+
+
+Function Test-RegistryValueData {
+ Param (
+ [parameter(Mandatory=$true)]
+ [ValidateNotNullOrEmpty()]$Path,
+ [parameter(Mandatory=$true)]
+ [ValidateNotNullOrEmpty()]$Value
+ )
+ Try {
+ Get-ItemProperty -Path $Path -Name $Value | Out-Null
+ Return $true
+ }
+ Catch {
+ Return $false
+ }
+}
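+
+# Note: this probe relies on $ErrorActionPreference = "Stop" (set above), so a
+# missing value makes Get-ItemProperty throw and fall into the Catch block.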
+
+# Returns true if registry data matches.
+# Handles binary, integer(dword) and string registry data
+Function Compare-RegistryData {
+ Param (
+ [parameter(Mandatory=$true)]
+ [AllowEmptyString()]$ReferenceData,
+ [parameter(Mandatory=$true)]
+ [AllowEmptyString()]$DifferenceData
+ )
+
+ if ($ReferenceData -is [String] -or $ReferenceData -is [int] -or $ReferenceData -is [long]) {
+ if ($ReferenceData -eq $DifferenceData) {
+ return $true
+ } else {
+ return $false
+ }
+ } elseif ($ReferenceData -is [System.Array]) {
+ if (@(Compare-Object $ReferenceData $DifferenceData -SyncWindow 0).Length -eq 0) {
+ return $true
+ } else {
+ return $false
+ }
+ }
+}
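+
+# Note: when the two values are of different kinds (e.g. one is a scalar and the
+# other an array) the function falls through and returns $null, which callers
+# treat the same as "data does not match".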
+
+# Simplified version of Convert-HexStringToByteArray from
+# https://cyber-defense.sans.org/blog/2010/02/11/powershell-byte-array-hex-convert
+# Expects a hex in the format you get when you run reg.exe export,
+# and converts to a byte array so powershell can modify binary registry entries
+function Convert-RegExportHexStringToByteArray
+{
+ Param (
+ [parameter(Mandatory=$true)] [String] $String
+ )
+
+# remove 'hex:' from the front of the string if present
+$String = $String.ToLower() -replace '^hex\:', ''
+
+#remove whitespace and any other non-hex crud.
+$String = $String.ToLower() -replace '[^a-f0-9\\,x\-\:]',''
+
+# turn commas into colons
+$String = $String -replace ',',':'
+
+#Maybe there's nothing left over to convert...
+if ($String.Length -eq 0) { ,@() ; return }
+
+#Split string with or without colon delimiters.
+if ($String.Length -eq 1)
+{ ,@([System.Convert]::ToByte($String,16)) }
+elseif (($String.Length % 2 -eq 0) -and ($String.IndexOf(":") -eq -1))
+{ ,@($String -split '([a-f0-9]{2})' | foreach-object { if ($_) {[System.Convert]::ToByte($_,16)}}) }
+elseif ($String.IndexOf(":") -ne -1)
+{ ,@($String -split ':+' | foreach-object {[System.Convert]::ToByte($_,16)}) }
+else
+{ ,@() }
+
+}
+
+if($registryDataType -eq "binary" -and $registryData -ne $null -and $registryData -is [String]) {
+ $registryData = Convert-RegExportHexStringToByteArray($registryData)
+}
+
+if($state -eq "present") {
+ if ((Test-Path $registryKey) -and $registryValue -ne $null)
+ {
+ if (Test-RegistryValueData -Path $registryKey -Value $registryValue)
+ {
+ # handle binary data
+ $currentRegistryData =(Get-ItemProperty -Path $registryKey | Select-Object -ExpandProperty $registryValue)
+
+ if ($registryValue.ToLower() -eq "(default)") {
+ # Special-case the key's default property, because .GetValueKind() does not work for the (default) property
+ $oldRegistryDataType = "String"
+ }
+ else {
+ $oldRegistryDataType = (Get-Item $registryKey).GetValueKind($registryValue)
+ }
+
+ # Changes Data and DataType
+ if ($registryDataType -ne $oldRegistryDataType)
+ {
+ Try
+ {
+ Remove-ItemProperty -Path $registryKey -Name $registryValue
+ New-ItemProperty -Path $registryKey -Name $registryValue -Value $registryData -PropertyType $registryDataType
+ $result.changed = $true
+ $result.data_changed = $true
+ $result.data_type_changed = $true
+ }
+ Catch
+ {
+ Fail-Json $result $_.Exception.Message
+ }
+ }
+ # Changes Only Data
+ elseif (-Not (Compare-RegistryData -ReferenceData $currentRegistryData -DifferenceData $registryData))
+ {
+ Try {
+ Set-ItemProperty -Path $registryKey -Name $registryValue -Value $registryData
+ $result.changed = $true
+ $result.data_changed = $true
+ }
+ Catch
+ {
+ Fail-Json $result $_.Exception.Message
+ }
+ }
+ }
+ else
+ {
+ Try
+ {
+ New-ItemProperty -Path $registryKey -Name $registryValue -Value $registryData -PropertyType $registryDataType
+ $result.changed = $true
+ }
+ Catch
+ {
+ Fail-Json $result $_.Exception.Message
+ }
+ }
+ }
+ elseif(-not (Test-Path $registryKey))
+ {
+ Try
+ {
+ $newRegistryKey = New-Item $registryKey -Force
+ $result.changed = $true
+
+ if($registryValue -ne $null) {
+ $newRegistryKey | New-ItemProperty -Name $registryValue -Value $registryData -Force -PropertyType $registryDataType
+ $result.changed = $true
+ }
+ }
+ Catch
+ {
+ Fail-Json $result $_.Exception.Message
+ }
+ }
+}
+else
+{
+ if (Test-Path $registryKey)
+ {
+ if ($registryValue -eq $null) {
+ Try
+ {
+ Remove-Item -Path $registryKey -Recurse
+ $result.changed = $true
+ }
+ Catch
+ {
+ Fail-Json $result $_.Exception.Message
+ }
+ }
+ elseif (Test-RegistryValueData -Path $registryKey -Value $registryValue) {
+ Try
+ {
+ Remove-ItemProperty -Path $registryKey -Name $registryValue
+ $result.changed = $true
+ }
+ Catch
+ {
+ Fail-Json $result $_.Exception.Message
+ }
+ }
+ }
+}
+
+Exit-Json $result
diff --git a/lib/ansible/modules/extras/windows/win_regedit.py b/lib/ansible/modules/extras/windows/win_regedit.py
new file mode 100644
index 0000000000..d9de288e68
--- /dev/null
+++ b/lib/ansible/modules/extras/windows/win_regedit.py
@@ -0,0 +1,147 @@
+#!/usr/bin/python
+# -*- coding: utf-8 -*-
+
+# (c) 2015, Adam Keech <akeech@chathamfinancial.com>, Josh Ludwig <jludwig@chathamfinancial.com>
+#
+# This file is part of Ansible
+#
+# Ansible is free software: you can redistribute it and/or modify
+# it under the terms of the GNU General Public License as published by
+# the Free Software Foundation, either version 3 of the License, or
+# (at your option) any later version.
+#
+# Ansible is distributed in the hope that it will be useful,
+# but WITHOUT ANY WARRANTY; without even the implied warranty of
+# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+# GNU General Public License for more details.
+#
+# You should have received a copy of the GNU General Public License
+# along with Ansible. If not, see <http://www.gnu.org/licenses/>.
+
+# this is a windows documentation stub. actual code lives in the .ps1
+# file of the same name
+
+DOCUMENTATION = '''
+---
+module: win_regedit
+version_added: "2.0"
+short_description: Add, Edit, or Remove Registry Keys and Values
+description:
+ - Add, Edit, or Remove Registry Keys and Values using ItemProperties Cmdlets
+options:
+ key:
+ description:
+ - Name of Registry Key
+ required: true
+ default: null
+ aliases: []
+ value:
+ description:
+ - Name of Registry Value
+ required: true
+ default: null
+ aliases: []
+ data:
+ description:
+ - Registry Value Data. Binary data should be expressed as a YAML byte array or as comma-separated hex values. An easy way to generate this is to run C(regedit.exe) and use the I(Export) option to save the registry values to a file. In the exported file binary values will look like C(hex:be,ef,be,ef). The C(hex:) prefix is optional.
+ required: false
+ default: null
+ aliases: []
+ datatype:
+ description:
+ - Registry Value Data Type
+ required: false
+ choices:
+ - binary
+ - dword
+ - expandstring
+ - multistring
+ - string
+ - qword
+ default: string
+ aliases: []
+ state:
+ description:
+ - State of Registry Value
+ required: false
+ choices:
+ - present
+ - absent
+ default: present
+ aliases: []
+author: "Adam Keech (@smadam813), Josh Ludwig (@joshludwig)"
+'''
+
+EXAMPLES = '''
+ # Creates Registry Key called MyCompany.
+ win_regedit:
+ key: HKCU:\Software\MyCompany
+
+ # Creates Registry Key called MyCompany,
+ # a value within MyCompany Key called "hello", and
+ # data for the value "hello" containing "world".
+ win_regedit:
+ key: HKCU:\Software\MyCompany
+ value: hello
+ data: world
+
+ # Creates Registry Key called MyCompany,
+ # a value within MyCompany Key called "hello", and
+ # data for the value "hello" containing "1337" as type "dword".
+ win_regedit:
+ key: HKCU:\Software\MyCompany
+ value: hello
+ data: 1337
+ datatype: dword
+
+ # Creates Registry Key called MyCompany,
+ # a value within MyCompany Key called "hello", and
+ # binary data for the value "hello" as type "binary"
+ # data expressed as comma separated list
+ win_regedit:
+ key: HKCU:\Software\MyCompany
+ value: hello
+ data: hex:be,ef,be,ef,be,ef,be,ef,be,ef
+ datatype: binary
+
+ # Creates Registry Key called MyCompany,
+ # a value within MyCompany Key called "hello", and
+ # binary data for the value "hello" as type "binary"
+ # data expressed as yaml array of bytes
+ win_regedit:
+ key: HKCU:\Software\MyCompany
+ value: hello
+ data: [0xbe,0xef,0xbe,0xef,0xbe,0xef,0xbe,0xef,0xbe,0xef]
+ datatype: binary
+
+ # Delete Registry Key MyCompany
+ # NOTE: Not specifying a value will delete the root key which means
+ # all values will be deleted
+ win_regedit:
+ key: HKCU:\Software\MyCompany
+ state: absent
+
+ # Delete Registry Value "hello" from MyCompany Key
+ win_regedit:
+ key: HKCU:\Software\MyCompany
+ value: hello
+ state: absent
+
+ # Ensure registry paths containing spaces are quoted.
+ # Creates Registry Key called 'My Company'.
+ win_regedit:
+ key: 'HKCU:\Software\My Company'
+
+'''
+RETURN = '''
+data_changed:
+ description: whether this invocation changed the data in the registry value
+ returned: success
+ type: boolean
+ sample: False
+data_type_changed:
+ description: whether this invocation changed the datatype of the registry value
+ returned: success
+ type: boolean
+ sample: True
+'''
diff --git a/lib/ansible/modules/extras/windows/win_regmerge.ps1 b/lib/ansible/modules/extras/windows/win_regmerge.ps1
new file mode 100644
index 0000000000..87e73a6977
--- /dev/null
+++ b/lib/ansible/modules/extras/windows/win_regmerge.ps1
@@ -0,0 +1,100 @@
+#!powershell
+# This file is part of Ansible
+#
+# Copyright 2015, Jon Hawkesworth (@jhawkesworth) <figs@unity.demon.co.uk>
+#
+# Ansible is free software: you can redistribute it and/or modify
+# it under the terms of the GNU General Public License as published by
+# the Free Software Foundation, either version 3 of the License, or
+# (at your option) any later version.
+#
+# Ansible is distributed in the hope that it will be useful,
+# but WITHOUT ANY WARRANTY; without even the implied warranty of
+# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+# GNU General Public License for more details.
+#
+# You should have received a copy of the GNU General Public License
+# along with Ansible. If not, see <http://www.gnu.org/licenses/>.
+
+# WANT_JSON
+# POWERSHELL_COMMON
+
+Function Convert-RegistryPath {
+ Param (
+ [parameter(Mandatory=$True)]
+ [ValidateNotNullOrEmpty()]$Path
+ )
+
+ $output = $Path -replace "HKLM:", "HKLM"
+ $output = $output -replace "HKCU:", "HKCU"
+
+ Return $output
+}
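+
+# Illustrative: Convert-RegistryPath "HKLM:\SOFTWARE\MyCompany" -> "HKLM\SOFTWARE\MyCompany"
+# (reg.exe expects hive names without the PowerShell drive colon).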
+
+$params = Parse-Args $args
+$result = New-Object PSObject
+Set-Attr $result "changed" $False
+
+$path = Get-Attr -obj $params -name path -failifempty $True -resultobj $result
+$compare_to = Get-Attr -obj $params -name compare_to -failifempty $False -resultobj $result
+
+# check it looks like a reg key, warn if key not present - will happen first time
+# only accepting PS-Drive style key names (starting with HKLM etc, not HKEY_LOCAL_MACHINE etc)
+
+$do_comparison = $False
+
+If ($compare_to) {
+ $compare_to_key = $params.compare_to.ToString()
+ If (Test-Path $compare_to_key -pathType container ) {
+ $do_comparison = $True
+ } Else {
+ Set-Attr $result "compare_to_key_found" $False
+ }
+}
+
+If ( $do_comparison -eq $True ) {
+ $guid = [guid]::NewGuid()
+ $exported_path = $env:TEMP + "\" + $guid.ToString() + '_ansible_win_regmerge.reg'
+
+ $expanded_compare_key = Convert-RegistryPath ($compare_to_key)
+
+ # export from the reg key location to a file
+ $reg_args = @("EXPORT", "$expanded_compare_key", $exported_path)
+ & reg.exe $reg_args
+
+ # compare the two files
+ $comparison_result = Compare-Object -ReferenceObject $(Get-Content $path) -DifferenceObject $(Get-Content $exported_path)
+
+ If (Get-Member -InputObject $comparison_result -Name "count" -MemberType Properties )
+ {
+ # Something is different, actually do reg merge
+ $reg_import_args = @("IMPORT", "$path")
+ $ret = & reg.exe $reg_import_args 2>&1
+ If ($LASTEXITCODE -eq 0) {
+ Set-Attr $result "changed" $True
+ Set-Attr $result "difference_count" $comparison_result.count
+ } Else {
+ Set-Attr $result "rc" $LASTEXITCODE
+ Fail-Json $result "$ret"
+ }
+ } Else {
+ Set-Attr $result "difference_count" 0
+ }
+
+ Remove-Item $exported_path
+ Set-Attr $result "compared" $True
+
+} Else {
+ # not comparing, merge and report changed
+ $reg_import_args = @("IMPORT", "$path")
+ $ret = & reg.exe $reg_import_args 2>&1
+ If ( $LASTEXITCODE -eq 0 ) {
+ Set-Attr $result "changed" $True
+ Set-Attr $result "compared" $False
+ } Else {
+ Set-Attr $result "rc" $LASTEXITCODE
+ Fail-Json $result "$ret"
+ }
+}
+
+Exit-Json $result
diff --git a/lib/ansible/modules/extras/windows/win_regmerge.py b/lib/ansible/modules/extras/windows/win_regmerge.py
new file mode 100644
index 0000000000..6507b84b9c
--- /dev/null
+++ b/lib/ansible/modules/extras/windows/win_regmerge.py
@@ -0,0 +1,87 @@
+#!/usr/bin/python
+# -*- coding: utf-8 -*-
+
+# (c) 2015, Jon Hawkesworth (@jhawkesworth) <figs@unity.demon.co.uk>
+#
+# This file is part of Ansible
+#
+# Ansible is free software: you can redistribute it and/or modify
+# it under the terms of the GNU General Public License as published by
+# the Free Software Foundation, either version 3 of the License, or
+# (at your option) any later version.
+#
+# Ansible is distributed in the hope that it will be useful,
+# but WITHOUT ANY WARRANTY; without even the implied warranty of
+# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+# GNU General Public License for more details.
+#
+# You should have received a copy of the GNU General Public License
+# along with Ansible. If not, see <http://www.gnu.org/licenses/>.
+
+# this is a windows documentation stub. actual code lives in the .ps1
+# file of the same name
+
+DOCUMENTATION = '''
+---
+module: win_regmerge
+version_added: "2.1"
+short_description: Merges the contents of a registry file into the Windows registry
+description:
+ - Wraps the reg.exe command to import the contents of a registry file.
+ - Suitable for use with registry files created using M(win_template).
+    - Windows registry files have a specific format and must be constructed correctly, with carriage return and line feed line endings, otherwise they will not be merged.
+    - Exported registry files often start with a Byte Order Mark, which must be removed if the file is to be templated using M(win_template).
+ - Registry file format is described at U(https://support.microsoft.com/en-us/kb/310516)
+ - See also M(win_template), M(win_regedit)
+options:
+ path:
+ description:
+ - The full path including file name to the registry file on the remote machine to be merged
+ required: true
+ default: no default
+  compare_to:
+    description:
+      - The parent key to use when comparing the contents of the registry to the contents of the file. Needs to be in the HKLM or HKCU part of the registry. Use a PS-Drive style path, for example HKLM:\SOFTWARE, not HKEY_LOCAL_MACHINE\SOFTWARE.
+        If not supplied, or the registry key is not found, no comparison will be made and the module will report changed.
+ required: false
+ default: no default
+author: "Jon Hawkesworth (@jhawkesworth)"
+notes:
+ - Organise your registry files so that they contain a single root registry
+ key if you want to use the compare_to functionality.
+      This module does not force registry settings to be in the state
+      described in the file. If registry settings have been modified externally,
+      the module will merge the contents of the file but continue to report
+      differences on subsequent runs.
+      To force registry change, use M(win_regedit) with state=absent before
+      using M(win_regmerge) (see the example below).
+'''
+
+EXAMPLES = '''
+ # Merge in a registry file without comparing to current registry
+ # Note that paths using / to separate are preferred as they require less special handling than \
+ win_regmerge:
+ path: C:/autodeploy/myCompany-settings.reg
+ # Compare and merge registry file
+ win_regmerge:
+ path: C:/autodeploy/myCompany-settings.reg
+ compare_to: HKLM:\SOFTWARE\myCompany
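+
+  # A sketch of the forcing pattern from the notes: clear the key with
+  # win_regedit first, then merge, so externally modified values do not linger
+  win_regedit:
+    key: HKLM:\SOFTWARE\myCompany
+    state: absent
+  win_regmerge:
+    path: C:/autodeploy/myCompany-settings.reg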
+'''
+
+RETURN = '''
+compare_to_key_found:
+ description: whether the parent registry key has been found for comparison
+ returned: when comparison key not found in registry
+ type: boolean
+ sample: false
+difference_count:
+ description: number of differences between the registry and the file
+ returned: changed
+ type: integer
+ sample: 1
+compared:
+ description: whether a comparison has taken place between the registry and the file
+ returned: when a comparison key has been supplied and comparison has been attempted
+ type: boolean
+ sample: true
+'''
diff --git a/lib/ansible/modules/extras/windows/win_robocopy.ps1 b/lib/ansible/modules/extras/windows/win_robocopy.ps1
new file mode 100644
index 0000000000..69cf9ee3e3
--- /dev/null
+++ b/lib/ansible/modules/extras/windows/win_robocopy.ps1
@@ -0,0 +1,147 @@
+#!powershell
+# This file is part of Ansible
+#
+# Copyright 2015, Corwin Brown <corwin.brown@maxpoint.com>
+#
+# Ansible is free software: you can redistribute it and/or modify
+# it under the terms of the GNU General Public License as published by
+# the Free Software Foundation, either version 3 of the License, or
+# (at your option) any later version.
+#
+# Ansible is distributed in the hope that it will be useful,
+# but WITHOUT ANY WARRANTY; without even the implied warranty of
+# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+# GNU General Public License for more details.
+#
+# You should have received a copy of the GNU General Public License
+# along with Ansible. If not, see <http://www.gnu.org/licenses/>.
+
+# WANT_JSON
+# POWERSHELL_COMMON
+
+$params = Parse-Args $args;
+
+$result = New-Object psobject @{
+ win_robocopy = New-Object psobject @{
+ recurse = $false
+ purge = $false
+ }
+ changed = $false
+}
+
+$src = Get-AnsibleParam -obj $params -name "src" -failifempty $true
+$dest = Get-AnsibleParam -obj $params -name "dest" -failifempty $true
+$purge = ConvertTo-Bool (Get-AnsibleParam -obj $params -name "purge" -default $false)
+$recurse = ConvertTo-Bool (Get-AnsibleParam -obj $params -name "recurse" -default $false)
+$flags = Get-AnsibleParam -obj $params -name "flags" -default $null
+$_ansible_check_mode = Get-AnsibleParam -obj $params -name "_ansible_check_mode" -default $false
+
+# Search for an Error Message
+# Robocopy seems to display an error after 3 '-----' separator lines
+Function SearchForError($cmd_output, $default_msg) {
+ $separator_count = 0
+ $error_msg = $default_msg
+ ForEach ($line in $cmd_output) {
+ if (-Not $line) {
+ continue
+ }
+
+ if ($separator_count -ne 3) {
+ if (Select-String -InputObject $line -pattern "^(\s+)?(\-+)(\s+)?$") {
+ $separator_count += 1
+ }
+ }
+ Else {
+ If (Select-String -InputObject $line -pattern "error") {
+ $error_msg = $line
+ break
+ }
+ }
+ }
+
+ return $error_msg
+}
+
+# Build Arguments
+$robocopy_opts = @()
+
+if (-Not (Test-Path $src)) {
+ Fail-Json $result "$src does not exist!"
+}
+
+$robocopy_opts += $src
+Set-Attr $result.win_robocopy "src" $src
+
+$robocopy_opts += $dest
+Set-Attr $result.win_robocopy "dest" $dest
+
+if ($flags -eq $null) {
+ if ($purge) {
+ $robocopy_opts += "/purge"
+ }
+
+ if ($recurse) {
+ $robocopy_opts += "/e"
+ }
+}
+Else {
+ $robocopy_opts += $flags
+}
+
+Set-Attr $result.win_robocopy "purge" $purge
+Set-Attr $result.win_robocopy "recurse" $recurse
+Set-Attr $result.win_robocopy "flags" $flags
+
+$robocopy_output = ""
+$rc = 0
+# initialise changed here so the final Set-Attr always has a boolean value
+$changed = $false
+If ($_ansible_check_mode -eq $true) {
+ $robocopy_output = "Would have copied the contents of $src to $dest"
+ $rc = 0
+}
+Else {
+ Try {
+ &robocopy $robocopy_opts | Tee-Object -Variable robocopy_output | Out-Null
+ $rc = $LASTEXITCODE
+ }
+ Catch {
+ $ErrorMessage = $_.Exception.Message
+ Fail-Json $result "Error synchronizing $src to $dest! Msg: $ErrorMessage"
+ }
+}
+
+Set-Attr $result.win_robocopy "return_code" $rc
+Set-Attr $result.win_robocopy "output" $robocopy_output
+
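+# Robocopy exit codes form a bitmask (1 = files copied, 2 = extra files,
+# 4 = mismatches, 8 = copy failures, 16 = fatal error); the checks below
+# cover the common single-bit values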
+$cmd_msg = "Success"
+If ($rc -eq 0) {
+ $cmd_msg = "No files copied."
+}
+ElseIf ($rc -eq 1) {
+ $cmd_msg = "Files copied successfully!"
+ $changed = $true
+}
+ElseIf ($rc -eq 2) {
+ $cmd_msg = "Extra files or directories were detected!"
+ $changed = $true
+}
+ElseIf ($rc -eq 4) {
+ $cmd_msg = "Some mismatched files or directories were detected!"
+ $changed = $true
+}
+ElseIf ($rc -eq 8) {
+ $error_msg = SearchForError $robocopy_output "Some files or directories could not be copied!"
+ Fail-Json $result $error_msg
+}
+ElseIf ($rc -eq 10) {
+ $error_msg = SearchForError $robocopy_output "Serious Error! No files were copied! Do you have permissions to access $src and $dest?"
+ Fail-Json $result $error_msg
+}
+ElseIf ($rc -eq 16) {
+ $error_msg = SearchForError $robocopy_output "Fatal Error!"
+ Fail-Json $result $error_msg
+}
+
+Set-Attr $result.win_robocopy "msg" $cmd_msg
+Set-Attr $result "changed" $changed
+
+Exit-Json $result
diff --git a/lib/ansible/modules/extras/windows/win_robocopy.py b/lib/ansible/modules/extras/windows/win_robocopy.py
new file mode 100644
index 0000000000..d627918e52
--- /dev/null
+++ b/lib/ansible/modules/extras/windows/win_robocopy.py
@@ -0,0 +1,143 @@
+#!/usr/bin/python
+# -*- coding: utf-8 -*-
+
+# (c) 2015, Corwin Brown <blakfeld@gmail.com>
+#
+# This file is part of Ansible
+#
+# Ansible is free software: you can redistribute it and/or modify
+# it under the terms of the GNU General Public License as published by
+# the Free Software Foundation, either version 3 of the License, or
+# (at your option) any later version.
+#
+# Ansible is distributed in the hope that it will be useful,
+# but WITHOUT ANY WARRANTY; without even the implied warranty of
+# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+# GNU General Public License for more details.
+#
+# You should have received a copy of the GNU General Public License
+# along with Ansible. If not, see <http://www.gnu.org/licenses/>.
+
+# this is a windows documentation stub. actual code lives in the .ps1
+# file of the same name
+
+DOCUMENTATION = """
+---
+module: win_robocopy
+version_added: "2.2"
+short_description: Synchronizes the contents of two directories using Robocopy.
+description:
+    - Synchronizes the contents of two directories on the remote machine. Under the hood this just calls out to RoboCopy, since that should be available on most modern Windows systems.
+options:
+ src:
+ description:
+ - Source file/directory to sync.
+ required: true
+ dest:
+ description:
+ - Destination file/directory to sync (Will receive contents of src).
+ required: true
+ recurse:
+ description:
+ - Includes all subdirectories (Toggles the `/e` flag to RoboCopy). If "flags" is set, this will be ignored.
+ choices:
+ - true
+ - false
+        default: false
+ required: false
+ purge:
+ description:
+ - Deletes any files/directories found in the destination that do not exist in the source (Toggles the `/purge` flag to RoboCopy). If "flags" is set, this will be ignored.
+ choices:
+ - true
+ - false
+        default: false
+ required: false
+ flags:
+ description:
+ - Directly supply Robocopy flags. If set, purge and recurse will be ignored.
+ default: None
+ required: false
+author: Corwin Brown (@blakfeld)
+notes:
+ - This is not a complete port of the "synchronize" module. Unlike the "synchronize" module this only performs the sync/copy on the remote machine, not from the master to the remote machine.
+ - This module does not currently support all Robocopy flags.
+ - Works on Windows 7, Windows 8, Windows Server 2k8, and Windows Server 2k12
+"""
+
+EXAMPLES = """
+# Syncs the contents of one directory to another.
+$ ansible -i hosts all -m win_robocopy -a "src=C:\\DirectoryOne dest=C:\\DirectoryTwo"
+
+# Sync the contents of one directory to another, including subdirectories.
+$ ansible -i hosts all -m win_robocopy -a "src=C:\\DirectoryOne dest=C:\\DirectoryTwo recurse=true"
+
+# Sync the contents of one directory to another, and remove any files/directories found in destination that do not exist in the source.
+$ ansible -i hosts all -m win_robocopy -a "src=C:\\DirectoryOne dest=C:\\DirectoryTwo purge=true"
+
+# Sample sync
+---
+- name: Sync Two Directories
+ win_robocopy:
+    src: "C:\\DirectoryOne"
+ dest: "C:\\DirectoryTwo"
+ recurse: true
+ purge: true
+
+---
+- name: Sync Two Directories
+ win_robocopy:
+    src: "C:\\DirectoryOne"
+ dest: "C:\\DirectoryTwo"
+ recurse: true
+ purge: true
+ flags: '/XD SOME_DIR /XF SOME_FILE /MT:32'
+"""
+
+RETURN = '''
+src:
+ description: The Source file/directory of the sync.
+ returned: always
+ type: string
+ sample: "c:/Some/Path"
+dest:
+ description: The Destination file/directory of the sync.
+ returned: always
+ type: string
+ sample: "c:/Some/Path"
+recurse:
+ description: Whether or not the recurse flag was toggled.
+ returned: always
+ type: bool
+ sample: False
+purge:
+ description: Whether or not the purge flag was toggled.
+ returned: always
+ type: bool
+ sample: False
+flags:
+ description: Any flags passed in by the user.
+ returned: always
+ type: string
+ sample: "/e /purge"
+return_code:
+    description: The return code returned by robocopy.
+ returned: success
+ type: int
+ sample: 1
+output:
+ description: The output of running the robocopy command.
+ returned: success
+ type: string
+ sample: "-------------------------------------------------------------------------------\n ROBOCOPY :: Robust File Copy for Windows \n-------------------------------------------------------------------------------\n"
+msg:
+    description: Output interpreted into a concise message.
+ returned: always
+ type: string
+ sample: No files copied!
+changed:
+ description: Whether or not any changes were made.
+ returned: always
+ type: bool
+ sample: False
+'''
diff --git a/lib/ansible/modules/extras/windows/win_scheduled_task.ps1 b/lib/ansible/modules/extras/windows/win_scheduled_task.ps1
new file mode 100644
index 0000000000..6490d5562c
--- /dev/null
+++ b/lib/ansible/modules/extras/windows/win_scheduled_task.ps1
@@ -0,0 +1,164 @@
+#!powershell
+# This file is part of Ansible
+#
+# Copyright 2015, Peter Mounce <public@neverrunwithscissors.com>
+# Michael Perzel <michaelperzel@gmail.com>
+#
+# Ansible is free software: you can redistribute it and/or modify
+# it under the terms of the GNU General Public License as published by
+# the Free Software Foundation, either version 3 of the License, or
+# (at your option) any later version.
+#
+# Ansible is distributed in the hope that it will be useful,
+# but WITHOUT ANY WARRANTY; without even the implied warranty of
+# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+# GNU General Public License for more details.
+#
+# You should have received a copy of the GNU General Public License
+# along with Ansible. If not, see <http://www.gnu.org/licenses/>.
+
+$ErrorActionPreference = "Stop"
+
+# WANT_JSON
+# POWERSHELL_COMMON
+
+$params = Parse-Args $args;
+
+$days_of_week = Get-AnsibleParam $params -name "days_of_week"
+$enabled = Get-AnsibleParam $params -name "enabled" -default $true
+$enabled = $enabled | ConvertTo-Bool
+$description = Get-AnsibleParam $params -name "description" -default " "
+$path = Get-AnsibleParam $params -name "path"
+$argument = Get-AnsibleParam $params -name "argument"
+
+$result = New-Object PSObject;
+Set-Attr $result "changed" $false;
+
+#Required vars
+$name = Get-AnsibleParam -obj $params -name name -failifempty $true -resultobj $result
+$state = Get-AnsibleParam -obj $params -name state -failifempty $true -resultobj $result -validateSet "present","absent"
+
+#Vars conditionally required
+$present_args_required = $state -eq "present"
+$execute = Get-AnsibleParam -obj $params -name execute -failifempty $present_args_required -resultobj $result
+$frequency = Get-AnsibleParam -obj $params -name frequency -failifempty $present_args_required -resultobj $result
+$time = Get-AnsibleParam -obj $params -name time -failifempty $present_args_required -resultobj $result
+$user = Get-AnsibleParam -obj $params -name user -failifempty $present_args_required -resultobj $result
+
+
+# Mandatory Vars
+if ($frequency -eq "weekly")
+{
+ if (!($days_of_week))
+ {
+ Fail-Json $result "missing required argument: days_of_week"
+ }
+}
+
+if ($path)
+{
+ $path = "\{0}\" -f $path
+}
+else
+{
+ $path = "\" #default
+}
+
+try {
+ $task = Get-ScheduledTask -TaskPath "$path" | Where-Object {$_.TaskName -eq "$name"}
+
+ # Correlate task state to enable variable, used to calculate if state needs to be changed
+ $taskState = if ($task) { $task.State } else { $null }
+ if ($taskState -eq "Ready"){
+ $taskState = $true
+ }
+ elseif($taskState -eq "Disabled"){
+ $taskState = $false
+ }
+ else
+ {
+ $taskState = $null
+ }
+
+    $measure = $task | Measure-Object
+ if ($measure.count -eq 1 ) {
+ $exists = $true
+ }
+ elseif ( ($measure.count -eq 0) -and ($state -eq "absent") ){
+ Set-Attr $result "msg" "Task does not exist"
+ Exit-Json $result
+ }
+ elseif ($measure.count -eq 0){
+ $exists = $false
+ }
+ else {
+ # This should never occur
+ Fail-Json $result "$($measure.count) scheduled tasks found"
+ }
+
+ Set-Attr $result "exists" "$exists"
+
+ if ($frequency){
+ if ($frequency -eq "daily") {
+ $trigger = New-ScheduledTaskTrigger -Daily -At $time
+ }
+ elseif ($frequency -eq "weekly"){
+ $trigger = New-ScheduledTaskTrigger -Weekly -At $time -DaysOfWeek $days_of_week
+ }
+ else {
+ Fail-Json $result "frequency must be daily or weekly"
+ }
+ }
+
+ if ( ($state -eq "absent") -and ($exists -eq $true) ) {
+ Unregister-ScheduledTask -TaskName $name -Confirm:$false
+ $result.changed = $true
+ Set-Attr $result "msg" "Deleted task $name"
+ Exit-Json $result
+ }
+ elseif ( ($state -eq "absent") -and ($exists -eq $false) ) {
+ Set-Attr $result "msg" "Task $name does not exist"
+ Exit-Json $result
+ }
+
+ $principal = New-ScheduledTaskPrincipal -UserId "$user" -LogonType ServiceAccount
+
+ if ($enabled -eq $false){
+ $settings = New-ScheduledTaskSettingsSet -Disable
+ }
+ else {
+ $settings = New-ScheduledTaskSettingsSet
+ }
+
+ if ($argument) {
+ $action = New-ScheduledTaskAction -Execute $execute -Argument $argument
+ }
+ else {
+ $action = New-ScheduledTaskAction -Execute $execute
+ }
+
+ if ( ($state -eq "present") -and ($exists -eq $false) ){
+ Register-ScheduledTask -Action $action -Trigger $trigger -TaskName $name -Description $description -TaskPath $path -Settings $settings -Principal $principal
+ $task = Get-ScheduledTask -TaskName $name
+ Set-Attr $result "msg" "Added new task $name"
+ $result.changed = $true
+ }
+ elseif( ($state -eq "present") -and ($exists -eq $true) ) {
+ if ($task.Description -eq $description -and $task.TaskName -eq $name -and $task.TaskPath -eq $path -and $task.Actions.Execute -eq $execute -and $taskState -eq $enabled -and $task.Principal.UserId -eq $user) {
+ #No change in the task
+ Set-Attr $result "msg" "No change in task $name"
+ }
+ else {
+ Unregister-ScheduledTask -TaskName $name -Confirm:$false
+ Register-ScheduledTask -Action $action -Trigger $trigger -TaskName $name -Description $description -TaskPath $path -Settings $settings -Principal $principal
+ Set-Attr $result "msg" "Updated task $name"
+ $result.changed = $true
+ }
+ }
+
+ Exit-Json $result;
+}
+catch
+{
+ Fail-Json $result $_.Exception.Message
+} \ No newline at end of file
diff --git a/lib/ansible/modules/extras/windows/win_scheduled_task.py b/lib/ansible/modules/extras/windows/win_scheduled_task.py
new file mode 100644
index 0000000000..3c6ef9d28a
--- /dev/null
+++ b/lib/ansible/modules/extras/windows/win_scheduled_task.py
@@ -0,0 +1,89 @@
+#!/usr/bin/python
+# -*- coding: utf-8 -*-
+# This file is part of Ansible
+#
+# Ansible is free software: you can redistribute it and/or modify
+# it under the terms of the GNU General Public License as published by
+# the Free Software Foundation, either version 3 of the License, or
+# (at your option) any later version.
+#
+# Ansible is distributed in the hope that it will be useful,
+# but WITHOUT ANY WARRANTY; without even the implied warranty of
+# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+# GNU General Public License for more details.
+#
+# You should have received a copy of the GNU General Public License
+# along with Ansible. If not, see <http://www.gnu.org/licenses/>.
+
+# this is a windows documentation stub. actual code lives in the .ps1
+# file of the same name
+
+DOCUMENTATION = '''
+---
+module: win_scheduled_task
+version_added: "2.0"
+short_description: Manage scheduled tasks
+description:
+ - Manage scheduled tasks
+notes:
+ - This module requires Windows Server 2012 or later.
+options:
+ name:
+ description:
+ - Name of the scheduled task
+ required: true
+ description:
+ description:
+ - The description for the scheduled task
+ required: false
+ enabled:
+ description:
+ - Enable/disable the task
+ choices:
+ - yes
+ - no
+ default: yes
+ state:
+ description:
+      - The desired state of the task.
+ required: true
+ choices:
+ - present
+ - absent
+ user:
+ description:
+ - User to run scheduled task as
+ required: false
+ execute:
+ description:
+ - Command the scheduled task should execute
+ required: false
+ argument:
+ description:
+ - Arguments to provide scheduled task action
+ required: false
+ frequency:
+ description:
+ - The frequency of the command, not idempotent
+ required: false
+ choices:
+ - daily
+ - weekly
+ time:
+ description:
+ - Time to execute scheduled task, not idempotent
+ required: false
+ days_of_week:
+ description:
+ - Days of the week to run a weekly task, not idempotent
+ required: false
+ path:
+ description:
+ - Folder path of scheduled task
+ default: '\'
+'''
+
+EXAMPLES = '''
+ # Create a scheduled task to open a command prompt
+  win_scheduled_task: name="TaskName" execute="cmd" frequency="daily" time="9am" description="open command prompt" path="example" enabled=yes state=present user=SYSTEM
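+
+  # A YAML-form sketch of a weekly task (names illustrative); days_of_week is
+  # required whenever frequency is weekly
+  win_scheduled_task:
+    name: WeeklyMaintenance
+    execute: cmd
+    frequency: weekly
+    days_of_week: Monday
+    time: 9am
+    user: SYSTEM
+    state: present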
+'''
diff --git a/lib/ansible/modules/extras/windows/win_share.ps1 b/lib/ansible/modules/extras/windows/win_share.ps1
new file mode 100644
index 0000000000..59e4e8ab81
--- /dev/null
+++ b/lib/ansible/modules/extras/windows/win_share.ps1
@@ -0,0 +1,251 @@
+#!powershell
+# This file is part of Ansible
+
+# Copyright 2015, Hans-Joachim Kliemeck <git@kliemeck.de>
+#
+# Ansible is free software: you can redistribute it and/or modify
+# it under the terms of the GNU General Public License as published by
+# the Free Software Foundation, either version 3 of the License, or
+# (at your option) any later version.
+#
+# Ansible is distributed in the hope that it will be useful,
+# but WITHOUT ANY WARRANTY; without even the implied warranty of
+# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+# GNU General Public License for more details.
+#
+# You should have received a copy of the GNU General Public License
+# along with Ansible. If not, see <http://www.gnu.org/licenses/>.
+
+# WANT_JSON
+# POWERSHELL_COMMON
+
+#Functions
+Function UserSearch
+{
+ Param ([string]$accountName)
+ #Check if there's a realm specified
+
+ $searchDomain = $false
+ $searchDomainUPN = $false
+ if ($accountName.Split("\").count -gt 1)
+ {
+ if ($accountName.Split("\")[0] -ne $env:COMPUTERNAME)
+ {
+ $searchDomain = $true
+ $accountName = $accountName.split("\")[1]
+ }
+ }
+ Elseif ($accountName.contains("@"))
+ {
+ $searchDomain = $true
+ $searchDomainUPN = $true
+ }
+ Else
+ {
+ #Default to local user account
+ $accountName = $env:COMPUTERNAME + "\" + $accountName
+ }
+
+ if ($searchDomain -eq $false)
+ {
+        # do not use Win32_UserAccount, because e.g. SYSTEM (BUILTIN\SYSTEM or COMPUTERNAME\SYSTEM) will not be listed. On Win32_Account, groups will be listed too
+ $localaccount = get-wmiobject -class "Win32_Account" -namespace "root\CIMV2" -filter "(LocalAccount = True)" | where {$_.Caption -eq $accountName}
+ if ($localaccount)
+ {
+ return $localaccount.SID
+ }
+ }
+ Else
+ {
+ #Search by samaccountname
+ $Searcher = [adsisearcher]""
+
+ If ($searchDomainUPN -eq $false) {
+ $Searcher.Filter = "sAMAccountName=$($accountName)"
+ }
+ Else {
+ $Searcher.Filter = "userPrincipalName=$($accountName)"
+ }
+
+ $result = $Searcher.FindOne()
+ if ($result)
+ {
+ $user = $result.GetDirectoryEntry()
+
+ # get binary SID from AD account
+ $binarySID = $user.ObjectSid.Value
+
+ # convert to string SID
+ return (New-Object System.Security.Principal.SecurityIdentifier($binarySID,0)).Value
+ }
+ }
+}
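+
+# Resolve each entry of a comma-separated user/group list to a SID, then
+# translate it back to a canonical NTAccount so later comparisons against
+# the account names reported by Get-SmbShareAccess are consistent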
+Function NormalizeAccounts
+{
+ param(
+ [parameter(valuefrompipeline=$true)]
+ $users
+ )
+
+ $users = $users.Trim()
+ If ($users -eq "") {
+ $splittedUsers = [Collections.Generic.List[String]] @()
+ }
+ Else {
+ $splittedUsers = [Collections.Generic.List[String]] $users.Split(",")
+ }
+
+ $normalizedUsers = [Collections.Generic.List[String]] @()
+ ForEach($splittedUser in $splittedUsers) {
+ $sid = UserSearch $splittedUser
+ If (!$sid) {
+ Fail-Json $result "$splittedUser is not a valid user or group on the host machine or domain"
+ }
+
+ $normalizedUser = (New-Object System.Security.Principal.SecurityIdentifier($sid)).Translate([System.Security.Principal.NTAccount])
+ $normalizedUsers.Add($normalizedUser)
+ }
+
+ return ,$normalizedUsers
+}
+
+$params = Parse-Args $args;
+
+$result = New-Object PSObject;
+Set-Attr $result "changed" $false;
+
+$name = Get-Attr $params "name" -failifempty $true
+$state = Get-Attr $params "state" "present" -validateSet "present","absent" -resultobj $result
+
+Try {
+ $share = Get-SmbShare $name -ErrorAction SilentlyContinue
+ If ($state -eq "absent") {
+ If ($share) {
+ Remove-SmbShare -Force -Name $name
+ Set-Attr $result "changed" $true;
+ }
+ }
+ Else {
+ $path = Get-Attr $params "path" -failifempty $true
+ $description = Get-Attr $params "description" ""
+
+ $permissionList = Get-Attr $params "list" "no" -validateSet "no","yes" -resultobj $result | ConvertTo-Bool
+ $folderEnum = if ($permissionList) { "Unrestricted" } else { "AccessBased" }
+
+ $permissionRead = Get-Attr $params "read" "" | NormalizeAccounts
+ $permissionChange = Get-Attr $params "change" "" | NormalizeAccounts
+ $permissionFull = Get-Attr $params "full" "" | NormalizeAccounts
+ $permissionDeny = Get-Attr $params "deny" "" | NormalizeAccounts
+
+ If (-Not (Test-Path -Path $path)) {
+ Fail-Json $result "$path directory does not exist on the host"
+ }
+
+ # normalize path and remove slash at the end
+ $path = (Get-Item $path).FullName -replace "\\$"
+
+ # need to (re-)create share
+ If (!$share) {
+ New-SmbShare -Name $name -Path $path
+ $share = Get-SmbShare $name -ErrorAction SilentlyContinue
+
+ Set-Attr $result "changed" $true;
+ }
+ If ($share.Path -ne $path) {
+ Remove-SmbShare -Force -Name $name
+
+ New-SmbShare -Name $name -Path $path
+ $share = Get-SmbShare $name -ErrorAction SilentlyContinue
+
+ Set-Attr $result "changed" $true;
+ }
+
+ # updates
+ If ($share.Description -ne $description) {
+ Set-SmbShare -Force -Name $name -Description $description
+ Set-Attr $result "changed" $true;
+ }
+ If ($share.FolderEnumerationMode -ne $folderEnum) {
+ Set-SmbShare -Force -Name $name -FolderEnumerationMode $folderEnum
+ Set-Attr $result "changed" $true;
+ }
+
+ # clean permissions that imply others
+ ForEach ($user in $permissionFull) {
+ $permissionChange.remove($user)
+ $permissionRead.remove($user)
+ }
+ ForEach ($user in $permissionChange) {
+ $permissionRead.remove($user)
+ }
+
+ # remove permissions
+ $permissions = Get-SmbShareAccess -Name $name
+ ForEach ($permission in $permissions) {
+ If ($permission.AccessControlType -eq "Deny") {
+ If (!$permissionDeny.Contains($permission.AccountName)) {
+ Unblock-SmbShareAccess -Force -Name $name -AccountName $permission.AccountName
+ Set-Attr $result "changed" $true;
+ }
+ }
+ ElseIf ($permission.AccessControlType -eq "Allow") {
+ If ($permission.AccessRight -eq "Full") {
+ If (!$permissionFull.Contains($permission.AccountName)) {
+ Revoke-SmbShareAccess -Force -Name $name -AccountName $permission.AccountName
+ Set-Attr $result "changed" $true;
+
+ Continue
+ }
+
+ # user got requested permissions
+ $permissionFull.remove($permission.AccountName)
+ }
+ ElseIf ($permission.AccessRight -eq "Change") {
+ If (!$permissionChange.Contains($permission.AccountName)) {
+ Revoke-SmbShareAccess -Force -Name $name -AccountName $permission.AccountName
+ Set-Attr $result "changed" $true;
+
+ Continue
+ }
+
+ # user got requested permissions
+ $permissionChange.remove($permission.AccountName)
+ }
+ ElseIf ($permission.AccessRight -eq "Read") {
+ If (!$permissionRead.Contains($permission.AccountName)) {
+ Revoke-SmbShareAccess -Force -Name $name -AccountName $permission.AccountName
+ Set-Attr $result "changed" $true;
+
+ Continue
+ }
+
+ # user got requested permissions
+ $permissionRead.Remove($permission.AccountName)
+ }
+ }
+ }
+
+ # add missing permissions
+ ForEach ($user in $permissionRead) {
+ Grant-SmbShareAccess -Force -Name $name -AccountName $user -AccessRight "Read"
+ Set-Attr $result "changed" $true;
+ }
+ ForEach ($user in $permissionChange) {
+ Grant-SmbShareAccess -Force -Name $name -AccountName $user -AccessRight "Change"
+ Set-Attr $result "changed" $true;
+ }
+ ForEach ($user in $permissionFull) {
+ Grant-SmbShareAccess -Force -Name $name -AccountName $user -AccessRight "Full"
+ Set-Attr $result "changed" $true;
+ }
+ ForEach ($user in $permissionDeny) {
+ Block-SmbShareAccess -Force -Name $name -AccountName $user
+ Set-Attr $result "changed" $true;
+ }
+ }
+}
+Catch {
+    Fail-Json $result "an error occurred while managing share $name"
+}
+
+Exit-Json $result \ No newline at end of file
diff --git a/lib/ansible/modules/extras/windows/win_share.py b/lib/ansible/modules/extras/windows/win_share.py
new file mode 100644
index 0000000000..14608e6e17
--- /dev/null
+++ b/lib/ansible/modules/extras/windows/win_share.py
@@ -0,0 +1,117 @@
+#!/usr/bin/python
+# -*- coding: utf-8 -*-
+
+# Copyright 2015, Hans-Joachim Kliemeck <git@kliemeck.de>
+#
+# This file is part of Ansible
+#
+# Ansible is free software: you can redistribute it and/or modify
+# it under the terms of the GNU General Public License as published by
+# the Free Software Foundation, either version 3 of the License, or
+# (at your option) any later version.
+#
+# Ansible is distributed in the hope that it will be useful,
+# but WITHOUT ANY WARRANTY; without even the implied warranty of
+# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+# GNU General Public License for more details.
+#
+# You should have received a copy of the GNU General Public License
+# along with Ansible. If not, see <http://www.gnu.org/licenses/>.
+
+# this is a windows documentation stub. actual code lives in the .ps1
+# file of the same name
+
+DOCUMENTATION = '''
+---
+module: win_share
+version_added: "2.1"
+short_description: Manage Windows shares
+description:
+ - Add, modify or remove Windows share and set share permissions.
+requirements:
+ - Windows 8.1 / Windows 2012 or newer
+options:
+ name:
+ description:
+ - Share name
+ required: yes
+ path:
+ description:
+ - Share directory
+ required: yes
+ state:
+ description:
+ - Specify whether to add C(present) or remove C(absent) the specified share
+ required: no
+ choices:
+ - present
+ - absent
+ default: present
+ description:
+ description:
+ - Share description
+ required: no
+ default: none
+ list:
+ description:
+      - Specify whether to allow or deny file listing, in case the user has no permission on the share.
+ required: no
+ choices:
+ - yes
+ - no
+ default: none
+ read:
+ description:
+      - Comma-separated list of users or groups that should get read access on the share.
+ required: no
+ default: none
+ change:
+ description:
+      - Comma-separated list of users or groups that should get read and write (change) access on the share.
+ required: no
+ default: none
+ full:
+ description:
+      - Comma-separated list of users or groups that should get full access on the share.
+ required: no
+ default: none
+ deny:
+ description:
+      - Comma-separated list of users or groups that should get no access, regardless of implied access on the share.
+ required: no
+ default: none
+author: Hans-Joachim Kliemeck (@h0nIg)
+'''
+
+EXAMPLES = '''
+# Playbook example
+# Add share and set permissions
+---
+- name: Add secret share
+ win_share:
+ name: internal
+ description: top secret share
+ path: C:/shares/internal
+ list: 'no'
+ full: Administrators,CEO
+ read: HR-Global
+ deny: HR-External
+
+- name: Add public company share
+ win_share:
+ name: company
+    description: public company share
+ path: C:/shares/company
+ list: 'yes'
+ full: Administrators,CEO
+ read: Global
+
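+# Grant read/write access via the change option (group name illustrative)
+- name: Add writable team share
+  win_share:
+    name: team
+    path: C:/shares/team
+    change: Developers
+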
+- name: Remove previously added share
+  win_share:
+    name: internal
+    state: absent
+'''
+
+RETURN = '''
+
+''' \ No newline at end of file
diff --git a/lib/ansible/modules/extras/windows/win_timezone.ps1 b/lib/ansible/modules/extras/windows/win_timezone.ps1
new file mode 100644
index 0000000000..03a6935052
--- /dev/null
+++ b/lib/ansible/modules/extras/windows/win_timezone.ps1
@@ -0,0 +1,71 @@
+#!powershell
+# This file is part of Ansible
+#
+# Copyright 2015, Phil Schwartz <schwartzmx@gmail.com>
+#
+# Ansible is free software: you can redistribute it and/or modify
+# it under the terms of the GNU General Public License as published by
+# the Free Software Foundation, either version 3 of the License, or
+# (at your option) any later version.
+#
+# Ansible is distributed in the hope that it will be useful,
+# but WITHOUT ANY WARRANTY; without even the implied warranty of
+# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+# GNU General Public License for more details.
+#
+# You should have received a copy of the GNU General Public License
+# along with Ansible. If not, see <http://www.gnu.org/licenses/>.
+
+# WANT_JSON
+# POWERSHELL_COMMON
+
+$params = Parse-Args $args;
+
+$result = New-Object psobject @{
+ win_timezone = New-Object psobject
+ changed = $false
+}
+
+$timezone = Get-Attr -obj $params -name timezone -failifempty $true -resultobj $result
+
+Try {
+ # Get the current timezone set
+ $currentTZ = $(tzutil.exe /g)
+    If ($LASTEXITCODE -ne 0) { Throw "An error occurred when getting the current machine's timezone setting." }
+
+    If ( $currentTZ -eq $timezone ) {
+        # Exit-Json only takes the result object, so carry the message in it
+        Set-Attr $result "msg" "$timezone is already set on this machine"
+        Exit-Json $result
+    }
+ Else {
+ $tzExists = $false
+ #Check that timezone can even be set (if it is listed from tzutil as an available timezone to the machine)
+ $tzList = $(tzutil.exe /l)
+        If ($LASTEXITCODE -ne 0) { Throw "An error occurred when listing the available timezones." }
+ ForEach ($tz in $tzList) {
+ If ( $tz -eq $timezone ) {
+ $tzExists = $true
+ break
+ }
+ }
+
+ If ( $tzExists ) {
+ tzutil.exe /s "$timezone"
+            If ($LASTEXITCODE -ne 0) { Throw "An error occurred when setting the specified timezone with tzutil." }
+            $newTZ = $(tzutil.exe /g)
+            If ($LASTEXITCODE -ne 0) { Throw "An error occurred when getting the current machine's timezone setting." }
+
+ If ( $timezone -eq $newTZ ) {
+ $result.changed = $true
+ }
+ }
+ Else {
+ Fail-Json $result "The specified timezone: $timezone isn't supported on the machine."
+ }
+ }
+}
+Catch {
+ Fail-Json $result "Error setting timezone to: $timezone."
+}
+
+
+Exit-Json $result; \ No newline at end of file
diff --git a/lib/ansible/modules/extras/windows/win_timezone.py b/lib/ansible/modules/extras/windows/win_timezone.py
new file mode 100644
index 0000000000..2f7cf1fdc4
--- /dev/null
+++ b/lib/ansible/modules/extras/windows/win_timezone.py
@@ -0,0 +1,49 @@
+#!/usr/bin/python
+# -*- coding: utf-8 -*-
+
+# (c) 2015, Phil Schwartz <schwartzmx@gmail.com>
+#
+# This file is part of Ansible
+#
+# Ansible is free software: you can redistribute it and/or modify
+# it under the terms of the GNU General Public License as published by
+# the Free Software Foundation, either version 3 of the License, or
+# (at your option) any later version.
+#
+# Ansible is distributed in the hope that it will be useful,
+# but WITHOUT ANY WARRANTY; without even the implied warranty of
+# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+# GNU General Public License for more details.
+#
+# You should have received a copy of the GNU General Public License
+# along with Ansible. If not, see <http://www.gnu.org/licenses/>.
+
+# this is a windows documentation stub. actual code lives in the .ps1
+# file of the same name
+
+DOCUMENTATION = '''
+---
+module: win_timezone
+version_added: "2.1"
+short_description: Sets Windows machine timezone
+description:
+    - Sets the machine's timezone to the specified value. The module first checks that the provided timezone is supported on the machine.
+options:
+ timezone:
+ description:
+      - Timezone to set the machine to, for example C(Central Standard Time).
+ required: true
+ default: null
+ aliases: []
+
+author: Phil Schwartz
+'''
+
+
+EXAMPLES = '''
+ # Set machine's timezone to Central Standard Time
+ win_timezone:
+ timezone: "Central Standard Time"
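+
+  # Valid timezone names can be listed on the target with tzutil.exe /l, for
+  # example as an illustrative one-off via the raw module:
+  # ansible -i hosts all -m raw -a "tzutil.exe /l"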
+'''
+
+RETURN = '''# '''
diff --git a/lib/ansible/modules/extras/windows/win_unzip.ps1 b/lib/ansible/modules/extras/windows/win_unzip.ps1
new file mode 100644
index 0000000000..59fbd33166
--- /dev/null
+++ b/lib/ansible/modules/extras/windows/win_unzip.ps1
@@ -0,0 +1,142 @@
+#!powershell
+# This file is part of Ansible
+#
+# Copyright 2015, Phil Schwartz <schwartzmx@gmail.com>
+#
+# Ansible is free software: you can redistribute it and/or modify
+# it under the terms of the GNU General Public License as published by
+# the Free Software Foundation, either version 3 of the License, or
+# (at your option) any later version.
+#
+# Ansible is distributed in the hope that it will be useful,
+# but WITHOUT ANY WARRANTY; without even the implied warranty of
+# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+# GNU General Public License for more details.
+#
+# You should have received a copy of the GNU General Public License
+# along with Ansible. If not, see <http://www.gnu.org/licenses/>.
+
+# WANT_JSON
+# POWERSHELL_COMMON
+
+
+$params = Parse-Args $args;
+
+$result = New-Object psobject @{
+ win_unzip = New-Object psobject
+ changed = $false
+}
+
+$creates = Get-AnsibleParam -obj $params -name "creates"
+If ($creates -ne $null) {
+    If (Test-Path $creates) {
+        # Exit-Json only takes the result object, so carry the message in it
+        Set-Attr $result "msg" "The 'creates' file or directory already exists."
+        Exit-Json $result
+    }
+}
+
+$src = Get-AnsibleParam -obj $params -name "src" -failifempty $true
+If (-Not (Test-Path -path $src)){
+ Fail-Json $result "src file: $src does not exist."
+}
+
+$ext = [System.IO.Path]::GetExtension($src)
+
+
+$dest = Get-AnsibleParam -obj $params -name "dest" -failifempty $true
+If (-Not (Test-Path $dest -PathType Container)){
+ Try{
+ New-Item -itemtype directory -path $dest
+ }
+ Catch {
+ $err_msg = $_.Exception.Message
+ Fail-Json $result "Error creating $dest directory! Msg: $err_msg"
+ }
+}
+
+$recurse = ConvertTo-Bool (Get-AnsibleParam -obj $params -name "recurse" -default "false")
+$rm = ConvertTo-Bool (Get-AnsibleParam -obj $params -name "rm" -default "false")
+
+If ($ext -eq ".zip" -And $recurse -eq $false) {
+ Try {
+ $shell = New-Object -ComObject Shell.Application
+ $zipPkg = $shell.NameSpace([IO.Path]::GetFullPath($src))
+ $destPath = $shell.NameSpace([IO.Path]::GetFullPath($dest))
+ # 20 means do not display any dialog (4) and overwrite any file (16)
+ $destPath.CopyHere($zipPkg.Items(), 20)
+ $result.changed = $true
+ }
+ Catch {
+ $err_msg = $_.Exception.Message
+ Fail-Json $result "Error unzipping $src to $dest! Msg: $err_msg"
+ }
+}
+# Requires PSCX
+Else {
+ # Check if PSCX is installed
+ $list = Get-Module -ListAvailable
+
+ If (-Not ($list -match "PSCX")) {
+ Fail-Json $result "PowerShellCommunityExtensions PowerShell Module (PSCX) is required for non-'.zip' compressed archive types."
+ }
+ Else {
+ Set-Attr $result.win_unzip "pscx_status" "present"
+ }
+
+ # Import
+ Try {
+ Import-Module PSCX
+ }
+ Catch {
+ Fail-Json $result "Error importing module PSCX"
+ }
+
+ Try {
+ If ($recurse) {
+ Expand-Archive -Path $src -OutputPath $dest -Force
+
+ If ($rm -eq $true) {
+ Get-ChildItem $dest -recurse | Where {$_.extension -eq ".gz" -Or $_.extension -eq ".zip" -Or $_.extension -eq ".bz2" -Or $_.extension -eq ".tar" -Or $_.extension -eq ".msu"} | % {
+ Expand-Archive $_.FullName -OutputPath $dest -Force
+ Remove-Item $_.FullName -Force
+ }
+ }
+ Else {
+ Get-ChildItem $dest -recurse | Where {$_.extension -eq ".gz" -Or $_.extension -eq ".zip" -Or $_.extension -eq ".bz2" -Or $_.extension -eq ".tar" -Or $_.extension -eq ".msu"} | % {
+ Expand-Archive $_.FullName -OutputPath $dest -Force
+ }
+ }
+ }
+ Else {
+ Expand-Archive -Path $src -OutputPath $dest -Force
+ }
+ }
+ Catch {
+ $err_msg = $_.Exception.Message
+ If ($recurse) {
+ Fail-Json $result "Error recursively expanding $src to $dest! Msg: $err_msg"
+ }
+ Else {
+ Fail-Json $result "Error expanding $src to $dest! Msg: $err_msg"
+ }
+ }
+}
+
+If ($rm -eq $true){
+ Remove-Item $src -Recurse -Force
+ Set-Attr $result.win_unzip "rm" "true"
+}
+
+# Fixes a fail error message (when the task actually succeeds) for a "ConvertTo-Json: The converted JSON string is in bad format"
+# This happens when a string being converted to JSON ends with a "\", which is possible when specifying a directory to download to.
+# This catches that possible error, before assigning the JSON $result
+If ($src[$src.length-1] -eq "\") {
+ $src = $src.Substring(0, $src.length-1)
+}
+If ($dest[$dest.length-1] -eq "\") {
+ $dest = $dest.Substring(0, $dest.length-1)
+}
+Set-Attr $result.win_unzip "src" $src.toString()
+Set-Attr $result.win_unzip "dest" $dest.toString()
+Set-Attr $result.win_unzip "recurse" $recurse.toString()
+
+Exit-Json $result;
diff --git a/lib/ansible/modules/extras/windows/win_unzip.py b/lib/ansible/modules/extras/windows/win_unzip.py
new file mode 100644
index 0000000000..b24e6c6b29
--- /dev/null
+++ b/lib/ansible/modules/extras/windows/win_unzip.py
@@ -0,0 +1,106 @@
+#!/usr/bin/python
+# -*- coding: utf-8 -*-
+
+# (c) 2015, Phil Schwartz <schwartzmx@gmail.com>
+#
+# This file is part of Ansible
+#
+# Ansible is free software: you can redistribute it and/or modify
+# it under the terms of the GNU General Public License as published by
+# the Free Software Foundation, either version 3 of the License, or
+# (at your option) any later version.
+#
+# Ansible is distributed in the hope that it will be useful,
+# but WITHOUT ANY WARRANTY; without even the implied warranty of
+# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+# GNU General Public License for more details.
+#
+# You should have received a copy of the GNU General Public License
+# along with Ansible. If not, see <http://www.gnu.org/licenses/>.
+
+# this is a windows documentation stub. actual code lives in the .ps1
+# file of the same name
+
+DOCUMENTATION = '''
+---
+module: win_unzip
+version_added: "2.0"
+short_description: Unzips compressed files and archives on the Windows node
+description:
+    - Unzips compressed files and archives.
+    - For extracting any compression types other than .zip, the PowerShellCommunityExtensions (PSCX) module is required.
+    - This module, in conjunction with PSCX, can also recursively unzip files contained within the src archive, and handles many other compression types.
+    - If the destination directory does not exist, it will be created before unzipping the file.
+    - Specifying the rm parameter will force removal of the src file after extraction.
+options:
+ src:
+ description:
+ - File to be unzipped (provide absolute path)
+ required: true
+ dest:
+ description:
+ - Destination of zip file (provide absolute path of directory). If it does not exist, the directory will be created.
+ required: true
+ rm:
+ description:
+ - Remove the zip file, after unzipping
+ required: no
+ choices:
+ - true
+ - false
+ - yes
+ - no
+ default: false
+ recurse:
+ description:
+ - Recursively expand zipped files within the src file.
+ required: no
+ default: false
+ choices:
+ - true
+ - false
+ - yes
+ - no
+ creates:
+ description:
+ - If this file or directory exists the specified src will not be extracted.
+ required: no
+ default: null
+author: Phil Schwartz
+'''
+
+EXAMPLES = r'''
+# This unzips a library that was downloaded with win_get_url, and removes the file after extraction
+$ ansible -i hosts -m win_unzip -a "src=C:\LibraryToUnzip.zip dest=C:\Lib rm=true" all
+# Playbook example
+
+# Simple unzip
+---
+- name: Unzip a bz2 (BZip) file
+ win_unzip:
+    src: 'C:\Users\Phil\Logs.bz2'
+    dest: 'C:\Users\Phil\OldLogs'
+    creates: 'C:\Users\Phil\OldLogs'
+
+# This playbook example unzips a .zip file and recursively decompresses the contained .gz files and removes all unneeded compressed files after completion.
+---
+- name: Unzip ApplicationLogs.zip and decompress all GZipped log files
+ hosts: all
+ gather_facts: false
+ tasks:
+ - name: Recursively decompress GZ files in ApplicationLogs.zip
+ win_unzip:
+ src: C:\Downloads\ApplicationLogs.zip
+ dest: C:\Application\Logs
+ recurse: yes
+ rm: true
+
+# Install PSCX to use for extracting a gz file
+ - name: Grab PSCX msi
+ win_get_url:
+ url: 'http://download-codeplex.sec.s-msft.com/Download/Release?ProjectName=pscx&DownloadId=923562&FileTime=130585918034470000&Build=20959'
+ dest: 'C:\pscx.msi'
+ - name: Install PSCX
+ win_msi:
+ path: 'C:\pscx.msi'
+ - name: Unzip gz log
+ win_unzip:
+        src: 'C:\Logs\application-error-logs.gz'
+        dest: 'C:\ExtractedLogs\application-error-logs'
+'''
diff --git a/lib/ansible/modules/extras/windows/win_updates.ps1 b/lib/ansible/modules/extras/windows/win_updates.ps1
new file mode 100644
index 0000000000..a74e68f366
--- /dev/null
+++ b/lib/ansible/modules/extras/windows/win_updates.ps1
@@ -0,0 +1,424 @@
+#!powershell
+# This file is part of Ansible
+#
+# Copyright 2015, Matt Davis <mdavis@rolpdog.com>
+#
+# Ansible is free software: you can redistribute it and/or modify
+# it under the terms of the GNU General Public License as published by
+# the Free Software Foundation, either version 3 of the License, or
+# (at your option) any later version.
+#
+# Ansible is distributed in the hope that it will be useful,
+# but WITHOUT ANY WARRANTY; without even the implied warranty of
+# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+# GNU General Public License for more details.
+#
+# You should have received a copy of the GNU General Public License
+# along with Ansible. If not, see <http://www.gnu.org/licenses/>.
+
+# WANT_JSON
+# POWERSHELL_COMMON
+
+$ErrorActionPreference = "Stop"
+$FormatEnumerationLimit = -1 # prevent out-string et al from truncating collection dumps
+
+<# Most of the Windows Update Agent API will not run under a remote token,
+which a remote WinRM session always has. win_updates uses the Task Scheduler
+to run the bulk of the update functionality under a local token. Powershell's
+Scheduled-Job capability provides a decent abstraction over the Task Scheduler
+and handles marshaling Powershell args in and output/errors/etc back. The
+module schedules a single job that executes all interactions with the Update
+Agent API, then waits for completion. A significant amount of hassle is
+involved to ensure that only one of these jobs is running at a time, and to
+clean up the various error conditions that can occur. #>
+
+# define the ScriptBlock that will be passed to Register-ScheduledJob
+$job_body = {
+ Param(
+ [hashtable]$boundparms=@{},
+ [Object[]]$unboundargs=$()
+ )
+
+ Set-StrictMode -Version 2
+
+ $ErrorActionPreference = "Stop"
+ $DebugPreference = "Continue"
+ $FormatEnumerationLimit = -1 # prevent out-string et al from truncating collection dumps
+
+ # set this as a global for the Write-DebugLog function
+ $log_path = $boundparms['log_path']
+
+ Write-DebugLog "Scheduled job started with boundparms $($boundparms | out-string) and unboundargs $($unboundargs | out-string)"
+
+ # FUTURE: elevate this to module arg validation once we have it
+ Function MapCategoryNameToGuid {
+ Param([string] $category_name)
+
+ $category_guid = switch -exact ($category_name) {
+ # as documented by TechNet @ https://technet.microsoft.com/en-us/library/ff730937.aspx
+ "Application" {"5C9376AB-8CE6-464A-B136-22113DD69801"}
+ "Connectors" {"434DE588-ED14-48F5-8EED-A15E09A991F6"}
+ "CriticalUpdates" {"E6CF1350-C01B-414D-A61F-263D14D133B4"}
+ "DefinitionUpdates" {"E0789628-CE08-4437-BE74-2495B842F43B"}
+ "DeveloperKits" {"E140075D-8433-45C3-AD87-E72345B36078"}
+ "FeaturePacks" {"B54E7D24-7ADD-428F-8B75-90A396FA584F"}
+ "Guidance" {"9511D615-35B2-47BB-927F-F73D8E9260BB"}
+ "SecurityUpdates" {"0FA1201D-4330-4FA8-8AE9-B877473B6441"}
+ "ServicePacks" {"68C5B0A3-D1A6-4553-AE49-01D3A7827828"}
+ "Tools" {"B4832BD8-E735-4761-8DAF-37F882276DAB"}
+ "UpdateRollups" {"28BC880E-0592-4CBF-8F95-C79B17911D5F"}
+ "Updates" {"CD5FFD1E-E932-4E3A-BF74-18BF0B1BBD83"}
+ default { throw "Unknown category_name $category_name, must be one of (Application,Connectors,CriticalUpdates,DefinitionUpdates,DeveloperKits,FeaturePacks,Guidance,SecurityUpdates,ServicePacks,Tools,UpdateRollups,Updates)" }
+ }
+
+ return $category_guid
+ }
+
+ Function DoWindowsUpdate {
+ Param(
+ [string[]]$category_names=@("CriticalUpdates","SecurityUpdates","UpdateRollups"),
+ [ValidateSet("installed", "searched")]
+ [string]$state="installed",
+ [bool]$_ansible_check_mode=$false
+ )
+
+ $is_check_mode = $($state -eq "searched") -or $_ansible_check_mode
+
+ $category_guids = $category_names | % { MapCategoryNameToGUID $_ }
+
+ $update_status = @{ changed = $false }
+
+ Write-DebugLog "Creating Windows Update session..."
+ $session = New-Object -ComObject Microsoft.Update.Session
+
+        Write-DebugLog "Creating Windows Update searcher..."
+ $searcher = $session.CreateUpdateSearcher()
+
+ # OR is only allowed at the top-level, so we have to repeat base criteria inside
+ # FUTURE: change this to client-side filtered?
+ $criteriabase = "IsInstalled = 0"
+ $criteria_list = $category_guids | % { "($criteriabase AND CategoryIDs contains '$_')" }
+
+ $criteria = [string]::Join(" OR ", $criteria_list)
+
+ Write-DebugLog "Search criteria: $criteria"
+
+ Write-DebugLog "Searching for updates to install in category IDs $category_guids..."
+ $searchresult = $searcher.Search($criteria)
+
+ Write-DebugLog "Creating update collection..."
+
+ $updates_to_install = New-Object -ComObject Microsoft.Update.UpdateColl
+
+ Write-DebugLog "Found $($searchresult.Updates.Count) updates"
+
+ $update_status.updates = @{ }
+
+ # FUTURE: add further filtering options
+ foreach($update in $searchresult.Updates) {
+ if(-Not $update.EulaAccepted) {
+ Write-DebugLog "Accepting EULA for $($update.Identity.UpdateID)"
+ $update.AcceptEula()
+ }
+
+ if($update.IsHidden) {
+ Write-DebugLog "Skipping hidden update $($update.Title)"
+ continue
+ }
+
+ Write-DebugLog "Adding update $($update.Identity.UpdateID) - $($update.Title)"
+ $res = $updates_to_install.Add($update)
+
+ $update_status.updates[$update.Identity.UpdateID] = @{
+ title = $update.Title
+ # TODO: pluck the first KB out (since most have just one)?
+ kb = $update.KBArticleIDs
+ id = $update.Identity.UpdateID
+ installed = $false
+ }
+ }
+
+ Write-DebugLog "Calculating pre-install reboot requirement..."
+
+ # calculate this early for check mode, and to see if we should allow updates to continue
+ $sysinfo = New-Object -ComObject Microsoft.Update.SystemInfo
+ $update_status.reboot_required = $sysinfo.RebootRequired
+ $update_status.found_update_count = $updates_to_install.Count
+ $update_status.installed_update_count = 0
+
+ # bail out here for check mode
+ if($is_check_mode -eq $true) {
+ Write-DebugLog "Check mode; exiting..."
+ Write-DebugLog "Return value: $($update_status | out-string)"
+
+ if($updates_to_install.Count -gt 0) { $update_status.changed = $true }
+ return $update_status
+ }
+
+ if($updates_to_install.Count -gt 0) {
+ if($update_status.reboot_required) {
+ throw "A reboot is required before more updates can be installed."
+ }
+ else {
+ Write-DebugLog "No reboot is pending..."
+ }
+ Write-DebugLog "Downloading updates..."
+ }
+
+ foreach($update in $updates_to_install) {
+ if($update.IsDownloaded) {
+ Write-DebugLog "Update $($update.Identity.UpdateID) already downloaded, skipping..."
+ continue
+ }
+ Write-DebugLog "Creating downloader object..."
+ $dl = $session.CreateUpdateDownloader()
+ Write-DebugLog "Creating download collection..."
+ $dl.Updates = New-Object -ComObject Microsoft.Update.UpdateColl
+ Write-DebugLog "Adding update $($update.Identity.UpdateID)"
+ $res = $dl.Updates.Add($update)
+ Write-DebugLog "Downloading update $($update.Identity.UpdateID)..."
+ $download_result = $dl.Download()
+ # FUTURE: configurable download retry
+ if($download_result.ResultCode -ne 2) { # OperationResultCode orcSucceeded
+ throw "Failed to download update $($update.Identity.UpdateID)"
+ }
+ }
+
+ if($updates_to_install.Count -lt 1 ) { return $update_status }
+
+ Write-DebugLog "Installing updates..."
+
+ # install as a batch so the reboot manager will suppress intermediate reboots
+ Write-DebugLog "Creating installer object..."
+ $inst = $session.CreateUpdateInstaller()
+ Write-DebugLog "Creating install collection..."
+ $inst.Updates = New-Object -ComObject Microsoft.Update.UpdateColl
+
+ foreach($update in $updates_to_install) {
+ Write-DebugLog "Adding update $($update.Identity.UpdateID)"
+ $res = $inst.Updates.Add($update)
+ }
+
+ # FUTURE: use BeginInstall w/ progress reporting so we can at least log intermediate install results
+ Write-DebugLog "Installing updates..."
+ $install_result = $inst.Install()
+
+ $update_success_count = 0
+ $update_fail_count = 0
+
+ # WU result API requires us to index in to get the install results
+ $update_index = 0
+
+ foreach($update in $updates_to_install) {
+ $update_result = $install_result.GetUpdateResult($update_index)
+ $update_resultcode = $update_result.ResultCode
+ $update_hresult = $update_result.HResult
+
+ $update_index++
+
+ $update_dict = $update_status.updates[$update.Identity.UpdateID]
+
+ if($update_resultcode -eq 2) { # OperationResultCode orcSucceeded
+ $update_success_count++
+ $update_dict.installed = $true
+ Write-DebugLog "Update $($update.Identity.UpdateID) succeeded"
+ }
+ else {
+ $update_fail_count++
+ $update_dict.installed = $false
+ $update_dict.failed = $true
+ $update_dict.failure_hresult_code = $update_hresult
+            Write-DebugLog "Update $($update.Identity.UpdateID) failed with resultcode $update_resultcode, hresult $update_hresult"
+ }
+
+ }
+
+ if($update_fail_count -gt 0) {
+ $update_status.failed = $true
+ $update_status.msg="Failed to install one or more updates"
+ }
+ else { $update_status.changed = $true }
+
+ Write-DebugLog "Performing post-install reboot requirement check..."
+
+ # recalculate reboot status after installs
+ $sysinfo = New-Object -ComObject Microsoft.Update.SystemInfo
+ $update_status.reboot_required = $sysinfo.RebootRequired
+ $update_status.installed_update_count = $update_success_count
+ $update_status.failed_update_count = $update_fail_count
+
+ Write-DebugLog "Return value: $($update_status | out-string)"
+
+ return $update_status
+ }
+
+ Try {
+ # job system adds a bunch of cruft to top-level dict, so we have to send a sub-dict
+ return @{ job_output = DoWindowsUpdate @boundparms }
+ }
+ Catch {
+ $excep = $_
+ Write-DebugLog "Fatal exception: $($excep.Exception.Message) at $($excep.ScriptStackTrace)"
+ return @{ job_output = @{ failed=$true;error=$excep.Exception.Message;location=$excep.ScriptStackTrace } }
+ }
+}
+
+Function DestroyScheduledJob {
+ Param([string] $job_name)
+
+ # find a scheduled job with the same name (should normally fail)
+ $schedjob = Get-ScheduledJob -Name $job_name -ErrorAction SilentlyContinue
+
+ # nuke it if it's there
+ If($schedjob -ne $null) {
+ Write-DebugLog "ScheduledJob $job_name exists, ensuring it's not running..."
+ # can't manage jobs across sessions, so we have to resort to the Task Scheduler script object to kill running jobs
+ $schedserv = New-Object -ComObject Schedule.Service
+ Write-DebugLog "Connecting to scheduler service..."
+ $schedserv.Connect()
+ Write-DebugLog "Getting running tasks named $job_name"
+ $running_tasks = @($schedserv.GetRunningTasks(0) | Where-Object { $_.Name -eq $job_name })
+
+ Foreach($task_to_stop in $running_tasks) {
+ Write-DebugLog "Stopping running task $($task_to_stop.InstanceGuid)..."
+ $task_to_stop.Stop()
+ }
+
+ <# FUTURE: add a global waithandle for this to release any other waiters. Wait-Job
+ and/or polling will block forever, since the killed job object in the parent
+ session doesn't know it's been killed :( #>
+
+ Unregister-ScheduledJob -Name $job_name
+ }
+
+}
+
+Function RunAsScheduledJob {
+ Param([scriptblock] $job_body, [string] $job_name, [scriptblock] $job_init, [Object[]] $job_arg_list=@())
+
+ DestroyScheduledJob -job_name $job_name
+
+ $rsj_args = @{
+ ScriptBlock = $job_body
+ Name = $job_name
+ ArgumentList = $job_arg_list
+ ErrorAction = "Stop"
+ ScheduledJobOption = @{ RunElevated=$True }
+ }
+
+ if($job_init) { $rsj_args.InitializationScript = $job_init }
+
+ Write-DebugLog "Registering scheduled job with args $($rsj_args | Out-String -Width 300)"
+ $schedjob = Register-ScheduledJob @rsj_args
+
+    # RunAsTask isn't available in PS3 - fall back to a 2s future trigger
+ if($schedjob | Get-Member -Name RunAsTask) {
+ Write-DebugLog "Starting scheduled job (PS4 method)"
+ $schedjob.RunAsTask()
+ }
+ else {
+ Write-DebugLog "Starting scheduled job (PS3 method)"
+ Add-JobTrigger -inputobject $schedjob -trigger $(New-JobTrigger -once -at $(Get-Date).AddSeconds(2))
+ }
+
+ $sw = [System.Diagnostics.Stopwatch]::StartNew()
+
+ $job = $null
+
+ Write-DebugLog "Waiting for job completion..."
+
+ # Wait-Job can fail for a few seconds until the scheduled task starts- poll for it...
+ while ($job -eq $null) {
+ start-sleep -Milliseconds 100
+        if($sw.ElapsedMilliseconds -ge 30000) { # tasks scheduled right after boot on 2008R2 can take a while to start...
+ Throw "Timed out waiting for scheduled task to start"
+ }
+
+ # FUTURE: configurable timeout so we don't block forever?
+ # FUTURE: add a global WaitHandle in case another instance kills our job, so we don't block forever
+ $job = Wait-Job -Name $schedjob.Name -ErrorAction SilentlyContinue
+ }
+
+ $sw = [System.Diagnostics.Stopwatch]::StartNew()
+
+ # NB: output from scheduled jobs is delayed after completion (including the sub-objects after the primary Output object is available)
+ While (($job.Output -eq $null -or -not ($job.Output | Get-Member -Name Keys -ErrorAction Ignore) -or -not $job.Output.Keys.Contains('job_output')) -and $sw.ElapsedMilliseconds -lt 15000) {
+ Write-DebugLog "Waiting for job output to populate..."
+ Start-Sleep -Milliseconds 500
+ }
+
+ # NB: fallthru on both timeout and success
+
+ $ret = @{
+ ErrorOutput = $job.Error
+ WarningOutput = $job.Warning
+ VerboseOutput = $job.Verbose
+ DebugOutput = $job.Debug
+ }
+
+ If ($job.Output -eq $null -or -not $job.Output.Keys.Contains('job_output')) {
+ $ret.Output = @{failed = $true; msg = "job output was lost"}
+ }
+ Else {
+ $ret.Output = $job.Output.job_output # sub-object returned, can only be accessed as a property for some reason
+ }
+
+ Try { # this shouldn't be fatal, but can fail with both Powershell errors and COM Exceptions, hence the dual error-handling...
+ Unregister-ScheduledJob -Name $job_name -Force -ErrorAction Continue
+ }
+ Catch {
+ Write-DebugLog "Error unregistering job after execution: $($_.Exception.ToString()) $($_.ScriptStackTrace)"
+ }
+
+ return $ret
+}
+
+Function Log-Forensics {
+    Write-DebugLog "Arguments: $($job_args | out-string)"
+ Write-DebugLog "OS Version: $([environment]::OSVersion.Version | out-string)"
+ Write-DebugLog "Running as user: $([System.Security.Principal.WindowsIdentity]::GetCurrent().Name)"
+ Write-DebugLog "Powershell version: $($PSVersionTable | out-string)"
+ # FUTURE: log auth method (kerb, password, etc)
+}
+
+# code shared between the scheduled job and the host script
+$common_inject = {
+ # FUTURE: capture all to a list, dump on error
+ Function Write-DebugLog {
+ Param(
+ [string]$msg
+ )
+
+ $DebugPreference = "Continue"
+ $ErrorActionPreference = "Continue"
+ $date_str = Get-Date -Format u
+ $msg = "$date_str $msg"
+
+ Write-Debug $msg
+
+ if($log_path -ne $null) {
+ Add-Content $log_path $msg
+ }
+ }
+}
+
+# source the common code into the current scope so we can call it
+. $common_inject
+
+$parsed_args = Parse-Args $args $true
+# grr, why use PSCustomObject for args instead of just native hashtable?
+$parsed_args.psobject.properties | foreach -begin {$job_args=@{}} -process {$job_args."$($_.Name)" = $_.Value}
+
+# set the log_path for the global log function we injected earlier
+$log_path = $job_args['log_path']
+
+Log-Forensics
+
+Write-DebugLog "Starting scheduled job with args: $($job_args | Out-String -Width 300)"
+
+# pass the common code as job_init so it'll be injected into the scheduled job script
+$sjo = RunAsScheduledJob -job_init $common_inject -job_body $job_body -job_name ansible-win-updates -job_arg_list $job_args
+
+Write-DebugLog "Scheduled job completed with output: $($sjo.Output | Out-String -Width 300)"
+
+Exit-Json $sjo.Output
\ No newline at end of file
diff --git a/lib/ansible/modules/extras/windows/win_updates.py b/lib/ansible/modules/extras/windows/win_updates.py
new file mode 100644
index 0000000000..efdd1146ad
--- /dev/null
+++ b/lib/ansible/modules/extras/windows/win_updates.py
@@ -0,0 +1,137 @@
+#!/usr/bin/python
+# -*- coding: utf-8 -*-
+
+# (c) 2015, Matt Davis <mdavis_ansible@rolpdog.com>
+#
+# This file is part of Ansible
+#
+# Ansible is free software: you can redistribute it and/or modify
+# it under the terms of the GNU General Public License as published by
+# the Free Software Foundation, either version 3 of the License, or
+# (at your option) any later version.
+#
+# Ansible is distributed in the hope that it will be useful,
+# but WITHOUT ANY WARRANTY; without even the implied warranty of
+# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+# GNU General Public License for more details.
+#
+# You should have received a copy of the GNU General Public License
+# along with Ansible. If not, see <http://www.gnu.org/licenses/>.
+
+# this is a windows documentation stub. actual code lives in the .ps1
+# file of the same name
+
+DOCUMENTATION = '''
+---
+module: win_updates
+version_added: "2.0"
+short_description: Download and install Windows updates
+description:
+ - Searches, downloads, and installs Windows updates synchronously by automating the Windows Update client
+options:
+ category_names:
+ description:
+ - A scalar or list of categories to install updates from
+ required: false
+ default: ["CriticalUpdates","SecurityUpdates","UpdateRollups"]
+ choices:
+ - Application
+ - Connectors
+ - CriticalUpdates
+ - DefinitionUpdates
+ - DeveloperKits
+ - FeaturePacks
+ - Guidance
+ - SecurityUpdates
+ - ServicePacks
+ - Tools
+ - UpdateRollups
+ - Updates
+ state:
+ description:
+ - Controls whether found updates are returned as a list or actually installed.
+ - This module also supports Ansible check mode, which has the same effect as setting state=searched
+ required: false
+ default: installed
+ choices:
+ - installed
+ - searched
+ log_path:
+ description:
+ - If set, win_updates will append update progress to the specified file. The directory must already exist.
+ required: false
+author: "Matt Davis (@mattdavispdx)"
+notes:
+- win_updates must be run by a user with membership in the local Administrators group
+- win_updates will use the default update service configured for the machine (Windows Update, Microsoft Update, WSUS, etc)
+- win_updates does not manage reboots, but will signal when a reboot is required with the reboot_required return value (see the last example below).
+- win_updates can take a significant amount of time to complete (hours, in some cases). Performance depends on many factors, including OS version, number of updates, system load, and update server load.
+'''
+
+EXAMPLES = '''
+ # Install all security, critical, and rollup updates
+ win_updates:
+ category_names: ['SecurityUpdates','CriticalUpdates','UpdateRollups']
+
+ # Install only security updates
+ win_updates: category_names=SecurityUpdates
+
+ # Search-only, return list of found updates (if any), log to c:\ansible_wu.txt
+ win_updates: category_names=SecurityUpdates state=searched log_path=c:/ansible_wu.txt
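+
+  # Capture results so a later task can react to reboot_required (a sketch;
+  # 'wu_result' is an arbitrary variable name)
+  - win_updates: category_names=SecurityUpdates
+    register: wu_result
+
+  - debug: msg="{{ wu_result.installed_update_count }} installed, reboot required: {{ wu_result.reboot_required }}"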
+'''
+
+RETURN = '''
+reboot_required:
+ description: True when the target server requires a reboot to complete updates (no further updates can be installed until after a reboot)
+ returned: success
+ type: boolean
+ sample: True
+
+updates:
+ description: List of updates that were found/installed
+ returned: success
+ type: dictionary
+ sample:
+ contains:
+ title:
+ description: Display name
+ returned: always
+ type: string
+ sample: "Security Update for Windows Server 2012 R2 (KB3004365)"
+ kb:
+ description: A list of KB article IDs that apply to the update
+ returned: always
+ type: list of strings
+ sample: [ '3004365' ]
+ id:
+ description: Internal Windows Update GUID
+ returned: always
+ type: string (guid)
+ sample: "fb95c1c8-de23-4089-ae29-fd3351d55421"
+ installed:
+ description: Was the update successfully installed
+ returned: always
+ type: boolean
+ sample: True
+ failure_hresult_code:
+ description: The HRESULT code from a failed update
+ returned: on install failure
+      type: int
+ sample: 2147942402
+
+found_update_count:
+ description: The number of updates found needing to be applied
+ returned: success
+ type: int
+ sample: 3
+installed_update_count:
+ description: The number of updates successfully installed
+ returned: success
+ type: int
+ sample: 2
+failed_update_count:
+ description: The number of updates that failed to install
+ returned: always
+ type: int
+ sample: 0
+'''
diff --git a/lib/ansible/modules/extras/windows/win_uri.ps1 b/lib/ansible/modules/extras/windows/win_uri.ps1
new file mode 100644
index 0000000000..b02418e891
--- /dev/null
+++ b/lib/ansible/modules/extras/windows/win_uri.ps1
@@ -0,0 +1,81 @@
+#!powershell
+# This file is part of Ansible
+#
+# Copyright 2015, Corwin Brown <corwin@corwinbrown.com>
+#
+# Ansible is free software: you can redistribute it and/or modify
+# it under the terms of the GNU General Public License as published by
+# the Free Software Foundation, either version 3 of the License, or
+# (at your option) any later version.
+#
+# Ansible is distributed in the hope that it will be useful,
+# but WITHOUT ANY WARRANTY; without even the implied warranty of
+# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+# GNU General Public License for more details.
+#
+# You should have received a copy of the GNU General Public License
+# along with Ansible. If not, see <http://www.gnu.org/licenses/>.
+
+# WANT_JSON
+# POWERSHELL_COMMON
+
+$params = Parse-Args $args;
+
+$result = New-Object psobject @{
+ win_uri = New-Object psobject
+}
+
+# Functions ###############################################
+
+Function ConvertTo-SnakeCase($input_string) {
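+    # case-sensitively split before every uppercase letter that isn't the
+    # first character, then rejoin with underscores (RawContentLength -> raw_content_length)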
+ $snake_case = $input_string -csplit "(?<!^)(?=[A-Z])" -join "_"
+ $snake_case = $snake_case.ToLower()
+
+ return $snake_case
+}
+
+# Build Arguments
+$webrequest_opts = @{}
+
+$url = Get-AnsibleParam -obj $params -name "url" -failifempty $true
+$method = Get-AnsibleParam -obj $params -name "method" -default "GET"
+$content_type = Get-AnsibleParam -obj $params -name "content_type"
+$headers = Get-AnsibleParam -obj $params -name "headers"
+$body = Get-AnsibleParam -obj $params -name "body"
+$use_basic_parsing = ConvertTo-Bool (Get-AnsibleParam -obj $params -name "use_basic_parsing" -default $true)
+
+$webrequest_opts.Uri = $url
+Set-Attr $result.win_uri "url" $url
+
+$webrequest_opts.Method = $method
+Set-Attr $result.win_uri "method" $method
+
+$webrequest_opts.ContentType = $content_type
+Set-Attr $result.win_uri "content_type" $content_type
+
+$webrequest_opts.UseBasicParsing = $use_basic_parsing
+Set-Attr $result.win_uri "use_basic_parsing" $use_basic_parsing
+
+if ($headers -ne $null) {
+ $req_headers = @{}
+ ForEach ($header in $headers.psobject.properties) {
+ $req_headers.Add($header.Name, $header.Value)
+ }
+
+ $webrequest_opts.Headers = $req_headers
+}
+
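+# splat the accumulated options hashtable onto Invoke-WebRequest in one call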
+try {
+ $response = Invoke-WebRequest @webrequest_opts
+} catch {
+ $ErrorMessage = $_.Exception.Message
+ Fail-Json $result $ErrorMessage
+}
+
+ForEach ($prop in $response.psobject.properties) {
+ $result_key = ConvertTo-SnakeCase $prop.Name
+ $result_value = $prop.Value
+ Set-Attr $result $result_key $result_value
+}
+
+Exit-Json $result
diff --git a/lib/ansible/modules/extras/windows/win_uri.py b/lib/ansible/modules/extras/windows/win_uri.py
new file mode 100644
index 0000000000..7045f70bd4
--- /dev/null
+++ b/lib/ansible/modules/extras/windows/win_uri.py
@@ -0,0 +1,148 @@
+#!/usr/bin/python
+# -*- coding: utf-8 -*-
+
+# (c) 2015, Corwin Brown <corwin@corwinbrown.com>
+#
+# This file is part of Ansible
+#
+# Ansible is free software: you can redistribute it and/or modify
+# it under the terms of the GNU General Public License as published by
+# the Free Software Foundation, either version 3 of the License, or
+# (at your option) any later version.
+#
+# Ansible is distributed in the hope that it will be useful,
+# but WITHOUT ANY WARRANTY; without even the implied warranty of
+# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+# GNU General Public License for more details.
+#
+# You should have received a copy of the GNU General Public License
+# along with Ansible. If not, see <http://www.gnu.org/licenses/>.
+
+# this is a windows documentation stub. actual code lives in the .ps1
+# file of the same name
+
+DOCUMENTATION = """
+---
+module: win_uri
+version_added: "2.1"
+short_description: Interacts with web services.
+description:
+ - Interacts with HTTP and HTTPS web services and supports Digest, Basic and WSSE HTTP authentication mechanisms.
+options:
+ url:
+ description:
+ - HTTP or HTTPS URL in the form of (http|https)://host.domain:port/path
+ method:
+ description:
+ - The HTTP Method of the request or response.
+ default: GET
+ choices:
+ - GET
+ - POST
+ - PUT
+ - HEAD
+ - DELETE
+ - OPTIONS
+ - PATCH
+ - TRACE
+ - CONNECT
+ - REFRESH
+ content_type:
+ description:
+ - Sets the "Content-Type" header.
+ body:
+ description:
+ - The body of the HTTP request/response to the web service.
+ headers:
+ description:
+      - 'Key/value pairs of HTTP headers to send. Example "Host: www.somesite.com"'
+ use_basic_parsing:
+ description:
+      - This module relies upon 'Invoke-WebRequest', which by default uses the Internet Explorer engine to parse a webpage. There is an edge case where this fails if the user has never run IE. The only advantage of the Internet Explorer parser is that it lets a PowerShell script traverse the DOM, which is of no use to Ansible, so 'UseBasicParsing' is enabled by default. Set this to False to use the IE engine anyway (see the last example below).
+ choices:
+ - True
+ - False
+ default: True
+author: Corwin Brown (@blakfeld)
+"""
+
+EXAMPLES = """
+# Send a GET request and store the output:
+---
+- name: Perform a GET and Store Output
+ win_uri:
+ url: http://www.somesite.com/myendpoint
+ register: http_output
+
+# Set a HOST header to hit an internal webserver:
+---
+- name: Hit a Specific Host on the Server
+ win_uri:
+ url: http://my.internal.server.com
+ method: GET
+ headers:
+ host: "www.somesite.com"
+
+# Do a HEAD request on an endpoint
+---
+- name: Perform a HEAD on an Endpoint
+ win_uri:
+ url: http://www.somesite.com
+ method: HEAD
+
+# Post a body to an endpoint
+---
+- name: POST a Body to an Endpoint
+ win_uri:
+ url: http://www.somesite.com
+ method: POST
+ body: "{ 'some': 'json' }"
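+
+# Use the IE parsing engine instead of basic parsing (a sketch; per the
+# use_basic_parsing notes, IE must have been run at least once on the host)
+---
+- name: GET with IE-based parsing
+  win_uri:
+    url: http://www.somesite.com
+    use_basic_parsing: False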
+"""
+
+RETURN = """
+url:
+ description: The Target URL
+ returned: always
+ type: string
+ sample: "https://www.ansible.com"
+method:
+ description: The HTTP method used.
+ returned: always
+ type: string
+ sample: "GET"
+content_type:
+ description: The "content-type" header used.
+ returned: always
+ type: string
+ sample: "application/json"
+use_basic_parsing:
+ description: The state of the "use_basic_parsing" flag.
+ returned: always
+ type: bool
+ sample: True
+status_code:
+ description: The HTTP Status Code of the response.
+ returned: success
+ type: int
+ sample: 200
+status_description:
+  description: A summary of the status.
+  returned: success
+  type: string
+  sample: "OK"
+raw_content:
+ description: The raw content of the HTTP response.
+ returned: success
+ type: string
+ sample: 'HTTP/1.1 200 OK\nX-XSS-Protection: 1; mode=block\nX-Frame-Options: SAMEORIGIN\nAlternate-Protocol: 443:quic,p=1\nAlt-Svc: quic="www.google.com:443"; ma=2592000; v="30,29,28,27,26,25",quic=":443"; ma=2...'
+headers:
+ description: The Headers of the response.
+ returned: success
+ type: dict
+ sample: {"Content-Type": "application/json"}
+raw_content_length:
+ description: The byte size of the response.
+ returned: success
+ type: int
+ sample: 54447
+"""
diff --git a/lib/ansible/modules/extras/windows/win_webpicmd.ps1 b/lib/ansible/modules/extras/windows/win_webpicmd.ps1
new file mode 100644
index 0000000000..a8624739d7
--- /dev/null
+++ b/lib/ansible/modules/extras/windows/win_webpicmd.ps1
@@ -0,0 +1,132 @@
+#!powershell
+# This file is part of Ansible
+#
+# Copyright 2015, Peter Mounce <public@neverrunwithscissors.com>
+#
+# Ansible is free software: you can redistribute it and/or modify
+# it under the terms of the GNU General Public License as published by
+# the Free Software Foundation, either version 3 of the License, or
+# (at your option) any later version.
+#
+# Ansible is distributed in the hope that it will be useful,
+# but WITHOUT ANY WARRANTY; without even the implied warranty of
+# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+# GNU General Public License for more details.
+#
+# You should have received a copy of the GNU General Public License
+# along with Ansible. If not, see <http://www.gnu.org/licenses/>.
+
+$ErrorActionPreference = "Stop"
+
+# WANT_JSON
+# POWERSHELL_COMMON
+
+$params = Parse-Args $args;
+$result = New-Object PSObject;
+Set-Attr $result "changed" $false;
+
+$package = Get-AnsibleParam $params -name "name" -failifempty $true
+
+Function Find-Command
+{
+ [CmdletBinding()]
+ param(
+ [Parameter(Mandatory=$true, Position=0)] [string] $command
+ )
+ $installed = get-command $command -erroraction Ignore
+ write-verbose "$installed"
+ if ($installed)
+ {
+ return $installed
+ }
+ return $null
+}
+
+Function Find-WebPiCmd
+{
+ [CmdletBinding()]
+ param()
+ $p = Find-Command "webpicmd.exe"
+ if ($p -ne $null)
+ {
+ return $p
+ }
+ $a = Find-Command "c:\programdata\chocolatey\bin\webpicmd.exe"
+ if ($a -ne $null)
+ {
+ return $a
+ }
+    Throw "webpicmd.exe is not installed. Install it first (e.g. via Chocolatey)."
+}
+
+Function Test-IsInstalledFromWebPI
+{
+ [CmdletBinding()]
+
+ param(
+ [Parameter(Mandatory=$true, Position=0)]
+ [string]$package
+ )
+
+ $cmd = "$executable /list /listoption:installed"
+ $results = invoke-expression $cmd
+
+ if ($LastExitCode -ne 0)
+ {
+ Set-Attr $result "webpicmd_error_cmd" $cmd
+ Set-Attr $result "webpicmd_error_log" "$results"
+
+ Throw "Error checking installation status for $package"
+ }
+ Write-Verbose "$results"
+
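+    # webpicmd lists one installed product ID per output line, so a match
+    # anchored at the start of a line means the package is already present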
+ if ($results -match "^$package\s+")
+ {
+ return $true
+ }
+
+ return $false
+}
+
+Function Install-WithWebPICmd
+{
+ [CmdletBinding()]
+
+ param(
+ [Parameter(Mandatory=$true, Position=0)]
+ [string]$package
+ )
+
+ $cmd = "$executable /install /products:$package /accepteula /suppressreboot"
+
+ $results = invoke-expression $cmd
+
+ if ($LastExitCode -ne 0)
+ {
+ Set-Attr $result "webpicmd_error_cmd" $cmd
+ Set-Attr $result "webpicmd_error_log" "$results"
+ Throw "Error installing $package"
+ }
+
+ write-verbose "$results"
+
+ if ($results -match "Install of Products: SUCCESS")
+ {
+ $result.changed = $true
+ }
+}
+
+Try
+{
+ $script:executable = Find-WebPiCmd
+ if ((Test-IsInstalledFromWebPI -package $package) -eq $false)
+ {
+ Install-WithWebPICmd -package $package
+ }
+
+ Exit-Json $result;
+}
+Catch
+{
+ Fail-Json $result $_.Exception.Message
+}
diff --git a/lib/ansible/modules/extras/windows/win_webpicmd.py b/lib/ansible/modules/extras/windows/win_webpicmd.py
new file mode 100644
index 0000000000..215123cef8
--- /dev/null
+++ b/lib/ansible/modules/extras/windows/win_webpicmd.py
@@ -0,0 +1,47 @@
+#!/usr/bin/python
+# -*- coding: utf-8 -*-
+
+# (c) 2015, Peter Mounce <public@neverrunwithscissors.com>
+#
+# This file is part of Ansible
+#
+# Ansible is free software: you can redistribute it and/or modify
+# it under the terms of the GNU General Public License as published by
+# the Free Software Foundation, either version 3 of the License, or
+# (at your option) any later version.
+#
+# Ansible is distributed in the hope that it will be useful,
+# but WITHOUT ANY WARRANTY; without even the implied warranty of
+# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+# GNU General Public License for more details.
+#
+# You should have received a copy of the GNU General Public License
+# along with Ansible. If not, see <http://www.gnu.org/licenses/>.
+
+# this is a windows documentation stub. actual code lives in the .ps1
+# file of the same name
+
+DOCUMENTATION = '''
+---
+module: win_webpicmd
+version_added: "2.0"
+short_description: Installs packages using Web Platform Installer command-line
+description:
+ - Installs packages using Web Platform Installer command-line (http://www.iis.net/learn/install/web-platform-installer/web-platform-installer-v4-command-line-webpicmdexe-rtw-release).
+    - WebPICmd must be installed and present in PATH (see the win_chocolatey module; 'webpicmd' is the package name, and you must install 'lessmsi' first too).
+    - Install IIS first (see the win_feature module and the second example below).
+notes:
+  - Accepts EULAs and suppresses reboots - you will need to check for and manage reboots yourself (see the win_reboot module).
+options:
+ name:
+ description:
+ - Name of the package to be installed
+ required: true
+author: Peter Mounce
+'''
+
+EXAMPLES = '''
+ # Install URLRewrite2.
+ win_webpicmd:
+ name: URLRewrite2
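+
+  # Per the description above, ensure IIS is present before installing (a
+  # sketch; the 'Web-Server' feature name is an assumption for this example)
+  - win_feature:
+      name: Web-Server
+
+  - win_webpicmd:
+      name: URLRewrite2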
+'''