-rw-r--r--  .gitmodules | 6
-rw-r--r--  .travis.yml | 4
-rw-r--r--  CHANGELOG.md | 437
-rw-r--r--  MANIFEST.in | 2
-rw-r--r--  VERSION | 2
-rwxr-xr-x  contrib/inventory/abiquo.py | 28
-rwxr-xr-x  contrib/inventory/apache-libcloud.py | 17
-rwxr-xr-x  contrib/inventory/cloudstack.py | 18
-rwxr-xr-x  contrib/inventory/cobbler.py | 17
-rwxr-xr-x  contrib/inventory/collins.py | 18
-rwxr-xr-x  contrib/inventory/consul_io.py | 11
-rwxr-xr-x  contrib/inventory/digital_ocean.py | 16
-rwxr-xr-x  contrib/inventory/docker.py | 2
-rwxr-xr-x  contrib/inventory/ec2.py | 6
-rwxr-xr-x  contrib/inventory/fleet.py | 4
-rwxr-xr-x  contrib/inventory/freeipa.py | 4
-rwxr-xr-x  contrib/inventory/gce.py | 6
-rwxr-xr-x  contrib/inventory/jail.py | 6
-rwxr-xr-x  contrib/inventory/libvirt_lxc.py | 6
-rwxr-xr-x  contrib/inventory/linode.py | 20
-rw-r--r--  contrib/inventory/nagios_ndo.ini | 10
-rwxr-xr-x  contrib/inventory/nagios_ndo.py | 109
-rwxr-xr-x  contrib/inventory/nova.py | 5
-rwxr-xr-x  contrib/inventory/openshift.py | 34
-rwxr-xr-x  contrib/inventory/openvz.py | 6
-rwxr-xr-x  contrib/inventory/ovirt.py | 4
-rwxr-xr-x  contrib/inventory/proxmox.py | 18
-rwxr-xr-x  contrib/inventory/rax.py | 8
-rwxr-xr-x  contrib/inventory/softlayer.py | 4
-rwxr-xr-x  contrib/inventory/spacewalk.py | 48
-rwxr-xr-x  contrib/inventory/ssh_config.py | 4
-rwxr-xr-x  contrib/inventory/vagrant.py | 4
-rwxr-xr-x  contrib/inventory/vbox.py | 2
-rwxr-xr-x  contrib/inventory/vmware.py | 12
-rwxr-xr-x  contrib/inventory/windows_azure.py | 16
-rwxr-xr-x  contrib/inventory/zabbix.py | 17
-rwxr-xr-x  contrib/inventory/zone.py | 6
-rw-r--r--  docs/man/man1/ansible-playbook.1.asciidoc.in | 5
-rw-r--r--  docs/man/man1/ansible-pull.1.asciidoc.in | 5
-rw-r--r--  docs/man/man1/ansible-vault.1 | 28
-rw-r--r--  docs/man/man1/ansible-vault.1.asciidoc.in | 30
-rw-r--r--  docs/man/man1/ansible.1.asciidoc.in | 5
-rw-r--r--  docsite/README.md | 2
-rw-r--r--  docsite/rst/become.rst | 2
-rw-r--r--  docsite/rst/community.rst | 5
-rw-r--r--  docsite/rst/developing_modules.rst | 15
-rw-r--r--  docsite/rst/developing_plugins.rst | 2
-rw-r--r--  docsite/rst/faq.rst | 35
-rw-r--r--  docsite/rst/guide_rax.rst | 14
-rw-r--r--  docsite/rst/guide_vagrant.rst | 2
-rw-r--r--  docsite/rst/intro_configuration.rst | 40
-rw-r--r--  docsite/rst/intro_getting_started.rst | 6
-rw-r--r--  docsite/rst/intro_installation.rst | 15
-rw-r--r--  docsite/rst/intro_inventory.rst | 19
-rw-r--r--  docsite/rst/intro_patterns.rst | 31
-rw-r--r--  docsite/rst/intro_windows.rst | 95
-rw-r--r--  docsite/rst/modules_extra.rst | 2
-rw-r--r--  docsite/rst/playbooks_acceleration.rst | 3
-rw-r--r--  docsite/rst/playbooks_error_handling.rst | 2
-rw-r--r--  docsite/rst/playbooks_filters.rst | 112
-rw-r--r--  docsite/rst/playbooks_intro.rst | 58
-rw-r--r--  docsite/rst/playbooks_lookups.rst | 4
-rw-r--r--  docsite/rst/playbooks_variables.rst | 86
-rw-r--r--  examples/ansible.cfg | 17
-rw-r--r--  examples/scripts/ConfigureRemotingForAnsible.ps1 | 47
-rwxr-xr-x  examples/scripts/yaml_to_ini.py | 3
-rw-r--r--  hacking/env-setup | 4
-rw-r--r--  hacking/env-setup.fish | 1
-rwxr-xr-x  hacking/get_library.py | 2
-rwxr-xr-x  hacking/module_formatter.py | 16
-rwxr-xr-x  hacking/test-module | 32
-rw-r--r--  lib/ansible/cli/__init__.py | 15
-rw-r--r--  lib/ansible/cli/adhoc.py | 7
-rw-r--r--  lib/ansible/cli/doc.py | 18
-rw-r--r--  lib/ansible/cli/galaxy.py | 36
-rw-r--r--  lib/ansible/cli/playbook.py | 8
-rw-r--r--  lib/ansible/cli/pull.py | 2
-rw-r--r--  lib/ansible/cli/vault.py | 78
-rw-r--r--  lib/ansible/constants.py | 60
-rw-r--r--  lib/ansible/executor/module_common.py | 26
-rw-r--r--  lib/ansible/executor/play_iterator.py | 261
-rw-r--r--  lib/ansible/executor/playbook_executor.py | 30
-rw-r--r--  lib/ansible/executor/process/result.py | 8
-rw-r--r--  lib/ansible/executor/process/worker.py | 28
-rw-r--r--  lib/ansible/executor/task_executor.py | 149
-rw-r--r--  lib/ansible/executor/task_queue_manager.py | 18
-rw-r--r--  lib/ansible/galaxy/__init__.py | 8
-rw-r--r--  lib/ansible/galaxy/api.py | 17
-rw-r--r--  lib/ansible/galaxy/data/metadata_template.j2 | 2
-rw-r--r--  lib/ansible/galaxy/role.py | 146
-rw-r--r--  lib/ansible/inventory/__init__.py | 387
-rw-r--r--  lib/ansible/inventory/dir.py | 20
-rw-r--r--  lib/ansible/inventory/expand_hosts.py | 2
-rw-r--r--  lib/ansible/inventory/host.py | 26
-rw-r--r--  lib/ansible/inventory/ini.py | 106
-rw-r--r--  lib/ansible/inventory/script.py | 49
-rw-r--r--  lib/ansible/module_utils/cloudstack.py | 3
-rw-r--r--  lib/ansible/module_utils/ec2.py | 18
-rw-r--r--  lib/ansible/module_utils/f5.py | 3
-rw-r--r--  lib/ansible/module_utils/facts.py | 22
-rw-r--r--  lib/ansible/module_utils/known_hosts.py | 42
-rw-r--r--  lib/ansible/module_utils/openstack.py | 4
-rw-r--r--  lib/ansible/module_utils/powershell.ps1 | 45
-rw-r--r--  lib/ansible/module_utils/vca.py | 303
m---------  lib/ansible/modules/core | 13
m---------  lib/ansible/modules/extras | 13
-rw-r--r--  lib/ansible/parsing/__init__.py | 45
-rw-r--r--  lib/ansible/parsing/mod_args.py | 21
-rw-r--r--  lib/ansible/parsing/utils/addresses.py | 215
-rw-r--r--  lib/ansible/parsing/vault/__init__.py | 197
-rw-r--r--  lib/ansible/parsing/yaml/dumper.py | 8
-rw-r--r--  lib/ansible/playbook/attribute.py | 38
-rw-r--r--  lib/ansible/playbook/base.py | 42
-rw-r--r--  lib/ansible/playbook/block.py | 35
-rw-r--r--  lib/ansible/playbook/conditional.py | 9
-rw-r--r--  lib/ansible/playbook/helpers.py | 6
-rw-r--r--  lib/ansible/playbook/included_file.py | 8
-rw-r--r--  lib/ansible/playbook/play.py | 83
-rw-r--r--  lib/ansible/playbook/play_context.py | 113
-rw-r--r--  lib/ansible/playbook/playbook_include.py | 4
-rw-r--r--  lib/ansible/playbook/role/__init__.py | 21
-rw-r--r--  lib/ansible/playbook/role/definition.py | 5
-rw-r--r--  lib/ansible/playbook/role/requirement.py | 169
-rw-r--r--  lib/ansible/playbook/task.py | 42
-rw-r--r--  lib/ansible/plugins/__init__.py | 32
-rw-r--r--  lib/ansible/plugins/action/__init__.py | 16
-rw-r--r--  lib/ansible/plugins/action/add_host.py | 36
-rw-r--r--  lib/ansible/plugins/action/async.py | 2
-rw-r--r--  lib/ansible/plugins/action/fetch.py | 9
-rw-r--r--  lib/ansible/plugins/action/include_vars.py | 2
-rw-r--r--  lib/ansible/plugins/action/package.py | 6
-rw-r--r--  lib/ansible/plugins/action/service.py | 3
-rw-r--r--  lib/ansible/plugins/action/set_fact.py | 38
-rw-r--r--  lib/ansible/plugins/action/synchronize.py | 15
-rw-r--r--  lib/ansible/plugins/action/template.py | 12
-rw-r--r--  lib/ansible/plugins/action/unarchive.py | 2
-rw-r--r--  lib/ansible/plugins/cache/__init__.py | 2
-rw-r--r--  lib/ansible/plugins/cache/jsonfile.py | 12
-rw-r--r--  lib/ansible/plugins/cache/redis.py | 2
-rw-r--r--  lib/ansible/plugins/callback/__init__.py | 9
-rw-r--r--  lib/ansible/plugins/callback/default.py | 10
-rw-r--r--  lib/ansible/plugins/callback/hipchat.py | 4
-rw-r--r--  lib/ansible/plugins/callback/profile_tasks.py | 12
-rw-r--r--  lib/ansible/plugins/connection/__init__.py (renamed from lib/ansible/plugins/connections/__init__.py) | 10
-rw-r--r--  lib/ansible/plugins/connection/accelerate.py (renamed from lib/ansible/plugins/connections/accelerate.py) | 0
-rw-r--r--  lib/ansible/plugins/connection/chroot.py (renamed from lib/ansible/plugins/connections/chroot.py) | 29
-rw-r--r--  lib/ansible/plugins/connection/docker.py (renamed from lib/ansible/plugins/connections/docker.py) | 2
-rw-r--r--  lib/ansible/plugins/connection/funcd.py (renamed from lib/ansible/plugins/connections/funcd.py) | 0
-rw-r--r--  lib/ansible/plugins/connection/jail.py (renamed from lib/ansible/plugins/connections/jail.py) | 0
-rw-r--r--  lib/ansible/plugins/connection/libvirt_lxc.py (renamed from lib/ansible/plugins/connections/libvirt_lxc.py) | 0
-rw-r--r--  lib/ansible/plugins/connection/local.py (renamed from lib/ansible/plugins/connections/local.py) | 4
-rw-r--r--  lib/ansible/plugins/connection/paramiko_ssh.py (renamed from lib/ansible/plugins/connections/paramiko_ssh.py) | 87
-rw-r--r--  lib/ansible/plugins/connection/ssh.py (renamed from lib/ansible/plugins/connections/ssh.py) | 310
-rw-r--r--  lib/ansible/plugins/connection/winrm.py (renamed from lib/ansible/plugins/connections/winrm.py) | 134
-rw-r--r--  lib/ansible/plugins/connection/zone.py (renamed from lib/ansible/plugins/connections/zone.py) | 0
-rw-r--r--  lib/ansible/plugins/filter/core.py | 102
-rw-r--r--  lib/ansible/plugins/filter/mathstuff.py | 20
-rwxr-xr-x  lib/ansible/plugins/lookup/consul_kv.py | 6
-rw-r--r--  lib/ansible/plugins/lookup/credstash.py | 2
-rw-r--r--  lib/ansible/plugins/lookup/csvfile.py | 4
-rw-r--r--  lib/ansible/plugins/lookup/dig.py | 8
-rw-r--r--  lib/ansible/plugins/lookup/ini.py | 4
-rw-r--r--  lib/ansible/plugins/lookup/nested.py | 2
-rw-r--r--  lib/ansible/plugins/lookup/password.py | 101
-rw-r--r--  lib/ansible/plugins/lookup/sequence.py | 2
-rw-r--r--  lib/ansible/plugins/lookup/shelvefile.py | 2
-rw-r--r--  lib/ansible/plugins/lookup/template.py | 7
-rw-r--r--  lib/ansible/plugins/shell/csh.py | 1
-rw-r--r--  lib/ansible/plugins/shell/powershell.py | 45
-rw-r--r--  lib/ansible/plugins/shell/sh.py | 15
-rw-r--r--  lib/ansible/plugins/strategy/__init__.py (renamed from lib/ansible/plugins/strategies/__init__.py) | 189
-rw-r--r--  lib/ansible/plugins/strategy/free.py (renamed from lib/ansible/plugins/strategies/free.py) | 8
-rw-r--r--  lib/ansible/plugins/strategy/linear.py (renamed from lib/ansible/plugins/strategies/linear.py) | 48
-rw-r--r--  lib/ansible/template/__init__.py | 143
-rw-r--r--  lib/ansible/template/safe_eval.py | 17
-rw-r--r--  lib/ansible/template/vars.py | 3
-rw-r--r--  lib/ansible/utils/listify.py | 7
-rwxr-xr-x [-rw-r--r--]  lib/ansible/utils/module_docs.py | 14
-rw-r--r--  lib/ansible/utils/module_docs_fragments/cloudstack.py | 13
-rw-r--r--  lib/ansible/utils/module_docs_fragments/openstack.py | 2
-rw-r--r--  lib/ansible/utils/path.py | 8
-rw-r--r--  lib/ansible/utils/unicode.py | 2
-rw-r--r--  lib/ansible/utils/vars.py | 97
-rw-r--r--  lib/ansible/vars/__init__.py | 227
-rw-r--r--  lib/ansible/vars/hostvars.py | 4
-rw-r--r--  lib/ansible/vars/unsafe_proxy.py | 151
-rw-r--r--  packaging/port/sysutils/ansible/Makefile | 69
-rw-r--r--  packaging/rpm/ansible.spec | 11
-rw-r--r--  samples/include.yml | 2
-rw-r--r--  samples/test_block.yml | 1
-rw-r--r--  samples/test_blocks_of_blocks.yml | 6
-rw-r--r--  setup.py | 2
-rwxr-xr-x  test/code-smell/replace-urlopen.sh | 12
-rw-r--r--  test/integration/cleanup_ec2.py | 6
-rw-r--r--  test/integration/cleanup_gce.py | 4
-rwxr-xr-x [-rw-r--r--]  test/integration/cleanup_rax.py | 4
-rw-r--r--  test/integration/cloudstack.yml | 1
-rw-r--r--  test/integration/consul_running.py | 2
-rw-r--r--  test/integration/galaxy_roles.yml | 4
-rw-r--r--  test/integration/roles/test_cs_user/meta/main.yml | 3
-rw-r--r--  test/integration/roles/test_cs_user/tasks/main.yml | 276
-rw-r--r--  test/integration/roles/test_fetch/tasks/main.yml | 31
-rw-r--r--  test/integration/roles/test_includes/tasks/main.yml | 11
-rwxr-xr-x  test/integration/roles/test_service/files/ansible_test_service | 4
-rw-r--r--  test/integration/roles/test_win_copy/files/empty.txt (renamed from lib/ansible/galaxy/data/__init__.py) | 0
-rw-r--r--  test/integration/roles/test_win_copy/tasks/main.yml | 35
-rw-r--r--  test/integration/roles/test_win_group/defaults/main.yml | 4
-rw-r--r--  test/integration/roles/test_win_group/tasks/main.yml | 101
-rw-r--r--  test/integration/roles/test_win_lineinfile/files/test.txt | 5
-rw-r--r--  test/integration/roles/test_win_lineinfile/files/test_quoting.txt (renamed from v1/ansible/callback_plugins/__init__.py) | 0
-rw-r--r--  test/integration/roles/test_win_lineinfile/files/testempty.txt (renamed from v1/ansible/inventory/vars_plugins/__init__.py) | 0
-rw-r--r--  test/integration/roles/test_win_lineinfile/files/testnoeof.txt | 2
-rw-r--r--  test/integration/roles/test_win_lineinfile/meta/main.yml | 3
-rw-r--r--  test/integration/roles/test_win_lineinfile/tasks/main.yml | 641
-rw-r--r--  test/integration/roles/test_win_msi/defaults/main.yml | 6
-rw-r--r--  test/integration/roles/test_win_msi/tasks/main.yml | 35
-rw-r--r--  test/integration/roles/test_win_ping/library/win_ping_set_attr.ps1 | 31
-rw-r--r--  test/integration/roles/test_win_ping/library/win_ping_strict_mode_error.ps1 (renamed from v1/ansible/runner/shell_plugins/fish.py) | 21
-rw-r--r--  test/integration/roles/test_win_ping/library/win_ping_syntax_error.ps1 | 30
-rw-r--r--  test/integration/roles/test_win_ping/library/win_ping_throw.ps1 (renamed from v1/ansible/module_utils/__init__.py) | 17
-rw-r--r-- [-rwxr-xr-x]  test/integration/roles/test_win_ping/library/win_ping_throw_string.ps1 (renamed from v1/hacking/get_library.py) | 25
-rw-r--r--  test/integration/roles/test_win_ping/tasks/main.yml | 65
-rw-r--r--  test/integration/roles/test_win_raw/tasks/main.yml | 9
-rw-r--r--  test/integration/roles/test_win_script/files/test_script_bool.ps1 | 6
-rw-r--r--  test/integration/roles/test_win_script/tasks/main.yml | 10
-rw-r--r--  test/integration/setup_gce.py | 4
-rw-r--r--  test/integration/test_winrm.yml | 3
-rw-r--r--  test/integration/unicode.yml | 11
-rw-r--r--  test/units/executor/test_play_iterator.py | 4
-rw-r--r--  test/units/executor/test_task_executor.py | 9
-rw-r--r--  test/units/inventory/__init__.py (renamed from v1/ansible/__init__.py) | 7
-rw-r--r--  test/units/inventory/test_inventory.py | 100
-rw-r--r--  test/units/mock/loader.py | 4
-rw-r--r--  test/units/module_utils/test_basic.py | 24
-rw-r--r--  test/units/parsing/test_addresses.py | 70
-rw-r--r--  test/units/parsing/test_data_loader.py | 11
-rw-r--r--  test/units/parsing/test_mod_args.py | 2
-rw-r--r--  test/units/parsing/vault/test_vault.py | 5
-rw-r--r--  test/units/parsing/vault/test_vault_editor.py | 18
-rw-r--r--  test/units/playbook/test_attribute.py | 55
-rw-r--r--  test/units/playbook/test_play_context.py | 16
-rw-r--r--  test/units/plugins/action/test_action.py | 57
-rw-r--r--  test/units/plugins/action/test_add_host.py | 47
-rw-r--r--  test/units/plugins/cache/test_cache.py | 17
-rw-r--r--  test/units/plugins/connections/test_connection.py | 20
-rw-r--r--  test/units/plugins/lookup/test_password.py | 136
-rw-r--r--  test/units/plugins/strategies/test_strategy_base.py | 2
-rw-r--r--  test/units/plugins/test_plugins.py | 2
-rw-r--r--  test/units/template/test_safe_eval.py | 30
-rw-r--r--  test/units/template/test_templar.py | 33
-rw-r--r--  test/units/template/test_template_utilities.py | 114
-rw-r--r--  test/units/utils/__init__.py (renamed from v1/ansible/modules/__init__.py) | 0
-rw-r--r--  test/units/utils/test_vars.py | 98
-rw-r--r--  test/units/vars/test_variable_manager.py | 166
-rw-r--r--  tox.ini | 11
-rw-r--r--  v1/README.md | 11
-rw-r--r--  v1/ansible/cache/__init__.py | 61
-rw-r--r--  v1/ansible/cache/base.py | 41
-rw-r--r--  v1/ansible/cache/jsonfile.py | 143
-rw-r--r--  v1/ansible/cache/memcached.py | 191
-rw-r--r--  v1/ansible/cache/memory.py | 44
-rw-r--r--  v1/ansible/cache/redis.py | 107
-rw-r--r--  v1/ansible/callback_plugins/noop.py | 94
-rw-r--r--  v1/ansible/callbacks.py | 729
-rw-r--r--  v1/ansible/color.py | 74
-rw-r--r--  v1/ansible/constants.py | 212
-rw-r--r--  v1/ansible/errors.py | 35
-rw-r--r--  v1/ansible/inventory/__init__.py | 654
-rw-r--r--  v1/ansible/inventory/dir.py | 229
-rw-r--r--  v1/ansible/inventory/expand_hosts.py | 116
-rw-r--r--  v1/ansible/inventory/group.py | 117
-rw-r--r--  v1/ansible/inventory/host.py | 67
-rw-r--r--  v1/ansible/inventory/ini.py | 208
-rw-r--r--  v1/ansible/inventory/script.py | 154
-rw-r--r--  v1/ansible/inventory/vars_plugins/noop.py | 48
-rw-r--r--  v1/ansible/module_common.py | 196
-rw-r--r--  v1/ansible/module_utils/a10.py | 103
-rw-r--r--  v1/ansible/module_utils/basic.py | 1631
-rw-r--r--  v1/ansible/module_utils/cloudstack.py | 368
-rw-r--r--  v1/ansible/module_utils/database.py | 128
-rw-r--r--  v1/ansible/module_utils/ec2.py | 188
-rw-r--r--  v1/ansible/module_utils/facts.py | 2786
-rw-r--r--  v1/ansible/module_utils/gce.py | 93
-rw-r--r--  v1/ansible/module_utils/known_hosts.py | 176
-rw-r--r--  v1/ansible/module_utils/openstack.py | 104
-rw-r--r--  v1/ansible/module_utils/powershell.ps1 | 166
-rw-r--r--  v1/ansible/module_utils/rax.py | 328
-rw-r--r--  v1/ansible/module_utils/redhat.py | 280
-rw-r--r--  v1/ansible/module_utils/splitter.py | 201
-rw-r--r--  v1/ansible/module_utils/urls.py | 496
m---------  v1/ansible/modules/core | 6
m---------  v1/ansible/modules/extras | 9
-rw-r--r--  v1/ansible/playbook/__init__.py | 874
-rw-r--r--  v1/ansible/playbook/play.py | 949
-rw-r--r--  v1/ansible/playbook/task.py | 346
-rw-r--r--  v1/ansible/runner/__init__.py | 1517
-rw-r--r--  v1/ansible/runner/action_plugins/__init__.py | 0
-rw-r--r--  v1/ansible/runner/action_plugins/add_host.py | 111
-rw-r--r--  v1/ansible/runner/action_plugins/assemble.py | 158
-rw-r--r--  v1/ansible/runner/action_plugins/assert.py | 64
-rw-r--r--  v1/ansible/runner/action_plugins/async.py | 48
-rw-r--r--  v1/ansible/runner/action_plugins/copy.py | 381
-rw-r--r--  v1/ansible/runner/action_plugins/debug.py | 60
-rw-r--r--  v1/ansible/runner/action_plugins/fail.py | 44
-rw-r--r--  v1/ansible/runner/action_plugins/fetch.py | 173
-rw-r--r--  v1/ansible/runner/action_plugins/group_by.py | 108
-rw-r--r--  v1/ansible/runner/action_plugins/include_vars.py | 56
-rw-r--r--  v1/ansible/runner/action_plugins/normal.py | 59
-rw-r--r--  v1/ansible/runner/action_plugins/patch.py | 69
-rw-r--r--  v1/ansible/runner/action_plugins/pause.py | 139
-rw-r--r--  v1/ansible/runner/action_plugins/raw.py | 54
-rw-r--r--  v1/ansible/runner/action_plugins/script.py | 136
-rw-r--r--  v1/ansible/runner/action_plugins/set_fact.py | 47
-rw-r--r--  v1/ansible/runner/action_plugins/synchronize.py | 218
-rw-r--r--  v1/ansible/runner/action_plugins/template.py | 179
-rw-r--r--  v1/ansible/runner/action_plugins/unarchive.py | 121
-rw-r--r--  v1/ansible/runner/action_plugins/win_copy.py | 377
-rw-r--r--  v1/ansible/runner/action_plugins/win_template.py | 146
-rw-r--r--  v1/ansible/runner/connection.py | 53
-rw-r--r--  v1/ansible/runner/connection_plugins/__init__.py | 0
-rw-r--r--  v1/ansible/runner/connection_plugins/accelerate.py | 372
-rw-r--r--  v1/ansible/runner/connection_plugins/chroot.py | 132
-rw-r--r--  v1/ansible/runner/connection_plugins/fireball.py | 153
-rw-r--r--  v1/ansible/runner/connection_plugins/funcd.py | 97
-rw-r--r--  v1/ansible/runner/connection_plugins/jail.py | 153
-rw-r--r--  v1/ansible/runner/connection_plugins/libvirt_lxc.py | 129
-rw-r--r--  v1/ansible/runner/connection_plugins/local.py | 129
-rw-r--r--  v1/ansible/runner/connection_plugins/paramiko_ssh.py | 419
-rw-r--r--  v1/ansible/runner/connection_plugins/ssh.py | 460
-rw-r--r--  v1/ansible/runner/connection_plugins/winrm.py | 270
-rw-r--r--  v1/ansible/runner/connection_plugins/zone.py | 162
-rw-r--r--  v1/ansible/runner/filter_plugins/__init__.py | 0
-rw-r--r--  v1/ansible/runner/filter_plugins/core.py | 431
-rw-r--r--  v1/ansible/runner/filter_plugins/ipaddr.py | 659
-rw-r--r--  v1/ansible/runner/filter_plugins/mathstuff.py | 126
-rw-r--r--  v1/ansible/runner/lookup_plugins/__init__.py | 0
-rw-r--r--  v1/ansible/runner/lookup_plugins/cartesian.py | 59
-rwxr-xr-x  v1/ansible/runner/lookup_plugins/consul_kv.py | 128
-rw-r--r--  v1/ansible/runner/lookup_plugins/csvfile.py | 85
-rw-r--r--  v1/ansible/runner/lookup_plugins/dict.py | 39
-rw-r--r--  v1/ansible/runner/lookup_plugins/dig.py | 212
-rw-r--r--  v1/ansible/runner/lookup_plugins/dnstxt.py | 68
-rw-r--r--  v1/ansible/runner/lookup_plugins/env.py | 41
-rw-r--r--  v1/ansible/runner/lookup_plugins/etcd.py | 78
-rw-r--r--  v1/ansible/runner/lookup_plugins/file.py | 59
-rw-r--r--  v1/ansible/runner/lookup_plugins/fileglob.py | 39
-rw-r--r--  v1/ansible/runner/lookup_plugins/first_found.py | 194
-rw-r--r--  v1/ansible/runner/lookup_plugins/flattened.py | 78
-rw-r--r--  v1/ansible/runner/lookup_plugins/indexed_items.py | 44
-rw-r--r--  v1/ansible/runner/lookup_plugins/inventory_hostnames.py | 48
-rw-r--r--  v1/ansible/runner/lookup_plugins/items.py | 44
-rw-r--r--  v1/ansible/runner/lookup_plugins/lines.py | 38
-rw-r--r--  v1/ansible/runner/lookup_plugins/nested.py | 73
-rw-r--r--  v1/ansible/runner/lookup_plugins/password.py | 129
-rw-r--r--  v1/ansible/runner/lookup_plugins/pipe.py | 52
-rw-r--r--  v1/ansible/runner/lookup_plugins/random_choice.py | 41
-rw-r--r--  v1/ansible/runner/lookup_plugins/redis_kv.py | 72
-rw-r--r--  v1/ansible/runner/lookup_plugins/sequence.py | 216
-rw-r--r--  v1/ansible/runner/lookup_plugins/subelements.py | 67
-rw-r--r--  v1/ansible/runner/lookup_plugins/template.py | 33
-rw-r--r--  v1/ansible/runner/lookup_plugins/together.py | 64
-rw-r--r--  v1/ansible/runner/lookup_plugins/url.py | 48
-rw-r--r--  v1/ansible/runner/poller.py | 115
-rw-r--r--  v1/ansible/runner/return_data.py | 58
-rw-r--r--  v1/ansible/runner/shell_plugins/__init__.py | 0
-rw-r--r--  v1/ansible/runner/shell_plugins/csh.py | 26
-rw-r--r--  v1/ansible/runner/shell_plugins/powershell.py | 131
-rw-r--r--  v1/ansible/runner/shell_plugins/sh.py | 130
-rw-r--r--  v1/ansible/utils/__init__.py | 1662
-rw-r--r--  v1/ansible/utils/cmd_functions.py | 59
-rw-r--r--  v1/ansible/utils/display_functions.py | 63
-rw-r--r--  v1/ansible/utils/hashing.py | 91
-rw-r--r--  v1/ansible/utils/module_docs.py | 111
l---------  v1/ansible/utils/module_docs_fragments | 1
-rw-r--r--  v1/ansible/utils/plugins.py | 304
-rw-r--r--  v1/ansible/utils/string_functions.py | 18
-rw-r--r--  v1/ansible/utils/su_prompts.py | 60
-rw-r--r--  v1/ansible/utils/template.py | 405
-rw-r--r--  v1/ansible/utils/unicode.py | 248
-rw-r--r--  v1/ansible/utils/vault.py | 585
-rwxr-xr-x  v1/bin/ansible | 207
-rwxr-xr-x  v1/bin/ansible-doc | 337
-rwxr-xr-x  v1/bin/ansible-galaxy | 957
-rwxr-xr-x  v1/bin/ansible-playbook | 330
-rwxr-xr-x  v1/bin/ansible-pull | 257
-rwxr-xr-x  v1/bin/ansible-vault | 241
-rw-r--r--  v1/hacking/README.md | 48
-rwxr-xr-x  v1/hacking/authors.sh | 14
-rw-r--r--  v1/hacking/env-setup | 78
-rw-r--r--  v1/hacking/env-setup.fish | 67
-rwxr-xr-x  v1/hacking/module_formatter.py | 447
-rw-r--r--  v1/hacking/templates/rst.j2 | 211
-rwxr-xr-x  v1/hacking/test-module | 193
-rwxr-xr-x  v1/hacking/update.sh | 3
-rw-r--r--  v1/tests/README.md | 5
-rw-r--r--  v1/tests/TestConstants.py | 64
-rw-r--r--  v1/tests/TestFilters.py | 191
-rw-r--r--  v1/tests/TestInventory.py | 510
-rw-r--r--  v1/tests/TestModuleUtilsBasic.py | 334
-rw-r--r--  v1/tests/TestModuleUtilsDatabase.py | 118
-rw-r--r--  v1/tests/TestModules.py | 32
-rw-r--r--  v1/tests/TestPlayVarsFiles.py | 390
-rw-r--r--  v1/tests/TestSynchronize.py | 176
-rw-r--r--  v1/tests/TestUtils.py | 945
-rw-r--r--  v1/tests/TestUtilsStringFunctions.py | 33
-rw-r--r--  v1/tests/TestVault.py | 147
-rw-r--r--  v1/tests/TestVaultEditor.py | 180
-rw-r--r--  v1/tests/ansible.cfg | 3
-rw-r--r--  v1/tests/inventory_test_data/ansible_hosts | 2
-rw-r--r--  v1/tests/inventory_test_data/broken.yml | 2
-rw-r--r--  v1/tests/inventory_test_data/common_vars.yml | 4
-rw-r--r--  v1/tests/inventory_test_data/complex_hosts | 96
-rw-r--r--  v1/tests/inventory_test_data/encrypted.yml | 6
-rw-r--r--  v1/tests/inventory_test_data/hosts_list.yml | 6
-rw-r--r--  v1/tests/inventory_test_data/inventory/test_alpha_end_before_beg | 2
-rw-r--r--  v1/tests/inventory_test_data/inventory/test_combined_range | 2
-rw-r--r--  v1/tests/inventory_test_data/inventory/test_incorrect_format | 2
-rw-r--r--  v1/tests/inventory_test_data/inventory/test_incorrect_range | 2
-rw-r--r--  v1/tests/inventory_test_data/inventory/test_leading_range | 6
-rw-r--r--  v1/tests/inventory_test_data/inventory/test_missing_end | 2
-rw-r--r--  v1/tests/inventory_test_data/inventory_api.py | 44
-rw-r--r--  v1/tests/inventory_test_data/inventory_dir/0hosts | 3
-rw-r--r--  v1/tests/inventory_test_data/inventory_dir/1mythology | 6
-rw-r--r--  v1/tests/inventory_test_data/inventory_dir/2levels | 6
-rw-r--r--  v1/tests/inventory_test_data/inventory_dir/3comments | 8
-rw-r--r--  v1/tests/inventory_test_data/inventory_dir/4skip_extensions.ini | 2
-rw-r--r--  v1/tests/inventory_test_data/large_range | 1
-rw-r--r--  v1/tests/inventory_test_data/restrict_pattern | 2
-rw-r--r--  v1/tests/inventory_test_data/simple_hosts | 28
-rw-r--r--  v1/tests/module_tests/TestApt.py | 42
-rw-r--r--  v1/tests/module_tests/TestDocker.py | 19
-rw-r--r--  v1/tests/vault_test_data/foo-ansible-1.0.yml | 4
-rw-r--r--  v1/tests/vault_test_data/foo-ansible-1.1-ansible-newline-ansible.yml | 6
-rw-r--r--  v1/tests/vault_test_data/foo-ansible-1.1.yml | 6
434 files changed, 6761 insertions, 36296 deletions
diff --git a/.gitmodules b/.gitmodules
index 793522a29c..a0e903430a 100644
--- a/.gitmodules
+++ b/.gitmodules
@@ -4,9 +4,3 @@
[submodule "lib/ansible/modules/extras"]
path = lib/ansible/modules/extras
url = https://github.com/ansible/ansible-modules-extras
-[submodule "v1/ansible/modules/core"]
- path = v1/ansible/modules/core
- url = https://github.com/ansible/ansible-modules-core
-[submodule "v1/ansible/modules/extras"]
- path = v1/ansible/modules/extras
- url = https://github.com/ansible/ansible-modules-extras
diff --git a/.travis.yml b/.travis.yml
index 335a8e58e3..6fb5198dc9 100644
--- a/.travis.yml
+++ b/.travis.yml
@@ -4,6 +4,7 @@ env:
- TOKENV=py24
- TOXENV=py26
- TOXENV=py27
+ - TOXENV=py34
addons:
apt:
sources:
@@ -13,8 +14,11 @@ addons:
install:
- pip install tox PyYAML Jinja2 sphinx
script:
+# urllib2's defaults are not secure enough for us
+- ./test/code-smell/replace-urlopen.sh .
- if test x"$TOKENV" != x'py24' ; then tox ; fi
- if test x"$TOKENV" = x'py24' ; then python2.4 -V && python2.4 -m compileall -fq -x 'module_utils/(a10|rax|openstack|ec2|gce).py' lib/ansible/module_utils ; fi
#- make -C docsite all
+- source ./hacking/env-setup && cd test/integration/ && make test_var_precedence
after_success:
- coveralls
diff --git a/CHANGELOG.md b/CHANGELOG.md
index 09835c40f8..724179a53c 100644
--- a/CHANGELOG.md
+++ b/CHANGELOG.md
@@ -1,200 +1,279 @@
Ansible Changes By Release
==========================
-## 2.0 "TBD" - ACTIVE DEVELOPMENT
+## 2.0 "Over the Hills and Far Away" - ACTIVE DEVELOPMENT
Major Changes:
- * Introducing the new block/rescue/always directives, allow for making task blocks and introducing exception like semantics
- * New strategy plugins, allow to control the flow of execution of tasks per play, the default will be the same as before
- * Improved error handling, now you get much more detailed parser messages. General exception handling and display has been revamped.
- * Task includes now get evaluated during execution, end behaviour will be the same but it now allows for more dynamic includes and options.
- * First feature of the more dynamic includes is that "with\_<lookup>" loops are now usable with them.
- * callback, connection and lookup plugin APIs have changed, some will require modification to work with new version
- * callbacks are now shipped in the active directory and don't need to be copied, just whitelisted in ansible.cfg
- * Many API changes, this will break those currently using it directly, but the new API is much easier to use and test
- * Settings are now more inheritable, what you set at play, block or role will be automatically inhertited by the contained.
- This allows for new features to automatically be settable at all levels, previously we had to manually code this
- * template code now retains types for bools and numbers instead of turning them into strings.
- If you need the old behaviour, quote the value and it will get passed around as a string
- * added meta: refresh_inventory to force rereading the inventory in a play
- * vars are now settable at play, block, role and task level
- * template code now retains types for bools, and Numbers instead of turning them into strings
- If you need the old behaviour, quote the value and it will get passed around as a string. In the
- case of nulls, the output used to be an empty string.
- * Empty variables and variables set to null in yaml will no longer be converted to empty strings.
- They will retain the value of `None`. To go back to the old behaviour, you can override
- the `null_representation` setting to an empty string in your config file or by setting the
- `ANSIBLE_NULL_REPRESENTATION` environment variable.
+
+* The new block/rescue/always directives allow for making task blocks and exception-like semantics
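+
+  For example, a minimal sketch of the new syntax (the task contents are illustrative):
+
+  ```
+  tasks:
+    - block:
+        - command: /bin/false   # any failure here triggers the rescue section
+      rescue:
+        - debug: msg="recovering from the failure"
+      always:
+        - debug: msg="this runs whether the block failed or not"
+  ```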
+* New strategy plugins (e.g. `free`) allow control over the flow of task execution per play. The default (`linear`) will be the same as before.
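+
+  A play selects a strategy with the new play-level keyword, for example:
+
+  ```
+  - hosts: all
+    strategy: free   # each host runs through its tasks as fast as it can, without waiting for the others
+    tasks:
+      - ping:
+  ```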
+* Improved error handling, with more detailed parser messages. General exception handling and display has been revamped.
+* Task includes are now evaluated during execution, allowing more dynamic includes and options.
+* "with\_<lookup>" loops can now be used with includes since they are dynamic.
+* Callback, connection and lookup plugin APIs have changed. Some projects will require modification to work with the new versions.
+* Callbacks are now shipped in the active directory and don't need to be copied, just whitelisted in ansible.cfg.
+* Many API changes. Those integrating directly with Ansible's API will encounter breaking changes, but the new API is much easier to use and test.
+* Settings are now more inheritable; what you set at the play, block or role level will be automatically inherited by the tasks they contain. This allows new features to automatically be settable at all levels; previously we had to code this manually.
+* Template code now retains types for bools and numbers instead of turning them into strings.
+  If you need the old behaviour, quote the value and it will get passed around as a string.
+* Added `meta: refresh_inventory` to force rereading the inventory in a play.
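+
+  A sketch of where this is useful (the provisioning step is illustrative):
+
+  ```
+  tasks:
+    - command: ./provision_new_hosts.sh   # something that changes the inventory source
+    - meta: refresh_inventory             # re-read the inventory before continuing
+  ```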
+* Vars are now settable at play, block, role and task level.
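+
+  For example, a var scoped to a block (names illustrative):
+
+  ```
+  tasks:
+    - block:
+        - debug: msg="{{ scope }}"
+      vars:
+        scope: "visible to every task in this block"
+  ```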
+* Empty variables and variables set to null in YAML will no longer be converted to empty strings.
+  They will retain the value of `None`. To go back to the old behaviour, you can override
+  the `null_representation` setting to an empty string in your config file or by setting the
+  `ANSIBLE_NULL_REPRESENTATION` environment variable.
+* Use "pattern1,pattern2" to combine host matching patterns. The use of
+ ':' as a separator is deprecated (accepted with a warning) because it
+ conflicts with IPv6 addresses. The undocumented use of ';' as a
+ separator is no longer supported.
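+
+  For example:
+
+  ```
+  - hosts: webservers,dbservers   # preferred in 2.0
+  - hosts: webservers:dbservers   # deprecated, accepted with a warning
+  ```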
+* Backslashes used when specifying parameters in jinja2 expressions in YAML
+dicts sometimes needed to be escaped twice. This has been fixed so that
+escaping once works. Here's an example of how playbooks need to be modified:
+
+ ```
+ # Syntax in 1.9.x
+ - debug:
+ msg: "{{ 'test1_junk 1\\\\3' | regex_replace('(.*)_junk (.*)', '\\\\1 \\\\2') }}"
+ # Syntax in 2.0.x
+ - debug:
+ msg: "{{ 'test1_junk 1\\3' | regex_replace('(.*)_junk (.*)', '\\1 \\2') }}"
+
+ # Output:
+ "msg": "test1 1\\3"
+ ```
+
+* When a string with a trailing newline was specified in the playbook via yaml
+dict format, the trailing newline was stripped. When specified in key=value
+format the trailing newlines were kept. In v2, both methods of specifying the
+string will keep the trailing newlines. If you relied on the trailing
+newline being stripped you can change your playbook like this:
+
+ ```
+ # Syntax in 1.9.2
+ vars:
+ message: >
+ Testing
+ some things
+ tasks:
+ - debug:
+ msg: "{{ message }}"
+
+ # Syntax in 2.0.x
+ vars:
+ old_message: >
+ Testing
+ some things
+    message: "{{ old_message[:-1] }}"
+  tasks:
+    - debug:
+      msg: "{{ message }}"
+ # Output
+ "msg": "Testing some things"
+ ```
Deprecated Modules (new ones in parens):
- * ec2_ami_search (ec2_ami_find)
- * quantum_network (os_network)
- * glance_image
- * nova_compute (os_server)
- * quantum_floating_ip (os_floating_ip)
+
+* ec2_ami_search (ec2_ami_find)
+* quantum_network (os_network)
+* glance_image
+* nova_compute (os_server)
+* quantum_floating_ip (os_floating_ip)
New Modules:
- * amazon: ec2_ami_copy
- * amazon: ec2_ami_find
- * amazon: ec2_elb_facts
- * amazon: ec2_eni
- * amazon: ec2_eni_facts
- * amazon: ec2_remote_facts
- * amazon: ec2_vpc_net
- * amazon: ec2_vpc_route_table_facts
- * amazon: ec2_vpc_subnet
- * amazon: ec2_win_password
- * amazon: elasticache_subnet_group
- * amazon: iam
- * amazon: iam_policy
- * amazon: route53_zone
- * amazon: sts_assume_role
- * amazon: s3_bucket
- * amazon: s3_logging
- * apk
- * bundler
- * centurylink: clc_loadbalancer
- * centurylink: clc_modify_server
- * centurylink: clc_publicip
- * centurylink: clc_server
- * circonus_annotation
- * consul
- * consul_acl
- * consul_kv
- * consul_session
- * cloudtrail
- * cloudstack: cs_account
- * cloudstack: cs_affinitygroup
- * cloudstack: cs_domain
- * cloudstack: cs_facts
- * cloudstack: cs_firewall
- * cloudstack: cs_iso
- * cloudstack: cs_instance
- * cloudstack: cs_instancegroup
- * cloudstack: cs_ip_address
- * cloudstack: cs_network
- * cloudstack: cs_portforward
- * cloudstack: cs_project
- * cloudstack: cs_sshkeypair
- * cloudstack: cs_securitygroup
- * cloudstack: cs_securitygroup_rule
- * cloudstack: cs_staticnat
- * cloudstack: cs_template
- * cloudstack: cs_vmsnapshot
- * datadog_monitor
- * dpkg_selections
- * elasticsearch_plugin
- * expect
- * find
- * hall
- * libvirt: virt_net
- * libvirt: virt_pool
- * maven_artifact
- * openstack: os_ironic
- * openstack: os_ironic_node
- * openstack: os_client_config
- * openstack: os_floating_ip
- * openstack: os_image
- * openstack: os_network
- * openstack: os_nova_flavor
- * openstack: os_object
- * openstack: os_security_group
- * openstack: os_security_group_rule
- * openstack: os_server
- * openstack: os_server_actions
- * openstack: os_server_facts
- * openstack: os_server_volume
- * openstack: os_subnet
- * openstack: os_volume
- * osx_defaults
- * pam_limits
- * pear
- * profitbricks: profitbricks
- * profitbricks: profitbricks_datacenter
- * profitbricks: profitbricks_nic
- * profitbricks: profitbricks_volume
- * proxmox
- * proxmox_template
- * puppet
- * pushover
- * pushbullet
- * rax: rax_mon_alarm
- * rax: rax_mon_check
- * rax: rax_mon_entity
- * rax: rax_mon_notification
- * rax: rax_mon_notification_plan
- * rabbitmq_binding
- * rabbitmq_exchange
- * rabbitmq_queue
- * selinux_permissive
- * sensu_check
- * sensu_subscription
- * seport
- * slackpkg
- * solaris_zone
- * vertica_configuration
- * vertica_facts
- * vertica_role
- * vertica_schema
- * vertica_user
- * vmware: vmware_datacenter
- * vmware: vmware_cluster
- * vmware: vmware_dns_config
- * vmware: vmware_dvs_host
- * vmware: vmware_vsan_cluster
- * vmware: vmware_vswitch
- * vmware: vca_fw
- * vmware: vca_nat
- * vmware: vsphere_copy
- * webfaction_app
- * webfaction_db
- * webfaction_domain
- * webfaction_mailbox
- * webfaction_site
- * win_environment
- * win_scheduled_task
- * win_iis_virtualdirectory
- * win_iis_webapplication
- * win_iis_webapppool
- * win_iis_webbinding
- * win_iis_website
- * win_regedit
- * win_unzip
- * xenserver_facts
- * zabbix_host
- * zabbix_hostmacro
- * zabbix_screen
- * znode
+
+* amazon: ec2_ami_copy
+* amazon: ec2_ami_find
+* amazon: ec2_elb_facts
+* amazon: ec2_eni
+* amazon: ec2_eni_facts
+* amazon: ec2_remote_facts
+* amazon: ec2_vpc_net
+* amazon: ec2_vpc_route_table
+* amazon: ec2_vpc_route_table_facts
+* amazon: ec2_vpc_subnet
+* amazon: ec2_win_password
+* amazon: elasticache_subnet_group
+* amazon: iam
+* amazon: iam_policy
+* amazon: route53_zone
+* amazon: sts_assume_role
+* amazon: s3_bucket
+* amazon: s3_lifecycle
+* amazon: s3_logging
+* apk
+* bundler
+* centurylink: clc_blueprint_package
+* centurylink: clc_firewall_policy
+* centurylink: clc_loadbalancer
+* centurylink: clc_modify_server
+* centurylink: clc_publicip
+* centurylink: clc_server
+* circonus_annotation
+* consul
+* consul_acl
+* consul_kv
+* consul_session
+* cloudtrail
+* cloudstack: cs_account
+* cloudstack: cs_affinitygroup
+* cloudstack: cs_domain
+* cloudstack: cs_facts
+* cloudstack: cs_firewall
+* cloudstack: cs_iso
+* cloudstack: cs_instance
+* cloudstack: cs_instancegroup
+* cloudstack: cs_ip_address
+* cloudstack: cs_network
+* cloudstack: cs_portforward
+* cloudstack: cs_project
+* cloudstack: cs_sshkeypair
+* cloudstack: cs_securitygroup
+* cloudstack: cs_securitygroup_rule
+* cloudstack: cs_staticnat
+* cloudstack: cs_template
+* cloudstack: cs_user
+* cloudstack: cs_vmsnapshot
+* datadog_monitor
+* dpkg_selections
+* elasticsearch_plugin
+* expect
+* find
+* hall
+* libvirt: virt_net
+* libvirt: virt_pool
+* maven_artifact
+* openstack: os_ironic
+* openstack: os_ironic_node
+* openstack: os_client_config
+* openstack: os_floating_ip
+* openstack: os_image
+* openstack: os_network
+* openstack: os_nova_flavor
+* openstack: os_object
+* openstack: os_security_group
+* openstack: os_security_group_rule
+* openstack: os_server
+* openstack: os_server_actions
+* openstack: os_server_facts
+* openstack: os_server_volume
+* openstack: os_subnet
+* openstack: os_user_group
+* openstack: os_volume
+* openvswitch_db
+* osx_defaults
+* pagerduty_alert
+* pam_limits
+* pear
+* profitbricks: profitbricks
+* profitbricks: profitbricks_datacenter
+* profitbricks: profitbricks_nic
+* profitbricks: profitbricks_snapshot
+* profitbricks: profitbricks_volume
+* profitbricks: profitbricks_volume_attachments
+* proxmox
+* proxmox_template
+* puppet
+* pushover
+* pushbullet
+* rax: rax_mon_alarm
+* rax: rax_mon_check
+* rax: rax_mon_entity
+* rax: rax_mon_notification
+* rax: rax_mon_notification_plan
+* rabbitmq_binding
+* rabbitmq_exchange
+* rabbitmq_queue
+* selinux_permissive
+* sensu_check
+* sensu_subscription
+* seport
+* slackpkg
+* solaris_zone
+* vertica_configuration
+* vertica_facts
+* vertica_role
+* vertica_schema
+* vertica_user
+* vmware: vmware_datacenter
+* vmware: vmware_cluster
+* vmware: vmware_dns_config
+* vmware: vmware_dvs_host
+* vmware: vmware_dvs_portgroup
+* vmware: vmware_dvswitch
+* vmware: vmware_host
+* vmware: vmware_vmkernel_ip_config
+* vmware: vmware_portgroup
+* vmware: vmware_vm_facts
+* vmware: vmware_vmkernel
+* vmware: vmware_vsan_cluster
+* vmware: vmware_vswitch
+* vmware: vca_fw
+* vmware: vca_nat
+* vmware: vsphere_copy
+* webfaction_app
+* webfaction_db
+* webfaction_domain
+* webfaction_mailbox
+* webfaction_site
+* win_environment
+* win_package
+* win_scheduled_task
+* win_iis_virtualdirectory
+* win_iis_webapplication
+* win_iis_webapppool
+* win_iis_webbinding
+* win_iis_website
+* win_regedit
+* win_unzip
+* xenserver_facts
+* zabbix_host
+* zabbix_hostmacro
+* zabbix_screen
+* znode
New Inventory scripts:
- * cloudstack
- * fleetctl
- * openvz
- * proxmox
- * serf
+
+* cloudstack
+* fleetctl
+* openvz
+* nagios_ndo
+* proxmox
+* serf
New Lookups:
- * credstash
- * hashi_vault
- * ini
- * shelvefile
+
+* credstash
+* hashi_vault
+* ini
+* shelvefile
+
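+The new `ini` lookup, for example, reads a value out of an INI file (the file and key names here are hypothetical):
+
+```
+- debug:
+    msg: "{{ lookup('ini', 'db_user section=database file=settings.ini') }}"
+```
+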
+New Filters:
+
+* combine
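+
+  For example:
+
+  ```
+  - debug:
+      msg: "{{ {'a': 1, 'b': 2} | combine({'b': 3}) }}"   # => {'a': 1, 'b': 3}
+  ```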
New Connection Methods:
- * Added a connection plugin for talking to docker containers on the ansible controller machine without using ssh
+
+* Added a connection plugin for talking to docker containers on the ansible controller machine without using ssh.
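+
+  A minimal sketch (the container name is hypothetical):
+
+  ```
+  - hosts: mycontainer
+    connection: docker   # the inventory hostname is used as the container name
+    tasks:
+      - raw: echo hello from inside the container
+  ```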
Minor changes:
- * Many more tests, new API makes things more testable and we took advantage of it
- * big_ip modules now support turning off ssl certificate validation (use only for self signed)
- * The undocumented semicolon-separated "pattern1;pattern2" syntax to match hosts is no longer supported.
- * Now when you delegate a action that returns ansible_facts, these facts will now be applied to the delegated host,
- unlike before which they were applied to the current host.
- * Consolidated code from modules using urllib2 to normalize features, TLS and SNI support
- * synchronize module's dest_port parameter now takes precedence over the ansible_ssh_port inventory setting
- * play output is now dynamically sized to terminal with a minimal of 80 coluumns (old default)
- * vars_prompt and pause are now skipped with a warning if the play is called non interactively (i.e. pull from cron)
- * Support for OpenBSD's 'doas' privilege escalation method.
- * most vault operations can now be done over multilple files
+* Many more tests. The new API makes things more testable and we took advantage of it.
+* big_ip modules now support turning off ssl certificate validation (use only for self-signed certificates).
+* Use ``hosts: groupname[x:y]`` to select a subset of hosts in a group; the
+  ``[x-y]`` range syntax is no longer supported. Note that ``[0:1]`` matches
+  two hosts, i.e. the range is inclusive of its endpoints.
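+
+  For example:
+
+  ```
+  - hosts: webservers[0:1]   # the first two hosts of the webservers group
+  - hosts: webservers[2:]    # everything from the third host onwards
+  ```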
+* Now when you delegate an action that returns ansible_facts, these facts will be applied to the delegated host, unlike before when they were applied to the current host.
+* Consolidated code from modules using urllib2 to normalize features, TLS and SNI support.
+* synchronize module's dest_port parameter now takes precedence over the ansible_ssh_port inventory setting.
+* Play output is now dynamically sized to the terminal, with a minimum of 80 columns (the old default).
+* vars_prompt and pause are now skipped with a warning if the play is called non-interactively (e.g. a pull from cron).
+* Support for OpenBSD's 'doas' privilege escalation method.
+* Most vault operations can now be done over multiple files.
+* ansible-vault encrypt/decrypt read from stdin if no other input file is given,
+  and can write to a given ``--output file`` (including stdout, '-'). This lets
+  you avoid ever writing sensitive plaintext to disk.
+* ansible-vault rekey accepts the --new-vault-password-file option.
+* Configuration items defined as paths (local only) now all support shell style interpolations.
+* Many fixes and new options added to modules, too many to list here.
## 1.9.2 "Dancing In the Street" - Jun 26, 2015
diff --git a/MANIFEST.in b/MANIFEST.in
index b9bf5f4276..2195e5d311 100644
--- a/MANIFEST.in
+++ b/MANIFEST.in
@@ -14,7 +14,7 @@ include Makefile
include VERSION
include MANIFEST.in
include contrib/README.md
-include contrib/inventory *
+recursive-include contrib/inventory *
exclude lib/ansible/modules/core/.git*
exclude lib/ansible/modules/extras/.git*
prune lib/ansible/modules/core/.git
diff --git a/VERSION b/VERSION
index a4b5d82d9e..5abb309712 100644
--- a/VERSION
+++ b/VERSION
@@ -1 +1 @@
-2.0.0 0.0.pre
+2.0.0 0.2.alpha2
diff --git a/contrib/inventory/abiquo.py b/contrib/inventory/abiquo.py
index a6030c58b8..cd068e482b 100755
--- a/contrib/inventory/abiquo.py
+++ b/contrib/inventory/abiquo.py
@@ -45,26 +45,24 @@ import os
import sys
import time
import ConfigParser
-import urllib2
-import base64
try:
import json
except ImportError:
import simplejson as json
+from ansible.module_utils.urls import open_url
+
def api_get(link, config):
try:
if link == None:
- request = urllib2.Request(config.get('api','uri')+config.get('api','login_path'))
- request.add_header("Accept",config.get('api','login_type'))
+ url = config.get('api','uri') + config.get('api','login_path')
+ headers = {"Accept": config.get('api','login_type')}
else:
- request = urllib2.Request(link['href']+'?limit=0')
- request.add_header("Accept",link['type'])
- # Auth
- base64string = base64.encodestring('%s:%s' % (config.get('auth','apiuser'),config.get('auth','apipass'))).replace('\n', '')
- request.add_header("Authorization", "Basic %s" % base64string)
- result = urllib2.urlopen(request)
+ url = link['href'] + '?limit=0'
+ headers = {"Accept": link['type']}
+ result = open_url(url, headers=headers, url_username=config.get('auth','apiuser').replace('\n', ''),
+ url_password=config.get('auth','apipass').replace('\n', ''))
return json.loads(result.read())
except:
return None
@@ -76,7 +74,7 @@ def save_cache(data, config):
cache = open('/'.join([dpath,'inventory']), 'w')
cache.write(json.dumps(data))
cache.close()
- except IOError, e:
+ except IOError as e:
pass # not really sure what to do here
@@ -88,7 +86,7 @@ def get_cache(cache_item, config):
cache = open('/'.join([dpath,'inventory']), 'r')
inv = cache.read()
cache.close()
- except IOError, e:
+ except IOError as e:
pass # not really sure what to do here
return inv
@@ -172,7 +170,7 @@ def generate_inv_from_api(enterprise_entity,config):
else:
vm_metadata = metadata['metadata']['metadata']
inventory['_meta']['hostvars'][vm_nic] = vm_metadata
- except Exception, e:
+ except Exception as e:
pass
inventory[vm_vapp]['children'].append(vmcollection['name'])
@@ -183,7 +181,7 @@ def generate_inv_from_api(enterprise_entity,config):
inventory[vmcollection['name']].append(vm_nic)
return inventory
- except Exception, e:
+ except Exception as e:
# Return empty hosts output
return { 'all': {'hosts': []}, '_meta': { 'hostvars': {} } }
@@ -214,7 +212,7 @@ if __name__ == '__main__':
try:
login = api_get(None,config)
enterprise = next(link for link in (login['links']) if (link['rel']=='enterprise'))
- except Exception, e:
+ except Exception as e:
enterprise = None
if cache_available(config):
diff --git a/contrib/inventory/apache-libcloud.py b/contrib/inventory/apache-libcloud.py
index 151daeefe0..f7d64c257c 100755
--- a/contrib/inventory/apache-libcloud.py
+++ b/contrib/inventory/apache-libcloud.py
@@ -37,6 +37,7 @@ import re
from time import time
import ConfigParser
+from six import iteritems, string_types
from libcloud.compute.types import Provider
from libcloud.compute.providers import get_driver
import libcloud.security as sec
@@ -79,7 +80,7 @@ class LibcloudInventory(object):
else:
data_to_print = self.json_format_dict(self.inventory, True)
- print data_to_print
+ print(data_to_print)
def is_cache_valid(self):
@@ -259,16 +260,16 @@ class LibcloudInventory(object):
key = self.to_safe('ec2_' + key)
# Handle complex types
- if type(value) in [int, bool]:
+ if isinstance(value, (int, bool)):
instance_vars[key] = value
- elif type(value) in [str, unicode]:
+ elif isinstance(value, string_types):
instance_vars[key] = value.strip()
- elif type(value) == type(None):
+ elif value is None:
instance_vars[key] = ''
elif key == 'ec2_region':
instance_vars[key] = value.name
elif key == 'ec2_tags':
- for k, v in value.iteritems():
+ for k, v in iteritems(value):
key = self.to_safe('ec2_tag_' + k)
instance_vars[key] = v
elif key == 'ec2_groups':
@@ -282,9 +283,9 @@ class LibcloudInventory(object):
else:
pass
# TODO Product codes if someone finds them useful
- #print key
- #print type(value)
- #print value
+ #print(key)
+ #print(type(value))
+ #print(value)
return self.json_format_dict(instance_vars, True)
diff --git a/contrib/inventory/cloudstack.py b/contrib/inventory/cloudstack.py
index 426cf163fd..5911f662c9 100755
--- a/contrib/inventory/cloudstack.py
+++ b/contrib/inventory/cloudstack.py
@@ -70,6 +70,8 @@ based on the data obtained from CloudStack API:
usage: cloudstack.py [--list] [--host HOST] [--project PROJECT]
"""
+from __future__ import print_function
+
import os
import sys
import argparse
@@ -83,7 +85,8 @@ except:
try:
from cs import CloudStack, CloudStackException, read_config
except ImportError:
- print >> sys.stderr, "Error: CloudStack library must be installed: pip install cs."
+ print("Error: CloudStack library must be installed: pip install cs.",
+ file=sys.stderr)
sys.exit(1)
@@ -98,8 +101,8 @@ class CloudStackInventory(object):
options = parser.parse_args()
try:
self.cs = CloudStack(**read_config())
- except CloudStackException, e:
- print >> sys.stderr, "Error: Could not connect to CloudStack API"
+ except CloudStackException as e:
+ print("Error: Could not connect to CloudStack API", file=sys.stderr)
project_id = ''
if options.project:
@@ -107,13 +110,14 @@ class CloudStackInventory(object):
if options.host:
data = self.get_host(options.host)
- print json.dumps(data, indent=2)
+ print(json.dumps(data, indent=2))
elif options.list:
data = self.get_list()
- print json.dumps(data, indent=2)
+ print(json.dumps(data, indent=2))
else:
- print >> sys.stderr, "usage: --list | --host <hostname> [--project <project>]"
+ print("usage: --list | --host <hostname> [--project <project>]",
+ file=sys.stderr)
sys.exit(1)
@@ -123,7 +127,7 @@ class CloudStackInventory(object):
for p in projects['project']:
if p['name'] == project or p['id'] == project:
return p['id']
- print >> sys.stderr, "Error: Project %s not found." % project
+ print("Error: Project %s not found." % project, file=sys.stderr)
sys.exit(1)
diff --git a/contrib/inventory/cobbler.py b/contrib/inventory/cobbler.py
index 469fac21ad..b5fcdeacbb 100755
--- a/contrib/inventory/cobbler.py
+++ b/contrib/inventory/cobbler.py
@@ -72,6 +72,8 @@ try:
except ImportError:
import simplejson as json
+from six import iteritems
+
# NOTE -- this file assumes Ansible is being accessed FROM the cobbler
# server, so it does not attempt to login with a username and password.
# this will be addressed in a future version of this script.
@@ -113,7 +115,7 @@ class CobblerInventory(object):
self.inventory['_meta']['hostvars'][hostname] = {'cobbler': self.cache[hostname] }
data_to_print += self.json_format_dict(self.inventory, True)
- print data_to_print
+ print(data_to_print)
def _connect(self):
if not self.conn:
@@ -169,8 +171,15 @@ class CobblerInventory(object):
dns_name = host['hostname'] #None
ksmeta = None
interfaces = host['interfaces']
-
- if dns_name is None:
+ # hostname is often empty for non-static IP hosts
+ if dns_name == '':
+ for (iname, ivalue) in iteritems(interfaces):
+ if ivalue['management'] or not ivalue['static']:
+ this_dns_name = ivalue.get('dns_name', None)
+                    if this_dns_name is not None and this_dns_name != "":
+ dns_name = this_dns_name
+
+ if dns_name == '':
continue
status = host['status']
@@ -196,7 +205,7 @@ class CobblerInventory(object):
self.cache[dns_name] = host
if "ks_meta" in host:
- for key, value in host["ks_meta"].iteritems():
+ for key, value in iteritems(host["ks_meta"]):
self.cache[dns_name][key] = value
self.write_to_cache(self.cache, self.cache_path_cache)
diff --git a/contrib/inventory/collins.py b/contrib/inventory/collins.py
index 775beecb1d..bbcb32b017 100755
--- a/contrib/inventory/collins.py
+++ b/contrib/inventory/collins.py
@@ -67,7 +67,6 @@ Tested against Ansible 1.8.2 and Collins 1.3.0.
import argparse
-import base64
import ConfigParser
import logging
import os
@@ -76,13 +75,15 @@ import sys
from time import time
import traceback
import urllib
-import urllib2
try:
import json
except ImportError:
import simplejson as json
+from six import iteritems
+
+from ansible.module_utils.urls import open_url
class CollinsDefaults(object):
ASSETS_API_ENDPOINT = '%s/api/assets'
@@ -164,7 +165,7 @@ class CollinsInventory(object):
else: # default action with no options
data_to_print = self.json_format_dict(self.inventory, self.args.pretty)
- print data_to_print
+ print(data_to_print)
return successful
def find_assets(self, attributes = {}, operation = 'AND'):
@@ -174,7 +175,7 @@ class CollinsInventory(object):
# the CQL search feature as described here:
# http://tumblr.github.io/collins/recipes.html
attributes_query = [ '='.join(attr_pair)
- for attr_pair in attributes.iteritems() ]
+ for attr_pair in iteritems(attributes) ]
query_parameters = {
'details': ['True'],
'operation': [operation],
@@ -196,10 +197,11 @@ class CollinsInventory(object):
(CollinsDefaults.ASSETS_API_ENDPOINT % self.collins_host),
urllib.urlencode(query_parameters, doseq=True)
)
- request = urllib2.Request(query_url)
- request.add_header('Authorization', self.basic_auth_header)
try:
- response = urllib2.urlopen(request, timeout=self.collins_timeout_secs)
+ response = open_url(query_url,
+ timeout=self.collins_timeout_secs,
+ url_username=self.collins_username,
+ url_password=self.collins_password)
json_response = json.loads(response.read())
# Adds any assets found to the array of assets.
assets += json_response['data']['Data']
@@ -259,8 +261,6 @@ class CollinsInventory(object):
log_path = config.get('collins', 'log_path')
self.log_location = log_path + '/ansible-collins.log'
- self.basic_auth_header = "Basic %s" % base64.encodestring(
- '%s:%s' % (self.collins_username, self.collins_password))[:-1]
def parse_cli_args(self):
""" Command line argument processing """
diff --git a/contrib/inventory/consul_io.py b/contrib/inventory/consul_io.py
index 4e40f96873..1bcf22d373 100755
--- a/contrib/inventory/consul_io.py
+++ b/contrib/inventory/consul_io.py
@@ -136,11 +136,12 @@ except ImportError:
try:
import consul
-except ImportError, e:
- print """failed=True msg='python-consul required for this module. see
- http://python-consul.readthedocs.org/en/latest/#installation'"""
+except ImportError as e:
+ print("""failed=True msg='python-consul required for this module. see
+ http://python-consul.readthedocs.org/en/latest/#installation'""")
sys.exit(1)
+from six import iteritems
class ConsulInventory(object):
@@ -171,7 +172,7 @@ class ConsulInventory(object):
self.load_all_data_consul()
self.combine_all_results()
- print json.dumps(self.inventory, sort_keys=True, indent=2)
+ print(json.dumps(self.inventory, sort_keys=True, indent=2))
def load_all_data_consul(self):
''' cycle through each of the datacenters in the consul catalog and process
@@ -187,7 +188,7 @@ class ConsulInventory(object):
an 'available' or 'unavailable' grouping. The suffix for each group can be
controlled from the config'''
if self.config.has_config('availability'):
- for service_name, service in node['Services'].iteritems():
+ for service_name, service in iteritems(node['Services']):
for node in self.consul_api.health.service(service_name)[1]:
for check in node['Checks']:
if check['ServiceName'] == service_name:
diff --git a/contrib/inventory/digital_ocean.py b/contrib/inventory/digital_ocean.py
index 1927f09fdf..97e6ea1183 100755
--- a/contrib/inventory/digital_ocean.py
+++ b/contrib/inventory/digital_ocean.py
@@ -145,8 +145,8 @@ except ImportError:
try:
from dopy.manager import DoError, DoManager
-except ImportError, e:
- print "failed=True msg='`dopy` library required for this script'"
+except ImportError as e:
+ print("failed=True msg='`dopy` library required for this script'")
sys.exit(1)
@@ -175,14 +175,14 @@ class DigitalOceanInventory(object):
# Verify credentials were set
if not hasattr(self, 'api_token'):
- print '''Could not find values for DigitalOcean api_token.
+ print('''Could not find values for DigitalOcean api_token.
They must be specified via either ini file, command line argument (--api-token),
-or environment variables (DO_API_TOKEN)'''
+or environment variables (DO_API_TOKEN)''')
sys.exit(-1)
# env command, show DigitalOcean credentials
if self.args.env:
- print "DO_API_TOKEN=%s" % self.api_token
+ print("DO_API_TOKEN=%s" % self.api_token)
sys.exit(0)
# Manage cache
@@ -193,7 +193,7 @@ or environment variables (DO_API_TOKEN)'''
self.load_from_cache()
if len(self.data) == 0:
if self.args.force_cache:
- print '''Cache is empty and --force-cache was specified'''
+ print('''Cache is empty and --force-cache was specified''')
sys.exit(-1)
self.manager = DoManager(None, self.api_token, api_version=2)
@@ -231,9 +231,9 @@ or environment variables (DO_API_TOKEN)'''
self.write_to_cache()
if self.args.pretty:
- print json.dumps(json_data, sort_keys=True, indent=2)
+ print(json.dumps(json_data, sort_keys=True, indent=2))
else:
- print json.dumps(json_data)
+ print(json.dumps(json_data))
# That's all she wrote...
diff --git a/contrib/inventory/docker.py b/contrib/inventory/docker.py
index 0460b2d3bd..7e8ee30a7c 100755
--- a/contrib/inventory/docker.py
+++ b/contrib/inventory/docker.py
@@ -334,7 +334,7 @@ def list_groups():
groups['docker_hosts'] = [host.get('base_url') for host in hosts]
groups['_meta'] = dict()
groups['_meta']['hostvars'] = hostvars
- print json.dumps(groups, sort_keys=True, indent=4)
+ print(json.dumps(groups, sort_keys=True, indent=4))
sys.exit(0)
diff --git a/contrib/inventory/ec2.py b/contrib/inventory/ec2.py
index e4b0b072d4..7ed9b83e77 100755
--- a/contrib/inventory/ec2.py
+++ b/contrib/inventory/ec2.py
@@ -187,10 +187,10 @@ class Ec2Inventory(object):
def read_settings(self):
''' Reads the settings from the ec2.ini file '''
- if six.PY2:
- config = configparser.SafeConfigParser()
- else:
+ if six.PY3:
config = configparser.ConfigParser()
+ else:
+ config = configparser.SafeConfigParser()
ec2_default_ini_path = os.path.join(os.path.dirname(os.path.realpath(__file__)), 'ec2.ini')
ec2_ini_path = os.path.expanduser(os.path.expandvars(os.environ.get('EC2_INI_PATH', ec2_default_ini_path)))
config.read(ec2_ini_path)
diff --git a/contrib/inventory/fleet.py b/contrib/inventory/fleet.py
index 3267aeb2ea..788e1a5f51 100755
--- a/contrib/inventory/fleet.py
+++ b/contrib/inventory/fleet.py
@@ -81,7 +81,7 @@ if options.list:
for data in ssh_config:
hosts['coreos'].append(data['Host'])
- print json.dumps(hosts)
+ print(json.dumps(hosts))
sys.exit(1)
# Get out the host details
@@ -96,7 +96,7 @@ elif options.host:
result = details[0]
result
- print json.dumps(result)
+ print(json.dumps(result))
sys.exit(1)
diff --git a/contrib/inventory/freeipa.py b/contrib/inventory/freeipa.py
index 05a8dba356..a2632621ca 100755
--- a/contrib/inventory/freeipa.py
+++ b/contrib/inventory/freeipa.py
@@ -41,7 +41,7 @@ def list_groups(api):
inventory['_meta'] = {'hostvars': hostvars}
inv_string = json.dumps(inventory, indent=1, sort_keys=True)
- print inv_string
+ print(inv_string)
return None
@@ -69,7 +69,7 @@ def print_host(host):
This function expects one string, this hostname to lookup variables for.
'''
- print json.dumps({})
+ print(json.dumps({}))
return None
diff --git a/contrib/inventory/gce.py b/contrib/inventory/gce.py
index 740e112332..b13c194a6e 100755
--- a/contrib/inventory/gce.py
+++ b/contrib/inventory/gce.py
@@ -112,9 +112,9 @@ class GceInventory(object):
# Just display data for specific host
if self.args.host:
- print self.json_format_dict(self.node_to_dict(
+ print(self.json_format_dict(self.node_to_dict(
self.get_instance(self.args.host)),
- pretty=self.args.pretty)
+ pretty=self.args.pretty))
sys.exit(0)
# Otherwise, assume user wants all instances grouped
@@ -237,7 +237,7 @@ class GceInventory(object):
'''Gets details about a specific instance '''
try:
return self.driver.ex_get_node(instance_name)
- except Exception, e:
+ except Exception as e:
return None
def group_instances(self):
diff --git a/contrib/inventory/jail.py b/contrib/inventory/jail.py
index 29c34aef8e..843812b33c 100755
--- a/contrib/inventory/jail.py
+++ b/contrib/inventory/jail.py
@@ -30,8 +30,8 @@ result['all']['vars'] = {}
result['all']['vars']['ansible_connection'] = 'jail'
if len(sys.argv) == 2 and sys.argv[1] == '--list':
- print json.dumps(result)
+ print(json.dumps(result))
elif len(sys.argv) == 3 and sys.argv[1] == '--host':
- print json.dumps({'ansible_connection': 'jail'})
+ print(json.dumps({'ansible_connection': 'jail'}))
else:
- print "Need an argument, either --list or --host <host>"
+ print("Need an argument, either --list or --host <host>")
diff --git a/contrib/inventory/libvirt_lxc.py b/contrib/inventory/libvirt_lxc.py
index ff037265cd..1491afd577 100755
--- a/contrib/inventory/libvirt_lxc.py
+++ b/contrib/inventory/libvirt_lxc.py
@@ -30,8 +30,8 @@ result['all']['vars'] = {}
result['all']['vars']['ansible_connection'] = 'lxc'
if len(sys.argv) == 2 and sys.argv[1] == '--list':
- print json.dumps(result)
+ print(json.dumps(result))
elif len(sys.argv) == 3 and sys.argv[1] == '--host':
- print json.dumps({'ansible_connection': 'lxc'})
+ print(json.dumps({'ansible_connection': 'lxc'}))
else:
- print "Need an argument, either --list or --host <host>"
+ print("Need an argument, either --list or --host <host>")
diff --git a/contrib/inventory/linode.py b/contrib/inventory/linode.py
index cbce5f8a69..f2b61b7075 100755
--- a/contrib/inventory/linode.py
+++ b/contrib/inventory/linode.py
@@ -101,7 +101,7 @@ except:
from chube.linode_obj import Linode
sys.path = old_path
- except Exception, e:
+ except Exception as e:
raise Exception("could not import chube")
load_chube_config()
@@ -139,7 +139,7 @@ class LinodeInventory(object):
else:
data_to_print = self.json_format_dict(self.inventory, True)
- print data_to_print
+ print(data_to_print)
def is_cache_valid(self):
"""Determines if the cache file has expired, or if it is still valid."""
@@ -184,20 +184,20 @@ class LinodeInventory(object):
try:
for node in Linode.search(status=Linode.STATUS_RUNNING):
self.add_node(node)
- except chube_api.linode_api.ApiError, e:
- print "Looks like Linode's API is down:"
- print
- print e
+ except chube_api.linode_api.ApiError as e:
+ print("Looks like Linode's API is down:")
+ print("")
+ print(e)
sys.exit(1)
def get_node(self, linode_id):
"""Gets details about a specific node."""
try:
return Linode.find(api_id=linode_id)
- except chube_api.linode_api.ApiError, e:
- print "Looks like Linode's API is down:"
- print
- print e
+ except chube_api.linode_api.ApiError as e:
+ print("Looks like Linode's API is down:")
+ print("")
+ print(e)
sys.exit(1)
def populate_datacenter_cache(self):
diff --git a/contrib/inventory/nagios_ndo.ini b/contrib/inventory/nagios_ndo.ini
new file mode 100644
index 0000000000..1e133a29f3
--- /dev/null
+++ b/contrib/inventory/nagios_ndo.ini
@@ -0,0 +1,10 @@
+# Ansible Nagios external inventory script settings
+#
+
+[ndo]
+# NDO database URI
+# Make sure that data is returned as strings and not bytes if using python 3.
+# See http://docs.sqlalchemy.org/en/rel_1_0/core/engines.html
+# for supported databases and URI format.
+# Example for the mysqlclient module:
+database_uri=mysql+mysqldb://user:passwd@hostname/ndo?charset=utf8&use_unicode=1
diff --git a/contrib/inventory/nagios_ndo.py b/contrib/inventory/nagios_ndo.py
new file mode 100755
index 0000000000..49ec56392a
--- /dev/null
+++ b/contrib/inventory/nagios_ndo.py
@@ -0,0 +1,109 @@
+#!/usr/bin/env python
+
+# (c) 2014, Jonathan Lestrelin <jonathan.lestrelin@gmail.com>
+#
+# This file is part of Ansible,
+#
+# Ansible is free software: you can redistribute it and/or modify
+# it under the terms of the GNU General Public License as published by
+# the Free Software Foundation, either version 3 of the License, or
+# (at your option) any later version.
+#
+# Ansible is distributed in the hope that it will be useful,
+# but WITHOUT ANY WARRANTY; without even the implied warranty of
+# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+# GNU General Public License for more details.
+#
+# You should have received a copy of the GNU General Public License
+# along with Ansible. If not, see <http://www.gnu.org/licenses/>.
+
+"""
+Nagios NDO external inventory script.
+========================================
+
+Returns hosts and hostgroups from Nagios NDO.
+
+Configuration is read from `nagios_ndo.ini`.
+"""
+
+import os
+import argparse
+try:
+ import configparser
+except ImportError:
+ import ConfigParser
+ configparser = ConfigParser
+import json
+
+try:
+ from sqlalchemy import text
+ from sqlalchemy.engine import create_engine
+except ImportError:
+ print("Error: SQLAlchemy is needed. Try something like: pip install sqlalchemy")
+ exit(1)
+
+class NagiosNDOInventory(object):
+
+ def read_settings(self):
+ config = configparser.SafeConfigParser()
+ config.read(os.path.dirname(os.path.realpath(__file__)) + '/nagios_ndo.ini')
+ if config.has_option('ndo', 'database_uri'):
+ self.ndo_database_uri = config.get('ndo', 'database_uri')
+
+ def read_cli(self):
+ parser = argparse.ArgumentParser()
+ parser.add_argument('--host', nargs=1)
+ parser.add_argument('--list', action='store_true')
+ self.options = parser.parse_args()
+
+ def get_hosts(self):
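+ # Query the NDO database and build the inventory structure Ansible
+ # expects: a flat list of all hosts plus one group per Nagios hostgroup.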
+ engine = create_engine(self.ndo_database_uri)
+ connection = engine.connect()
+ select_hosts = text("SELECT display_name \
+ FROM nagios_hosts")
+ select_hostgroups = text("SELECT alias \
+ FROM nagios_hostgroups")
+ select_hostgroup_hosts = text("SELECT h.display_name \
+ FROM nagios_hostgroup_members hgm, nagios_hosts h, nagios_hostgroups hg \
+ WHERE hgm.hostgroup_id = hg.hostgroup_id \
+ AND hgm.host_object_id = h.host_object_id \
+ AND hg.alias =:hostgroup_alias")
+
+ hosts = connection.execute(select_hosts)
+ self.result['all']['hosts'] = [host['display_name'] for host in hosts]
+
+ for hostgroup in connection.execute(select_hostgroups):
+ hostgroup_alias = hostgroup['alias']
+ self.result[hostgroup_alias] = {}
+ hosts = connection.execute(select_hostgroup_hosts, hostgroup_alias=hostgroup_alias)
+ self.result[hostgroup_alias]['hosts'] = [host['display_name'] for host in hosts]
+
+ def __init__(self):
+
+ self.defaultgroup = 'group_all'
+ self.ndo_database_uri = None
+ self.options = None
+
+ self.read_settings()
+ self.read_cli()
+
+ self.result = {}
+ self.result['all'] = {}
+ self.result['all']['hosts'] = []
+ self.result['_meta'] = {}
+ self.result['_meta']['hostvars'] = {}
+
+ if self.ndo_database_uri:
+ self.get_hosts()
+ if self.options.host:
+ print(json.dumps({}))
+ elif self.options.list:
+ print(json.dumps(self.result))
+ else:
+ print("usage: --list or --host HOSTNAME")
+ exit(1)
+ else:
+ print("Error: Database configuration is missing. See nagios_ndo.ini.")
+ exit(1)
+
+NagiosNDOInventory()
diff --git a/contrib/inventory/nova.py b/contrib/inventory/nova.py
index af2e7a0760..e8f3b9a626 100755
--- a/contrib/inventory/nova.py
+++ b/contrib/inventory/nova.py
@@ -26,6 +26,7 @@ import re
import os
import ConfigParser
from novaclient import client as nova_client
+from six import iteritems
try:
import json
@@ -194,7 +195,7 @@ if (len(sys.argv) == 2 and sys.argv[1] == '--list') or len(sys.argv) == 1:
push(groups, server.name, access_ip)
# Run through each metadata item and add instance to it
- for key, value in server.metadata.iteritems():
+ for key, value in iteritems(server.metadata):
composed_key = to_safe('tag_{0}_{1}'.format(key, value))
push(groups, composed_key, access_ip)
@@ -225,5 +226,5 @@ elif len(sys.argv) == 3 and (sys.argv[1] == '--host'):
sys.exit(0)
else:
- print "usage: --list ..OR.. --host <hostname>"
+ print("usage: --list ..OR.. --host <hostname>")
sys.exit(1)
diff --git a/contrib/inventory/openshift.py b/contrib/inventory/openshift.py
index c6acb6ff8c..67d37a7330 100755
--- a/contrib/inventory/openshift.py
+++ b/contrib/inventory/openshift.py
@@ -28,7 +28,6 @@ version_added: None
author: Michael Scherer
'''
-import urllib2
try:
import json
except ImportError:
@@ -39,6 +38,8 @@ import sys
import ConfigParser
import StringIO
+from ansible.module_utils.urls import open_url
+
configparser = None
@@ -61,39 +62,26 @@ def get_config(env_var, config_var):
if not result:
result = get_from_rhc_config(config_var)
if not result:
- print "failed=True msg='missing %s'" % env_var
+ print("failed=True msg='missing %s'" % env_var)
sys.exit(1)
return result
-def get_json_from_api(url):
- req = urllib2.Request(url, None, {'Accept': 'application/json; version=1.5'})
- response = urllib2.urlopen(req)
+def get_json_from_api(url, username, password):
+ headers = {'Accept': 'application/json; version=1.5'}
+ response = open_url(url, headers=headers, url_username=username, url_password=password)
return json.loads(response.read())['data']
-def passwd_setup(top_level_url, username, password):
- # create a password manager
- password_mgr = urllib2.HTTPPasswordMgrWithDefaultRealm()
- password_mgr.add_password(None, top_level_url, username, password)
-
- handler = urllib2.HTTPBasicAuthHandler(password_mgr)
- opener = urllib2.build_opener(handler)
-
- urllib2.install_opener(opener)
-
-
username = get_config('ANSIBLE_OPENSHIFT_USERNAME', 'default_rhlogin')
password = get_config('ANSIBLE_OPENSHIFT_PASSWORD', 'password')
broker_url = 'https://%s/broker/rest/' % get_config('ANSIBLE_OPENSHIFT_BROKER', 'libra_server')
-passwd_setup(broker_url, username, password)
-
-response = get_json_from_api(broker_url + '/domains')
+response = get_json_from_api(broker_url + '/domains', username, password)
response = get_json_from_api("%s/domains/%s/applications" %
- (broker_url, response[0]['id']))
+ (broker_url, response[0]['id']), username, password)
result = {}
for app in response:
@@ -109,8 +97,8 @@ for app in response:
result[app_name]['vars']['ansible_ssh_user'] = user
if len(sys.argv) == 2 and sys.argv[1] == '--list':
- print json.dumps(result)
+ print(json.dumps(result))
elif len(sys.argv) == 3 and sys.argv[1] == '--host':
- print json.dumps({})
+ print(json.dumps({}))
else:
- print "Need an argument, either --list or --host <host>"
+ print("Need an argument, either --list or --host <host>")
diff --git a/contrib/inventory/openvz.py b/contrib/inventory/openvz.py
index fd0bd9ff79..719541cb7b 100755
--- a/contrib/inventory/openvz.py
+++ b/contrib/inventory/openvz.py
@@ -70,8 +70,8 @@ def get_guests():
if len(sys.argv) == 2 and sys.argv[1] == '--list':
inv_json = get_guests()
- print json.dumps(inv_json, sort_keys=True)
+ print(json.dumps(inv_json, sort_keys=True))
elif len(sys.argv) == 3 and sys.argv[1] == '--host':
- print json.dumps({});
+ print(json.dumps({}))
else:
- print "Need an argument, either --list or --host <host>"
+ print("Need an argument, either --list or --host <host>")
diff --git a/contrib/inventory/ovirt.py b/contrib/inventory/ovirt.py
index dc022c5dfd..23646fa206 100755
--- a/contrib/inventory/ovirt.py
+++ b/contrib/inventory/ovirt.py
@@ -95,10 +95,10 @@ class OVirtInventory(object):
# Just display data for specific host
if self.args.host:
- print self.json_format_dict(
+ print(self.json_format_dict(
self.node_to_dict(self.get_instance(self.args.host)),
pretty=self.args.pretty
- )
+ ))
sys.exit(0)
# Otherwise, assume user wants all instances grouped
diff --git a/contrib/inventory/proxmox.py b/contrib/inventory/proxmox.py
index 80f6628d97..ab65c342e4 100755
--- a/contrib/inventory/proxmox.py
+++ b/contrib/inventory/proxmox.py
@@ -16,7 +16,6 @@
# along with this program. If not, see <http://www.gnu.org/licenses/>.
import urllib
-import urllib2
try:
import json
except ImportError:
@@ -25,6 +24,10 @@ import os
import sys
from optparse import OptionParser
+from six import iteritems
+
+from ansible.module_utils.urls import open_url
+
class ProxmoxNodeList(list):
def get_names(self):
return [node['node'] for node in self]
@@ -32,7 +35,7 @@ class ProxmoxNodeList(list):
class ProxmoxQemu(dict):
def get_variables(self):
variables = {}
- for key, value in self.iteritems():
+ for key, value in iteritems(self):
variables['proxmox_' + key] = value
return variables
@@ -83,7 +86,7 @@ class ProxmoxAPI(object):
'password': self.options.password,
})
- data = json.load(urllib2.urlopen(request_path, request_params))
+ data = json.load(open_url(request_path, data=request_params))
self.credentials = {
'ticket': data['data']['ticket'],
@@ -91,11 +94,10 @@ class ProxmoxAPI(object):
}
def get(self, url, data=None):
- opener = urllib2.build_opener()
- opener.addheaders.append(('Cookie', 'PVEAuthCookie={}'.format(self.credentials['ticket'])))
-
request_path = '{}{}'.format(self.options.url, url)
- request = opener.open(request_path, data)
+
+ headers = {'Cookie': 'PVEAuthCookie={}'.format(self.credentials['ticket'])}
+ request = open_url(request_path, data=data, headers=headers)
response = json.load(request)
return response['data']
@@ -172,7 +174,7 @@ def main():
if options.pretty:
indent = 2
- print json.dumps(data, indent=indent)
+ print(json.dumps(data, indent=indent))
if __name__ == '__main__':
main()
diff --git a/contrib/inventory/rax.py b/contrib/inventory/rax.py
index a42bbfcfef..0028f54d20 100755
--- a/contrib/inventory/rax.py
+++ b/contrib/inventory/rax.py
@@ -153,6 +153,8 @@ import warnings
import collections
import ConfigParser
+from six import iteritems
+
from ansible.constants import get_config, mk_boolean
try:
@@ -245,7 +247,7 @@ def _list_into_cache(regions):
if cs is None:
warnings.warn(
'Connecting to Rackspace region "%s" has caused Pyrax to '
- 'return a NoneType. Is this a valid region?' % region,
+ 'return None. Is this a valid region?' % region,
RuntimeWarning)
continue
for server in cs.servers.list():
@@ -267,7 +269,7 @@ def _list_into_cache(regions):
hostvars[server.name]['rax_region'] = region
- for key, value in server.metadata.iteritems():
+ for key, value in iteritems(server.metadata):
groups['%s_%s_%s' % (prefix, key, value)].append(server.name)
groups['instance-%s' % server.id].append(server.name)
@@ -412,7 +414,7 @@ def setup():
pyrax.keyring_auth(keyring_username, region=region)
else:
pyrax.set_credential_file(creds_file, region=region)
- except Exception, e:
+ except Exception as e:
sys.stderr.write("%s: %s\n" % (e, e.message))
sys.exit(1)
diff --git a/contrib/inventory/softlayer.py b/contrib/inventory/softlayer.py
index d2a15b1218..d9d11a5571 100755
--- a/contrib/inventory/softlayer.py
+++ b/contrib/inventory/softlayer.py
@@ -53,10 +53,10 @@ class SoftLayerInventory(object):
if self.args.list:
self.get_all_servers()
- print self.json_format_dict(self.inventory, True)
+ print(self.json_format_dict(self.inventory, True))
elif self.args.host:
self.get_virtual_servers()
- print self.json_format_dict(self.inventory["_meta"]["hostvars"][self.args.host], True)
+ print(self.json_format_dict(self.inventory["_meta"]["hostvars"][self.args.host], True))
def to_safe(self, word):
'''Converts 'bad' characters in a string to underscores so they can be used as Ansible groups'''
diff --git a/contrib/inventory/spacewalk.py b/contrib/inventory/spacewalk.py
index b853ca18ba..fb0a152eca 100755
--- a/contrib/inventory/spacewalk.py
+++ b/contrib/inventory/spacewalk.py
@@ -40,6 +40,8 @@ Tested with Ansible 1.9.2 and spacewalk 2.3
# along with this program. If not, see <http://www.gnu.org/licenses/>.
#
+from __future__ import print_function
+
import sys
import os
import time
@@ -47,6 +49,8 @@ from optparse import OptionParser
import subprocess
import ConfigParser
+from six import iteritems
+
try:
import json
except:
@@ -60,7 +64,7 @@ INI_FILE = os.path.join(base_dir, "spacewalk.ini")
# Sanity check
if not os.path.exists(SW_REPORT):
- print >> sys.stderr, 'Error: %s is required for operation.' % (SW_REPORT)
+ print('Error: %s is required for operation.' % (SW_REPORT), file=sys.stderr)
sys.exit(1)
# Pre-startup work
@@ -132,9 +136,9 @@ try:
for group in spacewalk_report('system-groups'):
org_groups[group['spacewalk_group_id']] = group['spacewalk_org_id']
-except (OSError), e:
- print >> sys.stderr, 'Problem executing the command "%s system-groups": %s' % \
- (SW_REPORT, str(e))
+except (OSError) as e:
+ print('Problem executing the command "%s system-groups": %s' %
+ (SW_REPORT, str(e)), file=sys.stderr)
sys.exit(2)
@@ -148,9 +152,9 @@ if options.list:
for item in spacewalk_report('inventory'):
host_vars[ item['spacewalk_profile_name'] ] = dict( ( key, ( value.split(';') if ';' in value else value) ) for key, value in item.items() )
- except (OSError), e:
- print >> sys.stderr, 'Problem executing the command "%s inventory": %s' % \
- (SW_REPORT, str(e))
+ except (OSError) as e:
+ print('Problem executing the command "%s inventory": %s' %
+ (SW_REPORT, str(e)), file=sys.stderr)
sys.exit(2)
groups = {}
@@ -185,19 +189,19 @@ if options.list:
if system['spacewalk_server_name'] in host_vars and not system['spacewalk_server_name'] in meta[ "hostvars" ]:
meta[ "hostvars" ][ system['spacewalk_server_name'] ] = host_vars[ system['spacewalk_server_name'] ]
- except (OSError), e:
- print >> sys.stderr, 'Problem executing the command "%s system-groups-systems": %s' % \
- (SW_REPORT, str(e))
+ except (OSError) as e:
+ print('Problem executing the command "%s system-groups-systems": %s' %
+ (SW_REPORT, str(e)), file=sys.stderr)
sys.exit(2)
if options.human:
- for group, systems in groups.iteritems():
- print '[%s]\n%s\n' % (group, '\n'.join(systems))
+ for group, systems in iteritems(groups):
+ print('[%s]\n%s\n' % (group, '\n'.join(systems)))
else:
- final = dict( [ (k, list(s)) for k, s in groups.iteritems() ] )
+ final = dict( [ (k, list(s)) for k, s in iteritems(groups) ] )
final["_meta"] = meta
- print json.dumps( final )
- #print json.dumps(groups)
+ print(json.dumps( final ))
+ #print(json.dumps(groups))
sys.exit(0)
@@ -212,17 +216,17 @@ elif options.host:
host_details = system
break
- except (OSError), e:
- print >> sys.stderr, 'Problem executing the command "%s inventory": %s' % \
- (SW_REPORT, str(e))
+ except (OSError) as e:
+ print('Problem executing the command "%s inventory": %s' %
+ (SW_REPORT, str(e)), file=sys.stderr)
sys.exit(2)
if options.human:
- print 'Host: %s' % options.host
- for k, v in host_details.iteritems():
- print ' %s: %s' % (k, '\n '.join(v.split(';')))
+ print('Host: %s' % options.host)
+ for k, v in iteritems(host_details):
+ print(' %s: %s' % (k, '\n '.join(v.split(';'))))
else:
- print json.dumps( dict( ( key, ( value.split(';') if ';' in value else value) ) for key, value in host_details.items() ) )
+ print( json.dumps( dict( ( key, ( value.split(';') if ';' in value else value) ) for key, value in host_details.items() ) ) )
sys.exit(0)
else:
diff --git a/contrib/inventory/ssh_config.py b/contrib/inventory/ssh_config.py
index 55401a664d..3ff7eb9658 100755
--- a/contrib/inventory/ssh_config.py
+++ b/contrib/inventory/ssh_config.py
@@ -99,12 +99,12 @@ def print_list():
if tmp_dict:
meta['hostvars'][alias] = tmp_dict
- print json.dumps({_key: list(set(meta['hostvars'].keys())), '_meta': meta})
+ print(json.dumps({_key: list(set(meta['hostvars'].keys())), '_meta': meta}))
def print_host(host):
cfg = get_config()
- print json.dumps(cfg[host])
+ print(json.dumps(cfg[host]))
def get_args(args_list):
diff --git a/contrib/inventory/vagrant.py b/contrib/inventory/vagrant.py
index 10dc61cdb2..e7ba0dbe58 100755
--- a/contrib/inventory/vagrant.py
+++ b/contrib/inventory/vagrant.py
@@ -113,13 +113,13 @@ if options.list:
for host in ssh_config:
meta['hostvars'][host] = ssh_config[host]
- print json.dumps({_group: list(ssh_config.keys()), '_meta': meta})
+ print(json.dumps({_group: list(ssh_config.keys()), '_meta': meta}))
sys.exit(0)
# Get out the host details
# ------------------------------
elif options.host:
- print json.dumps(get_a_ssh_config(options.host))
+ print(json.dumps(get_a_ssh_config(options.host)))
sys.exit(0)
# Print out help
diff --git a/contrib/inventory/vbox.py b/contrib/inventory/vbox.py
index ff31785d7e..630d955fed 100755
--- a/contrib/inventory/vbox.py
+++ b/contrib/inventory/vbox.py
@@ -111,4 +111,4 @@ if __name__ == '__main__':
inventory = get_hosts()
import pprint
- print pprint.pprint(inventory)
+ pprint.pprint(inventory)
diff --git a/contrib/inventory/vmware.py b/contrib/inventory/vmware.py
index b708d59994..8f723a638d 100755
--- a/contrib/inventory/vmware.py
+++ b/contrib/inventory/vmware.py
@@ -28,6 +28,8 @@ take precedence over options present in the INI file. An INI file is not
required if these options are specified using environment variables.
'''
+from __future__ import print_function
+
import collections
import json
import logging
@@ -37,6 +39,8 @@ import sys
import time
import ConfigParser
+from six import text_type
+
# Disable logging message trigged by pSphere/suds.
try:
from logging import NullHandler
@@ -147,7 +151,7 @@ class VMwareInventory(object):
seen = seen or set()
if isinstance(obj, ManagedObject):
try:
- obj_unicode = unicode(getattr(obj, 'name'))
+ obj_unicode = text_type(getattr(obj, 'name'))
except AttributeError:
obj_unicode = ()
if obj in seen:
@@ -164,7 +168,7 @@ class VMwareInventory(object):
obj_info = self._get_obj_info(val, depth - 1, seen)
if obj_info != ():
d[attr] = obj_info
- except Exception, e:
+ except Exception as e:
pass
return d
elif isinstance(obj, SudsObject):
@@ -207,8 +211,8 @@ class VMwareInventory(object):
host_info[k] = v
try:
host_info['ipAddress'] = host.config.network.vnic[0].spec.ip.ipAddress
- except Exception, e:
- print >> sys.stderr, e
+ except Exception as e:
+ print(e, file=sys.stderr)
host_info = self._flatten_dict(host_info, prefix)
if ('%s_ipAddress' % prefix) in host_info:
host_info['ansible_ssh_host'] = host_info['%s_ipAddress' % prefix]
diff --git a/contrib/inventory/windows_azure.py b/contrib/inventory/windows_azure.py
index 9b5197ffc8..d566b0c4d3 100755
--- a/contrib/inventory/windows_azure.py
+++ b/contrib/inventory/windows_azure.py
@@ -51,7 +51,7 @@ try:
from azure import WindowsAzureError
from azure.servicemanagement import ServiceManagementService
except ImportError as e:
- print "failed=True msg='`azure` library required for this script'"
+ print("failed=True msg='`azure` library required for this script'")
sys.exit(1)
@@ -109,7 +109,7 @@ class AzureInventory(object):
# JSONify the data.
data_to_print = self.json_format_dict(data, pretty=True)
- print data_to_print
+ print(data_to_print)
def get_host(self, hostname, jsonify=True):
"""Return information about the given hostname, based on what
@@ -195,9 +195,9 @@ class AzureInventory(object):
for cloud_service in self.sms.list_hosted_services():
self.add_deployments(cloud_service)
except WindowsAzureError as e:
- print "Looks like Azure's API is down:"
- print
- print e
+ print("Looks like Azure's API is down:")
+ print("")
+ print(e)
sys.exit(1)
def add_deployments(self, cloud_service):
@@ -208,9 +208,9 @@ class AzureInventory(object):
for deployment in self.sms.get_hosted_service_properties(cloud_service.service_name,embed_detail=True).deployments.deployments:
self.add_deployment(cloud_service, deployment)
except WindowsAzureError as e:
- print "Looks like Azure's API is down:"
- print
- print e
+ print("Looks like Azure's API is down:")
+ print("")
+ print(e)
sys.exit(1)
def add_deployment(self, cloud_service, deployment):
diff --git a/contrib/inventory/zabbix.py b/contrib/inventory/zabbix.py
index 2bc1e2e1cc..9b4d81c309 100755
--- a/contrib/inventory/zabbix.py
+++ b/contrib/inventory/zabbix.py
@@ -30,6 +30,8 @@ Configuration is read from `zabbix.ini`.
Tested with Zabbix Server 2.0.6.
"""
+from __future__ import print_function
+
import os, sys
import argparse
import ConfigParser
@@ -37,7 +39,8 @@ import ConfigParser
try:
from zabbix_api import ZabbixAPI
except:
- print >> sys.stderr, "Error: Zabbix API library must be installed: pip install zabbix-api."
+ print("Error: Zabbix API library must be installed: pip install zabbix-api.",
+ file=sys.stderr)
sys.exit(1)
try:
@@ -109,24 +112,24 @@ class ZabbixInventory(object):
try:
api = ZabbixAPI(server=self.zabbix_server)
api.login(user=self.zabbix_username, password=self.zabbix_password)
- except BaseException, e:
- print >> sys.stderr, "Error: Could not login to Zabbix server. Check your zabbix.ini."
+ except BaseException as e:
+ print("Error: Could not login to Zabbix server. Check your zabbix.ini.", file=sys.stderr)
sys.exit(1)
if self.options.host:
data = self.get_host(api, self.options.host)
- print json.dumps(data, indent=2)
+ print(json.dumps(data, indent=2))
elif self.options.list:
data = self.get_list(api)
- print json.dumps(data, indent=2)
+ print(json.dumps(data, indent=2))
else:
- print >> sys.stderr, "usage: --list ..OR.. --host <hostname>"
+ print("usage: --list ..OR.. --host <hostname>", file=sys.stderr)
sys.exit(1)
else:
- print >> sys.stderr, "Error: Configuration of server and credentials are required. See zabbix.ini."
+ print("Error: Configuration of server and credentials are required. See zabbix.ini.", file=sys.stderr)
sys.exit(1)
ZabbixInventory()
diff --git a/contrib/inventory/zone.py b/contrib/inventory/zone.py
index 163912c7f4..2c71056cc5 100755
--- a/contrib/inventory/zone.py
+++ b/contrib/inventory/zone.py
@@ -36,8 +36,8 @@ result['all']['vars'] = {}
result['all']['vars']['ansible_connection'] = 'zone'
if len(sys.argv) == 2 and sys.argv[1] == '--list':
- print json.dumps(result)
+ print(json.dumps(result))
elif len(sys.argv) == 3 and sys.argv[1] == '--host':
- print json.dumps({'ansible_connection': 'zone'})
+ print(json.dumps({'ansible_connection': 'zone'}))
else:
- print "Need an argument, either --list or --host <host>"
+ print("Need an argument, either --list or --host <host>")
diff --git a/docs/man/man1/ansible-playbook.1.asciidoc.in b/docs/man/man1/ansible-playbook.1.asciidoc.in
index 00682567e8..2a1a94c5cd 100644
--- a/docs/man/man1/ansible-playbook.1.asciidoc.in
+++ b/docs/man/man1/ansible-playbook.1.asciidoc.in
@@ -151,6 +151,11 @@ run operations with su as this user (default=root)
Run operations with sudo (nopasswd) (deprecated, use become)
+*--ssh-extra-args=*''-o ProxyCommand="ssh -W %h:%p ..." ...''::
+
+Add the specified arguments to any ssh command-line. Useful to set a
+ProxyCommand to use a jump host, but any arguments may be specified.
+
*-U*, 'SUDO_USER', *--sudo-user=*'SUDO_USER'::
Desired sudo user (default=root) (deprecated, use become).
diff --git a/docs/man/man1/ansible-pull.1.asciidoc.in b/docs/man/man1/ansible-pull.1.asciidoc.in
index b78b7e67a2..520a60bf21 100644
--- a/docs/man/man1/ansible-pull.1.asciidoc.in
+++ b/docs/man/man1/ansible-pull.1.asciidoc.in
@@ -105,6 +105,11 @@ Purge the checkout after the playbook is run.
Sleep for a random interval (between 0 and SLEEP seconds) before starting. This is a useful way to disperse git requests.
+*--ssh-extra-args=*''-o ProxyCommand="ssh -W %h:%p ..." ...''::
+
+Add the specified arguments to any ssh command-line. Useful to set a
+ProxyCommand to use a jump host, but any arguments may be specified.
+
*-t* 'TAGS', *--tags=*'TAGS'::
Only run plays and tasks tagged with these values.
diff --git a/docs/man/man1/ansible-vault.1 b/docs/man/man1/ansible-vault.1
index 9cadbdd62d..e448d031b9 100644
--- a/docs/man/man1/ansible-vault.1
+++ b/docs/man/man1/ansible-vault.1
@@ -1,13 +1,13 @@
'\" t
.\" Title: ansible-vault
.\" Author: [see the "AUTHOR" section]
-.\" Generator: DocBook XSL Stylesheets v1.78.1 <http://docbook.sf.net/>
-.\" Date: 07/28/2015
+.\" Generator: DocBook XSL Stylesheets v1.76.1 <http://docbook.sf.net/>
+.\" Date: 08/27/2015
.\" Manual: System administration commands
.\" Source: Ansible 2.0.0
.\" Language: English
.\"
-.TH "ANSIBLE\-VAULT" "1" "07/28/2015" "Ansible 2\&.0\&.0" "System administration commands"
+.TH "ANSIBLE\-VAULT" "1" "08/27/2015" "Ansible 2\&.0\&.0" "System administration commands"
.\" -----------------------------------------------------------------
.\" * Define some portability stuff
.\" -----------------------------------------------------------------
@@ -80,19 +80,35 @@ The \fBedit\fR sub\-command is used to modify a file which was previously encryp
This command will decrypt the file to a temporary file and allow you to edit the file, saving it back when done and removing the temporary file\&.
.SH "REKEY"
.sp
-*$ ansible\-vault rekey [options] FILE_1 [FILE_2, \&..., FILE_N]
+\fB$ ansible\-vault rekey [options] FILE_1 [FILE_2, \&..., FILE_N]\fR
.sp
The \fBrekey\fR command is used to change the password on vault\-encrypted files\&. This command can update multiple files at once, and will prompt for both the old and new passwords before modifying any data\&.
.SH "ENCRYPT"
.sp
-*$ ansible\-vault encrypt [options] FILE_1 [FILE_2, \&..., FILE_N]
+\fB$ ansible\-vault encrypt [options] FILE_1 [FILE_2, \&..., FILE_N]\fR
.sp
The \fBencrypt\fR sub\-command is used to encrypt pre\-existing data files\&. As with the \fBrekey\fR command, you can specify multiple files in one command\&.
+.sp
+Starting with version 2\&.0, the \fBencrypt\fR command accepts an \fB\-\-output FILENAME\fR option to determine where encrypted output is stored\&. With this option, input is read from the (at most one) filename given on the command line; if no input file is given, input is read from stdin\&. Either the input or the output file may be given as \fI\-\fR for stdin and stdout respectively\&. If neither input nor output file is given, the command acts as a filter, reading plaintext from stdin and writing it to stdout\&.
+.sp
+Thus any of the following invocations can be used:
+.sp
+\fB$ ansible\-vault encrypt\fR
+.sp
+\fB$ ansible\-vault encrypt \-\-output OUTFILE\fR
+.sp
+\fB$ ansible\-vault encrypt INFILE \-\-output OUTFILE\fR
+.sp
+\fB$ echo secret|ansible\-vault encrypt \-\-output OUTFILE\fR
+.sp
+Reading from stdin and writing only encrypted output is a good way to prevent sensitive data from ever hitting disk (either interactively or from a script)\&.
.SH "DECRYPT"
.sp
-*$ ansible\-vault decrypt [options] FILE_1 [FILE_2, \&..., FILE_N]
+\fB$ ansible\-vault decrypt [options] FILE_1 [FILE_2, \&..., FILE_N]\fR
.sp
The \fBdecrypt\fR sub\-command is used to remove all encryption from data files\&. The files will be stored as plain\-text YAML once again, so be sure that you do not run this command on data files with active passwords or other sensitive data\&. In most cases, users will want to use the \fBedit\fR sub\-command to modify the files securely\&.
+.sp
+As with \fBencrypt\fR, the \fBdecrypt\fR subcommand also accepts the \fB\-\-output FILENAME\fR option to specify where plaintext output is stored, and stdin/stdout is handled as described above\&.
.SH "AUTHOR"
.sp
Ansible was originally written by Michael DeHaan\&. See the AUTHORS file for a complete list of contributors\&.
diff --git a/docs/man/man1/ansible-vault.1.asciidoc.in b/docs/man/man1/ansible-vault.1.asciidoc.in
index 3785ab9433..5db71e09e3 100644
--- a/docs/man/man1/ansible-vault.1.asciidoc.in
+++ b/docs/man/man1/ansible-vault.1.asciidoc.in
@@ -84,7 +84,7 @@ file, saving it back when done and removing the temporary file.
REKEY
-----
-*$ ansible-vault rekey [options] FILE_1 [FILE_2, ..., FILE_N]
+*$ ansible-vault rekey [options] FILE_1 [FILE_2, ..., FILE_N]*
The *rekey* command is used to change the password on vault-encrypted files.
This command can update multiple files at once, and will prompt for both the
@@ -93,21 +93,45 @@ old and new passwords before modifying any data.
ENCRYPT
-------
-*$ ansible-vault encrypt [options] FILE_1 [FILE_2, ..., FILE_N]
+*$ ansible-vault encrypt [options] FILE_1 [FILE_2, ..., FILE_N]*
The *encrypt* sub-command is used to encrypt pre-existing data files. As with the
*rekey* command, you can specify multiple files in one command.
+Starting with version 2.0, the *encrypt* command accepts an *--output FILENAME*
+option to determine where encrypted output is stored. With this option, input is
+read from the (at most one) filename given on the command line; if no input file
+is given, input is read from stdin. Either the input or the output file may be
+given as '-' for stdin and stdout respectively. If neither input nor output file
+is given, the command acts as a filter, reading plaintext from stdin and writing
+it to stdout.
+
+Thus any of the following invocations can be used:
+
+*$ ansible-vault encrypt*
+
+*$ ansible-vault encrypt --output OUTFILE*
+
+*$ ansible-vault encrypt INFILE --output OUTFILE*
+
+*$ echo secret|ansible-vault encrypt --output OUTFILE*
+
+Reading from stdin and writing only encrypted output is a good way to prevent
+sensitive data from ever hitting disk (either interactively or from a script).
+
DECRYPT
-------
-*$ ansible-vault decrypt [options] FILE_1 [FILE_2, ..., FILE_N]
+*$ ansible-vault decrypt [options] FILE_1 [FILE_2, ..., FILE_N]*
The *decrypt* sub-command is used to remove all encryption from data files. The files
will be stored as plain-text YAML once again, so be sure that you do not run this
command on data files with active passwords or other sensitive data. In most cases,
users will want to use the *edit* sub-command to modify the files securely.
+As with *encrypt*, the *decrypt* subcommand also accepts the *--output FILENAME*
+option to specify where plaintext output is stored, and stdin/stdout is handled
+as described above.
AUTHOR
------
diff --git a/docs/man/man1/ansible.1.asciidoc.in b/docs/man/man1/ansible.1.asciidoc.in
index aaaac33c2a..7578e8f8be 100644
--- a/docs/man/man1/ansible.1.asciidoc.in
+++ b/docs/man/man1/ansible.1.asciidoc.in
@@ -143,6 +143,11 @@ Run operations with su as this user (default=root)
Run the command as the user given by -u and sudo to root.
+*--ssh-extra-args=*''-o ProxyCommand="ssh -W %h:%p ..." ...''::
+
+Add the specified arguments to any ssh command-line. Useful to set a
+ProxyCommand to use a jump host, but any arguments may be specified.
+
*-U* 'SUDO_USERNAME', *--sudo-user=*'SUDO_USERNAME'::
Sudo to 'SUDO_USERNAME' instead of root. Implies --sudo.
diff --git a/docsite/README.md b/docsite/README.md
index 5ff774895c..21985a8f6a 100644
--- a/docsite/README.md
+++ b/docsite/README.md
@@ -4,7 +4,7 @@ Homepage and documentation source for Ansible
This project hosts the source behind [docs.ansible.com](http://docs.ansible.com/)
Contributions to the documentation are welcome. To make changes, submit a pull request
-that changes the reStructuredText files in the "rst/" directory only, and Michael can
+that changes the reStructuredText files in the "rst/" directory only, and the core team can
do a docs build and push the static files.
If you wish to verify output from the markup
diff --git a/docsite/rst/become.rst b/docsite/rst/become.rst
index c8738ef08a..64628515c6 100644
--- a/docsite/rst/become.rst
+++ b/docsite/rst/become.rst
@@ -76,7 +76,7 @@ new ones.
.. note:: Methods cannot be chained; you cannot use 'sudo /bin/su -' to become a user. You need to have privileges to run the command as that user in sudo, or be able to su directly to it (the same applies to pbrun, pfexec or other supported methods).
-.. note:: Privilege escalation permissions have to be general, Ansible does not always use a specific command to do something but runs modules (code) from a temporary file name which changes every time. So if you have '/sbin/sevice' or '/bin/chmod' as the allowed commands this will fail with ansible.
+.. note:: Privilege escalation permissions have to be general. Ansible does not always use a specific command to do something but runs modules (code) from a temporary file name which changes every time. So if you have '/sbin/service' or '/bin/chmod' as the allowed commands, this will fail with ansible.
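+
+For example, a sudoers entry limited to specific commands (a hypothetical illustration) will not work with Ansible, while a general one will::
+
+    # fails: Ansible runs modules from temporary file names that change every run
+    alice ALL = (root) /sbin/service, /bin/chmod
+    # works: general privilege escalation
+    alice ALL = (root) NOPASSWD: ALL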
.. seealso::
diff --git a/docsite/rst/community.rst b/docsite/rst/community.rst
index cebc1f9152..4afd6cdada 100644
--- a/docsite/rst/community.rst
+++ b/docsite/rst/community.rst
@@ -223,8 +223,9 @@ asking about prospective feature design, or discussions about extending ansible
about new releases of Ansible, and also infrequent event information, such as announcements about an AnsibleFest coming up,
which is our official conference series.
-To subscribe to a group from a non-google account, you can email the subscription address, for
-example ansible-devel+subscribe@googlegroups.com.
+`Ansible Lockdown List <https://groups.google.com/forum/#!forum/ansible-lockdown>`_ is for all things related to Ansible Lockdown projects, including DISA STIG automation and CIS Benchmarks.
+
+To subscribe to a group from a non-google account, you can send an email to the subscription address requesting the subscription. For example: ansible-devel+subscribe@googlegroups.com
Release Numbering
-----------------
diff --git a/docsite/rst/developing_modules.rst b/docsite/rst/developing_modules.rst
index 76fdbd9a44..6eedcb2fa4 100644
--- a/docsite/rst/developing_modules.rst
+++ b/docsite/rst/developing_modules.rst
@@ -67,7 +67,7 @@ Testing Modules
There's a useful test script in the source checkout for ansible::
- git clone git@github.com:ansible/ansible.git --recursive
+ git clone git://github.com/ansible/ansible.git --recursive
source ansible/hacking/env-setup
chmod +x ansible/hacking/test-module
@@ -191,7 +191,7 @@ a lot shorter than this::
Let's test that module::
- ansible/hacking/test-module -m ./time -a time=\"March 14 12:23\"
+ ansible/hacking/test-module -m ./time -a "time=\"March 14 12:23\""
This should return something like::
@@ -457,11 +457,11 @@ Module checklist
* The shebang should always be #!/usr/bin/python; this allows ansible_python_interpreter to work
* Documentation: Make sure it exists
* `required` should always be present, be it true or false
- * If `required` is false you need to document `default`, even if its 'null'
+ * If `required` is false you need to document `default`, even if the default is 'None' (the value used when no parameter is supplied). Make sure the default in the docs matches the default in the code.
* `default` is not needed for `required: true`
* Remove unnecessary doc like `aliases: []` or `choices: []`
* The version is not a float number and its value should be the current development version
- * The verify that arguments in doc and module spec dict are identical
+ * Verify that arguments in doc and module spec dict are identical
* For password / secret arguments no_log=True should be set
* Requirements should be documented, using the `requirements=[]` field
* Author should be set, name and github id at least
@@ -501,15 +501,14 @@ Module checklist
serializable. A common pitfall is to try returning an object via
exit_json(). Instead, convert the fields you need from the object into the
fields of a dictionary and return the dictionary.
-* Do not use urllib2 to handle urls. urllib2 does not natively verify TLS
- certificates and so is insecure for https. Instead, use either fetch_url or
- open_url from ansible.module_utils.urls.
+* When fetching URLs, please use either fetch_url or open_url from ansible.module_utils.urls
+ rather than urllib2; urllib2 does not natively verify TLS certificates and so is insecure for https.
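+  A minimal sketch of the preferred approach (hypothetical URL)::
+
+      from ansible.module_utils.urls import open_url
+
+      # open_url verifies TLS certificates by default, unlike urllib2
+      response = open_url('https://api.example.com/info', headers={'Accept': 'application/json'})
+      data = response.read()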
Windows modules checklist
`````````````````````````
* Favour native powershell and .net ways of doing things over calls to COM libraries or calls to native executables which may or may not be present in all versions of windows
* modules are in powershell (.ps1 files) but the docs reside in same name python file (.py)
-* look at ansible/lib/ansible/module_utils/powershell.ps1 for commmon code, avoid duplication
+* look at ansible/lib/ansible/module_utils/powershell.ps1 for common code, avoid duplication
* start with::
#!powershell
diff --git a/docsite/rst/developing_plugins.rst b/docsite/rst/developing_plugins.rst
index 97be451b54..e1fda78cb0 100644
--- a/docsite/rst/developing_plugins.rst
+++ b/docsite/rst/developing_plugins.rst
@@ -21,7 +21,7 @@ Carrier Pigeon?) it's as simple as copying the format of one of the existing mod
directory. The value of 'smart' for a connection allows selection of paramiko or openssh based on system capabilities, and chooses
'ssh' if OpenSSH supports ControlPersist, in Ansible 1.2.1 and later. Previous versions did not support 'smart'.
-More documentation on writing connection plugins is pending, though you can jump into `lib/ansible/plugins/connections <https://github.com/ansible/ansible/tree/devel/lib/ansible/plugins/connections>`_ and figure things out pretty easily.
+More documentation on writing connection plugins is pending, though you can jump into `lib/ansible/plugins/connection <https://github.com/ansible/ansible/tree/devel/lib/ansible/plugins/connection>`_ and figure things out pretty easily.
.. _developing_lookup_plugins:
diff --git a/docsite/rst/faq.rst b/docsite/rst/faq.rst
index 4635bb57d9..f8bc6a84c3 100644
--- a/docsite/rst/faq.rst
+++ b/docsite/rst/faq.rst
@@ -24,8 +24,8 @@ Setting inventory variables in the inventory file is the easiest way.
For instance, suppose these hosts have different usernames and ports::
[webservers]
- asdf.example.com ansible_ssh_port=5000 ansible_ssh_user=alice
- jkl.example.com ansible_ssh_port=5001 ansible_ssh_user=bob
+ asdf.example.com ansible_port=5000 ansible_user=alice
+ jkl.example.com ansible_port=5001 ansible_user=bob
You can also dictate the connection type to be used, if you want::
@@ -55,6 +55,37 @@ consider managing from a Fedora or openSUSE client even though you are managing
We keep paramiko as the default because, if you are first installing Ansible on an EL box, it offers a better experience
for new users.
+.. _use_ssh_jump_hosts:
+
+How do I configure a jump host to access servers that I have no direct access to?
++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++
+
+With Ansible version 2, it's possible to set `ansible_ssh_extra_args` as
+an inventory variable. Any arguments specified this way are added to the
+ssh command line when connecting to the relevant host(s), so it's a good
+way to set a `ProxyCommand`. Consider the following inventory group:
+
+ [gatewayed]
+ foo ansible_host=192.0.2.1
+ bar ansible_host=192.0.2.2
+
+You can create `group_vars/gatewayed.yml` with the following contents:
+
+ ansible_ssh_extra_args: '-o ProxyCommand="ssh -W %h:%p -q user@gateway.example.com"'
+
+Ansible will then add these arguments when trying to connect to any host
+in the group `gatewayed`. (These arguments are added to any `ssh_args`
+that may be configured, so it isn't necessary to repeat the default
+`ControlPath` settings in `ansible_ssh_extra_args`.)
+
+Note that `ssh -W` is available only with OpenSSH 5.4 or later. With
+older versions, it's necessary to execute `nc %h %p` or some equivalent
+command on the bastion host.
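+
+With an older OpenSSH on the bastion, the variable might instead look something
+like this (hypothetical gateway host):
+
+    ansible_ssh_extra_args: '-o ProxyCommand="ssh -q user@gateway.example.com nc %h %p"'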
+
+With earlier versions of Ansible, it was necessary to configure a
+suitable `ProxyCommand` for one or more hosts in `~/.ssh/config`,
+or globally by setting `ssh_args` in `ansible.cfg`.
+
.. _ec2_cloud_performance:
How do I speed up management inside EC2?
diff --git a/docsite/rst/guide_rax.rst b/docsite/rst/guide_rax.rst
index b1cc347eb1..53c956f54e 100644
--- a/docsite/rst/guide_rax.rst
+++ b/docsite/rst/guide_rax.rst
@@ -129,7 +129,7 @@ The rax module returns data about the nodes it creates, like IP addresses, hostn
local_action:
module: add_host
hostname: "{{ item.name }}"
- ansible_ssh_host: "{{ item.rax_accessipv4 }}"
+ ansible_host: "{{ item.rax_accessipv4 }}"
ansible_ssh_pass: "{{ item.rax_adminpass }}"
groups: raxhosts
with_items: rax.success
@@ -198,7 +198,7 @@ following information, which will be utilized for inventory and variables.
"_meta": {
"hostvars": {
"test": {
- "ansible_ssh_host": "1.1.1.1",
+ "ansible_host": "1.1.1.1",
"rax_accessipv4": "1.1.1.1",
"rax_accessipv6": "2607:f0d0:1002:51::4",
"rax_addresses": {
@@ -310,7 +310,7 @@ This can be achieved with the ``rax_facts`` module and an inventory file similar
region: "{{ rax_region }}"
- name: Map some facts
set_fact:
- ansible_ssh_host: "{{ rax_accessipv4 }}"
+ ansible_host: "{{ rax_accessipv4 }}"
While you don't need to know how it works, it may be interesting to know what kind of variables are returned.
@@ -516,9 +516,9 @@ Build a complete webserver environment with servers, custom networks and load ba
local_action:
module: add_host
hostname: "{{ item.name }}"
- ansible_ssh_host: "{{ item.rax_accessipv4 }}"
+ ansible_host: "{{ item.rax_accessipv4 }}"
ansible_ssh_pass: "{{ item.rax_adminpass }}"
- ansible_ssh_user: root
+ ansible_user: root
groups: web
with_items: rax.success
when: rax.action == 'create'
@@ -601,9 +601,9 @@ Using a Control Machine
local_action:
module: add_host
hostname: "{{ item.name }}"
- ansible_ssh_host: "{{ item.rax_accessipv4 }}"
+ ansible_host: "{{ item.rax_accessipv4 }}"
ansible_ssh_pass: "{{ item.rax_adminpass }}"
- ansible_ssh_user: root
+ ansible_user: root
rax_id: "{{ item.rax_id }}"
groups: web,new_web
with_items: rax.success
diff --git a/docsite/rst/guide_vagrant.rst b/docsite/rst/guide_vagrant.rst
index f61fd84feb..122242ccc1 100644
--- a/docsite/rst/guide_vagrant.rst
+++ b/docsite/rst/guide_vagrant.rst
@@ -103,7 +103,7 @@ inventory file may look something like this:
# Generated by Vagrant
- machine ansible_ssh_host=127.0.0.1 ansible_ssh_port=2222
+ machine ansible_host=127.0.0.1 ansible_port=2222
If you want to run Ansible manually, you will want to make sure to pass
``ansible`` or ``ansible-playbook`` commands the correct arguments for the
diff --git a/docsite/rst/intro_configuration.rst b/docsite/rst/intro_configuration.rst
index 2cf03a70d2..0e0e6deb64 100644
--- a/docsite/rst/intro_configuration.rst
+++ b/docsite/rst/intro_configuration.rst
@@ -43,7 +43,7 @@ Environmental configuration
```````````````````````````
Ansible also allows configuration of settings via environment variables. If these environment variables are set, they will
-override any setting loaded from the configuration file. These variables are for brevity not defined here, but look in 'constants.py' in the source tree if you want to use these. They are mostly considered to be a legacy system as compared to the config file, but are equally valid.
+override any setting loaded from the configuration file. For brevity, these variables are not defined here; look in `constants.py <https://github.com/ansible/ansible/blob/devel/lib/ansible/constants.py>`_ in the source tree if you want to use them. They are mostly considered a legacy system compared to the config file, but are equally valid.
.. _config_values_by_section:
@@ -334,6 +334,11 @@ official examples repos do not use this setting::
The valid values are either 'replace' (the default) or 'merge'.
+.. versionadded: '2.0'
+
+If you want to merge hashes without changing the global settings, use
+the `combine` filter described in :doc:`playbooks_filters`.
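+
+For example, a minimal sketch of merging two hashes in a template expression::
+
+    {{ {'a': 1, 'b': 2} | combine({'b': 3}) }}
+
+which evaluates to {'a': 1, 'b': 3}.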
+
.. _hostfile:
hostfile
@@ -539,6 +544,27 @@ Additional paths can be provided separated by colon characters, in the same way
Roles will be first searched for in the playbook directory. Should a role not be found, it will indicate all the possible paths
that were searched.
+.. _retry_files_enabled:
+
+retry_files_enabled
+===================
+
+This controls whether a failed Ansible playbook should create a .retry file. The default setting is True::
+
+ retry_files_enabled = False
+
+.. _retry_files_save_path:
+
+retry_files_save_path
+=====================
+
+The retry files save path is where Ansible will save .retry files when a playbook fails and retry_files_enabled is True (the default).
+The default location is ~/ and can be changed to any writeable path::
+
+ retry_files_save_path = ~/.ansible-retry
+
+The directory will be created if it does not already exist.
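+
+For example, assuming a playbook named site.yml failed on some hosts, the run
+can be repeated against just those hosts with something like::
+
+    ansible-playbook site.yml --limit @~/.ansible-retry/site.retry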
+
.. _sudo_exe:
sudo_exe
@@ -673,6 +699,18 @@ Ask for privilege escalation password, the default is False::
become_ask_pass=True
+.. _become_allow_same_user:
+
+become_allow_same_user
+======================
+
+Most of the time, using *sudo* to run a command as the same user who is running
+*sudo* itself is unnecessary overhead, so Ansible does not allow it. However,
+depending on the *sudo* configuration, it may be necessary to run a command as
+the same user through *sudo*, such as to switch SELinux contexts. For this
+reason, you can set ``become_allow_same_user`` to ``True`` and disable this
+optimization.
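+
+For example::
+
+    become_allow_same_user=True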
+
.. _paramiko_settings:
Paramiko Specific Settings
diff --git a/docsite/rst/intro_getting_started.rst b/docsite/rst/intro_getting_started.rst
index c1cd5571e6..d6a22a8bb4 100644
--- a/docsite/rst/intro_getting_started.rst
+++ b/docsite/rst/intro_getting_started.rst
@@ -85,6 +85,12 @@ If you would like to access sudo mode, there are also flags to do that:
# as bruce, sudoing to batman
$ ansible all -m ping -u bruce --sudo --sudo-user batman
+ # With the latest version of ansible, `sudo` is deprecated, so use become
+ # as bruce, sudoing to root
+ $ ansible all -m ping -u bruce -b
+ # as bruce, sudoing to batman
+ $ ansible all -m ping -u bruce -b --become-user batman
+
(The sudo implementation is changeable in Ansible's configuration file if you happen to want to use a sudo
replacement. Flags passed to sudo (like -H) can also be set there.)
diff --git a/docsite/rst/intro_installation.rst b/docsite/rst/intro_installation.rst
index 50a2803aad..c34e668e49 100644
--- a/docsite/rst/intro_installation.rst
+++ b/docsite/rst/intro_installation.rst
@@ -49,15 +49,22 @@ Control Machine Requirements
Currently Ansible can be run from any machine with Python 2.6 or 2.7 installed (Windows isn't supported for the control machine).
This includes Red Hat, Debian, CentOS, OS X, any of the BSDs, and so on.
-
+
+.. note::
+
+ As of 2.0 ansible uses a few more file handles to manage its forks. OS X has a very low default, so if you want to use 15 or more forks
+ you'll need to raise the ulimit with something like ``sudo launchctl limit maxfiles 1024 2048``. Do the same any time you see a "Too many open files" error.
+
+
.. _managed_node_requirements:
Managed Node Requirements
`````````````````````````
-On the managed nodes, you only need Python 2.4 or later, but if you are running less than Python 2.5 on the remotes, you will also need:
+On the managed nodes, you need a way to communicate, which is normally ssh. By default this uses sftp; if that's not available, you can switch to scp in ansible.cfg.
+You also need Python 2.4 or later, but if you are running less than Python 2.5 on the remotes, you will also need:
-* ``python-simplejson``
+* ``python-simplejson``
.. note::
@@ -185,7 +192,7 @@ You can also build an RPM yourself. From the root of a checkout or tarball, use
$ git clone git://github.com/ansible/ansible.git --recursive
$ cd ./ansible
$ make rpm
- $ sudo rpm -Uvh ./rpmbuild/ansible-*.noarch.rpm
+ $ sudo rpm -Uvh ./rpm-build/ansible-*.noarch.rpm
.. _from_apt:
diff --git a/docsite/rst/intro_inventory.rst b/docsite/rst/intro_inventory.rst
index 5afffb0fe5..723311ae28 100644
--- a/docsite/rst/intro_inventory.rst
+++ b/docsite/rst/intro_inventory.rst
@@ -48,7 +48,7 @@ To make things explicit, it is suggested that you set them if things are not run
Suppose you have just static IPs and want to set up some aliases that live in your host file, or you are connecting through tunnels. You can also describe hosts like this::
- jumper ansible_ssh_port=5555 ansible_ssh_host=192.168.1.50
+ jumper ansible_port=5555 ansible_host=192.168.1.50
In the above example, trying to ansible against the host alias "jumper" (which may not even be a real hostname) will contact 192.168.1.50 on port 5555. Note that this is using a feature of the inventory file to define some special variables. Generally speaking this is not the best
way to define variables that describe your system policy, but we'll share suggestions on doing this later. We're just getting started.
@@ -69,8 +69,8 @@ You can also select the connection type and user on a per host basis::
[targets]
localhost ansible_connection=local
- other1.example.com ansible_connection=ssh ansible_ssh_user=mpdehaan
- other2.example.com ansible_connection=ssh ansible_ssh_user=mdehaan
+ other1.example.com ansible_connection=ssh ansible_user=mpdehaan
+ other2.example.com ansible_connection=ssh ansible_user=mdehaan
As mentioned above, setting these in the inventory file is only a shorthand, and we'll discuss how to store them in individual files
in the 'host_vars' directory a bit later on.
@@ -201,16 +201,21 @@ Host connection::
SSH connection::
- ansible_ssh_host
+ ansible_host
The name of the host to connect to, if different from the alias you wish to give to it.
- ansible_ssh_port
+ ansible_port
The ssh port number, if not 22
- ansible_ssh_user
+ ansible_user
The default ssh user name to use.
ansible_ssh_pass
The ssh password to use (this is insecure, we strongly recommend using --ask-pass or SSH keys)
ansible_ssh_private_key_file
Private key file used by ssh. Useful if using multiple keys and you don't want to use SSH agent.
+ ansible_ssh_args
+ This setting overrides any ``ssh_args`` configured in ``ansible.cfg``.
+ ansible_ssh_extra_args
+ Additional arguments for ssh. Useful to configure a ``ProxyCommand`` for a certain host (or group).
+ This is used in addition to any ``ssh_args`` configured in ``ansible.cfg`` or the inventory.
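+
+For example, a host entry that sets a ``ProxyCommand`` via ``ansible_ssh_extra_args`` (hypothetical gateway host)::
+
+    jumper ansible_ssh_extra_args='-o ProxyCommand="ssh -W %h:%p -q user@gateway.example.com"'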
Privilege escalation (see :doc:`Ansible Privilege Escalation<become>` for further details)::
@@ -239,7 +244,7 @@ Remote host environment parameters::
Examples from a host file::
- some_host ansible_ssh_port=2222 ansible_ssh_user=manager
+ some_host ansible_port=2222 ansible_user=manager
aws_host ansible_ssh_private_key_file=/home/example/.ssh/aws.pem
freebsd_host ansible_python_interpreter=/usr/local/bin/python
ruby_module_host ansible_ruby_interpreter=/usr/bin/ruby.1.9.3
diff --git a/docsite/rst/intro_patterns.rst b/docsite/rst/intro_patterns.rst
index 07160b2182..4e4b8ab660 100644
--- a/docsite/rst/intro_patterns.rst
+++ b/docsite/rst/intro_patterns.rst
@@ -27,7 +27,7 @@ The following patterns are equivalent and target all hosts in the inventory::
It is also possible to address a specific host or set of hosts by name::
one.example.com
- one.example.com:two.example.com
+ one.example.com, two.example.com
192.168.1.50
192.168.1.*
@@ -35,20 +35,20 @@ The following patterns address one or more groups. Groups separated by a colon
This means the host may be in either one group or the other::
webservers
- webservers:dbservers
+ webservers,dbservers
You can exclude groups as well, for instance, all machines must be in the group webservers but not in the group phoenix::
- webservers:!phoenix
+ webservers,!phoenix
You can also specify the intersection of two groups. This would mean the hosts must be in the group webservers and
the host must also be in the group staging::
- webservers:&staging
+ webservers,&staging
You can do combinations::
- webservers:dbservers:&staging:!phoenix
+ webservers,dbservers,&staging,!phoenix
The above configuration means "all machines in the groups 'webservers' and 'dbservers' are to be managed if they are in
the group 'staging' also, but the machines are not to be managed if they are in the group 'phoenix' ... whew!
@@ -56,7 +56,7 @@ the group 'staging' also, but the machines are not to be managed if they are in
You can also use variables if you want to pass some group specifiers via the "-e" argument to ansible-playbook, but this
is uncommonly used::
- webservers:!{{excluded}}:&{{required}}
+ webservers,!{{excluded}},&{{required}}
You also don't have to manage by strictly defined groups. Individual host names, IPs and groups, can also be referenced using
wildcards::
@@ -66,15 +66,22 @@ wildcards::
It's also ok to mix wildcard patterns and groups at the same time::
- one*.com:dbservers
+ one*.com,dbservers
-As an advanced usage, you can also select the numbered server in a group::
-
- webservers[0]
+You can select a host or subset of hosts from a group by their position. For example, given the following group::
-Or a range of servers in a group::
+ [webservers]
+ cobweb
+ webbing
+ weber
- webservers[0:25]
+You can refer to hosts within the group by adding a subscript to the group name::
+
+ webservers[0] # == cobweb
+ webservers[-1] # == weber
+ webservers[0:1] # == webservers[0],webservers[1]
+ # == cobweb,webbing
+ webservers[1:] # == webbing,weber
Most people don't specify patterns as regular expressions, but you can. Just start the pattern with a '~'::
diff --git a/docsite/rst/intro_windows.rst b/docsite/rst/intro_windows.rst
index 9bda3e5489..a5b7c619ef 100644
--- a/docsite/rst/intro_windows.rst
+++ b/docsite/rst/intro_windows.rst
@@ -62,14 +62,87 @@ Installing python-kerberos dependencies
Installing python-kerberos
--------------------------
-Once you've installed the necessary dependencies, the python-kerberos wrapper can be installed via pip::
+Once you've installed the necessary dependencies, the python-kerberos wrapper can be installed via pip:
.. code-block:: bash
pip install kerberos
-
+
Kerberos is installed and configured by default on OS X and many Linux distributions. If your control machine has not already done this for you, you will need to install and configure it yourself.
+Configuring Kerberos
+--------------------
+
+Edit your /etc/krb5.conf (which should be installed as a result of installing packages above) and add the following information for each domain you need to connect to:
+
+In the section that starts with
+
+.. code-block:: bash
+
+ [realms]
+
+add the full domain name and the fully qualified domain names of your primary and secondary Active Directory domain controllers. It should look something like this:
+
+.. code-block:: bash
+
+ [realms]
+
+ MY.DOMAIN.COM = {
+ kdc = domain-controller1.my.domain.com
+ kdc = domain-controller2.my.domain.com
+ }
+
+
+and in the [domain_realm] section add a line like the following for each domain you want to access:
+
+.. code-block:: bash
+
+ [domain_realm]
+ .my.domain.com = MY.DOMAIN.COM
+
+You may wish to configure other settings here, such as the default domain.
+
+Testing a kerberos connection
+-----------------------------
+
+If you have installed krb5-workstation (yum) or krb5-user (apt-get), you can use the following command to test that you can be authorised by your domain controller.
+
+.. code-block:: bash
+
+ kinit user@MY.DOMAIN.COM
+
+Note that the domain part has to be fully qualified and must be in upper case.
+
+To see what tickets, if any, you have acquired, use the ``klist`` command:
+
+.. code-block:: bash
+
+ klist
+
+
+Troubleshooting kerberos connections
+------------------------------------
+
+If you are unable to connect using Kerberos, check the following:
+
+Ensure that forward and reverse DNS lookups are working properly on your domain.
+
+To test this, ping the Windows host you want to control by name, then use the IP address returned with ``nslookup``. You should get the same name back from DNS when you run ``nslookup`` on that IP address.
+
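+A rough sketch of that round trip (the hostname and address below are placeholders):
+
+.. code-block:: bash
+
+    ping -c 1 winhost.my.domain.com    # note the IP address it reports
+    nslookup 192.0.2.10                # should resolve back to winhost.my.domain.com
+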
+If you get a different hostname back from the one you originally pinged, speak to your Active Directory administrator and get them to check that DNS Scavenging is enabled and that DNS and DHCP are updating each other.
+
+Check that your ansible controller's clock is synchronised with your domain controller. Kerberos is time sensitive, and even a little clock drift can cause tickets not to be granted.
+
+Check that you are using the real fully qualified domain name for the domain. Sometimes domains are commonly known to users by aliases. To check this, run:
+
+.. code-block:: bash
+
+ kinit -C user@MY.DOMAIN.COM
+ klist
+
+If the domain name returned by ``klist`` is different from the domain name you requested, you are requesting a ticket via an alias, and you need to update your krb5.conf so you are using the fully qualified domain name, not its alias.
+
.. _windows_inventory:
Inventory
@@ -86,14 +159,14 @@ In group_vars/windows.yml, define the following inventory variables::
# it is suggested that these be encrypted with ansible-vault:
# ansible-vault edit group_vars/windows.yml
- ansible_ssh_user: Administrator
- ansible_ssh_pass: SecretPasswordGoesHere
- ansible_ssh_port: 5986
+ ansible_user: Administrator
+ ansible_password: SecretPasswordGoesHere
+ ansible_port: 5986
ansible_connection: winrm
-Notice that the ssh_port is not actually for SSH, but this is a holdover variable name from how Ansible is mostly an SSH-oriented system. Again, Windows management will not happen over SSH.
+Although Ansible is mostly an SSH-oriented system, Windows management will not happen over SSH (`yet <http://blogs.msdn.com/b/powershell/archive/2015/06/03/looking-forward-microsoft-support-for-secure-shell-ssh.aspx>`_).
-If you have installed the ``kerberos`` module and ``ansible_ssh_user`` contains ``@`` (e.g. ``username@realm``), Ansible will first attempt Kerberos authentication. *This method uses the principal you are authenticated to Kerberos with on the control machine and not ``ansible_ssh_user``*. If that fails, either because you are not signed into Kerberos on the control machine or because the corresponding domain account on the remote host is not available, then Ansible will fall back to "plain" username/password authentication.
+If you have installed the ``kerberos`` module and ``ansible_user`` contains ``@`` (e.g. ``username@realm``), Ansible will first attempt Kerberos authentication. *This method uses the principal you are authenticated to Kerberos with on the control machine and not ``ansible_user``*. If that fails, either because you are not signed into Kerberos on the control machine or because the corresponding domain account on the remote host is not available, then Ansible will fall back to "plain" username/password authentication.
When using your playbook, don't forget to specify --ask-vault-pass to provide the password to unlock the file.
@@ -108,6 +181,14 @@ a version that is 3 or higher.
You'll run this command again later though, to make sure everything is working.
+Since 2.0, the following custom inventory variables are also supported for additional configuration of WinRM connections:
+
+* ``ansible_winrm_scheme``: Specify the connection scheme (``http`` or ``https``) to use for the WinRM connection. Ansible uses ``https`` by default unless the port is 5985.
+* ``ansible_winrm_path``: Specify an alternate path to the WinRM endpoint. Ansible uses ``/wsman`` by default.
+* ``ansible_winrm_realm``: Specify the realm to use for Kerberos authentication. If the username contains ``@``, Ansible will use the part of the username after ``@`` by default.
+* ``ansible_winrm_transport``: Specify one or more transports as a comma-separated list. By default, Ansible will use ``kerberos,plaintext`` if the ``kerberos`` module is installed and a realm is defined, otherwise ``plaintext``.
+* ``ansible_winrm_*``: Any additional keyword arguments supported by ``winrm.Protocol`` may be provided.
+
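+For example, managing a host over WinRM on HTTP port 5985 with an explicit endpoint path might use inventory variables like these (values are illustrative)::
+
+    ansible_port: 5985
+    ansible_connection: winrm
+    ansible_winrm_scheme: http
+    ansible_winrm_path: /wsman
+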
.. _windows_system_prep:
Windows System Prep
diff --git a/docsite/rst/modules_extra.rst b/docsite/rst/modules_extra.rst
index 479013bb66..f9b8ffacc7 100644
--- a/docsite/rst/modules_extra.rst
+++ b/docsite/rst/modules_extra.rst
@@ -10,7 +10,7 @@ This source for these modules is hosted on GitHub in the `ansible-modules-extras
If you believe you have found a bug in an extras module and are already running the latest stable or development version of Ansible,
first look in the `issue tracker at github.com/ansible/ansible-modules-extras <http://github.com/ansible/ansible-modules-extras>`_
-o see if a bug has already been filed. If not, we would be grateful if you would file one.
+to see if a bug has already been filed. If not, we would be grateful if you would file one.
Should you have a question rather than a bug report, inquries are welcome on the `ansible-project google group <https://groups.google.com/forum/#!forum/ansible-project>`_
or on Ansible's "#ansible" channel, located on irc.freenode.net.
diff --git a/docsite/rst/playbooks_acceleration.rst b/docsite/rst/playbooks_acceleration.rst
index 40b77246db..0126b05cb2 100644
--- a/docsite/rst/playbooks_acceleration.rst
+++ b/docsite/rst/playbooks_acceleration.rst
@@ -8,8 +8,7 @@ You Might Not Need This!
Are you running Ansible 1.5 or later? If so, you may not need accelerated mode due to a new feature called "SSH pipelining" and should read the :ref:`pipelining` section of the documentation.
-For users on 1.5 and later, accelerated mode only makes sense if you (A) are managing from an Enterprise Linux 6 or earlier host
- and still are on paramiko, or (B) can't enable TTYs with sudo as described in the pipelining docs.
+For users on 1.5 and later, accelerated mode only makes sense if you (A) are managing from an Enterprise Linux 6 or earlier host and still are on paramiko, or (B) can't enable TTYs with sudo as described in the pipelining docs.
If you can use pipelining, Ansible will reduce the number of files transferred over the wire,
making everything much more efficient, and performance will be on par with accelerated mode in nearly all cases, possibly excluding very large file transfers. Because fewer moving parts are involved, pipelining is better than accelerated mode for nearly all use cases.
diff --git a/docsite/rst/playbooks_error_handling.rst b/docsite/rst/playbooks_error_handling.rst
index fb12990b3f..7b6bba4ed0 100644
--- a/docsite/rst/playbooks_error_handling.rst
+++ b/docsite/rst/playbooks_error_handling.rst
@@ -108,7 +108,7 @@ Aborting the play
Sometimes it's desirable to abort the entire play on failure, not just skip remaining tasks for a host.
-The ``any_errors_fatal`` play option will mark all hosts as failed if any fails, causing an immediate abort.
+The ``any_errors_fatal`` play option will mark all hosts as failed if any fails, causing an immediate abort::
- hosts: somehosts
any_errors_fatal: true
diff --git a/docsite/rst/playbooks_filters.rst b/docsite/rst/playbooks_filters.rst
index 857fc770ba..4c3c170567 100644
--- a/docsite/rst/playbooks_filters.rst
+++ b/docsite/rst/playbooks_filters.rst
@@ -316,6 +316,108 @@ To get a sha256 password hash with a specific salt::
Hash types available depend on the master system running ansible;
'hash' depends on hashlib, 'password_hash' depends on crypt.
+.. _combine_filter:
+
+Combining hashes/dictionaries
+-----------------------------
+
+.. versionadded:: 2.0
+
+The `combine` filter allows hashes to be merged. For example, the
+following would override keys in one hash::
+
+ {{ {'a':1, 'b':2}|combine({'b':3}) }}
+
+The resulting hash would be::
+
+ {'a':1, 'b':3}
+
+The filter also accepts an optional `recursive=True` parameter to not
+only override keys in the first hash, but also recurse into nested
+hashes and merge their keys too::
+
+ {{ {'a':{'foo':1, 'bar':2}, 'b':2}|combine({'a':{'bar':3, 'baz':4}}, recursive=True) }}
+
+This would result in::
+
+ {'a':{'foo':1, 'bar':3, 'baz':4}, 'b':2}
+
+The filter can also take multiple arguments to merge::
+
+ {{ a|combine(b, c, d) }}
+
+In this case, keys in `d` would override those in `c`, which would
+override those in `b`, and so on.
+
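+As a tiny worked example of that precedence::
+
+    {{ {'a':1, 'b':1}|combine({'b':2}, {'b':3}) }}
+
+would result in::
+
+    {'a':1, 'b':3}
+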
+This behaviour does not depend on the value of the `hash_behaviour`
+setting in `ansible.cfg`.
+
+.. _comment_filter:
+
+Comment Filter
+--------------
+
+.. versionadded:: 2.0
+
+The `comment` filter allows you to decorate text with a chosen comment
+style. For example, the following::
+
+ {{ "Plain style (default)" | comment }}
+
+will produce this output::
+
+ #
+ # Plain style (default)
+ #
+
+In a similar way, you can apply a comment style for C (``//...``), C block
+(``/*...*/``), Erlang (``%...``) and XML (``<!--...-->``)::
+
+ {{ "C style" | comment('c') }}
+ {{ "C block style" | comment('cblock') }}
+ {{ "Erlang style" | comment('erlang') }}
+ {{ "XML style" | comment('xml') }}
+
+It is also possible to fully customize the comment style::
+
+ {{ "Custom style" | comment('plain', prefix='#######\n#', postfix='#\n#######\n ###\n #') }}
+
+That will create the following output::
+
+ #######
+ #
+ # Custom style
+ #
+ #######
+ ###
+ #
+
+The filter can also be applied to any Ansible variable. For example, to
+make the output of the ``ansible_managed`` variable more readable, we can
+change the definition in the ``ansible.cfg`` file to this::
+
+ [defaults]
+
+ ansible_managed = This file is managed by Ansible.%n
+ template: {file}
+ date: %Y-%m-%d %H:%M:%S
+ user: {uid}
+ host: {host}
+
+and then use the variable with the `comment` filter::
+
+ {{ ansible_managed | comment }}
+
+which will produce this output::
+
+ #
+ # This file is managed by Ansible.
+ #
+ # template: /home/ansible/env/dev/ansible_managed/roles/role1/templates/test.j2
+ # date: 2015-09-10 11:02:58
+ # user: ansible
+ # host: myhost
+ #
.. _other_useful_filters:
@@ -346,11 +448,11 @@ To separate the windows drive letter from the rest of a file path (new in versio
{{ path | win_splitdrive }}
-To get only the windows drive letter
+To get only the windows drive letter::
{{ path | win_splitdrive | first }}
-To get the rest of the path without the drive letter
+To get the rest of the path without the drive letter::
{{ path | win_splitdrive | last }}
@@ -413,13 +515,13 @@ To match strings against a regex, use the "match" or "search" filter::
To replace text in a string with regex, use the "regex_replace" filter::
# convert "ansible" to "able"
- {{ 'ansible' | regex_replace('^a.*i(.*)$', 'a\\1') }}
+ {{ 'ansible' | regex_replace('^a.*i(.*)$', 'a\\1') }}
# convert "foobar" to "bar"
{{ 'foobar' | regex_replace('^f.*o(.*)$', '\\1') }}
-.. note:: If "regex_replace" filter is used with variables inside YAML arguments (as opposed to simpler 'key=value' arguments),
- then you need to escape backreferences (e.g. ``\\1``) with 4 backslashes (``\\\\``) instead of 2 (``\\``).
+.. note:: Prior to ansible 2.0, if "regex_replace" filter was used with variables inside YAML arguments (as opposed to simpler 'key=value' arguments),
+ then you needed to escape backreferences (e.g. ``\\1``) with 4 backslashes (``\\\\``) instead of 2 (``\\``).
To escape special characters within a regex, use the "regex_escape" filter::
diff --git a/docsite/rst/playbooks_intro.rst b/docsite/rst/playbooks_intro.rst
index ad53cb9eb4..69eeff6236 100644
--- a/docsite/rst/playbooks_intro.rst
+++ b/docsite/rst/playbooks_intro.rst
@@ -17,7 +17,7 @@ any manual ordered process, even as different steps must bounce back and forth
between sets of machines in particular orders. They can launch tasks
synchronously or asynchronously.
-While you might run the main /usr/bin/ansible program for ad-hoc
+While you might run the main ``/usr/bin/ansible`` program for ad-hoc
tasks, playbooks are more likely to be kept in source control and used
to push out your configuration or assure the configurations of your
remote systems are in spec.
@@ -77,7 +77,7 @@ We can also break task items out over multiple lines using the YAML dictionary
types to supply module arguments. This can be helpful when working with tasks
that have really long parameters or modules that take many parameters, to keep
them well structured. Below is another version of the above example but using
-YAML dictionaries to supply the modules with their key=value arguments.::
+YAML dictionaries to supply the modules with their ``key=value`` arguments::
---
- hosts: webservers
@@ -148,9 +148,9 @@ Hosts and Users
For each play in a playbook, you get to choose which machines in your infrastructure
to target and what remote user to complete the steps (called tasks) as.
-The `hosts` line is a list of one or more groups or host patterns,
+The ``hosts`` line is a list of one or more groups or host patterns,
separated by colons, as described in the :doc:`intro_patterns`
-documentation. The `remote_user` is just the name of the user account::
+documentation. The ``remote_user`` is just the name of the user account::
---
- hosts: webservers
@@ -158,7 +158,7 @@ documentation. The `remote_user` is just the name of the user account::
.. note::
- The `remote_user` parameter was formerly called just `user`. It was renamed in Ansible 1.4 to make it more distinguishable from the `user` module (used to create users on remote systems).
+ The ``remote_user`` parameter was formerly called just ``user``. It was renamed in Ansible 1.4 to make it more distinguishable from the **user** module (used to create users on remote systems).
Remote users can also be defined per task::
@@ -172,7 +172,7 @@ Remote users can also be defined per task::
.. note::
- The `remote_user` parameter for tasks was added in 1.4.
+ The ``remote_user`` parameter for tasks was added in 1.4.
Support for running things as another user is also available (see :doc:`become`)::
@@ -212,21 +212,21 @@ You can also use other privilege escalation methods, like su::
become: yes
become_method: su
-If you need to specify a password to sudo, run `ansible-playbook` with ``--ask-become-pass`` or
-when using the old sudo syntax ``--ask-sudo-pass`` (`-K`). If you run a become playbook and the
+If you need to specify a password to sudo, run ``ansible-playbook`` with ``--ask-become-pass`` or
+when using the old sudo syntax ``--ask-sudo-pass`` (``-K``). If you run a become playbook and the
playbook seems to hang, it's probably stuck at the privilege escalation prompt.
Just `Control-C` to kill it and run it again adding the appropriate password.
.. important::
- When using `become_user` to a user other than root, the module
- arguments are briefly written into a random tempfile in /tmp.
+ When using ``become_user`` to a user other than root, the module
+ arguments are briefly written into a random tempfile in ``/tmp``.
These are deleted immediately after the command is executed. This
only occurs when changing privileges from a user like 'bob' to 'timmy',
not when going from 'bob' to 'root', or logging in directly as 'bob' or
'root'. If it concerns you that this data is briefly readable
(not writable), avoid transferring unencrypted passwords with
- `become_user` set. In other cases, '/tmp' is not used and this does
+   ``become_user`` set. In other cases, ``/tmp`` is not used and this does
not come into play. Ansible also takes care to not log password
parameters.
@@ -253,38 +253,38 @@ system to the desired state. This makes it very safe to rerun
the same playbook multiple times. They won't change things
unless they have to change things.
-The `command` and `shell` modules will typically rerun the same command again,
+The **command** and **shell** modules will typically rerun the same command again,
which is totally ok if the command is something like
-'chmod' or 'setsebool', etc. Though there is a 'creates' flag available which can
+``chmod`` or ``setsebool``, etc. Though there is a ``creates`` flag available which can
be used to make these modules also idempotent.
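+
+A quick sketch of that pattern (the script path and marker file here are hypothetical)::
+
+    tasks:
+      - name: run the one-time initialization script
+        command: /usr/local/bin/init_app creates=/etc/app/.initialized
+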
-Every task should have a `name`, which is included in the output from
+Every task should have a ``name``, which is included in the output from
running the playbook. This is output for humans, so it is
nice to have reasonably good descriptions of each task step. If the name
is not provided though, the string fed to 'action' will be used for
output.
-Tasks can be declared using the legacy "action: module options" format, but
-it is recommended that you use the more conventional "module: options" format.
+Tasks can be declared using the legacy ``action: module options`` format, but
+it is recommended that you use the more conventional ``module: options`` format.
This recommended format is used throughout the documentation, but you may
encounter the older format in some playbooks.
Here is what a basic task looks like. As with most modules,
-the service module takes key=value arguments::
+the service module takes ``key=value`` arguments::
tasks:
- name: make sure apache is running
service: name=httpd state=running
-The `command` and `shell` modules are the only modules that just take a list
-of arguments and don't use the key=value form. This makes
+The **command** and **shell** modules are the only modules that just take a list
+of arguments and don't use the ``key=value`` form. This makes
them work as simply as you would expect::
tasks:
- name: disable selinux
command: /sbin/setenforce 0
-The command and shell module care about return codes, so if you have a command
+The **command** and **shell** modules care about return codes, so if you have a command
whose successful exit code is not zero, you may wish to do this::
tasks:
@@ -308,7 +308,7 @@ a space and indent any continuation lines::
owner=root group=root mode=0644
Variables can be used in action lines. Suppose you defined
-a variable called 'vhost' in the 'vars' section, you could do this::
+a variable called ``vhost`` in the ``vars`` section, you could do this::
tasks:
- name: create a virtual host file for {{ vhost }}
@@ -317,7 +317,7 @@ a variable called 'vhost' in the 'vars' section, you could do this::
Those same variables are usable in templates, which we'll get to later.
Now in a very basic playbook all the tasks will be listed directly in that play, though it will usually
-make more sense to break up tasks using the 'include:' directive. We'll show that a bit later.
+make more sense to break up tasks using the ``include:`` directive. We'll show that a bit later.
.. _action_shorthand:
@@ -361,7 +361,7 @@ change, but only if the file changes::
- restart memcached
- restart apache
-The things listed in the 'notify' section of a task are called
+The things listed in the ``notify`` section of a task are called
handlers.
Handlers are lists of tasks, not really any different from regular
@@ -388,7 +388,7 @@ won't need them for much else.
`* <https://github.com/ansible/ansible/issues/4943>`_
Roles are described later on. It's worthwhile to point out that handlers are
-automatically processed between 'pre_tasks', 'roles', 'tasks', and 'post_tasks'
+automatically processed between ``pre_tasks``, ``roles``, ``tasks``, and ``post_tasks``
sections. If you ever want to flush all the handler commands immediately though,
in 1.2 and later, you can::
@@ -397,7 +397,7 @@ in 1.2 and later, you can::
- meta: flush_handlers
- shell: some other tasks
-In the above example any queued up handlers would be processed early when the 'meta'
+In the above example any queued up handlers would be processed early when the ``meta``
statement was reached. This is a bit of a niche case but can come in handy from
time to time.
@@ -419,14 +419,14 @@ Ansible-Pull
Should you want to invert the architecture of Ansible, so that nodes check in to a central location, instead
of pushing configuration out to them, you can.
-Ansible-pull is a small script that will checkout a repo of configuration instructions from git, and then
-run ansible-playbook against that content.
+``ansible-pull`` is a small script that will check out a repo of configuration instructions from git, and then
+run ``ansible-playbook`` against that content.
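+
+A minimal invocation might look like this (the repository URL and playbook name are placeholders)::
+
+    ansible-pull -U https://github.com/example/ansible-config.git local.yml
+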
-Assuming you load balance your checkout location, ansible-pull scales essentially infinitely.
+Assuming you load balance your checkout location, ``ansible-pull`` scales essentially infinitely.
Run ``ansible-pull --help`` for details.
-There's also a `clever playbook <https://github.com/ansible/ansible-examples/blob/master/language_features/ansible_pull.yml>`_ available to configure ansible-pull via a crontab from push mode.
+There's also a `clever playbook <https://github.com/ansible/ansible-examples/blob/master/language_features/ansible_pull.yml>`_ available to configure ``ansible-pull`` via a crontab from push mode.
.. _tips_and_tricks:
diff --git a/docsite/rst/playbooks_lookups.rst b/docsite/rst/playbooks_lookups.rst
index 237a43e254..877ec4702f 100644
--- a/docsite/rst/playbooks_lookups.rst
+++ b/docsite/rst/playbooks_lookups.rst
@@ -100,7 +100,7 @@ The CSV File Lookup
The ``csvfile`` lookup reads the contents of a file in CSV (comma-separated value)
format. The lookup looks for the row where the first column matches ``keyname``, and
-returns the value in the first column, unless a different column is specified.
+returns the value in the second column, unless a different column is specified.
The example below shows the contents of a CSV file named elements.csv with information about the
periodic table of elements::
@@ -176,7 +176,7 @@ Here's a simple properties we'll take as an example::
You can retrieve the ``user.name`` field with the following lookup::
- - debug: msg="user.name is {{ lookup('ini', 'user.name type=property file=user.properties') }}"
+ - debug: msg="user.name is {{ lookup('ini', 'user.name type=properties file=user.properties') }}"
The ``ini`` lookup supports several arguments like the csv plugin. The format for passing
arguments is::
diff --git a/docsite/rst/playbooks_variables.rst b/docsite/rst/playbooks_variables.rst
index c47dfe0fc7..39bd9c335e 100644
--- a/docsite/rst/playbooks_variables.rst
+++ b/docsite/rst/playbooks_variables.rst
@@ -16,8 +16,8 @@ You might have some templates for configuration files that are mostly the same,
Variables in Ansible are how we deal with differences between systems.
To understand variables you'll also want to dig into :doc:`playbooks_conditionals` and :doc:`playbooks_loops`.
-Useful things like the "group_by" module
-and the "when" conditional can also be used with variables, and to help manage differences between systems.
+Useful things like the **group_by** module
+and the ``when`` conditional can also be used with variables to help manage differences between systems.
It's highly recommended that you consult the ansible-examples github repository to see a lot of examples of variables put to use.
@@ -30,9 +30,9 @@ Before we start using variables it's important to know what are valid variable n
Variable names should be letters, numbers, and underscores. Variables should always start with a letter.
-"foo_port" is a great variable. "foo5" is fine too.
+``foo_port`` is a great variable. ``foo5`` is fine too.
-"foo-port", "foo port", "foo.port" and "12" are not valid variable names.
+``foo-port``, ``foo port``, ``foo.port`` and ``12`` are not valid variable names.
Easy enough, let's move on.
@@ -121,7 +121,7 @@ for a list of available filters and example usage guide.
Hey Wait, A YAML Gotcha
```````````````````````
-YAML syntax requires that if you start a value with {{ foo }} you quote the whole line, since it wants to be
+YAML syntax requires that if you start a value with ``{{ foo }}`` you quote the whole line, since it wants to be
sure you aren't trying to start a YAML dictionary. This is covered on the :doc:`YAMLSyntax` page.
This won't work::
@@ -396,7 +396,7 @@ and the unqualified hostname shows the string before the first period(.)::
Facts are frequently used in conditionals (see :doc:`playbooks_conditionals`) and also in templates.
-Facts can be also used to create dynamic groups of hosts that match particular criteria, see the :doc:`modules` documentation on 'group_by' for details, as well as in generalized conditional statements as discussed in the :doc:`playbooks_conditionals` chapter.
+Facts can also be used to create dynamic groups of hosts that match particular criteria; see the :doc:`modules` documentation on **group_by** for details, as well as in generalized conditional statements as discussed in the :doc:`playbooks_conditionals` chapter.
.. _disabling_facts:
@@ -419,7 +419,7 @@ Local Facts (Facts.d)
As discussed in the playbooks chapter, Ansible facts are a way of getting data about remote systems for use in playbook variables.
-Usually these are discovered automatically by the 'setup' module in Ansible. Users can also write custom facts modules, as described
+Usually these are discovered automatically by the **setup** module in Ansible. Users can also write custom facts modules, as described
in the API guide. However, what if you want to have a simple way to provide system or user
provided data for use in Ansible variables, without writing a fact module?
@@ -427,16 +427,16 @@ For instance, what if you want users to be able to control some aspect about how
.. note:: Perhaps "local facts" is a bit of a misnomer, it means "locally supplied user values" as opposed to "centrally supplied user values", or what facts are -- "locally dynamically determined values".
-If a remotely managed system has an "/etc/ansible/facts.d" directory, any files in this directory
-ending in ".fact", can be JSON, INI, or executable files returning JSON, and these can supply local facts in Ansible.
+If a remotely managed system has an ``/etc/ansible/facts.d`` directory, any files in this directory
+ending in ``.fact`` can be JSON, INI, or executable files returning JSON, and these can supply local facts in Ansible.
-For instance assume a /etc/ansible/facts.d/preferences.fact::
+For instance assume a ``/etc/ansible/facts.d/preferences.fact``::
[general]
asdf=1
bar=2
-This will produce a hash variable fact named "general" with 'asdf' and 'bar' as members.
+This will produce a hash variable fact named ``general`` with ``asdf`` and ``bar`` as members.
To validate this, run the following::
ansible <hostname> -m setup -a "filter=ansible_local"
@@ -452,7 +452,7 @@ And you will see the following fact added::
}
}
-And this data can be accessed in a template/playbook as::
+And this data can be accessed in a template or playbook as::
{{ ansible_local.preferences.general.asdf }}
@@ -498,11 +498,11 @@ not be necessary to "hit" all servers to reference variables and information abo
With fact caching enabled, it is possible for a machine in one group to reference variables about machines in another group, despite
the fact that they have not been communicated with in the current execution of /usr/bin/ansible-playbook.
-To benefit from cached facts, you will want to change the 'gathering' setting to 'smart' or 'explicit' or set 'gather_facts' to False in most plays.
+To benefit from cached facts, you will want to change the ``gathering`` setting to ``smart`` or ``explicit`` or set ``gather_facts`` to ``False`` in most plays.
Currently, Ansible ships with two persistent cache plugins: redis and jsonfile.
-To configure fact caching using redis, enable it in ansible.cfg as follows::
+To configure fact caching using redis, enable it in ``ansible.cfg`` as follows::
[defaults]
gathering = smart
@@ -520,7 +520,7 @@ Note that the Python redis library should be installed from pip, the version pac
In current embodiments, this feature is in beta-level state and the Redis plugin does not support port or password configuration; this is expected to change in the near future.
-To configure fact caching using jsonfile, enable it in ansible.cfg as follows::
+To configure fact caching using jsonfile, enable it in ``ansible.cfg`` as follows::
[defaults]
gathering = smart
@@ -529,7 +529,7 @@ To configure fact caching using jsonfile, enable it in ansible.cfg as follows::
fact_caching_timeout = 86400
# seconds
-`fact_caching_connection` is a local filesystem path to a writeable
+``fact_caching_connection`` is a local filesystem path to a writeable
directory (ansible will attempt to create the directory if one does not exist).
.. _registered_variables:
@@ -537,7 +537,7 @@ directory (ansible will attempt to create the directory if one does not exist).
Registered Variables
````````````````````
-Another major use of variables is running a command and using the result of that command to save the result into a variable. Results will vary from module to module. Use of -v when executing playbooks will show possible values for the results.
+Another major use of variables is running a command and saving its result into a variable. Results will vary from module to module. Use of ``-v`` when executing playbooks will show possible values for the results.
The value of a task being executed in ansible can be saved in a variable and used later. See some examples of this in the
:doc:`playbooks_conditionals` chapter.
@@ -558,6 +558,8 @@ While it's mentioned elsewhere in that document too, here's a quick syntax examp
Registered variables are valid on the host for the remainder of the playbook run, which is the same as the lifetime of "facts"
in Ansible. Effectively registered variables are just like facts.
+.. note:: If a task fails or is skipped, the variable is still registered with a failure or skipped status; the only way to avoid registering a variable is by using tags.
+
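+As an illustrative sketch, a failed command can still be registered and inspected later::
+
+    - command: /usr/bin/might-fail
+      register: result
+      ignore_errors: True
+
+    - debug: msg="the command failed"
+      when: result|failed
+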
.. _accessing_complex_variable_data:
Accessing Complex Variable Data
@@ -566,7 +568,7 @@ Accessing Complex Variable Data
We already talked about facts a little higher up in the documentation.
Some provided facts, like networking information, are made available as nested data structures. To access
-them a simple {{ foo }} is not sufficient, but it is still easy to do. Here's how we get an IP address::
+them a simple ``{{ foo }}`` is not sufficient, but it is still easy to do. Here's how we get an IP address::
{{ ansible_eth0["ipv4"]["address"] }}
@@ -584,10 +586,10 @@ Magic Variables, and How To Access Information About Other Hosts
````````````````````````````````````````````````````````````````
Even if you didn't define them yourself, Ansible provides a few variables for you automatically.
-The most important of these are 'hostvars', 'group_names', and 'groups'. Users should not use
-these names themselves as they are reserved. 'environment' is also reserved.
+The most important of these are ``hostvars``, ``group_names``, and ``groups``. Users should not use
+these names themselves as they are reserved. ``environment`` is also reserved.
-Hostvars lets you ask about the variables of another host, including facts that have been gathered
+``hostvars`` lets you ask about the variables of another host, including facts that have been gathered
about that host. If, at this point, you haven't talked to that host yet in any play in the playbook
or set of playbooks, you can get at the variables, but you will not be able to see the facts.
@@ -596,13 +598,13 @@ assigned to another node, it's easy to do so within a template or even an action
{{ hostvars['test.example.com']['ansible_distribution'] }}
-Additionally, *group_names* is a list (array) of all the groups the current host is in. This can be used in templates using Jinja2 syntax to make template source files that vary based on the group membership (or role) of the host::
+Additionally, ``group_names`` is a list (array) of all the groups the current host is in. This can be used in templates using Jinja2 syntax to make template source files that vary based on the group membership (or role) of the host::
{% if 'webserver' in group_names %}
# some part of a configuration file that only applies to webservers
{% endif %}
-*groups* is a list of all the groups (and hosts) in the inventory. This can be used to enumerate all hosts within a group.
+``groups`` is a list of all the groups (and hosts) in the inventory. This can be used to enumerate all hosts within a group.
For example::
{% for host in groups['app_servers'] %}
@@ -618,20 +620,20 @@ A frequently used idiom is walking a group to find all IP addresses in that grou
An example of this could include pointing a frontend proxy server to all of the app servers, setting up the correct firewall rules between servers, etc.
You need to make sure that the facts of those hosts have been populated before though, for example by running a play against them if the facts have not been cached recently (fact caching was added in Ansible 1.8).
-Additionally, *inventory_hostname* is the name of the hostname as configured in Ansible's inventory host file. This can
-be useful for when you don't want to rely on the discovered hostname `ansible_hostname` or for other mysterious
-reasons. If you have a long FQDN, *inventory_hostname_short* also contains the part up to the first
+Additionally, ``inventory_hostname`` is the hostname as configured in Ansible's inventory host file. This can
+be useful for when you don't want to rely on the discovered hostname ``ansible_hostname`` or for other mysterious
+reasons. If you have a long FQDN, ``inventory_hostname_short`` also contains the part up to the first
period, without the rest of the domain.
-*play_hosts* is available as a list of hostnames that are in scope for the current play. This may be useful for filling out templates with multiple hostnames or for injecting the list into the rules for a load balancer.
+``play_hosts`` is available as a list of hostnames that are in scope for the current play. This may be useful for filling out templates with multiple hostnames or for injecting the list into the rules for a load balancer.
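+
+For example, an illustrative template fragment that emits one line per host in the current play::
+
+    {% for host in play_hosts %}
+    server {{ host }};
+    {% endfor %}
+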
-*delegate_to* is the inventory hostname of the host that the current task has been delegated to using 'delegate_to'.
+``delegate_to`` is the inventory hostname of the host that the current task has been delegated to using the ``delegate_to`` keyword.
Don't worry about any of this unless you think you need it. You'll know when you do.
-Also available, *inventory_dir* is the pathname of the directory holding Ansible's inventory host file, *inventory_file* is the pathname and the filename pointing to the Ansible's inventory host file.
+Also available, ``inventory_dir`` is the pathname of the directory holding Ansible's inventory host file, and ``inventory_file`` is the pathname and filename of the inventory host file itself.
-And finally, *role_path* will return the current role's pathname (since 1.8). This will only work inside a role.
+And finally, ``role_path`` will return the current role's pathname (since 1.8). This will only work inside a role.
.. _variable_file_separation_details:
@@ -679,7 +681,7 @@ The contents of each variables file is a simple YAML dictionary, like this::
Passing Variables On The Command Line
`````````````````````````````````````
-In addition to `vars_prompt` and `vars_files`, it is possible to send variables over
+In addition to ``vars_prompt`` and ``vars_files``, it is possible to send variables over
the Ansible command line. This is particularly useful when writing a generic release playbook
where you may want to pass in the version of the application to deploy::
@@ -703,9 +705,9 @@ As of Ansible 1.2, you can also pass in extra vars as quoted JSON, like so::
--extra-vars '{"pacman":"mrs","ghosts":["inky","pinky","clyde","sue"]}'
-The key=value form is obviously simpler, but it's there if you need it!
+The ``key=value`` form is obviously simpler, but it's there if you need it!
-As of Ansible 1.3, extra vars can be loaded from a JSON file with the "@" syntax::
+As of Ansible 1.3, extra vars can be loaded from a JSON file with the ``@`` syntax::
--extra-vars "@some_file.json"
@@ -731,8 +733,8 @@ a use for it.
If multiple variables of the same name are defined in different places, they win in a certain order, which is::
- * extra vars (-e in the command line) always win
- * then comes connection variables defined in inventory (ansible_ssh_user, etc)
+ * extra vars (``-e`` in the command line) always win
+ * then comes connection variables defined in inventory (``ansible_user``, etc)
* then comes "most everything else" (command line switches, vars in play, included vars, role vars, etc)
* then comes the rest of the variables defined in inventory
* then comes facts discovered about a system
@@ -745,7 +747,7 @@ control you might want over values.
First off, group variables are super powerful.
-Site wide defaults should be defined as a 'group_vars/all' setting. Group variables are generally placed alongside
+Site wide defaults should be defined as a ``group_vars/all`` setting. Group variables are generally placed alongside
your inventory file. They can also be returned by a dynamic inventory script (see :doc:`intro_dynamic_inventory`) or defined
in things like :doc:`tower` from the UI or API::
@@ -754,7 +756,7 @@ in things like :doc:`tower` from the UI or API::
# this is the site wide default
ntp_server: default-time.example.com
-Regional information might be defined in a 'group_vars/region' variable. If this group is a child of the 'all' group (which it is, because all groups are), it will override the group that is higher up and more general::
+Regional information might be defined in a ``group_vars/region`` variable. If this group is a child of the ``all`` group (which it is, because all groups are), it will override the group that is higher up and more general::
---
# file: /etc/ansible/group_vars/boston
@@ -775,7 +777,7 @@ Next up: learning about role variable precedence.
We'll pretty much assume you are using roles at this point. You should be using roles for sure. Roles are great. You are using
roles aren't you? Hint hint.
-Ok, so if you are writing a redistributable role with reasonable defaults, put those in the 'roles/x/defaults/main.yml' file. This means
+Ok, so if you are writing a redistributable role with reasonable defaults, put those in the ``roles/x/defaults/main.yml`` file. This means
the role will bring along a default value but ANYTHING in Ansible will override it. It's just a default. That's why it says "defaults" :)
See :doc:`playbooks_roles` for more info about this::
@@ -784,8 +786,8 @@ See :doc:`playbooks_roles` for more info about this::
# if not overridden in inventory or as a parameter, this is the value that will be used
http_port: 80
-if you are writing a role and want to ensure the value in the role is absolutely used in that role, and is not going to be overridden
-by inventory, you should put it in roles/x/vars/main.yml like so, and inventory values cannot override it. -e however, still will::
+If you are writing a role and want to ensure the value in the role is absolutely used in that role, and is not going to be overridden
+by inventory, you should put it in ``roles/x/vars/main.yml`` like so, and inventory values cannot override it. ``-e``, however, still will::
---
# file: roles/x/vars/main.yml
@@ -823,7 +825,7 @@ So that's a bit about roles.
There are a few bonus things that go on with roles.
-Generally speaking, variables set in one role are available to others. This means if you have a "roles/common/vars/main.yml" you
+Generally speaking, variables set in one role are available to others. This means if you have a ``roles/common/vars/main.yml`` you
can set variables in there and make use of them in other roles and elsewhere in your playbook::
roles:
@@ -837,7 +839,7 @@ can set variables in there and make use of them in other roles and elsewhere in
So, that's precedence, explained in a more direct way. Don't worry about precedence, just think about if your role is defining a
variable that is a default, or a "live" variable you definitely want to use. Inventory lies in precedence right in the middle, and
-if you want to forcibly override something, use -e.
+if you want to forcibly override something, use ``-e``.
If you found that a little hard to understand, take a look at the `ansible-examples`_ repo on our github for a bit more about
how all of these things can work together.
diff --git a/examples/ansible.cfg b/examples/ansible.cfg
index 4ab8cca172..e79fa4ee84 100644
--- a/examples/ansible.cfg
+++ b/examples/ansible.cfg
@@ -125,12 +125,13 @@ ansible_managed = Ansible managed: {file} on {host}
# set plugin path directories here, separate with colons
-action_plugins = /usr/share/ansible_plugins/action_plugins
-callback_plugins = /usr/share/ansible_plugins/callback_plugins
-connection_plugins = /usr/share/ansible_plugins/connection_plugins
-lookup_plugins = /usr/share/ansible_plugins/lookup_plugins
-vars_plugins = /usr/share/ansible_plugins/vars_plugins
-filter_plugins = /usr/share/ansible_plugins/filter_plugins
+#action_plugins = /usr/share/ansible/plugins/action
+#callback_plugins = /usr/share/ansible/plugins/callback
+#connection_plugins = /usr/share/ansible/plugins/connection
+#lookup_plugins = /usr/share/ansible/plugins/lookup
+#vars_plugins = /usr/share/ansible/plugins/vars
+#filter_plugins = /usr/share/ansible/plugins/filter
+#test_plugins = /usr/share/ansible/plugins/test
# by default callbacks are not loaded for /bin/ansible, enable this if you
# want, for example, a notification or logging callback to also apply to
@@ -169,6 +170,10 @@ fact_caching = memory
# retry files
+# When a playbook fails by default a .retry file will be created in ~/
+# You can disable this feature by setting retry_files_enabled to False
+# and you can change the location of the files by setting retry_files_save_path
+
#retry_files_enabled = False
#retry_files_save_path = ~/.ansible-retry
diff --git a/examples/scripts/ConfigureRemotingForAnsible.ps1 b/examples/scripts/ConfigureRemotingForAnsible.ps1
index 1b45ce442b..a67ea8afb2 100644
--- a/examples/scripts/ConfigureRemotingForAnsible.ps1
+++ b/examples/scripts/ConfigureRemotingForAnsible.ps1
@@ -105,31 +105,6 @@ Else
Write-Verbose "PS Remoting is already enabled."
}
-
-# Test a remoting connection to localhost, which should work.
-$httpResult = Invoke-Command -ComputerName "localhost" -ScriptBlock {$env:COMPUTERNAME} -ErrorVariable httpError -ErrorAction SilentlyContinue
-$httpsOptions = New-PSSessionOption -SkipCACheck -SkipCNCheck -SkipRevocationCheck
-
-$httpsResult = New-PSSession -UseSSL -ComputerName "localhost" -SessionOption $httpsOptions -ErrorVariable httpsError -ErrorAction SilentlyContinue
-
-If ($httpResult -and $httpsResult)
-{
- Write-Verbose "HTTP and HTTPS sessions are enabled."
-}
-ElseIf ($httpsResult -and !$httpResult)
-{
- Write-Verbose "HTTP sessions are disabled, HTTPS session are enabled."
-}
-ElseIf ($httpResult -and !$httpsResult)
-{
- Write-Verbose "HTTPS sessions are disabled, HTTP session are enabled."
-}
-Else
-{
- Throw "Unable to establish an HTTP or HTTPS remoting session."
-}
-
-
# Make sure there is a SSL listener.
$listeners = Get-ChildItem WSMan:\localhost\Listener
If (!($listeners | Where {$_.Keys -like "TRANSPORT=HTTPS"}))
@@ -194,5 +169,27 @@ Else
Write-Verbose "Firewall rule already exists to allow WinRM HTTPS."
}
+# Test a remoting connection to localhost, which should work.
+$httpResult = Invoke-Command -ComputerName "localhost" -ScriptBlock {$env:COMPUTERNAME} -ErrorVariable httpError -ErrorAction SilentlyContinue
+$httpsOptions = New-PSSessionOption -SkipCACheck -SkipCNCheck -SkipRevocationCheck
+
+$httpsResult = New-PSSession -UseSSL -ComputerName "localhost" -SessionOption $httpsOptions -ErrorVariable httpsError -ErrorAction SilentlyContinue
+
+If ($httpResult -and $httpsResult)
+{
+ Write-Verbose "HTTP and HTTPS sessions are enabled."
+}
+ElseIf ($httpsResult -and !$httpResult)
+{
+ Write-Verbose "HTTP sessions are disabled, HTTPS session are enabled."
+}
+ElseIf ($httpResult -and !$httpsResult)
+{
+ Write-Verbose "HTTPS sessions are disabled, HTTP session are enabled."
+}
+Else
+{
+ Throw "Unable to establish an HTTP or HTTPS remoting session."
+}
Write-Verbose "PS Remoting has been successfully configured for Ansible."
diff --git a/examples/scripts/yaml_to_ini.py b/examples/scripts/yaml_to_ini.py
index 09b9b5ec82..981176b2f2 100755
--- a/examples/scripts/yaml_to_ini.py
+++ b/examples/scripts/yaml_to_ini.py
@@ -23,6 +23,7 @@ from ansible import utils
import os
import yaml
import sys
+from six import iteritems
class InventoryParserYaml(object):
''' Host inventory parser for ansible '''
@@ -176,7 +177,7 @@ if __name__ == "__main__":
groupfh.write(yaml.dump(record.get_variables()))
groupfh.close()
- for (host_name, host_record) in yamlp._hosts.iteritems():
+ for (host_name, host_record) in iteritems(yamlp._hosts):
hostfiledir = os.path.join(dirname, "host_vars")
if not os.path.exists(hostfiledir):
print "* creating: %s" % hostfiledir
diff --git a/hacking/env-setup b/hacking/env-setup
index 8ba483279b..0baf03f533 100644
--- a/hacking/env-setup
+++ b/hacking/env-setup
@@ -25,7 +25,7 @@ fi
# The below is an alternative to readlink -fn which doesn't exist on OS X
# Source: http://stackoverflow.com/a/1678636
FULL_PATH=$(python -c "import os; print(os.path.realpath('$HACKING_DIR'))")
-export ANSIBLE_HOME=$(dirname "$FULL_PATH")
+export ANSIBLE_HOME="$(dirname "$FULL_PATH")"
PREFIX_PYTHONPATH="$ANSIBLE_HOME/lib"
PREFIX_PATH="$ANSIBLE_HOME/bin"
@@ -57,8 +57,10 @@ fi
cd "$ANSIBLE_HOME"
if [ "$verbosity" = silent ] ; then
gen_egg_info > /dev/null 2>&1
+ find . -type f -name "*.pyc" -delete > /dev/null 2>&1
else
gen_egg_info
+ find . -type f -name "*.pyc" -delete
fi
cd "$current_dir"
)
diff --git a/hacking/env-setup.fish b/hacking/env-setup.fish
index 1b872f4dc0..d0bc717905 100644
--- a/hacking/env-setup.fish
+++ b/hacking/env-setup.fish
@@ -43,6 +43,7 @@ if test -e $PREFIX_PYTHONPATH/ansible*.egg-info
rm -r $PREFIX_PYTHONPATH/ansible*.egg-info
end
mv ansible*egg-info $PREFIX_PYTHONPATH
+find . -type f -name "*.pyc" -delete
popd
diff --git a/hacking/get_library.py b/hacking/get_library.py
index 571183b688..bdb96f680f 100755
--- a/hacking/get_library.py
+++ b/hacking/get_library.py
@@ -22,7 +22,7 @@ import ansible.constants as C
import sys
def main():
- print C.DEFAULT_MODULE_PATH
+ print(C.DEFAULT_MODULE_PATH)
return 0
if __name__ == '__main__':
diff --git a/hacking/module_formatter.py b/hacking/module_formatter.py
index 30b8d6a103..00c9f57790 100755
--- a/hacking/module_formatter.py
+++ b/hacking/module_formatter.py
@@ -18,6 +18,7 @@
# along with Ansible. If not, see <http://www.gnu.org/licenses/>.
#
+from __future__ import print_function
import os
import glob
import sys
@@ -33,6 +34,7 @@ import subprocess
import cgi
import warnings
from jinja2 import Environment, FileSystemLoader
+from six import iteritems
from ansible.utils import module_docs
from ansible.utils.vars import merge_hash
@@ -120,7 +122,7 @@ def write_data(text, options, outputname, module):
f.write(text.encode('utf-8'))
f.close()
else:
- print text
+ print(text)
#####################################################################################
@@ -249,7 +251,7 @@ def process_module(module, options, env, template, outputname, module_map, alias
deprecated = True
module = module.replace("_","",1)
- print "rendering: %s" % module
+ print("rendering: %s" % module)
# use ansible core library to parse out doc metadata YAML and plaintext examples
doc, examples, returndocs = module_docs.get_docstring(fname, verbose=options.verbose)
@@ -291,7 +293,7 @@ def process_module(module, options, env, template, outputname, module_map, alias
del doc['version_added']
if 'options' in doc and doc['options']:
- for (k,v) in doc['options'].iteritems():
+ for (k,v) in iteritems(doc['options']):
# don't show version added information if it's too old to be called out
if 'version_added' in doc['options'][k] and too_old(doc['options'][k]['version_added']):
del doc['options'][k]['version_added']
@@ -345,7 +347,7 @@ def process_category(category, categories, options, env, template, outputname):
category_file_path = os.path.join(options.output_dir, "list_of_%s_modules.rst" % category)
category_file = open(category_file_path, "w")
- print "*** recording category %s in %s ***" % (category, category_file_path)
+ print("*** recording category %s in %s ***" % (category, category_file_path))
# start a new category file
@@ -419,13 +421,13 @@ def validate_options(options):
''' validate option parser options '''
if not options.module_dir:
- print >>sys.stderr, "--module-dir is required"
+ print("--module-dir is required", file=sys.stderr)
sys.exit(1)
if not os.path.exists(options.module_dir):
- print >>sys.stderr, "--module-dir does not exist: %s" % options.module_dir
+ print("--module-dir does not exist: %s" % options.module_dir, file=sys.stderr)
sys.exit(1)
if not options.template_dir:
- print "--template-dir must be specified"
+ print("--template-dir must be specified")
sys.exit(1)
#####################################################################################
diff --git a/hacking/test-module b/hacking/test-module
index bdb91d0d5b..543e803b97 100755
--- a/hacking/test-module
+++ b/hacking/test-module
@@ -119,7 +119,7 @@ def boilerplate_module(modfile, args, interpreter, check, destfile):
task_vars = {}
if interpreter:
if '=' not in interpreter:
- print 'interpreter must by in the form of ansible_python_interpreter=/usr/bin/python'
+            print("interpreter must be in the form of ansible_python_interpreter=/usr/bin/python")
sys.exit(1)
interpreter_type, interpreter_path = interpreter.split('=')
if not interpreter_type.startswith('ansible_'):
@@ -138,8 +138,8 @@ def boilerplate_module(modfile, args, interpreter, check, destfile):
)
modfile2_path = os.path.expanduser(destfile)
- print "* including generated source, if any, saving to: %s" % modfile2_path
- print "* this may offset any line numbers in tracebacks/debuggers!"
+ print("* including generated source, if any, saving to: %s" % modfile2_path)
+ print("* this may offset any line numbers in tracebacks/debuggers!")
modfile2 = open(modfile2_path, 'w')
modfile2.write(module_data)
modfile2.close()
@@ -153,28 +153,28 @@ def runtest( modfile, argspath):
os.system("chmod +x %s" % modfile)
invoke = "%s" % (modfile)
- if argspath is not None:
+ if argspath is not None:
invoke = "%s %s" % (modfile, argspath)
cmd = subprocess.Popen(invoke, shell=True, stdout=subprocess.PIPE, stderr=subprocess.PIPE)
(out, err) = cmd.communicate()
try:
- print "***********************************"
- print "RAW OUTPUT"
- print out
- print err
+ print("***********************************")
+ print("RAW OUTPUT")
+ print(out)
+ print(err)
results = json.loads(out)
except:
- print "***********************************"
- print "INVALID OUTPUT FORMAT"
- print out
+ print("***********************************")
+ print("INVALID OUTPUT FORMAT")
+ print(out)
traceback.print_exc()
sys.exit(1)
- print "***********************************"
- print "PARSED OUTPUT"
- print jsonify(results,format=True)
+ print("***********************************")
+ print("PARSED OUTPUT")
+ print(jsonify(results,format=True))
def rundebug(debugger, modfile, argspath):
"""Run interactively with console debugger."""
@@ -184,7 +184,7 @@ def rundebug(debugger, modfile, argspath):
else:
subprocess.call("%s %s" % (debugger, modfile), shell=True)
-def main():
+def main():
options, args = parse()
(modfile, module_style) = boilerplate_module(options.module_path, options.module_args, options.interpreter, options.check, options.filename)
@@ -202,7 +202,7 @@ def main():
rundebug(options.debugger, modfile, argspath)
else:
runtest(modfile, argspath)
-
+
if __name__ == "__main__":
main()
diff --git a/lib/ansible/cli/__init__.py b/lib/ansible/cli/__init__.py
index 9c9fa82458..8906f8134f 100644
--- a/lib/ansible/cli/__init__.py
+++ b/lib/ansible/cli/__init__.py
@@ -34,7 +34,6 @@ from ansible import constants as C
from ansible.errors import AnsibleError, AnsibleOptionsError
from ansible.utils.unicode import to_bytes
from ansible.utils.display import Display
-from ansible.utils.path import is_executable
class SortedOptParser(optparse.OptionParser):
'''Optparser which sorts the options by opt before outputting --help'''
@@ -260,8 +259,10 @@ class CLI(object):
dest='vault_password_file', help="vault password file", action="callback",
callback=CLI.expand_tilde, type=str)
parser.add_option('--new-vault-password-file',
- dest='new_vault_password_file', help="new vault password file for rekey", action="callback",
- callback=CLI.expand_tilde, type=str)
+ dest='new_vault_password_file', help="new vault password file for rekey", action="callback",
+ callback=CLI.expand_tilde, type=str)
+ parser.add_option('--output', default=None, dest='output_file',
+ help='output file name for encrypt or decrypt; use - for stdout')
if subset_opts:
@@ -303,7 +304,7 @@ class CLI(object):
if connect_opts:
- parser.add_option('-k', '--ask-pass', default=False, dest='ask_pass', action='store_true',
+ parser.add_option('-k', '--ask-pass', default=C.DEFAULT_ASK_PASS, dest='ask_pass', action='store_true',
help='ask for connection password')
parser.add_option('--private-key','--key-file', default=C.DEFAULT_PRIVATE_KEY_FILE, dest='private_key_file',
help='use this file to authenticate the connection')
@@ -313,6 +314,8 @@ class CLI(object):
help="connection type to use (default=%s)" % C.DEFAULT_TRANSPORT)
parser.add_option('-T', '--timeout', default=C.DEFAULT_TIMEOUT, type='int', dest='timeout',
help="override the connection timeout in seconds (default=%s)" % C.DEFAULT_TIMEOUT)
+ parser.add_option('--ssh-extra-args', default='', dest='ssh_extra_args',
+ help="specify extra arguments to pass to ssh (e.g. ProxyCommand)")
if async_opts:
parser.add_option('-P', '--poll', default=C.DEFAULT_POLL_INTERVAL, type='int', dest='poll_interval',
@@ -475,7 +478,7 @@ class CLI(object):
return t
@staticmethod
- def read_vault_password_file(vault_password_file):
+ def read_vault_password_file(vault_password_file, loader):
"""
Read a vault password from a file or if executable, execute the script and
retrieve password from STDOUT
@@ -485,7 +488,7 @@ class CLI(object):
if not os.path.exists(this_path):
raise AnsibleError("The vault password file %s was not found" % this_path)
- if is_executable(this_path):
+ if loader.is_executable(this_path):
try:
# STDERR not captured to make it easier for users to prompt for input in their scripts
p = subprocess.Popen(this_path, stdout=subprocess.PIPE)
diff --git a/lib/ansible/cli/adhoc.py b/lib/ansible/cli/adhoc.py
index 80318eadf2..9b6957cfbb 100644
--- a/lib/ansible/cli/adhoc.py
+++ b/lib/ansible/cli/adhoc.py
@@ -95,13 +95,16 @@ class AdHocCLI(CLI):
(sshpass, becomepass) = self.ask_passwords()
passwords = { 'conn_pass': sshpass, 'become_pass': becomepass }
+ loader = DataLoader()
+
if self.options.vault_password_file:
# read vault_pass from a file
- vault_pass = CLI.read_vault_password_file(self.options.vault_password_file)
+ vault_pass = CLI.read_vault_password_file(self.options.vault_password_file, loader=loader)
+ loader.set_vault_password(vault_pass)
elif self.options.ask_vault_pass:
vault_pass = self.ask_vault_passwords(ask_vault_pass=True, ask_new_vault_pass=False, confirm_new=False)[0]
+ loader.set_vault_password(vault_pass)
- loader = DataLoader(vault_password=vault_pass)
variable_manager = VariableManager()
variable_manager.extra_vars = load_extra_vars(loader=loader, options=self.options)
diff --git a/lib/ansible/cli/doc.py b/lib/ansible/cli/doc.py
index ea77d4e2b9..e6457342f3 100644
--- a/lib/ansible/cli/doc.py
+++ b/lib/ansible/cli/doc.py
@@ -24,6 +24,8 @@ import termios
import traceback
import textwrap
+from six import iteritems
+
from ansible import constants as C
from ansible.errors import AnsibleError, AnsibleOptionsError
from ansible.plugins import module_loader
@@ -92,7 +94,7 @@ class DocCLI(CLI):
continue
try:
- doc, plainexamples, returndocs = module_docs.get_docstring(filename)
+ doc, plainexamples, returndocs = module_docs.get_docstring(filename, verbose=(self.options.verbosity > 0))
except:
self.display.vvv(traceback.print_exc())
self.display.error("module %s has a documentation error formatting or is missing documentation\nTo see exact traceback use -vvv" % module)
@@ -101,7 +103,7 @@ class DocCLI(CLI):
if doc is not None:
all_keys = []
- for (k,v) in doc['options'].iteritems():
+ for (k,v) in iteritems(doc['options']):
all_keys.append(k)
all_keys = sorted(all_keys)
doc['option_keys'] = all_keys
@@ -120,7 +122,7 @@ class DocCLI(CLI):
# this typically means we couldn't even parse the docstring, not just that the YAML is busted,
# probably a quoting issue.
raise AnsibleError("Parsing produced an empty object.")
- except Exception, e:
+ except Exception as e:
self.display.vvv(traceback.print_exc())
raise AnsibleError("module %s missing documentation (or could not parse documentation): %s\n" % (module, str(e)))
@@ -233,7 +235,10 @@ class DocCLI(CLI):
text = []
text.append("> %s\n" % doc['module'].upper())
- desc = " ".join(doc['description'])
+ if isinstance(doc['description'], list):
+ desc = " ".join(doc['description'])
+ else:
+ desc = doc['description']
text.append("%s\n" % textwrap.fill(CLI.tty_ify(desc), initial_indent=" ", subsequent_indent=" "))
@@ -250,7 +255,10 @@ class DocCLI(CLI):
text.append("%s %s" % (opt_leadin, o))
- desc = " ".join(opt['description'])
+ if isinstance(opt['description'], list):
+ desc = " ".join(opt['description'])
+ else:
+ desc = opt['description']
if 'choices' in opt:
choices = ", ".join(str(i) for i in opt['choices'])
diff --git a/lib/ansible/cli/galaxy.py b/lib/ansible/cli/galaxy.py
index f8e812cac1..84a0bce2a1 100644
--- a/lib/ansible/cli/galaxy.py
+++ b/lib/ansible/cli/galaxy.py
@@ -38,7 +38,6 @@ from ansible.galaxy.api import GalaxyAPI
from ansible.galaxy.role import GalaxyRole
from ansible.playbook.role.requirement import RoleRequirement
-
class GalaxyCLI(CLI):
VALID_ACTIONS = ("init", "info", "install", "list", "remove", "search")
@@ -353,6 +352,7 @@ class GalaxyCLI(CLI):
raise AnsibleOptionsError("- please specify a user/role name, or a roles file, but not both")
no_deps = self.get_opt("no_deps", False)
+ force = self.get_opt('force', False)
roles_path = self.get_opt("roles_path")
roles_done = []
@@ -360,6 +360,7 @@ class GalaxyCLI(CLI):
if role_file:
self.display.debug('Getting roles from %s' % role_file)
try:
+ self.display.debug('Processing role file: %s' % role_file)
f = open(role_file, 'r')
if role_file.endswith('.yaml') or role_file.endswith('.yml'):
try:
@@ -389,13 +390,17 @@ class GalaxyCLI(CLI):
role = roles_left.pop(0)
role_path = role.path
- self.display.debug('Installing role %s' % role_path)
+ if role.install_info is not None and not force:
+ self.display.display('- %s is already installed, skipping.' % role.name)
+ continue
if role_path:
self.options.roles_path = role_path
else:
self.options.roles_path = roles_path
+ self.display.debug('Installing role %s from %s' % (role.name, self.options.roles_path))
+
tmp_file = None
installed = False
if role.src and os.path.isfile(role.src):
@@ -404,7 +409,7 @@ class GalaxyCLI(CLI):
else:
if role.scm:
# create tar file from scm url
- tmp_file = scm_archive_role(role.scm, role.src, role.version, role.name)
+ tmp_file = GalaxyRole.scm_archive_role(role.scm, role.src, role.version, role.name)
if role.src:
if '://' not in role.src:
role_data = self.api.lookup_role_by_name(role.src)
@@ -442,25 +447,20 @@ class GalaxyCLI(CLI):
os.unlink(tmp_file)
# install dependencies, if we want them
if not no_deps and installed:
- if not role_data:
- role_data = gr.get_metadata(role.get("name"), options)
- role_dependencies = role_data['dependencies']
- else:
- role_dependencies = role_data['summary_fields']['dependencies'] # api_fetch_role_related(api_server, 'dependencies', role_data['id'])
+ role_dependencies = role.metadata.get('dependencies', [])
for dep in role_dependencies:
self.display.debug('Installing dep %s' % dep)
- if isinstance(dep, basestring):
- dep = ansible.utils.role_spec_parse(dep)
- else:
- dep = ansible.utils.role_yaml_parse(dep)
- if not get_role_metadata(dep["name"], options):
- if dep not in roles_left:
- self.display.display('- adding dependency: %s' % dep["name"])
- roles_left.append(dep)
+ dep_req = RoleRequirement()
+ __, dep_name, __ = dep_req.parse(dep)
+ dep_role = GalaxyRole(self.galaxy, name=dep_name)
+ if dep_role.install_info is None or force:
+ if dep_role not in roles_left:
+ self.display.display('- adding dependency: %s' % dep_name)
+ roles_left.append(dep_role)
else:
- self.display.display('- dependency %s already pending installation.' % dep["name"])
+ self.display.display('- dependency %s already pending installation.' % dep_name)
else:
- self.display.display('- dependency %s is already installed, skipping.' % dep["name"])
+ self.display.display('- dependency %s is already installed, skipping.' % dep_name)
if not tmp_file or not installed:
self.display.warning("- %s was NOT installed successfully." % role.name)
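The rewritten dependency handling leans on two ideas visible above: install_info is None exactly when a role is not yet installed, and RoleRequirement.parse() reduces any dependency spec to a name. A toy model of the resulting skip/queue decision (install_info stands in for GalaxyRole.install_info):

    def should_install(install_info, force):
        # None means "not installed yet"; --force reinstalls regardless
        return install_info is None or force

    assert should_install(None, force=False)                    # fresh install
    assert not should_install({'version': '1.0'}, force=False)  # skipped
    assert should_install({'version': '1.0'}, force=True)       # --force reinstalls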
diff --git a/lib/ansible/cli/playbook.py b/lib/ansible/cli/playbook.py
index d8e7f6761b..306c134790 100644
--- a/lib/ansible/cli/playbook.py
+++ b/lib/ansible/cli/playbook.py
@@ -89,13 +89,15 @@ class PlaybookCLI(CLI):
(sshpass, becomepass) = self.ask_passwords()
passwords = { 'conn_pass': sshpass, 'become_pass': becomepass }
+ loader = DataLoader()
+
if self.options.vault_password_file:
# read vault_pass from a file
- vault_pass = CLI.read_vault_password_file(self.options.vault_password_file)
+ vault_pass = CLI.read_vault_password_file(self.options.vault_password_file, loader=loader)
+ loader.set_vault_password(vault_pass)
elif self.options.ask_vault_pass:
vault_pass = self.ask_vault_passwords(ask_vault_pass=True, ask_new_vault_pass=False, confirm_new=False)[0]
-
- loader = DataLoader(vault_password=vault_pass)
+ loader.set_vault_password(vault_pass)
# initial error check, to make sure all specified playbooks are accessible
# before we start running anything through the playbook executor
diff --git a/lib/ansible/cli/pull.py b/lib/ansible/cli/pull.py
index 3b61c473c3..0c33568d3d 100644
--- a/lib/ansible/cli/pull.py
+++ b/lib/ansible/cli/pull.py
@@ -196,7 +196,7 @@ class PullCLI(CLI):
os.chdir('/')
try:
shutil.rmtree(self.options.dest)
- except Exception, e:
+ except Exception as e:
self.display.error("Failed to remove %s: %s" % (self.options.dest, str(e)))
return rc
diff --git a/lib/ansible/cli/vault.py b/lib/ansible/cli/vault.py
index 393bbdd50b..086c55e35e 100644
--- a/lib/ansible/cli/vault.py
+++ b/lib/ansible/cli/vault.py
@@ -22,6 +22,7 @@ import traceback
from ansible import constants as C
from ansible.errors import AnsibleError, AnsibleOptionsError
+from ansible.parsing import DataLoader
from ansible.parsing.vault import VaultEditor
from ansible.cli import CLI
from ansible.utils.display import Display
@@ -30,7 +31,6 @@ class VaultCLI(CLI):
""" Vault command line class """
VALID_ACTIONS = ("create", "decrypt", "edit", "encrypt", "rekey", "view")
- CIPHER = 'AES256'
def __init__(self, args, display=None):
@@ -64,66 +64,81 @@ class VaultCLI(CLI):
self.options, self.args = self.parser.parse_args()
self.display.verbosity = self.options.verbosity
- if len(self.args) == 0:
- raise AnsibleOptionsError("Vault requires at least one filename as a parameter")
+ can_output = ['encrypt', 'decrypt']
+
+ if self.action not in can_output:
+ if self.options.output_file:
+ raise AnsibleOptionsError("The --output option can be used only with ansible-vault %s" % '/'.join(can_output))
+ if len(self.args) == 0:
+ raise AnsibleOptionsError("Vault requires at least one filename as a parameter")
+ else:
+ # This restriction should remain in place until it's possible to
+ # load multiple YAML records from a single file; otherwise it is
+ # too easy to create an encrypted file that can't be read back in.
+ # In the meantime, "cat a b c|ansible-vault encrypt --output x" is
+ # a workaround.
+ if self.options.output_file and len(self.args) > 1:
+ raise AnsibleOptionsError("At most one input file may be used with the --output option")
def run(self):
super(VaultCLI, self).run()
+ loader = DataLoader()
if self.options.vault_password_file:
# read vault_pass from a file
- self.vault_pass = CLI.read_vault_password_file(self.options.vault_password_file)
+ self.vault_pass = CLI.read_vault_password_file(self.options.vault_password_file, loader)
else:
self.vault_pass, _ = self.ask_vault_passwords(ask_vault_pass=True, ask_new_vault_pass=False, confirm_new=False)
if self.options.new_vault_password_file:
# for rekey only
- self.new_vault_pass = CLI.read_vault_password_file(self.options.new_vault_password_file)
+ self.new_vault_pass = CLI.read_vault_password_file(self.options.new_vault_password_file, loader)
if not self.vault_pass:
raise AnsibleOptionsError("A password is required to use Ansible's Vault")
+ self.editor = VaultEditor(self.vault_pass)
+
self.execute()
- def execute_create(self):
+ def execute_encrypt(self):
- if len(self.args) > 1:
- raise AnsibleOptionsError("ansible-vault create can take only one filename argument")
+ if len(self.args) == 0 and sys.stdin.isatty():
+ self.display.display("Reading plaintext input from stdin", stderr=True)
+
+ for f in self.args or ['-']:
+ self.editor.encrypt_file(f, output_file=self.options.output_file)
- cipher = getattr(self.options, 'cipher', self.CIPHER)
- this_editor = VaultEditor(cipher, self.vault_pass, self.args[0])
- this_editor.create_file()
+ if sys.stdout.isatty():
+ self.display.display("Encryption successful", stderr=True)
def execute_decrypt(self):
- cipher = getattr(self.options, 'cipher', self.CIPHER)
- for f in self.args:
- this_editor = VaultEditor(cipher, self.vault_pass, f)
- this_editor.decrypt_file()
+ if len(self.args) == 0 and sys.stdin.isatty():
+ self.display.display("Reading ciphertext input from stdin", stderr=True)
- self.display.display("Decryption successful")
+ for f in self.args or ['-']:
+ self.editor.decrypt_file(f, output_file=self.options.output_file)
- def execute_edit(self):
+ if sys.stdout.isatty():
+ self.display.display("Decryption successful", stderr=True)
- for f in self.args:
- this_editor = VaultEditor(None, self.vault_pass, f)
- this_editor.edit_file()
+ def execute_create(self):
- def execute_view(self):
+ if len(self.args) > 1:
+ raise AnsibleOptionsError("ansible-vault create can take only one filename argument")
+ self.editor.create_file(self.args[0])
+
+ def execute_edit(self):
for f in self.args:
- this_editor = VaultEditor(None, self.vault_pass, f)
- this_editor.view_file()
+ self.editor.edit_file(f)
- def execute_encrypt(self):
+ def execute_view(self):
- cipher = getattr(self.options, 'cipher', self.CIPHER)
for f in self.args:
- this_editor = VaultEditor(cipher, self.vault_pass, f)
- this_editor.encrypt_file()
-
- self.display.display("Encryption successful")
+ self.editor.view_file(f)
def execute_rekey(self):
for f in self.args:
@@ -136,7 +151,6 @@ class VaultCLI(CLI):
__, new_password = self.ask_vault_passwords(ask_vault_pass=False, ask_new_vault_pass=True, confirm_new=True)
for f in self.args:
- this_editor = VaultEditor(None, self.vault_pass, f)
- this_editor.rekey_file(new_password)
+ self.editor.rekey_file(f, new_password)
- self.display.display("Rekey successful")
+ self.display.display("Rekey successful", stderr=True)
diff --git a/lib/ansible/constants.py b/lib/ansible/constants.py
index 376d2d54ad..73401288ef 100644
--- a/lib/ansible/constants.py
+++ b/lib/ansible/constants.py
@@ -40,7 +40,16 @@ def mk_boolean(value):
else:
return False
-def get_config(p, section, key, env_var, default, boolean=False, integer=False, floating=False, islist=False, isnone=False):
+def shell_expand(path):
+ '''
+ shell_expand is needed as os.path.expanduser does not work
+ when path is None, which is the default for ANSIBLE_PRIVATE_KEY_FILE
+ '''
+ if path:
+ path = os.path.expanduser(os.path.expandvars(path))
+ return path
+
+def get_config(p, section, key, env_var, default, boolean=False, integer=False, floating=False, islist=False, isnone=False, ispath=False):
''' return a configuration variable with casting '''
value = _get_config(p, section, key, env_var, default)
if boolean:
@@ -56,6 +65,8 @@ def get_config(p, section, key, env_var, default, boolean=False, integer=False,
elif isnone:
if value == "None":
value = None
+ elif ispath:
+ value = shell_expand(value)
elif isinstance(value, string_types):
value = unquote(value)
return value
@@ -96,13 +107,6 @@ def load_config_file():
return p, path
return None, ''
-def shell_expand_path(path):
- ''' shell_expand_path is needed as os.path.expanduser does not work
- when path is None, which is the default for ANSIBLE_PRIVATE_KEY_FILE '''
- if path:
- path = os.path.expanduser(os.path.expandvars(path))
- return path
-
p, CONFIG_FILE = load_config_file()
active_user = pwd.getpwuid(os.geteuid())[0]
@@ -114,13 +118,13 @@ YAML_FILENAME_EXTENSIONS = [ "", ".yml", ".yaml", ".json" ]
# sections in config file
DEFAULTS='defaults'
-DEPRECATED_HOST_LIST = shell_expand_path(get_config(p, DEFAULTS, 'hostfile', 'ANSIBLE_HOSTS', '/etc/ansible/hosts'))
+DEPRECATED_HOST_LIST = get_config(p, DEFAULTS, 'hostfile', 'ANSIBLE_HOSTS', '/etc/ansible/hosts', ispath=True)
# generally configurable things
DEFAULT_DEBUG = get_config(p, DEFAULTS, 'debug', 'ANSIBLE_DEBUG', False, boolean=True)
-DEFAULT_HOST_LIST = shell_expand_path(get_config(p, DEFAULTS,'inventory', 'ANSIBLE_INVENTORY', DEPRECATED_HOST_LIST))
-DEFAULT_MODULE_PATH = get_config(p, DEFAULTS, 'library', 'ANSIBLE_LIBRARY', None)
-DEFAULT_ROLES_PATH = shell_expand_path(get_config(p, DEFAULTS, 'roles_path', 'ANSIBLE_ROLES_PATH', '/etc/ansible/roles'))
+DEFAULT_HOST_LIST = get_config(p, DEFAULTS,'inventory', 'ANSIBLE_INVENTORY', DEPRECATED_HOST_LIST, ispath=True)
+DEFAULT_MODULE_PATH = get_config(p, DEFAULTS, 'library', 'ANSIBLE_LIBRARY', None, ispath=True)
+DEFAULT_ROLES_PATH = get_config(p, DEFAULTS, 'roles_path', 'ANSIBLE_ROLES_PATH', '/etc/ansible/roles', ispath=True)
DEFAULT_REMOTE_TMP = get_config(p, DEFAULTS, 'remote_tmp', 'ANSIBLE_REMOTE_TEMP', '$HOME/.ansible/tmp')
DEFAULT_MODULE_NAME = get_config(p, DEFAULTS, 'module_name', None, 'command')
DEFAULT_PATTERN = get_config(p, DEFAULTS, 'pattern', None, '*')
@@ -131,10 +135,10 @@ DEFAULT_TIMEOUT = get_config(p, DEFAULTS, 'timeout', 'ANSIBLE
DEFAULT_POLL_INTERVAL = get_config(p, DEFAULTS, 'poll_interval', 'ANSIBLE_POLL_INTERVAL', 15, integer=True)
DEFAULT_REMOTE_USER = get_config(p, DEFAULTS, 'remote_user', 'ANSIBLE_REMOTE_USER', active_user)
DEFAULT_ASK_PASS = get_config(p, DEFAULTS, 'ask_pass', 'ANSIBLE_ASK_PASS', False, boolean=True)
-DEFAULT_PRIVATE_KEY_FILE = shell_expand_path(get_config(p, DEFAULTS, 'private_key_file', 'ANSIBLE_PRIVATE_KEY_FILE', None))
+DEFAULT_PRIVATE_KEY_FILE = get_config(p, DEFAULTS, 'private_key_file', 'ANSIBLE_PRIVATE_KEY_FILE', None, ispath=True)
DEFAULT_REMOTE_PORT = get_config(p, DEFAULTS, 'remote_port', 'ANSIBLE_REMOTE_PORT', None, integer=True)
DEFAULT_ASK_VAULT_PASS = get_config(p, DEFAULTS, 'ask_vault_pass', 'ANSIBLE_ASK_VAULT_PASS', False, boolean=True)
-DEFAULT_VAULT_PASSWORD_FILE = shell_expand_path(get_config(p, DEFAULTS, 'vault_password_file', 'ANSIBLE_VAULT_PASSWORD_FILE', None))
+DEFAULT_VAULT_PASSWORD_FILE = get_config(p, DEFAULTS, 'vault_password_file', 'ANSIBLE_VAULT_PASSWORD_FILE', None, ispath=True)
DEFAULT_TRANSPORT = get_config(p, DEFAULTS, 'transport', 'ANSIBLE_TRANSPORT', 'smart')
DEFAULT_SCP_IF_SSH = get_config(p, 'ssh_connection', 'scp_if_ssh', 'ANSIBLE_SCP_IF_SSH', False, boolean=True)
DEFAULT_SFTP_BATCH_MODE = get_config(p, 'ssh_connection', 'sftp_batch_mode', 'ANSIBLE_SFTP_BATCH_MODE', True, boolean=True)
@@ -146,7 +150,7 @@ DEFAULT_PRIVATE_ROLE_VARS = get_config(p, DEFAULTS, 'private_role_vars', 'ANSIBL
DEFAULT_JINJA2_EXTENSIONS = get_config(p, DEFAULTS, 'jinja2_extensions', 'ANSIBLE_JINJA2_EXTENSIONS', None)
DEFAULT_EXECUTABLE = get_config(p, DEFAULTS, 'executable', 'ANSIBLE_EXECUTABLE', '/bin/sh')
DEFAULT_GATHERING = get_config(p, DEFAULTS, 'gathering', 'ANSIBLE_GATHERING', 'implicit').lower()
-DEFAULT_LOG_PATH = shell_expand_path(get_config(p, DEFAULTS, 'log_path', 'ANSIBLE_LOG_PATH', ''))
+DEFAULT_LOG_PATH = get_config(p, DEFAULTS, 'log_path', 'ANSIBLE_LOG_PATH', '', ispath=True)
DEFAULT_FORCE_HANDLERS = get_config(p, DEFAULTS, 'force_handlers', 'ANSIBLE_FORCE_HANDLERS', False, boolean=True)
DEFAULT_INVENTORY_IGNORE = get_config(p, DEFAULTS, 'inventory_ignore_extensions', 'ANSIBLE_INVENTORY_IGNORE', ["~", ".orig", ".bak", ".ini", ".cfg", ".retry", ".pyc", ".pyo"], islist=True)
@@ -157,18 +161,19 @@ DEFAULT_SELINUX_SPECIAL_FS = get_config(p, 'selinux', 'special_context_filesyste
# Backwards Compat
DEFAULT_SU = get_config(p, DEFAULTS, 'su', 'ANSIBLE_SU', False, boolean=True)
DEFAULT_SU_USER = get_config(p, DEFAULTS, 'su_user', 'ANSIBLE_SU_USER', 'root')
-DEFAULT_SU_EXE = get_config(p, DEFAULTS, 'su_exe', 'ANSIBLE_SU_EXE', 'su')
-DEFAULT_SU_FLAGS = get_config(p, DEFAULTS, 'su_flags', 'ANSIBLE_SU_FLAGS', '')
+DEFAULT_SU_EXE = get_config(p, DEFAULTS, 'su_exe', 'ANSIBLE_SU_EXE', None)
+DEFAULT_SU_FLAGS = get_config(p, DEFAULTS, 'su_flags', 'ANSIBLE_SU_FLAGS', None)
DEFAULT_ASK_SU_PASS = get_config(p, DEFAULTS, 'ask_su_pass', 'ANSIBLE_ASK_SU_PASS', False, boolean=True)
DEFAULT_SUDO = get_config(p, DEFAULTS, 'sudo', 'ANSIBLE_SUDO', False, boolean=True)
DEFAULT_SUDO_USER = get_config(p, DEFAULTS, 'sudo_user', 'ANSIBLE_SUDO_USER', 'root')
-DEFAULT_SUDO_EXE = get_config(p, DEFAULTS, 'sudo_exe', 'ANSIBLE_SUDO_EXE', 'sudo')
+DEFAULT_SUDO_EXE = get_config(p, DEFAULTS, 'sudo_exe', 'ANSIBLE_SUDO_EXE', None)
DEFAULT_SUDO_FLAGS = get_config(p, DEFAULTS, 'sudo_flags', 'ANSIBLE_SUDO_FLAGS', '-H')
DEFAULT_ASK_SUDO_PASS = get_config(p, DEFAULTS, 'ask_sudo_pass', 'ANSIBLE_ASK_SUDO_PASS', False, boolean=True)
# Become
BECOME_ERROR_STRINGS = {'sudo': 'Sorry, try again.', 'su': 'Authentication failure', 'pbrun': '', 'pfexec': '', 'runas': '', 'doas': 'Permission denied'} #FIXME: deal with i18n
BECOME_METHODS = ['sudo','su','pbrun','pfexec','runas','doas']
+BECOME_ALLOW_SAME_USER = get_config(p, 'privilege_escalation', 'become_allow_same_user', 'ANSIBLE_BECOME_ALLOW_SAME_USER', False, boolean=True)
DEFAULT_BECOME_METHOD = get_config(p, 'privilege_escalation', 'become_method', 'ANSIBLE_BECOME_METHOD','sudo' if DEFAULT_SUDO else 'su' if DEFAULT_SU else 'sudo' ).lower()
DEFAULT_BECOME = get_config(p, 'privilege_escalation', 'become', 'ANSIBLE_BECOME',False, boolean=True)
DEFAULT_BECOME_USER = get_config(p, 'privilege_escalation', 'become_user', 'ANSIBLE_BECOME_USER', 'root')
@@ -180,14 +185,15 @@ DEFAULT_BECOME_ASK_PASS = get_config(p, 'privilege_escalation', 'become_ask_pa
# PLUGINS
DEFAULT_SQUASH_ACTIONS = get_config(p, DEFAULTS, 'squash_actions', 'ANSIBLE_SQUASH_ACTIONS', "apt, yum, pkgng, zypper, dnf", islist=True)
# paths
-DEFAULT_ACTION_PLUGIN_PATH = get_config(p, DEFAULTS, 'action_plugins', 'ANSIBLE_ACTION_PLUGINS', '~/.ansible/plugins/action_plugins:/usr/share/ansible_plugins/action_plugins')
-DEFAULT_CACHE_PLUGIN_PATH = get_config(p, DEFAULTS, 'cache_plugins', 'ANSIBLE_CACHE_PLUGINS', '~/.ansible/plugins/cache_plugins:/usr/share/ansible_plugins/cache_plugins')
-DEFAULT_CALLBACK_PLUGIN_PATH = get_config(p, DEFAULTS, 'callback_plugins', 'ANSIBLE_CALLBACK_PLUGINS', '~/.ansible/plugins/callback_plugins:/usr/share/ansible_plugins/callback_plugins')
-DEFAULT_CONNECTION_PLUGIN_PATH = get_config(p, DEFAULTS, 'connection_plugins', 'ANSIBLE_CONNECTION_PLUGINS', '~/.ansible/plugins/connection_plugins:/usr/share/ansible_plugins/connection_plugins')
-DEFAULT_LOOKUP_PLUGIN_PATH = get_config(p, DEFAULTS, 'lookup_plugins', 'ANSIBLE_LOOKUP_PLUGINS', '~/.ansible/plugins/lookup_plugins:/usr/share/ansible_plugins/lookup_plugins')
-DEFAULT_VARS_PLUGIN_PATH = get_config(p, DEFAULTS, 'vars_plugins', 'ANSIBLE_VARS_PLUGINS', '~/.ansible/plugins/vars_plugins:/usr/share/ansible_plugins/vars_plugins')
-DEFAULT_FILTER_PLUGIN_PATH = get_config(p, DEFAULTS, 'filter_plugins', 'ANSIBLE_FILTER_PLUGINS', '~/.ansible/plugins/filter_plugins:/usr/share/ansible_plugins/filter_plugins')
-DEFAULT_TEST_PLUGIN_PATH = get_config(p, DEFAULTS, 'test_plugins', 'ANSIBLE_TEST_PLUGINS', '~/.ansible/plugins/test_plugins:/usr/share/ansible_plugins/test_plugins')
+DEFAULT_ACTION_PLUGIN_PATH = get_config(p, DEFAULTS, 'action_plugins', 'ANSIBLE_ACTION_PLUGINS', '~/.ansible/plugins/action:/usr/share/ansible/plugins/action', ispath=True)
+DEFAULT_CACHE_PLUGIN_PATH = get_config(p, DEFAULTS, 'cache_plugins', 'ANSIBLE_CACHE_PLUGINS', '~/.ansible/plugins/cache:/usr/share/ansible/plugins/cache', ispath=True)
+DEFAULT_CALLBACK_PLUGIN_PATH = get_config(p, DEFAULTS, 'callback_plugins', 'ANSIBLE_CALLBACK_PLUGINS', '~/.ansible/plugins/callback:/usr/share/ansible/plugins/callback', ispath=True)
+DEFAULT_CONNECTION_PLUGIN_PATH = get_config(p, DEFAULTS, 'connection_plugins', 'ANSIBLE_CONNECTION_PLUGINS', '~/.ansible/plugins/connection:/usr/share/ansible/plugins/connection', ispath=True)
+DEFAULT_LOOKUP_PLUGIN_PATH = get_config(p, DEFAULTS, 'lookup_plugins', 'ANSIBLE_LOOKUP_PLUGINS', '~/.ansible/plugins/lookup:/usr/share/ansible/plugins/lookup', ispath=True)
+DEFAULT_INVENTORY_PLUGIN_PATH = get_config(p, DEFAULTS, 'inventory_plugins', 'ANSIBLE_INVENTORY_PLUGINS', '~/.ansible/plugins/inventory:/usr/share/ansible/plugins/inventory', ispath=True)
+DEFAULT_VARS_PLUGIN_PATH = get_config(p, DEFAULTS, 'vars_plugins', 'ANSIBLE_VARS_PLUGINS', '~/.ansible/plugins/vars:/usr/share/ansible/plugins/vars', ispath=True)
+DEFAULT_FILTER_PLUGIN_PATH = get_config(p, DEFAULTS, 'filter_plugins', 'ANSIBLE_FILTER_PLUGINS', '~/.ansible/plugins/filter:/usr/share/ansible/plugins/filter', ispath=True)
+DEFAULT_TEST_PLUGIN_PATH = get_config(p, DEFAULTS, 'test_plugins', 'ANSIBLE_TEST_PLUGINS', '~/.ansible/plugins/test:/usr/share/ansible/plugins/test', ispath=True)
DEFAULT_STDOUT_CALLBACK = get_config(p, DEFAULTS, 'stdout_callback', 'ANSIBLE_STDOUT_CALLBACK', 'default')
# cache
CACHE_PLUGIN = get_config(p, DEFAULTS, 'fact_caching', 'ANSIBLE_CACHE_PLUGIN', 'memory')
@@ -209,7 +215,7 @@ COMMAND_WARNINGS = get_config(p, DEFAULTS, 'command_warnings', 'AN
DEFAULT_LOAD_CALLBACK_PLUGINS = get_config(p, DEFAULTS, 'bin_ansible_callbacks', 'ANSIBLE_LOAD_CALLBACK_PLUGINS', False, boolean=True)
DEFAULT_CALLBACK_WHITELIST = get_config(p, DEFAULTS, 'callback_whitelist', 'ANSIBLE_CALLBACK_WHITELIST', [], islist=True)
RETRY_FILES_ENABLED = get_config(p, DEFAULTS, 'retry_files_enabled', 'ANSIBLE_RETRY_FILES_ENABLED', True, boolean=True)
-RETRY_FILES_SAVE_PATH = get_config(p, DEFAULTS, 'retry_files_save_path', 'ANSIBLE_RETRY_FILES_SAVE_PATH', '~/')
+RETRY_FILES_SAVE_PATH = get_config(p, DEFAULTS, 'retry_files_save_path', 'ANSIBLE_RETRY_FILES_SAVE_PATH', '~/', ispath=True)
DEFAULT_NULL_REPRESENTATION = get_config(p, DEFAULTS, 'null_representation', 'ANSIBLE_NULL_REPRESENTATION', None, isnone=True)
# CONNECTION RELATED
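Folding the expansion into get_config() via ispath=True means every path-like setting now accepts both $VAR and ~ forms. A quick illustration of shell_expand()'s behavior (the HOME value is assumed for the example):

    import os

    def shell_expand(path):
        # same shape as the helper above; None passes through untouched,
        # which is why os.path.expanduser alone is not enough
        if path:
            path = os.path.expanduser(os.path.expandvars(path))
        return path

    os.environ['HOME'] = '/home/alice'                 # assumption for the example
    print(shell_expand('~/.ansible/plugins/action'))   # /home/alice/.ansible/plugins/action
    print(shell_expand('$HOME/.ansible/tmp'))          # /home/alice/.ansible/tmp
    print(shell_expand(None))                          # None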
diff --git a/lib/ansible/executor/module_common.py b/lib/ansible/executor/module_common.py
index 11ee68e7b6..c66cd2e3be 100644
--- a/lib/ansible/executor/module_common.py
+++ b/lib/ansible/executor/module_common.py
@@ -33,12 +33,13 @@ from ansible.errors import AnsibleError
from ansible.parsing.utils.jsonify import jsonify
from ansible.utils.unicode import to_bytes
-REPLACER = "#<<INCLUDE_ANSIBLE_MODULE_COMMON>>"
-REPLACER_ARGS = "\"<<INCLUDE_ANSIBLE_MODULE_ARGS>>\""
-REPLACER_COMPLEX = "\"<<INCLUDE_ANSIBLE_MODULE_COMPLEX_ARGS>>\""
-REPLACER_WINDOWS = "# POWERSHELL_COMMON"
-REPLACER_WINARGS = "<<INCLUDE_ANSIBLE_MODULE_WINDOWS_ARGS>>"
-REPLACER_VERSION = "\"<<ANSIBLE_VERSION>>\""
+REPLACER = "#<<INCLUDE_ANSIBLE_MODULE_COMMON>>"
+REPLACER_ARGS = "\"<<INCLUDE_ANSIBLE_MODULE_ARGS>>\""
+REPLACER_COMPLEX = "\"<<INCLUDE_ANSIBLE_MODULE_COMPLEX_ARGS>>\""
+REPLACER_WINDOWS = "# POWERSHELL_COMMON"
+REPLACER_WINARGS = "<<INCLUDE_ANSIBLE_MODULE_WINDOWS_ARGS>>"
+REPLACER_JSONARGS = "<<INCLUDE_ANSIBLE_MODULE_JSON_ARGS>>"
+REPLACER_VERSION = "\"<<ANSIBLE_VERSION>>\""
# We could end up writing out parameters with unicode characters so we need to
# specify an encoding for the python source file
@@ -68,6 +69,8 @@ def _find_snippet_imports(module_data, module_path, strip_comments):
module_style = 'new'
elif REPLACER_WINDOWS in module_data:
module_style = 'new'
+ elif REPLACER_JSONARGS in module_data:
+ module_style = 'new'
elif 'from ansible.module_utils.' in module_data:
module_style = 'new'
elif 'WANT_JSON' in module_data:
@@ -162,13 +165,14 @@ def modify_module(module_path, module_args, task_vars=dict(), strip_comments=Fal
(module_data, module_style) = _find_snippet_imports(module_data, module_path, strip_comments)
- module_args_json = json.dumps(module_args)
- encoded_args = repr(module_args_json.encode('utf-8'))
+ module_args_json = json.dumps(module_args).encode('utf-8')
+ python_repred_args = repr(module_args_json)
# these strings should be part of the 'basic' snippet which is required to be included
module_data = module_data.replace(REPLACER_VERSION, repr(__version__))
- module_data = module_data.replace(REPLACER_COMPLEX, encoded_args)
- module_data = module_data.replace(REPLACER_WINARGS, module_args_json.encode('utf-8'))
+ module_data = module_data.replace(REPLACER_COMPLEX, python_repred_args)
+ module_data = module_data.replace(REPLACER_WINARGS, module_args_json)
+ module_data = module_data.replace(REPLACER_JSONARGS, module_args_json)
if module_style == 'new':
facility = C.DEFAULT_SYSLOG_FACILITY
@@ -188,7 +192,7 @@ def modify_module(module_path, module_args, task_vars=dict(), strip_comments=Fal
interpreter = to_bytes(task_vars[interpreter_config], errors='strict')
lines[0] = shebang = b"#!{0} {1}".format(interpreter, b" ".join(args[1:]))
- if interpreter.startswith('python'):
+ if os.path.basename(interpreter).startswith('python'):
lines.insert(1, ENCODING_STRING)
else:
# No shebang, assume a binary module?
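The new REPLACER_JSONARGS marker gives non-Python modules another way to receive arguments: the literal token is swapped for the JSON-encoded task args before the module ships out. A minimal sketch of a module using it (written in Python only for brevity; the point of the marker is that any language can embed it):

    #!/usr/bin/env python
    # sketch of a JSONARGS-style module: the marker below is a plain string
    # replaced wholesale by modify_module() with the JSON-encoded arguments,
    # so this template only runs correctly after that substitution
    import json

    json_arguments = """<<INCLUDE_ANSIBLE_MODULE_JSON_ARGS>>"""
    args = json.loads(json_arguments)
    print(json.dumps({"changed": False, "echoed_args": args}))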
diff --git a/lib/ansible/executor/play_iterator.py b/lib/ansible/executor/play_iterator.py
index c6870e0224..f015c28db8 100644
--- a/lib/ansible/executor/play_iterator.py
+++ b/lib/ansible/executor/play_iterator.py
@@ -21,6 +21,8 @@ __metaclass__ = type
import fnmatch
+from six import iteritems
+
from ansible import constants as C
from ansible.errors import *
@@ -31,22 +33,31 @@ from ansible.utils.boolean import boolean
__all__ = ['PlayIterator']
+try:
+ from __main__ import display
+except ImportError:
+ from ansible.utils.display import Display
+ display = Display()
+
+
class HostState:
def __init__(self, blocks):
self._blocks = blocks[:]
- self.cur_block = 0
- self.cur_regular_task = 0
- self.cur_rescue_task = 0
- self.cur_always_task = 0
- self.cur_role = None
- self.run_state = PlayIterator.ITERATING_SETUP
- self.fail_state = PlayIterator.FAILED_NONE
- self.pending_setup = False
- self.child_state = None
+ self.cur_block = 0
+ self.cur_regular_task = 0
+ self.cur_rescue_task = 0
+ self.cur_always_task = 0
+ self.cur_role = None
+ self.run_state = PlayIterator.ITERATING_SETUP
+ self.fail_state = PlayIterator.FAILED_NONE
+ self.pending_setup = False
+ self.tasks_child_state = None
+ self.rescue_child_state = None
+ self.always_child_state = None
def __repr__(self):
- return "HOST STATE: block=%d, task=%d, rescue=%d, always=%d, role=%s, run_state=%d, fail_state=%d, pending_setup=%s, child state? %s" % (
+ return "HOST STATE: block=%d, task=%d, rescue=%d, always=%d, role=%s, run_state=%d, fail_state=%d, pending_setup=%s, tasks child state? %s, rescue child state? %s, always child state? %s" % (
self.cur_block,
self.cur_regular_task,
self.cur_rescue_task,
@@ -55,7 +66,9 @@ class HostState:
self.run_state,
self.fail_state,
self.pending_setup,
- self.child_state,
+ self.tasks_child_state,
+ self.rescue_child_state,
+ self.always_child_state,
)
def get_current_block(self):
@@ -71,7 +84,12 @@ class HostState:
new_state.run_state = self.run_state
new_state.fail_state = self.fail_state
new_state.pending_setup = self.pending_setup
- new_state.child_state = self.child_state
+ if self.tasks_child_state is not None:
+ new_state.tasks_child_state = self.tasks_child_state.copy()
+ if self.rescue_child_state is not None:
+ new_state.rescue_child_state = self.rescue_child_state.copy()
+ if self.always_child_state is not None:
+ new_state.always_child_state = self.always_child_state.copy()
return new_state
class PlayIterator:
@@ -91,7 +109,7 @@ class PlayIterator:
FAILED_RESCUE = 4
FAILED_ALWAYS = 8
- def __init__(self, inventory, play, play_context, all_vars):
+ def __init__(self, inventory, play, play_context, variable_manager, all_vars):
self._play = play
self._blocks = []
@@ -103,6 +121,10 @@ class PlayIterator:
self._host_states = {}
for host in inventory.get_hosts(self._play.hosts):
self._host_states[host.name] = HostState(blocks=self._blocks)
+ # if the host's name is in the variable manager's fact cache, then set
+ # its _gathered_facts flag to true for smart gathering tests later
+ if host.name in variable_manager._fact_cache:
+ host._gathered_facts = True
# if we're looking to start at a specific task, iterate through
# the tasks for this host until we find the specified task
if play_context.start_at_task is not None:
@@ -114,6 +136,8 @@ class PlayIterator:
break
else:
self.get_next_task_for_host(host)
+ # finally, reset the host's state to ITERATING_SETUP
+ self._host_states[host.name].run_state = self.ITERATING_SETUP
# Extend the play handlers list to include the handlers defined in roles
self._play.handlers.extend(play.compile_roles_handlers())
@@ -126,10 +150,12 @@ class PlayIterator:
def get_next_task_for_host(self, host, peek=False):
+ display.debug("getting the next task for host %s" % host.name)
s = self.get_host_state(host)
task = None
if s.run_state == self.ITERATING_COMPLETE:
+ display.debug("host %s is done iterating, returning" % host.name)
return (None, None)
elif s.run_state == self.ITERATING_SETUP:
s.run_state = self.ITERATING_TASKS
@@ -169,6 +195,9 @@ class PlayIterator:
if not peek:
self._host_states[host.name] = s
+ display.debug("done getting next task for host %s" % host.name)
+ display.debug(" ^ task is: %s" % task)
+ display.debug(" ^ state is: %s" % s)
return (s, task)
@@ -176,15 +205,6 @@ class PlayIterator:
task = None
- # if we previously encountered a child block and we have a
- # saved child state, try and get the next task from there
- if state.child_state:
- (state.child_state, task) = self._get_next_task_from_state(state.child_state, peek=peek)
- if task:
- return (state.child_state, task)
- else:
- state.child_state = None
-
# try and find the next task, given the current state.
while True:
# try to get the current block from the list of blocks, and
@@ -207,7 +227,23 @@ class PlayIterator:
state.run_state = self.ITERATING_ALWAYS
else:
task = block.block[state.cur_regular_task]
- state.cur_regular_task += 1
+ # if the current task is actually a child block, we dive into it
+ if isinstance(task, Block) or state.tasks_child_state is not None:
+ if state.tasks_child_state is None:
+ state.tasks_child_state = HostState(blocks=[task])
+ state.tasks_child_state.run_state = self.ITERATING_TASKS
+ state.tasks_child_state.cur_role = state.cur_role
+ (state.tasks_child_state, task) = self._get_next_task_from_state(state.tasks_child_state, peek=peek)
+ if task is None:
+ # check to see if the child state was failed, if so we need to
+ # fail here too so we don't continue iterating tasks
+ if state.tasks_child_state.fail_state != self.FAILED_NONE:
+ state.fail_state |= self.FAILED_TASKS
+ state.tasks_child_state = None
+ state.cur_regular_task += 1
+ continue
+ else:
+ state.cur_regular_task += 1
elif state.run_state == self.ITERATING_RESCUE:
if state.fail_state & self.FAILED_RESCUE == self.FAILED_RESCUE:
@@ -218,7 +254,22 @@ class PlayIterator:
state.run_state = self.ITERATING_ALWAYS
else:
task = block.rescue[state.cur_rescue_task]
- state.cur_rescue_task += 1
+ if isinstance(task, Block) or state.rescue_child_state is not None:
+ if state.rescue_child_state is None:
+ state.rescue_child_state = HostState(blocks=[task])
+ state.rescue_child_state.run_state = self.ITERATING_TASKS
+ state.rescue_child_state.cur_role = state.cur_role
+ (state.rescue_child_state, task) = self._get_next_task_from_state(state.rescue_child_state, peek=peek)
+ if task is None:
+ # check to see if the child state was failed, if so we need to
+ # fail here too so we don't continue iterating rescue
+ if state.rescue_child_state.fail_state != self.FAILED_NONE:
+ state.fail_state |= self.FAILED_RESCUE
+ state.rescue_child_state = None
+ state.cur_rescue_task += 1
+ continue
+ else:
+ state.cur_rescue_task += 1
elif state.run_state == self.ITERATING_ALWAYS:
if state.cur_always_task >= len(block.always):
@@ -233,42 +284,63 @@ class PlayIterator:
state.child_state = None
else:
task = block.always[state.cur_always_task]
- state.cur_always_task += 1
+ if isinstance(task, Block) or state.always_child_state is not None:
+ if state.always_child_state is None:
+ state.always_child_state = HostState(blocks=[task])
+ state.always_child_state.run_state = self.ITERATING_TASKS
+ state.always_child_state.cur_role = state.cur_role
+ (state.always_child_state, task) = self._get_next_task_from_state(state.always_child_state, peek=peek)
+ if task is None:
+ # check to see if the child state was failed, if so we need to
+ # fail here too so we don't continue iterating always
+ if state.always_child_state.fail_state != self.FAILED_NONE:
+ state.fail_state |= self.FAILED_ALWAYS
+ state.always_child_state = None
+ state.cur_always_task += 1
+ continue
+ else:
+ state.cur_always_task += 1
elif state.run_state == self.ITERATING_COMPLETE:
return (state, None)
- # if the current task is actually a child block, we dive into it
- if isinstance(task, Block):
- state.child_state = HostState(blocks=[task])
- state.child_state.run_state = self.ITERATING_TASKS
- state.child_state.cur_role = state.cur_role
- (state.child_state, task) = self._get_next_task_from_state(state.child_state, peek=peek)
-
# if something above set the task, break out of the loop now
if task:
break
return (state, task)
+ def _set_failed_state(self, state):
+ if state.pending_setup:
+ state.fail_state |= self.FAILED_SETUP
+ state.run_state = self.ITERATING_COMPLETE
+ elif state.run_state == self.ITERATING_TASKS:
+ if state.tasks_child_state is not None:
+ state.tasks_child_state = self._set_failed_state(state.tasks_child_state)
+ else:
+ state.fail_state |= self.FAILED_TASKS
+ state.run_state = self.ITERATING_RESCUE
+ elif state.run_state == self.ITERATING_RESCUE:
+ if state.rescue_child_state is not None:
+ state.rescue_child_state = self._set_failed_state(state.rescue_child_state)
+ else:
+ state.fail_state |= self.FAILED_RESCUE
+ state.run_state = self.ITERATING_ALWAYS
+ elif state.run_state == self.ITERATING_ALWAYS:
+ if state.always_child_state is not None:
+ state.always_child_state = self._set_failed_state(state.always_child_state)
+ else:
+ state.fail_state |= self.FAILED_ALWAYS
+ state.run_state = self.ITERATING_COMPLETE
+ return state
+
def mark_host_failed(self, host):
s = self.get_host_state(host)
- if s.pending_setup:
- s.fail_state |= self.FAILED_SETUP
- s.run_state = self.ITERATING_COMPLETE
- elif s.run_state == self.ITERATING_TASKS:
- s.fail_state |= self.FAILED_TASKS
- s.run_state = self.ITERATING_RESCUE
- elif s.run_state == self.ITERATING_RESCUE:
- s.fail_state |= self.FAILED_RESCUE
- s.run_state = self.ITERATING_ALWAYS
- elif s.run_state == self.ITERATING_ALWAYS:
- s.fail_state |= self.FAILED_ALWAYS
- s.run_state = self.ITERATING_COMPLETE
+ s = self._set_failed_state(s)
self._host_states[host.name] = s
def get_failed_hosts(self):
- return dict((host, True) for (host, state) in self._host_states.iteritems() if state.run_state == self.ITERATING_COMPLETE and state.fail_state != self.FAILED_NONE)
+ return dict((host, True) for (host, state) in iteritems(self._host_states) if state.run_state == self.ITERATING_COMPLETE and state.fail_state != self.FAILED_NONE)
def get_original_task(self, host, task):
'''
@@ -278,34 +350,38 @@ class PlayIterator:
allows us to find the original task passed into the executor engine.
'''
def _search_block(block, task):
- for t in block.block:
- if isinstance(t, Block):
- res = _search_block(t, task)
+ '''
+ helper method to check a block's task lists (block/rescue/always)
+ for a given task uuid. If a Block is encountered in the place of a
+ task, it will be recursively searched (this happens when a task
+ include inserts one or more blocks into a task list).
+ '''
+ for b in (block.block, block.rescue, block.always):
+ for t in b:
+ if isinstance(t, Block):
+ res = _search_block(t, task)
+ if res:
+ return res
+ elif t._uuid == task._uuid:
+ return t
+ return None
+
+ def _search_state(state, task):
+ for block in state._blocks:
+ res = _search_block(block, task)
+ if res:
+ return res
+ for child_state in (state.tasks_child_state, state.rescue_child_state, state.always_child_state):
+ if child_state is not None:
+ res = _search_state(child_state, task)
if res:
return res
- elif t._uuid == task._uuid:
- return t
- for t in block.rescue:
- if isinstance(t, Block):
- res = _search_block(t, task)
- if res:
- return res
- elif t._uuid == task._uuid:
- return t
- for t in block.always:
- if isinstance(t, Block):
- res = _search_block(t, task)
- if res:
- return res
- elif t._uuid == task._uuid:
- return t
return None
s = self.get_host_state(host)
- for block in s._blocks:
- res = _search_block(block, task)
- if res:
- return res
+ res = _search_state(s, task)
+ if res:
+ return res
for block in self._play.handlers:
res = _search_block(block, task)
@@ -314,23 +390,36 @@ class PlayIterator:
return None
+ def _insert_tasks_into_state(self, state, task_list):
+ if state.run_state == self.ITERATING_TASKS:
+ if state.tasks_child_state:
+ state.tasks_child_state = self._insert_tasks_into_state(state.tasks_child_state, task_list)
+ else:
+ target_block = state._blocks[state.cur_block].copy(exclude_parent=True)
+ before = target_block.block[:state.cur_regular_task]
+ after = target_block.block[state.cur_regular_task:]
+ target_block.block = before + task_list + after
+ state._blocks[state.cur_block] = target_block
+ elif state.run_state == self.ITERATING_RESCUE:
+ if state.rescue_child_state:
+ state.rescue_child_state = self._insert_tasks_into_state(state.rescue_child_state, task_list)
+ else:
+ target_block = state._blocks[state.cur_block].copy(exclude_parent=True)
+ before = target_block.rescue[:state.cur_rescue_task]
+ after = target_block.rescue[state.cur_rescue_task:]
+ target_block.rescue = before + task_list + after
+ state._blocks[state.cur_block] = target_block
+ elif state.run_state == self.ITERATING_ALWAYS:
+ if state.always_child_state:
+ state.always_child_state = self._insert_tasks_into_state(state.always_child_state, task_list)
+ else:
+ target_block = state._blocks[state.cur_block].copy(exclude_parent=True)
+ before = target_block.always[:state.cur_always_task]
+ after = target_block.always[state.cur_always_task:]
+ target_block.always = before + task_list + after
+ state._blocks[state.cur_block] = target_block
+ return state
+
def add_tasks(self, host, task_list):
- s = self.get_host_state(host)
- target_block = s._blocks[s.cur_block].copy(exclude_parent=True)
-
- if s.run_state == self.ITERATING_TASKS:
- before = target_block.block[:s.cur_regular_task]
- after = target_block.block[s.cur_regular_task:]
- target_block.block = before + task_list + after
- elif s.run_state == self.ITERATING_RESCUE:
- before = target_block.rescue[:s.cur_rescue_task]
- after = target_block.rescue[s.cur_rescue_task:]
- target_block.rescue = before + task_list + after
- elif s.run_state == self.ITERATING_ALWAYS:
- before = target_block.always[:s.cur_always_task]
- after = target_block.always[s.cur_always_task:]
- target_block.always = before + task_list + after
-
- s._blocks[s.cur_block] = target_block
- self._host_states[host.name] = s
+ self._host_states[host.name] = self._insert_tasks_into_state(self.get_host_state(host), task_list)
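The recursion in _search_block() is what makes dynamically included blocks findable. A self-contained toy showing the same search shape over nested block/rescue/always lists (the classes are stand-ins, not the real ansible.playbook types):

    class Block(object):
        def __init__(self, block=(), rescue=(), always=()):
            self.block, self.rescue, self.always = list(block), list(rescue), list(always)

    class Task(object):
        def __init__(self, uuid):
            self._uuid = uuid

    def search_block(block, task):
        # one pass over all three task lists, recursing into nested Blocks
        for section in (block.block, block.rescue, block.always):
            for t in section:
                if isinstance(t, Block):
                    res = search_block(t, task)
                    if res:
                        return res
                elif t._uuid == task._uuid:
                    return t
        return None

    inner = Task('42')
    outer = Block(block=[Block(rescue=[inner])])   # a block nested by an include
    assert search_block(outer, Task('42')) is inner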
diff --git a/lib/ansible/executor/playbook_executor.py b/lib/ansible/executor/playbook_executor.py
index 39b89de10b..b35333789b 100644
--- a/lib/ansible/executor/playbook_executor.py
+++ b/lib/ansible/executor/playbook_executor.py
@@ -50,6 +50,7 @@ class PlaybookExecutor:
self._display = display
self._options = options
self.passwords = passwords
+ self._unreachable_hosts = dict()
if options.listhosts or options.listtasks or options.listtags or options.syntax:
self._tqm = None
@@ -87,14 +88,10 @@ class PlaybookExecutor:
if play.vars_prompt:
for var in play.vars_prompt:
- if 'name' not in var:
- raise AnsibleError("'vars_prompt' item is missing 'name:'", obj=play._ds)
-
vname = var['name']
prompt = var.get("prompt", vname)
default = var.get("default", None)
private = var.get("private", True)
-
confirm = var.get("confirm", False)
encrypt = var.get("encrypt", None)
salt_size = var.get("salt_size", None)
@@ -121,6 +118,7 @@ class PlaybookExecutor:
else:
# make sure the tqm has callbacks loaded
self._tqm.load_callbacks()
+ self._tqm._unreachable_hosts.update(self._unreachable_hosts)
# we are actually running plays
for batch in self._get_serialized_batches(new_play):
@@ -128,16 +126,32 @@ class PlaybookExecutor:
self._tqm.send_callback('v2_playbook_on_play_start', new_play)
self._tqm.send_callback('v2_playbook_on_no_hosts_matched')
break
+
# restrict the inventory to the hosts in the serialized batch
self._inventory.restrict_to_hosts(batch)
# and run it...
result = self._tqm.run(play=play)
- # if the last result wasn't zero, break out of the serial batch loop
- if result != 0:
+
+ # check the number of failures here to see if it is above the maximum
+ # failure percentage allowed, or if any errors are fatal. If either of those
+ # conditions is met, we break out; otherwise we only break out if the entire
+ # batch failed
+ failed_hosts_count = len(self._tqm._failed_hosts) + len(self._tqm._unreachable_hosts)
+ if new_play.any_errors_fatal and failed_hosts_count > 0:
break
+ elif new_play.max_fail_percentage is not None and \
+ failed_hosts_count > int((new_play.max_fail_percentage / 100.0) * len(batch)):
+ break
+ elif len(batch) == failed_hosts_count:
+ break
+
+ # clear the failed hosts dictionaries in the TQM for the next batch
+ self._unreachable_hosts.update(self._tqm._unreachable_hosts)
+ self._tqm.clear_failed_hosts()
- # if the last result wasn't zero, break out of the play loop
- if result != 0:
+ # if the last result wasn't zero or 3 (some hosts were unreachable),
+ # break out of the serial batch loop
+ if result not in (0, 3):
break
i = i + 1 # per play
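Worked example of the corrected batch-abort arithmetic above: with serial batches of 10 hosts and max_fail_percentage: 30, up to 3 failed-or-unreachable hosts are tolerated, and the fourth aborts the remaining batches.

    batch_size = 10
    max_fail_percentage = 30
    allowed = int((max_fail_percentage / 100.0) * batch_size)   # 3

    for failed_hosts_count in range(batch_size + 1):
        if failed_hosts_count > allowed:
            print("abort at %d failures" % failed_hosts_count)  # abort at 4 failures
            break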
diff --git a/lib/ansible/executor/process/result.py b/lib/ansible/executor/process/result.py
index 3ed2a28e80..26ecc2a640 100644
--- a/lib/ansible/executor/process/result.py
+++ b/lib/ansible/executor/process/result.py
@@ -20,6 +20,8 @@ from __future__ import (absolute_import, division, print_function)
__metaclass__ = type
from six.moves import queue
+from six import iteritems, text_type
+
import multiprocessing
import os
import signal
@@ -60,7 +62,7 @@ class ResultProcess(multiprocessing.Process):
super(ResultProcess, self).__init__()
def _send_result(self, result):
- debug(u"sending result: %s" % ([unicode(x) for x in result],))
+ debug(u"sending result: %s" % ([text_type(x) for x in result],))
self._final_q.put(result, block=False)
debug("done sending result")
@@ -156,8 +158,8 @@ class ResultProcess(multiprocessing.Process):
elif 'ansible_facts' in result_item:
# if this task is registering facts, do that now
item = result_item.get('item', None)
- if result._task.action in ('set_fact', 'include_vars'):
- for (key, value) in result_item['ansible_facts'].iteritems():
+ if result._task.action == 'include_vars':
+ for (key, value) in iteritems(result_item['ansible_facts']):
self._send_result(('set_host_var', result._host, result._task, item, key, value))
else:
self._send_result(('set_host_facts', result._host, result._task, item, result_item['ansible_facts']))
diff --git a/lib/ansible/executor/process/worker.py b/lib/ansible/executor/process/worker.py
index 5fb4d6250b..afd02800c3 100644
--- a/lib/ansible/executor/process/worker.py
+++ b/lib/ansible/executor/process/worker.py
@@ -20,6 +20,7 @@ from __future__ import (absolute_import, division, print_function)
__metaclass__ = type
from six.moves import queue
+
import multiprocessing
import os
import signal
@@ -27,6 +28,8 @@ import sys
import time
import traceback
+from jinja2.exceptions import TemplateNotFound
+
# TODO: not needed if we use the cryptography library with its default RNG
# engine
HAS_ATFORK=True
@@ -67,7 +70,7 @@ class WorkerProcess(multiprocessing.Process):
if fileno is not None:
try:
self._new_stdin = os.fdopen(os.dup(fileno))
- except OSError, e:
+ except OSError as e:
# couldn't dupe stdin, most likely because it's
# not a valid file descriptor, so we just rely on
# using the one that was passed in
@@ -127,8 +130,6 @@ class WorkerProcess(multiprocessing.Process):
except queue.Empty:
pass
- except (IOError, EOFError, KeyboardInterrupt):
- break
except AnsibleConnectionFailure:
try:
if task:
@@ -137,16 +138,19 @@ class WorkerProcess(multiprocessing.Process):
except:
# FIXME: most likely an abort, catch those kinds of errors specifically
break
- except Exception, e:
- debug("WORKER EXCEPTION: %s" % e)
- debug("WORKER EXCEPTION: %s" % traceback.format_exc())
- try:
- if task:
- task_result = TaskResult(host, task, dict(failed=True, exception=traceback.format_exc(), stdout=''))
- self._rslt_q.put(task_result, block=False)
- except:
- # FIXME: most likely an abort, catch those kinds of errors specifically
+ except Exception as e:
+ if isinstance(e, (IOError, EOFError, KeyboardInterrupt)) and not isinstance(e, TemplateNotFound):
break
+ else:
+ try:
+ if task:
+ task_result = TaskResult(host, task, dict(failed=True, exception=traceback.format_exc(), stdout=''))
+ self._rslt_q.put(task_result, block=False)
+ except:
+ debug("WORKER EXCEPTION: %s" % e)
+ debug("WORKER EXCEPTION: %s" % traceback.format_exc())
+ # FIXME: most likely an abort, catch those kinds of errors specifically
+ break
debug("WORKER PROCESS EXITING")
diff --git a/lib/ansible/executor/task_executor.py b/lib/ansible/executor/task_executor.py
index 4b372cf4c7..afe96057bf 100644
--- a/lib/ansible/executor/task_executor.py
+++ b/lib/ansible/executor/task_executor.py
@@ -25,14 +25,17 @@ import subprocess
import sys
import time
+from jinja2.runtime import Undefined
+from six import iteritems
+
from ansible import constants as C
-from ansible.errors import AnsibleError, AnsibleParserError, AnsibleUndefinedVariable
+from ansible.errors import AnsibleError, AnsibleParserError, AnsibleUndefinedVariable, AnsibleConnectionFailure
from ansible.playbook.conditional import Conditional
from ansible.playbook.task import Task
-from ansible.plugins import connection_loader, action_loader
from ansible.template import Templar
from ansible.utils.listify import listify_lookup_plugin_terms
from ansible.utils.unicode import to_unicode
+from ansible.vars.unsafe_proxy import UnsafeProxy
from ansible.utils.debug import debug
@@ -121,18 +124,29 @@ class TaskExecutor:
if 'changed' not in res:
res['changed'] = False
+ def _clean_res(res):
+ if isinstance(res, dict):
+ for k in res.keys():
+ res[k] = _clean_res(res[k])
+ elif isinstance(res, list):
+ for idx,item in enumerate(res):
+ res[idx] = _clean_res(item)
+ elif isinstance(res, UnsafeProxy):
+ return res._obj
+ return res
+
debug("dumping result to json")
- result = json.dumps(res)
+ res = _clean_res(res)
debug("done dumping result, returning")
- return result
- except AnsibleError, e:
+ return res
+ except AnsibleError as e:
return dict(failed=True, msg=to_unicode(e, nonstring='simplerepr'))
finally:
try:
self._connection.close()
except AttributeError:
pass
- except Exception, e:
+ except Exception as e:
debug("error closing connection: %s" % to_unicode(e))
def _get_loop_items(self):
@@ -166,6 +180,11 @@ class TaskExecutor:
else:
raise AnsibleError("Unexpected failure in finding the lookup named '%s' in the available lookup plugins" % self._task.loop)
+ if items:
+ from ansible.vars.unsafe_proxy import UnsafeProxy
+ for idx, item in enumerate(items):
+ if item is not None and not isinstance(item, UnsafeProxy):
+ items[idx] = UnsafeProxy(item)
return items
def _run_loop(self, items):
@@ -187,15 +206,18 @@ class TaskExecutor:
try:
tmp_task = self._task.copy()
- except AnsibleParserError, e:
+ tmp_play_context = self._play_context.copy()
+ except AnsibleParserError as e:
results.append(dict(failed=True, msg=str(e)))
continue
- # now we swap the internal task with the copy, execute,
- # and swap them back so we can do the next iteration cleanly
+ # now we swap the internal task and play context with their copies,
+ # execute, and swap them back so we can do the next iteration cleanly
(self._task, tmp_task) = (tmp_task, self._task)
+ (self._play_context, tmp_play_context) = (tmp_play_context, self._play_context)
res = self._execute(variables=task_vars)
(self._task, tmp_task) = (tmp_task, self._task)
+ (self._play_context, tmp_play_context) = (tmp_play_context, self._play_context)
# now update the result with the item info, and append the result
# to the list of results
@@ -241,11 +263,10 @@ class TaskExecutor:
# fields set from the play/task may be based on variables, so we have to
# do the same kind of post validation step on it here before we use it.
- self._play_context.post_validate(templar=templar)
-
- # now that the play context is finalized, we can add 'magic'
- # variables to the variable dictionary
+ # We also add "magic" variables back into the variables dict to make sure
+ # a certain subset of variables exist.
self._play_context.update_vars(variables)
+ self._play_context.post_validate(templar=templar)
# Evaluate the conditional (if any) for this task, which we do before running
# the final task post-validation. We do this before the post validation due to
@@ -262,6 +283,7 @@ class TaskExecutor:
if self._task.action == 'debug' and 'var' in self._task.args:
prev_var = self._task.args.pop('var')
+ original_args = self._task.args.copy()
self._task.post_validate(templar=templar)
if '_variable_params' in self._task.args:
variable_params = self._task.args.pop('_variable_params')
@@ -276,7 +298,7 @@ class TaskExecutor:
# if this task is a TaskInclude, we just return now with a success code so the
# main thread can expand the task list for the given host
if self._task.action == 'include':
- include_variables = self._task.args.copy()
+ include_variables = original_args
include_file = include_variables.get('_raw_params')
del include_variables['_raw_params']
return dict(include=include_file, include_variables=include_variables)
@@ -290,7 +312,7 @@ class TaskExecutor:
# And filter out any fields which were set to default(omit), and got the omit token value
omit_token = variables.get('omit')
if omit_token is not None:
- self._task.args = dict(filter(lambda x: x[1] != omit_token, self._task.args.iteritems()))
+ self._task.args = dict((i[0], i[1]) for i in iteritems(self._task.args) if i[1] != omit_token)
# Read some values from the task, so that we can modify them if need be
retries = self._task.retries
@@ -314,7 +336,10 @@ class TaskExecutor:
result['attempts'] = attempt + 1
debug("running the handler")
- result = self._handler.run(task_vars=variables)
+ try:
+ result = self._handler.run(task_vars=variables)
+ except AnsibleConnectionFailure as e:
+ return dict(unreachable=True, msg=str(e))
debug("handler run complete")
if self._task.async > 0:
@@ -339,20 +364,28 @@ class TaskExecutor:
# create a conditional object to evaluate task conditions
cond = Conditional(loader=self._loader)
- # FIXME: make sure until is mutually exclusive with changed_when/failed_when
- if self._task.until:
- cond.when = self._task.until
- if cond.evaluate_conditional(templar, vars_copy):
- break
- elif (self._task.changed_when or self._task.failed_when) and 'skipped' not in result:
- if self._task.changed_when:
+ def _evaluate_changed_when_result(result):
+ if self._task.changed_when is not None:
cond.when = [ self._task.changed_when ]
result['changed'] = cond.evaluate_conditional(templar, vars_copy)
- if self._task.failed_when:
+
+ def _evaluate_failed_when_result(result):
+ if self._task.failed_when is not None:
cond.when = [ self._task.failed_when ]
failed_when_result = cond.evaluate_conditional(templar, vars_copy)
result['failed_when_result'] = result['failed'] = failed_when_result
- if failed_when_result:
+ return failed_when_result
+ return False
+
+ if self._task.until:
+ cond.when = self._task.until
+ if cond.evaluate_conditional(templar, vars_copy):
+ _evaluate_changed_when_result(result)
+ _evaluate_failed_when_result(result)
+ break
+ elif (self._task.changed_when is not None or self._task.failed_when is not None) and 'skipped' not in result:
+ _evaluate_changed_when_result(result)
+ if _evaluate_failed_when_result(result):
break
elif 'failed' not in result:
if result.get('rc', 0) != 0:
@@ -363,6 +396,9 @@ class TaskExecutor:
if attempt < retries - 1:
time.sleep(delay)
+ else:
+ _evaluate_changed_when_result(result)
+ _evaluate_failed_when_result(result)
# do the final update of the local variables here, for both registered
# values and any facts which may have been created
@@ -400,7 +436,7 @@ class TaskExecutor:
# Because this is an async task, the action handler is async. However,
# we need the 'normal' action handler for the status check, so get it
# now via the action_loader
- normal_handler = action_loader.get(
+ normal_handler = self._shared_loader_obj.action_loader.get(
'normal',
task=async_task,
connection=self._connection,
@@ -434,10 +470,21 @@ class TaskExecutor:
# FIXME: calculation of connection params/auth stuff should be done here
if not self._play_context.remote_addr:
- self._play_context.remote_addr = self._host.ipv4_address
+ self._play_context.remote_addr = self._host.address
if self._task.delegate_to is not None:
- self._compute_delegate(variables)
+ # since we're delegating, we don't want to use interpreter values
+ # which would have been set for the original target host
+ for i in variables.keys():
+ if i.startswith('ansible_') and i.endswith('_interpreter'):
+ del variables[i]
+ # now replace the interpreter values with those that may have come
+ # from the delegated-to host
+ delegated_vars = variables.get('ansible_delegated_vars', dict())
+ if isinstance(delegated_vars, dict):
+ for i in delegated_vars:
+ if i.startswith("ansible_") and i.endswith("_interpreter"):
+ variables[i] = delegated_vars[i]
conn_type = self._play_context.connection
if conn_type == 'smart':
@@ -452,12 +499,12 @@ class TaskExecutor:
try:
cmd = subprocess.Popen(['ssh','-o','ControlPersist'], stdout=subprocess.PIPE, stderr=subprocess.PIPE)
(out, err) = cmd.communicate()
- if "Bad configuration option" in err:
+ if "Bad configuration option" in err or "Usage:" in err:
conn_type = "paramiko"
except OSError:
conn_type = "paramiko"
- connection = connection_loader.get(conn_type, self._play_context, self._new_stdin)
+ connection = self._shared_loader_obj.connection_loader.get(conn_type, self._play_context, self._new_stdin)
if not connection:
raise AnsibleError("the connection plugin '%s' was not found" % conn_type)
@@ -468,7 +515,7 @@ class TaskExecutor:
Returns the correct action plugin to handle the requested task action
'''
- if self._task.action in action_loader:
+ if self._task.action in self._shared_loader_obj.action_loader:
if self._task.async != 0:
raise AnsibleError("async mode is not supported with the %s module" % module_name)
handler_name = self._task.action
@@ -477,7 +524,7 @@ class TaskExecutor:
else:
handler_name = 'async'
- handler = action_loader.get(
+ handler = self._shared_loader_obj.action_loader.get(
handler_name,
task=self._task,
connection=connection,
@@ -492,41 +539,3 @@ class TaskExecutor:
return handler
- def _compute_delegate(self, variables):
-
- # get the vars for the delegate by its name
- try:
- self._display.debug("Delegating to %s" % self._task.delegate_to)
- this_info = variables['hostvars'][self._task.delegate_to]
-
- # get the real ssh_address for the delegate and allow ansible_ssh_host to be templated
- self._play_context.remote_addr = this_info.get('ansible_ssh_host', self._task.delegate_to)
- self._play_context.remote_user = this_info.get('ansible_remote_user', self._task.remote_user)
- self._play_context.port = this_info.get('ansible_ssh_port', self._play_context.port)
- self._play_context.password = this_info.get('ansible_ssh_pass', self._play_context.password)
- self._play_context.private_key_file = this_info.get('ansible_ssh_private_key_file', self._play_context.private_key_file)
- self._play_context.become_pass = this_info.get('ansible_sudo_pass', self._play_context.become_pass)
-
- conn = this_info.get('ansible_connection', self._task.connection)
- if conn:
- self._play_context.connection = conn
-
- except Exception as e:
- # make sure the inject is empty for non-inventory hosts
- this_info = {}
- self._display.debug("Delegate due to: %s" % str(e))
-
- # Last chance to get private_key_file from global variables.
- # this is useful if delegated host is not defined in the inventory
- if self._play_context.private_key_file is None:
- self._play_context.private_key_file = this_info.get('ansible_ssh_private_key_file', None)
-
- if self._play_context.private_key_file is None:
- key = this_info.get('private_key_file', None)
- if key:
- self._play_context.private_key_file = os.path.expanduser(key)
-
- for i in this_info:
- if i.startswith("ansible_") and i.endswith("_interpreter"):
- variables[i] = this_info[i]
-
diff --git a/lib/ansible/executor/task_queue_manager.py b/lib/ansible/executor/task_queue_manager.py
index b023200a51..0b6422218c 100644
--- a/lib/ansible/executor/task_queue_manager.py
+++ b/lib/ansible/executor/task_queue_manager.py
@@ -23,6 +23,7 @@ import multiprocessing
import os
import socket
import sys
+import tempfile
from ansible import constants as C
from ansible.errors import AnsibleError
@@ -86,6 +87,10 @@ class TaskQueueManager:
except ValueError:
fileno = None
+ # A temporary file (opened pre-fork) used by connection
+ # plugins for inter-process locking.
+ self._connection_lockfile = tempfile.TemporaryFile()
+
self._workers = []
for i in range(self._options.forks):
main_q = multiprocessing.Queue()
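Because the temporary file is opened before the worker processes fork, every worker inherits the same underlying file description and can take fcntl locks against it. A minimal sketch of the pattern, assuming a fork-based multiprocessing start method (the Linux default):

    # Sketch: a temporary file opened pre-fork gives all forked workers a
    # common descriptor to lock against (as connection plugins do with the
    # fileno passed through PlayContext).
    import fcntl
    import multiprocessing
    import tempfile

    lockfile = tempfile.TemporaryFile()   # opened BEFORE forking

    def worker(n):
        fcntl.lockf(lockfile, fcntl.LOCK_EX)   # serializes across processes
        try:
            print('worker %d holds the lock' % n)
        finally:
            fcntl.lockf(lockfile, fcntl.LOCK_UN)

    procs = [multiprocessing.Process(target=worker, args=(i,)) for i in range(4)]
    for p in procs:
        p.start()
    for p in procs:
        p.join()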
@@ -176,7 +181,7 @@ class TaskQueueManager:
new_play = play.copy()
new_play.post_validate(templar)
- play_context = PlayContext(new_play, self._options, self.passwords)
+ play_context = PlayContext(new_play, self._options, self.passwords, self._connection_lockfile.fileno())
for callback_plugin in self._callback_plugins:
if hasattr(callback_plugin, 'set_play_context'):
callback_plugin.set_play_context(play_context)
@@ -192,7 +197,13 @@ class TaskQueueManager:
raise AnsibleError("Invalid play strategy specified: %s" % new_play.strategy, obj=play._ds)
# build the iterator
- iterator = PlayIterator(inventory=self._inventory, play=new_play, play_context=play_context, all_vars=all_vars)
+ iterator = PlayIterator(
+ inventory=self._inventory,
+ play=new_play,
+ play_context=play_context,
+ variable_manager=self._variable_manager,
+ all_vars=all_vars,
+ )
# and run the play using the strategy
return strategy.run(iterator, play_context)
@@ -210,6 +221,9 @@ class TaskQueueManager:
main_q.close()
worker_prc.terminate()
+ def clear_failed_hosts(self):
+ self._failed_hosts = dict()
+
def get_inventory(self):
return self._inventory
diff --git a/lib/ansible/galaxy/__init__.py b/lib/ansible/galaxy/__init__.py
index 3b89dac847..90caed6e22 100644
--- a/lib/ansible/galaxy/__init__.py
+++ b/lib/ansible/galaxy/__init__.py
@@ -22,6 +22,8 @@
import os
+from six import string_types
+
from ansible.errors import AnsibleError
from ansible.utils.display import Display
@@ -40,9 +42,9 @@ class Galaxy(object):
self.display = display
self.options = options
- self.roles_path = getattr(self.options, 'roles_path', None)
- if self.roles_path:
- self.roles_path = os.path.expanduser(self.roles_path)
+ roles_paths = getattr(self.options, 'roles_path', [])
+ if isinstance(roles_paths, string_types):
+ self.roles_paths = [os.path.expanduser(roles_path) for roles_path in roles_paths.split(os.pathsep)]
self.roles = {}
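With this change roles_path behaves like a PATH-style setting holding several directories. A small sketch of the expansion, assuming a ':'-separated value on POSIX (the real code uses six.string_types for Python 2 compatibility):

    # Sketch: expand a PATH-style roles_path setting into a list of dirs.
    import os

    def expand_roles_path(roles_path):
        if isinstance(roles_path, str):
            return [os.path.expanduser(p) for p in roles_path.split(os.pathsep)]
        return list(roles_path)  # already a list of paths

    print(expand_roles_path('~/ansible/roles:/etc/ansible/roles'))
    # e.g. ['/home/user/ansible/roles', '/etc/ansible/roles']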
diff --git a/lib/ansible/galaxy/api.py b/lib/ansible/galaxy/api.py
index b6f6c3bca2..43d378e0a7 100644
--- a/lib/ansible/galaxy/api.py
+++ b/lib/ansible/galaxy/api.py
@@ -21,10 +21,11 @@
#
########################################################################
import json
-from urllib2 import urlopen, quote as urlquote, HTTPError
+from urllib2 import quote as urlquote, HTTPError
from urlparse import urlparse
from ansible.errors import AnsibleError, AnsibleOptionsError
+from ansible.module_utils.urls import open_url
class GalaxyAPI(object):
''' This class is meant to be used as a API client for an Ansible Galaxy server '''
@@ -61,7 +62,7 @@ class GalaxyAPI(object):
return 'v1'
try:
- data = json.load(urlopen(api_server))
+ data = json.load(open_url(api_server))
return data.get("current_version", 'v1')
except Exception as e:
# TODO: report error
@@ -85,7 +86,7 @@ class GalaxyAPI(object):
url = '%s/roles/?owner__username=%s&name=%s' % (self.baseurl, user_name, role_name)
self.galaxy.display.vvvv("- %s" % (url))
try:
- data = json.load(urlopen(url))
+ data = json.load(open_url(url))
if len(data["results"]) != 0:
return data["results"][0]
except:
@@ -102,13 +103,13 @@ class GalaxyAPI(object):
try:
url = '%s/roles/%d/%s/?page_size=50' % (self.baseurl, int(role_id), related)
- data = json.load(urlopen(url))
+ data = json.load(open_url(url))
results = data['results']
done = (data.get('next', None) == None)
while not done:
url = '%s%s' % (self.baseurl, data['next'])
self.galaxy.display.display(url)
- data = json.load(urlopen(url))
+ data = json.load(open_url(url))
results += data['results']
done = (data.get('next', None) == None)
return results
@@ -122,7 +123,7 @@ class GalaxyAPI(object):
try:
url = '%s/%s/?page_size' % (self.baseurl, what)
- data = json.load(urlopen(url))
+ data = json.load(open_url(url))
if "results" in data:
results = data['results']
else:
@@ -133,7 +134,7 @@ class GalaxyAPI(object):
while not done:
url = '%s%s' % (self.baseurl, data['next'])
self.galaxy.display.display(url)
- data = json.load(urlopen(url))
+ data = json.load(open_url(url))
results += data['results']
done = (data.get('next', None) == None)
return results
@@ -165,7 +166,7 @@ class GalaxyAPI(object):
self.galaxy.display.debug("Executing query: %s" % search_url)
try:
- data = json.load(urlopen(search_url))
+ data = json.load(open_url(search_url))
except HTTPError as e:
raise AnsibleError("Unsuccessful request to server: %s" % str(e))
diff --git a/lib/ansible/galaxy/data/metadata_template.j2 b/lib/ansible/galaxy/data/metadata_template.j2
index c6b6fd9dbd..9b4e922ff4 100644
--- a/lib/ansible/galaxy/data/metadata_template.j2
+++ b/lib/ansible/galaxy/data/metadata_template.j2
@@ -20,7 +20,7 @@ galaxy_info:
# platform on this list, let us know and we'll get it added!
#
#platforms:
- {%- for platform,versions in platforms.iteritems() %}
+ {%- for platform,versions in platforms.items() %}
#- name: {{ platform }}
# versions:
# - all
diff --git a/lib/ansible/galaxy/role.py b/lib/ansible/galaxy/role.py
index 3a58ccb6d1..ea6debb813 100644
--- a/lib/ansible/galaxy/role.py
+++ b/lib/ansible/galaxy/role.py
@@ -26,10 +26,16 @@ import tarfile
import tempfile
import yaml
from shutil import rmtree
-from urllib2 import urlopen
from ansible import constants as C
from ansible.errors import AnsibleError
+from ansible.module_utils.urls import open_url
+
+try:
+ from __main__ import display
+except ImportError:
+ from ansible.utils.display import Display
+ display = Display()
class GalaxyRole(object):
@@ -39,68 +45,32 @@ class GalaxyRole(object):
ROLE_DIRS = ('defaults','files','handlers','meta','tasks','templates','vars')
- def __init__(self, galaxy, name, src=None, version=None, scm=None):
+ def __init__(self, galaxy, name, src=None, version=None, scm=None, role_path=None):
self._metadata = None
self._install_info = None
self.options = galaxy.options
- self.display = galaxy.display
self.name = name
self.version = version
self.src = src or name
self.scm = scm
- self.path = (os.path.join(galaxy.roles_path, self.name))
-
- def fetch_from_scm_archive(self):
-
- # this can be configured to prevent unwanted SCMS but cannot add new ones unless the code is also updated
- if scm not in self.scms:
- self.display.display("The %s scm is not currently supported" % scm)
- return False
-
- tempdir = tempfile.mkdtemp()
- clone_cmd = [scm, 'clone', role_url, self.name]
- with open('/dev/null', 'w') as devnull:
- try:
- self.display.display("- executing: %s" % " ".join(clone_cmd))
- popen = subprocess.Popen(clone_cmd, cwd=tempdir, stdout=devnull, stderr=devnull)
- except:
- raise AnsibleError("error executing: %s" % " ".join(clone_cmd))
- rc = popen.wait()
- if rc != 0:
- self.display.display("- command %s failed" % ' '.join(clone_cmd))
- self.display.display(" in directory %s" % tempdir)
- return False
-
- temp_file = tempfile.NamedTemporaryFile(delete=False, suffix='.tar')
- if scm == 'hg':
- archive_cmd = ['hg', 'archive', '--prefix', "%s/" % self.name]
- if role_version:
- archive_cmd.extend(['-r', role_version])
- archive_cmd.append(temp_file.name)
- if scm == 'git':
- archive_cmd = ['git', 'archive', '--prefix=%s/' % self.name, '--output=%s' % temp_file.name]
- if role_version:
- archive_cmd.append(role_version)
+ if role_path is not None:
+ self.path = role_path
+ else:
+ for path in galaxy.roles_paths:
+ role_path = os.path.join(path, self.name)
+ if os.path.exists(role_path):
+ self.path = role_path
+ break
else:
- archive_cmd.append('HEAD')
-
- with open('/dev/null', 'w') as devnull:
- self.display.display("- executing: %s" % " ".join(archive_cmd))
- popen = subprocess.Popen(archive_cmd, cwd=os.path.join(tempdir, self.name),
- stderr=devnull, stdout=devnull)
- rc = popen.wait()
- if rc != 0:
- self.display.display("- command %s failed" % ' '.join(archive_cmd))
- self.display.display(" in directory %s" % tempdir)
- return False
-
- rmtree(tempdir, ignore_errors=True)
+ # use the first path by default
+ self.path = os.path.join(galaxy.roles_paths[0], self.name)
- return temp_file.name
+ def __eq__(self, other):
+ return self.name == other.name
@property
def metadata(self):
@@ -114,7 +84,7 @@ class GalaxyRole(object):
f = open(meta_path, 'r')
self._metadata = yaml.safe_load(f)
except:
- self.display.vvvvv("Unable to load metadata for %s" % self.name)
+ display.vvvvv("Unable to load metadata for %s" % self.name)
return False
finally:
f.close()
@@ -135,7 +105,7 @@ class GalaxyRole(object):
f = open(info_path, 'r')
self._install_info = yaml.safe_load(f)
except:
- self.display.vvvvv("Unable to load Galaxy install info for %s" % self.name)
+ display.vvvvv("Unable to load Galaxy install info for %s" % self.name)
return False
finally:
f.close()
@@ -189,10 +159,10 @@ class GalaxyRole(object):
archive_url = 'https://github.com/%s/%s/archive/%s.tar.gz' % (role_data["github_user"], role_data["github_repo"], self.version)
else:
archive_url = self.src
- self.display.display("- downloading role from %s" % archive_url)
+ display.display("- downloading role from %s" % archive_url)
try:
- url_file = urlopen(archive_url)
+ url_file = open_url(archive_url)
temp_file = tempfile.NamedTemporaryFile(delete=False)
data = url_file.read()
while data:
@@ -203,7 +173,7 @@ class GalaxyRole(object):
except:
# TODO: better urllib2 error handling for error
# messages that are more exact
- self.display.error("failed to download the file.")
+ display.error("failed to download the file.")
return False
@@ -212,7 +182,7 @@ class GalaxyRole(object):
# to the specified (or default) roles directory
if not tarfile.is_tarfile(role_filename):
- self.display.error("the file downloaded was not a tar.gz")
+ display.error("the file downloaded was not a tar.gz")
return False
else:
if role_filename.endswith('.gz'):
@@ -228,32 +198,32 @@ class GalaxyRole(object):
meta_file = member
break
if not meta_file:
- self.display.error("this role does not appear to have a meta/main.yml file.")
+ display.error("this role does not appear to have a meta/main.yml file.")
return False
else:
try:
self._metadata = yaml.safe_load(role_tar_file.extractfile(meta_file))
except:
- self.display.error("this role does not appear to have a valid meta/main.yml file.")
+ display.error("this role does not appear to have a valid meta/main.yml file.")
return False
# we strip off the top-level directory for all of the files contained within
# the tar file here, since the default is 'github_repo-target', and change it
# to the specified role's name
- self.display.display("- extracting %s to %s" % (self.name, self.path))
+ display.display("- extracting %s to %s" % (self.name, self.path))
try:
if os.path.exists(self.path):
if not os.path.isdir(self.path):
- self.display.error("the specified roles path exists and is not a directory.")
+ display.error("the specified roles path exists and is not a directory.")
return False
elif not getattr(self.options, "force", False):
- self.display.error("the specified role %s appears to already exist. Use --force to replace it." % self.name)
+ display.error("the specified role %s appears to already exist. Use --force to replace it." % self.name)
return False
else:
# using --force, remove the old path
if not self.remove():
- self.display.error("%s doesn't appear to contain a role." % self.path)
- self.display.error(" please remove this directory manually if you really want to put the role here.")
+ display.error("%s doesn't appear to contain a role." % self.path)
+ display.error(" please remove this directory manually if you really want to put the role here.")
return False
else:
os.makedirs(self.path)
@@ -275,11 +245,11 @@ class GalaxyRole(object):
# write out the install info file for later use
self._write_galaxy_install_info()
except OSError as e:
- self.display.error("Could not update files in %s: %s" % (self.path, str(e)))
+ display.error("Could not update files in %s: %s" % (self.path, str(e)))
return False
# return the parsed yaml metadata
- self.display.display("- %s was installed successfully" % self.name)
+ display.display("- %s was installed successfully" % self.name)
return True
@property
@@ -312,3 +282,49 @@ class GalaxyRole(object):
trailing_path = trailing_path.split(',')[0]
return trailing_path
+ @staticmethod
+ def scm_archive_role(scm, role_url, role_version, role_name):
+ if scm not in ['hg', 'git']:
+ display.display("- scm %s is not currently supported" % scm)
+ return False
+ tempdir = tempfile.mkdtemp()
+ clone_cmd = [scm, 'clone', role_url, role_name]
+ with open('/dev/null', 'w') as devnull:
+ try:
+ display.display("- executing: %s" % " ".join(clone_cmd))
+ popen = subprocess.Popen(clone_cmd, cwd=tempdir, stdout=devnull, stderr=devnull)
+ except:
+ raise AnsibleError("error executing: %s" % " ".join(clone_cmd))
+ rc = popen.wait()
+ if rc != 0:
+ display.display("- command %s failed" % ' '.join(clone_cmd))
+ display.display(" in directory %s" % tempdir)
+ return False
+
+ temp_file = tempfile.NamedTemporaryFile(delete=False, suffix='.tar')
+ if scm == 'hg':
+ archive_cmd = ['hg', 'archive', '--prefix', "%s/" % role_name]
+ if role_version:
+ archive_cmd.extend(['-r', role_version])
+ archive_cmd.append(temp_file.name)
+ if scm == 'git':
+ archive_cmd = ['git', 'archive', '--prefix=%s/' % role_name, '--output=%s' % temp_file.name]
+ if role_version:
+ archive_cmd.append(role_version)
+ else:
+ archive_cmd.append('HEAD')
+
+ with open('/dev/null', 'w') as devnull:
+ display.display("- executing: %s" % " ".join(archive_cmd))
+ popen = subprocess.Popen(archive_cmd, cwd=os.path.join(tempdir, role_name),
+ stderr=devnull, stdout=devnull)
+ rc = popen.wait()
+ if rc != 0:
+ display.display("- command %s failed" % ' '.join(archive_cmd))
+ display.display(" in directory %s" % tempdir)
+ return False
+
+ rmtree(tempdir, ignore_errors=True)
+
+ return temp_file.name
+
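A hedged usage sketch of the relocated static method; the repository URL and version are placeholders. It clones the repository into a temporary directory, archives it with a role-name prefix, and returns the path of a temporary .tar file, or False on failure:

    # Hypothetical usage of GalaxyRole.scm_archive_role (URL is an example).
    from ansible.galaxy.role import GalaxyRole

    tar_path = GalaxyRole.scm_archive_role(
        scm='git',
        role_url='https://github.com/example/some-role.git',
        role_version='v1.0.0',
        role_name='some_role',
    )
    if tar_path:
        print('role archived to %s' % tar_path)
    else:
        print('archive failed')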
diff --git a/lib/ansible/inventory/__init__.py b/lib/ansible/inventory/__init__.py
index e4ff5132d8..cd787e6876 100644
--- a/lib/ansible/inventory/__init__.py
+++ b/lib/ansible/inventory/__init__.py
@@ -24,6 +24,7 @@ import os
import sys
import re
import stat
+import itertools
from ansible import constants as C
from ansible.errors import AnsibleError
@@ -33,6 +34,7 @@ from ansible.inventory.group import Group
from ansible.inventory.host import Host
from ansible.plugins import vars_loader
from ansible.utils.vars import combine_vars
+from ansible.parsing.utils.addresses import parse_address
try:
from __main__ import display
@@ -45,10 +47,6 @@ class Inventory(object):
Host inventory for ansible.
"""
- #__slots__ = [ 'host_list', 'groups', '_restriction', '_subset',
- # 'parser', '_vars_per_host', '_vars_per_group', '_hosts_cache', '_groups_list',
- # '_pattern_cache', '_vault_password', '_vars_plugins', '_playbook_basedir']
-
def __init__(self, loader, variable_manager, host_list=C.DEFAULT_HOST_LIST):
# the host file file, or script path, or list of hosts
@@ -63,16 +61,14 @@ class Inventory(object):
self._vars_per_host = {}
self._vars_per_group = {}
self._hosts_cache = {}
- self._groups_list = {}
self._pattern_cache = {}
self._vars_plugins = []
- self._groups_cache = {}
# to be set by calling set_playbook_basedir by playbook code
self._playbook_basedir = None
# the inventory object holds a list of groups
- self.groups = []
+ self.groups = {}
# a list of host(names) to contain current inquiries to
self._restriction = None
@@ -87,63 +83,56 @@ class Inventory(object):
host_list = host_list.split(",")
host_list = [ h for h in host_list if h and h.strip() ]
+ self.parser = None
+
+ # Always create the 'all' and 'ungrouped' groups, even if host_list is
+    # empty: in this case we will subsequently add the implicit 'localhost' to it.
+
+ ungrouped = Group(name='ungrouped')
+ all = Group('all')
+ all.add_child_group(ungrouped)
+
+ self.groups = dict(all=all, ungrouped=ungrouped)
+
if host_list is None:
- self.parser = None
+ pass
elif isinstance(host_list, list):
- self.parser = None
- all = Group('all')
- self.groups = [ all ]
- ipv6_re = re.compile('\[([a-f:A-F0-9]*[%[0-z]+]?)\](?::(\d+))?')
- for x in host_list:
- m = ipv6_re.match(x)
- if m:
- all.add_host(Host(m.groups()[0], m.groups()[1]))
- else:
- if ":" in x:
- tokens = x.rsplit(":", 1)
- # if there is ':' in the address, then this is an ipv6
- if ':' in tokens[0]:
- all.add_host(Host(x))
- else:
- all.add_host(Host(tokens[0], tokens[1]))
- else:
- all.add_host(Host(x))
- elif os.path.exists(host_list):
+ for h in host_list:
+ (host, port) = parse_address(h, allow_ranges=False)
+ all.add_host(Host(host, port))
+ elif self._loader.path_exists(host_list):
#TODO: switch this to a plugin loader and a 'condition' per plugin on which it should be tried, restoring 'inventory plugins'
- if os.path.isdir(host_list):
+ if self._loader.is_directory(host_list):
# Ensure basedir is inside the directory
host_list = os.path.join(self.host_list, "")
- self.parser = InventoryDirectory(loader=self._loader, filename=host_list)
+ self.parser = InventoryDirectory(loader=self._loader, groups=self.groups, filename=host_list)
else:
- self.parser = get_file_parser(host_list, self._loader)
+ self.parser = get_file_parser(host_list, self.groups, self._loader)
vars_loader.add_directory(self.basedir(), with_subdir=True)
- if self.parser:
- self.groups = self.parser.groups.values()
- else:
+ if not self.parser:
# should never happen, but JIC
raise AnsibleError("Unable to parse %s as an inventory source" % host_list)
- self._vars_plugins = [ x for x in vars_loader.all(self) ]
+ self._vars_plugins = [ x for x in vars_loader.all(self) ]
# FIXME: shouldn't be required, since the group/host vars file
# management will be done in VariableManager
# get group vars from group_vars/ files and vars plugins
- for group in self.groups:
+ for group in self.groups.values():
group.vars = combine_vars(group.vars, self.get_group_variables(group.name))
# get host vars from host_vars/ files and vars plugins
for host in self.get_hosts():
host.vars = combine_vars(host.vars, self.get_host_variables(host.name))
-
def _match(self, str, pattern_str):
try:
if pattern_str.startswith('~'):
return re.search(pattern_str[1:], str)
else:
return fnmatch.fnmatch(str, pattern_str)
- except Exception, e:
+ except Exception as e:
raise AnsibleError('invalid host pattern: %s' % pattern_str)
def _match_list(self, items, item_attr, pattern_str):
@@ -153,7 +142,7 @@ class Inventory(object):
pattern = re.compile(fnmatch.translate(pattern_str))
else:
pattern = re.compile(pattern_str[1:])
- except Exception, e:
+ except Exception as e:
raise AnsibleError('invalid host pattern: %s' % pattern_str)
for item in items:
@@ -161,52 +150,80 @@ class Inventory(object):
results.append(item)
return results
- def _split_pattern(self, pattern):
- """
- takes e.g. "webservers[0:5]:dbservers:others"
- and returns ["webservers[0:5]", "dbservers", "others"]
- """
-
- term = re.compile(
- r'''(?: # We want to match something comprising:
- [^:\[\]] # (anything other than ':', '[', or ']'
- | # ...or...
- \[[^\]]*\] # a single complete bracketed expression)
- )* # repeated as many times as possible
- ''', re.X
- )
-
- return [x for x in term.findall(pattern) if x]
-
- def get_hosts(self, pattern="all"):
+ def get_hosts(self, pattern="all", ignore_limits_and_restrictions=False):
"""
Takes a pattern or list of patterns and returns a list of matching
inventory host names, taking into account any active restrictions
or applied subsets
"""
- # Enumerate all hosts matching the given pattern (which may be
- # either a list of patterns or a string like 'pat1:pat2').
- if isinstance(pattern, list):
- pattern = ':'.join(pattern)
-
- if ';' in pattern or ',' in pattern:
- display.deprecated("Use ':' instead of ',' or ';' to separate host patterns", version=2.0, removed=True)
-
patterns = self._split_pattern(pattern)
hosts = self._evaluate_patterns(patterns)
- # exclude hosts not in a subset, if defined
- if self._subset:
- subset = self._evaluate_patterns(self._subset)
- hosts = [ h for h in hosts if h in subset ]
+ # mainly useful for hostvars[host] access
+ if not ignore_limits_and_restrictions:
+ # exclude hosts not in a subset, if defined
+ if self._subset:
+ subset = self._evaluate_patterns(self._subset)
+ hosts = [ h for h in hosts if h in subset ]
- # exclude hosts mentioned in any restriction (ex: failed hosts)
- if self._restriction is not None:
- hosts = [ h for h in hosts if h in self._restriction ]
+ # exclude hosts mentioned in any restriction (ex: failed hosts)
+ if self._restriction is not None:
+ hosts = [ h for h in hosts if h in self._restriction ]
return hosts
+ def _split_pattern(self, pattern):
+ """
+ Takes a string containing host patterns separated by commas (or a list
+ thereof) and returns a list of single patterns (which may not contain
+ commas). Whitespace is ignored.
+
+ Also accepts ':' as a separator for backwards compatibility, but it is
+ not recommended due to the conflict with IPv6 addresses and host ranges.
+
+ Example: 'a,b[1], c[2:3] , d' -> ['a', 'b[1]', 'c[2:3]', 'd']
+ """
+
+ if isinstance(pattern, list):
+ return list(itertools.chain(*map(self._split_pattern, pattern)))
+
+ if ';' in pattern:
+ display.deprecated("Use ',' instead of ':' or ';' to separate host patterns", version=2.0, removed=True)
+
+ # If it's got commas in it, we'll treat it as a straightforward
+ # comma-separated list of patterns.
+
+ elif ',' in pattern:
+ patterns = re.split('\s*,\s*', pattern)
+
+ # If it doesn't, it could still be a single pattern. This accounts for
+ # non-separator uses of colons: IPv6 addresses and [x:y] host ranges.
+
+ else:
+ (base, port) = parse_address(pattern, allow_ranges=True)
+ if base:
+ patterns = [pattern]
+
+ # The only other case we accept is a ':'-separated list of patterns.
+ # This mishandles IPv6 addresses, and is retained only for backwards
+ # compatibility.
+
+ else:
+ patterns = re.findall(
+ r'''(?: # We want to match something comprising:
+ [^\s:\[\]] # (anything other than whitespace or ':[]'
+ | # ...or...
+ \[[^\]]*\] # a single complete bracketed expression)
+ )+ # occurring once or more
+ ''', pattern, re.X
+ )
+
+ if len(patterns) > 1:
+ display.deprecated("Use ',' instead of ':' or ';' to separate host patterns", version=2.0)
+
+ return [p.strip() for p in patterns]
+
def _evaluate_patterns(self, patterns):
"""
Takes a list of patterns and returns a list of matching host names,
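A simplified standalone sketch of the new splitting rules (the real method additionally consults parse_address() before falling back to the legacy ':' form, so bare IPv6 addresses survive):

    # Simplified sketch of the new pattern-splitting rules: comma is the
    # primary separator; the legacy ':' form keeps bracketed expressions
    # like c[2:3] intact but mishandles bare IPv6 addresses.
    import re

    def split_pattern(pattern):
        if isinstance(pattern, list):
            out = []
            for p in pattern:
                out.extend(split_pattern(p))
            return out
        if ',' in pattern:
            patterns = re.split(r'\s*,\s*', pattern)
        else:
            patterns = re.findall(r'(?:[^\s:\[\]]|\[[^\]]*\])+', pattern)
        return [p.strip() for p in patterns]

    print(split_pattern('a,b[1], c[2:3] , d'))    # ['a', 'b[1]', 'c[2:3]', 'd']
    print(split_pattern('webservers:dbservers'))  # legacy: ['webservers', 'dbservers']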
@@ -253,110 +270,136 @@ class Inventory(object):
def _match_one_pattern(self, pattern):
"""
- Takes a single pattern (i.e., not "p1:p2") and returns a list of
- matching hosts names. Does not take negatives or intersections
- into account.
+ Takes a single pattern and returns a list of matching host names.
+ Ignores intersection (&) and exclusion (!) specifiers.
+
+ The pattern may be:
+
+ 1. A regex starting with ~, e.g. '~[abc]*'
+ 2. A shell glob pattern with ?/*/[chars]/[!chars], e.g. 'foo*'
+ 3. An ordinary word that matches itself only, e.g. 'foo'
+
+ The pattern is matched using the following rules:
+
+ 1. If it's 'all', it matches all hosts in all groups.
+ 2. Otherwise, for each known group name:
+ (a) if it matches the group name, the results include all hosts
+ in the group or any of its children.
+ (b) otherwise, if it matches any hosts in the group, the results
+ include the matching hosts.
+
+ This means that 'foo*' may match one or more groups (thus including all
+ hosts therein) but also hosts in other groups.
+
+ The built-in groups 'all' and 'ungrouped' are special. No pattern can
+ match these group names (though 'all' behaves as though it matches, as
+ described above). The word 'ungrouped' can match a host of that name,
+ and patterns like 'ungr*' and 'al*' can match either hosts or groups
+ other than all and ungrouped.
+
+ If the pattern matches one or more group names according to these rules,
+ it may have an optional range suffix to select a subset of the results.
+ This is allowed only if the pattern is not a regex, i.e. '~foo[1]' does
+ not work (the [1] is interpreted as part of the regex), but 'foo*[1]'
+ would work if 'foo*' matched the name of one or more groups.
+
+ Duplicate matches are always eliminated from the results.
"""
- if pattern in self._pattern_cache:
- return self._pattern_cache[pattern]
+ if pattern.startswith("&") or pattern.startswith("!"):
+ pattern = pattern[1:]
- (name, enumeration_details) = self._enumeration_info(pattern)
- hpat = self._hosts_in_unenumerated_pattern(name)
- result = self._apply_ranges(pattern, hpat)
- self._pattern_cache[pattern] = result
- return result
+ if pattern not in self._pattern_cache:
+ (expr, slice) = self._split_subscript(pattern)
+ hosts = self._enumerate_matches(expr)
+ try:
+ hosts = self._apply_subscript(hosts, slice)
+ except IndexError:
+ raise AnsibleError("No hosts matched the subscripted pattern '%s'" % pattern)
+ self._pattern_cache[pattern] = hosts
- def _enumeration_info(self, pattern):
+ return self._pattern_cache[pattern]
+
+ def _split_subscript(self, pattern):
"""
- returns (pattern, limits) taking a regular pattern and finding out
- which parts of it correspond to start/stop offsets. limits is
- a tuple of (start, stop) or None
+ Takes a pattern, checks if it has a subscript, and returns the pattern
+ without the subscript and a (start,end) tuple representing the given
+ subscript (or None if there is no subscript).
+
+ Validates that the subscript is in the right syntax, but doesn't make
+ sure the actual indices make sense in context.
"""
# Do not parse regexes for enumeration info
if pattern.startswith('~'):
return (pattern, None)
- # The regex used to match on the range, which can be [x] or [x-y].
- pattern_re = re.compile("^(.*)\[([-]?[0-9]+)(?:(?:-)([0-9]+))?\](.*)$")
- m = pattern_re.match(pattern)
+ # We want a pattern followed by an integer or range subscript.
+ # (We can't be more restrictive about the expression because the
+ # fnmatch semantics permit [\[:\]] to occur.)
+
+ pattern_with_subscript = re.compile(
+ r'''^
+ (.+) # A pattern expression ending with...
+ \[(?: # A [subscript] expression comprising:
+ (-?[0-9]+)| # A single positive or negative number
+ ([0-9]+)([:-]) # Or an x:y or x: range.
+ ([0-9]*)
+ )\]
+ $
+ ''', re.X
+ )
+
+ subscript = None
+ m = pattern_with_subscript.match(pattern)
if m:
- (target, first, last, rest) = m.groups()
- first = int(first)
- if last:
- if first < 0:
- raise AnsibleError("invalid range: negative indices cannot be used as the first item in a range")
- last = int(last)
+ (pattern, idx, start, sep, end) = m.groups()
+ if idx:
+ subscript = (int(idx), None)
else:
- last = first
- return (target, (first, last))
- else:
- return (pattern, None)
+ if not end:
+ end = -1
+ subscript = (int(start), int(end))
+ if sep == '-':
+ display.deprecated("Use [x:y] inclusive subscripts instead of [x-y]", version=2.0, removed=True)
- def _apply_ranges(self, pat, hosts):
+ return (pattern, subscript)
+
+ def _apply_subscript(self, hosts, subscript):
"""
- given a pattern like foo, that matches hosts, return all of hosts
- given a pattern like foo[0:5], where foo matches hosts, return the first 6 hosts
+ Takes a list of hosts and a (start,end) tuple and returns the subset of
+ hosts based on the subscript (which may be None to return all hosts).
"""
- # If there are no hosts to select from, just return the
- # empty set. This prevents trying to do selections on an empty set.
- # issue#6258
- if not hosts:
- return hosts
-
- (loose_pattern, limits) = self._enumeration_info(pat)
- if not limits:
+ if not hosts or not subscript:
return hosts
- (left, right) = limits
+ (start, end) = subscript
- if left == '':
- left = 0
- if right == '':
- right = 0
- left=int(left)
- right=int(right)
- try:
- if left != right:
- return hosts[left:right]
- else:
- return [ hosts[left] ]
- except IndexError:
- raise AnsibleError("no hosts matching the pattern '%s' were found" % pat)
-
- def _create_implicit_localhost(self, pattern):
- new_host = Host(pattern)
- new_host.set_variable("ansible_python_interpreter", sys.executable)
- new_host.set_variable("ansible_connection", "local")
- new_host.ipv4_address = '127.0.0.1'
-
- ungrouped = self.get_group("ungrouped")
- if ungrouped is None:
- self.add_group(Group('ungrouped'))
- ungrouped = self.get_group('ungrouped')
- self.get_group('all').add_child_group(ungrouped)
- ungrouped.add_host(new_host)
- return new_host
+ if end:
+ if end == -1:
+ end = len(hosts)-1
+ return hosts[start:end+1]
+ else:
+ return [ hosts[start] ]
- def _hosts_in_unenumerated_pattern(self, pattern):
- """ Get all host names matching the pattern """
+ def _enumerate_matches(self, pattern):
+ """
+ Returns a list of host names matching the given pattern according to the
+ rules explained above in _match_one_pattern.
+ """
results = []
hosts = []
hostnames = set()
- # ignore any negative checks here, this is handled elsewhere
- pattern = pattern.replace("!","").replace("&", "")
-
def __append_host_to_results(host):
if host.name not in hostnames:
hostnames.add(host.name)
results.append(host)
groups = self.get_groups()
- for group in groups:
+ for group in groups.values():
if pattern == 'all':
for host in group.get_hosts():
__append_host_to_results(host)
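The subscript regex is worth seeing in isolation; this standalone sketch mirrors the semantics above, where (n, None) selects a single host and an empty range end means "through the last host":

    # Sketch of the subscript split: 'foo[0]' -> ('foo', (0, None)),
    # 'foo[1:3]' -> ('foo', (1, 3)), 'foo[1:]' -> ('foo', (1, -1)).
    import re

    PATTERN_WITH_SUBSCRIPT = re.compile(
        r'''^
            (.+)                    # pattern expression ending with...
            \[(?:                   # a [subscript] comprising:
                (-?[0-9]+)|         # a single (possibly negative) index
                ([0-9]+)([:-])      # or an x:y / x: / legacy x-y range
                ([0-9]*)
            )\]
            $
        ''', re.X
    )

    def split_subscript(pattern):
        m = PATTERN_WITH_SUBSCRIPT.match(pattern)
        if not m:
            return (pattern, None)
        (pattern, idx, start, sep, end) = m.groups()
        if idx:
            return (pattern, (int(idx), None))
        return (pattern, (int(start), int(end) if end else -1))

    print(split_subscript('webservers[0]'))    # ('webservers', (0, None))
    print(split_subscript('webservers[1:3]'))  # ('webservers', (1, 3))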
@@ -374,6 +417,14 @@ class Inventory(object):
results.append(new_host)
return results
+ def _create_implicit_localhost(self, pattern):
+ new_host = Host(pattern)
+ new_host.set_variable("ansible_python_interpreter", sys.executable)
+ new_host.set_variable("ansible_connection", "local")
+ new_host.address = '127.0.0.1'
+ self.get_group("ungrouped").add_host(new_host)
+ return new_host
+
def clear_pattern_cache(self):
''' called exclusively by the add_host plugin to allow patterns to be recalculated '''
self._pattern_cache = {}
@@ -384,19 +435,6 @@ class Inventory(object):
else:
return []
- def groups_list(self):
- if not self._groups_list:
- groups = {}
- for g in self.groups:
- groups[g.name] = [h.name for h in g.get_hosts()]
- ancestors = g.get_ancestors()
- for a in ancestors:
- if a.name not in groups:
- groups[a.name] = [h.name for h in a.get_hosts()]
- self._groups_list = groups
- self._groups_cache = {}
- return self._groups_list
-
def get_groups(self):
return self.groups
@@ -415,7 +453,7 @@ class Inventory(object):
return host
return self._create_implicit_localhost(hostname)
matching_host = None
- for group in self.groups:
+ for group in self.groups.values():
for host in group.get_hosts():
if hostname == host.name:
matching_host = host
@@ -423,11 +461,7 @@ class Inventory(object):
return matching_host
def get_group(self, groupname):
- if not self._groups_cache:
- for group in self.groups:
- self._groups_cache[group.name] = group
-
- return self._groups_cache.get(groupname)
+ return self.groups[groupname]
def get_group_variables(self, groupname, update_cached=False, vault_password=None):
if groupname not in self._vars_per_group or update_cached:
@@ -457,7 +491,7 @@ class Inventory(object):
host = self.get_host(hostname)
if not host:
- raise Exception("host not found: %s" % hostname)
+ raise AnsibleError("no vars as host is not in inventory: %s" % hostname)
return host.get_vars()
def get_host_variables(self, hostname, update_cached=False, vault_password=None):
@@ -470,7 +504,7 @@ class Inventory(object):
host = self.get_host(hostname)
if host is None:
- raise AnsibleError("host not found: %s" % hostname)
+ raise AnsibleError("no host vars as host is not in inventory: %s" % hostname)
vars = {}
@@ -498,10 +532,8 @@ class Inventory(object):
return vars
def add_group(self, group):
- if group.name not in self.groups_list():
- self.groups.append(group)
- self._groups_list = None # invalidate internal cache
- self._groups_cache = {}
+ if group.name not in self.groups:
+ self.groups[group.name] = group
else:
raise AnsibleError("group already in inventory: %s" % group.name)
@@ -515,7 +547,7 @@ class Inventory(object):
return result
def list_groups(self):
- return sorted([ g.name for g in self.groups ], key=lambda x: x)
+ return sorted(self.groups.keys(), key=lambda x: x)
def restrict_to_hosts(self, restriction):
"""
@@ -539,9 +571,6 @@ class Inventory(object):
if subset_pattern is None:
self._subset = None
else:
- if ';' in subset_pattern or ',' in subset_pattern:
- display.deprecated("Use ':' instead of ',' or ';' to separate host patterns", version=2.0, removed=True)
-
subset_patterns = self._split_pattern(subset_pattern)
results = []
# allow Unix style @filename data
@@ -562,21 +591,23 @@ class Inventory(object):
""" did inventory come from a file? """
if not isinstance(self.host_list, basestring):
return False
- return os.path.exists(self.host_list)
+ return self._loader.path_exists(self.host_list)
def basedir(self):
""" if inventory came from a file, what's the directory? """
dname = self.host_list
if not self.is_file():
dname = None
- elif os.path.isdir(self.host_list):
+ elif self._loader.is_directory(self.host_list):
dname = self.host_list
else:
dname = os.path.dirname(self.host_list)
if dname is None or dname == '' or dname == '.':
cwd = os.getcwd()
dname = cwd
- return os.path.abspath(dname)
+ if dname:
+ dname = os.path.abspath(dname)
+ return dname
def src(self):
""" if inventory came from a file, what's the directory and file name? """
@@ -602,7 +633,7 @@ class Inventory(object):
# we do this shouldn't be too much of an issue. Still, this should
# be fixed at some point to allow a "first load" to touch all of the
# directories, then later runs only touch the new basedir specified
- for group in self.groups:
+ for group in self.groups.values():
#group.vars = combine_vars(group.vars, self.get_group_vars(group, new_pb_basedir=True))
group.vars = combine_vars(group.vars, self.get_group_vars(group))
# get host vars from host_vars/ files
@@ -677,8 +708,6 @@ class Inventory(object):
self._hosts_cache = {}
self._vars_per_host = {}
self._vars_per_group = {}
- self._groups_list = {}
- self._groups_cache = {}
- self.groups = []
+ self.groups = {}
self.parse_inventory(self.host_list)
diff --git a/lib/ansible/inventory/dir.py b/lib/ansible/inventory/dir.py
index e456a950d4..9394696d7f 100644
--- a/lib/ansible/inventory/dir.py
+++ b/lib/ansible/inventory/dir.py
@@ -29,13 +29,12 @@ from ansible.inventory.host import Host
from ansible.inventory.group import Group
from ansible.utils.vars import combine_vars
-from ansible.utils.path import is_executable
from ansible.inventory.ini import InventoryParser as InventoryINIParser
from ansible.inventory.script import InventoryScript
__all__ = ['get_file_parser']
-def get_file_parser(hostsfile, loader):
+def get_file_parser(hostsfile, groups, loader):
# check to see if the specified file starts with a
# shebang (#!/), so if an error is raised by the parser
# class we can show a more apropos error
@@ -54,9 +53,9 @@ def get_file_parser(hostsfile, loader):
except:
pass
- if is_executable(hostsfile):
+ if loader.is_executable(hostsfile):
try:
- parser = InventoryScript(loader=loader, filename=hostsfile)
+ parser = InventoryScript(loader=loader, groups=groups, filename=hostsfile)
processed = True
except Exception as e:
myerr.append("The file %s is marked as executable, but failed to execute correctly. " % hostsfile + \
@@ -65,10 +64,10 @@ def get_file_parser(hostsfile, loader):
if not processed:
try:
- parser = InventoryINIParser(filename=hostsfile)
+ parser = InventoryINIParser(loader=loader, groups=groups, filename=hostsfile)
processed = True
except Exception as e:
- if shebang_present and not is_executable(hostsfile):
+ if shebang_present and not loader.is_executable(hostsfile):
myerr.append("The file %s looks like it should be an executable inventory script, but is not marked executable. " % hostsfile + \
"Perhaps you want to correct this with `chmod +x %s`?" % hostsfile)
else:
@@ -82,13 +81,16 @@ def get_file_parser(hostsfile, loader):
class InventoryDirectory(object):
''' Host inventory parser for ansible using a directory of inventories. '''
- def __init__(self, loader, filename=C.DEFAULT_HOST_LIST):
+ def __init__(self, loader, groups=None, filename=C.DEFAULT_HOST_LIST):
+ if groups is None:
+ groups = dict()
+
self.names = os.listdir(filename)
self.names.sort()
self.directory = filename
self.parsers = []
self.hosts = {}
- self.groups = {}
+ self.groups = groups
self._loader = loader
@@ -107,7 +109,7 @@ class InventoryDirectory(object):
if os.path.isdir(fullpath):
parser = InventoryDirectory(loader=loader, filename=fullpath)
else:
- parser = get_file_parser(fullpath, loader)
+ parser = get_file_parser(fullpath, self.groups, loader)
if parser is None:
#FIXME: needs to use display
import warnings
diff --git a/lib/ansible/inventory/expand_hosts.py b/lib/ansible/inventory/expand_hosts.py
index 0d63ba08bb..7e1c127c27 100644
--- a/lib/ansible/inventory/expand_hosts.py
+++ b/lib/ansible/inventory/expand_hosts.py
@@ -44,7 +44,7 @@ def detect_range(line = None):
Returns True if the given line contains a pattern, else False.
'''
- if 0 <= line.find("[") < line.find(":") < line.find("]"):
+ if '[' in line:
return True
else:
return False
diff --git a/lib/ansible/inventory/host.py b/lib/ansible/inventory/host.py
index 43a96d54bf..77a0b21f50 100644
--- a/lib/ansible/inventory/host.py
+++ b/lib/ansible/inventory/host.py
@@ -46,8 +46,7 @@ class Host:
return dict(
name=self.name,
vars=self.vars.copy(),
- ipv4_address=self.ipv4_address,
- ipv6_address=self.ipv6_address,
+ address=self.address,
gathered_facts=self._gathered_facts,
groups=groups,
)
@@ -55,10 +54,9 @@ class Host:
def deserialize(self, data):
self.__init__()
- self.name = data.get('name')
- self.vars = data.get('vars', dict())
- self.ipv4_address = data.get('ipv4_address', '')
- self.ipv6_address = data.get('ipv6_address', '')
+ self.name = data.get('name')
+ self.vars = data.get('vars', dict())
+ self.address = data.get('address', '')
groups = data.get('groups', [])
for group_data in groups:
@@ -72,11 +70,10 @@ class Host:
self.vars = {}
self.groups = []
- self.ipv4_address = name
- self.ipv6_address = name
+ self.address = name
if port:
- self.set_variable('ansible_ssh_port', int(port))
+ self.set_variable('ansible_port', int(port))
self._gathered_facts = False
@@ -114,12 +111,15 @@ class Host:
def get_vars(self):
results = {}
- groups = self.get_groups()
- for group in sorted(groups, key=lambda g: g.depth):
- results = combine_vars(results, group.get_vars())
results = combine_vars(results, self.vars)
results['inventory_hostname'] = self.name
results['inventory_hostname_short'] = self.name.split('.')[0]
- results['group_names'] = sorted([ g.name for g in groups if g.name != 'all'])
+ results['group_names'] = sorted([ g.name for g in self.get_groups() if g.name != 'all'])
return results
+ def get_group_vars(self):
+ results = {}
+ groups = self.get_groups()
+ for group in sorted(groups, key=lambda g: g.depth):
+ results = combine_vars(results, group.get_vars())
+ return results
diff --git a/lib/ansible/inventory/ini.py b/lib/ansible/inventory/ini.py
index 2769632ef2..a2a90c76cf 100644
--- a/lib/ansible/inventory/ini.py
+++ b/lib/ansible/inventory/ini.py
@@ -29,7 +29,8 @@ from ansible.inventory.host import Host
from ansible.inventory.group import Group
from ansible.inventory.expand_hosts import detect_range
from ansible.inventory.expand_hosts import expand_hostname_range
-from ansible.utils.unicode import to_unicode
+from ansible.parsing.utils.addresses import parse_address
+from ansible.utils.unicode import to_unicode, to_bytes
class InventoryParser(object):
"""
@@ -37,7 +38,11 @@ class InventoryParser(object):
with their associated hosts and variable settings.
"""
- def __init__(self, filename=C.DEFAULT_HOST_LIST):
+ def __init__(self, loader, groups=None, filename=C.DEFAULT_HOST_LIST):
+ if groups is None:
+ groups = dict()
+
+ self._loader = loader
self.filename = filename
# Start with an empty host list and the default 'all' and
@@ -45,25 +50,19 @@ class InventoryParser(object):
self.hosts = {}
self.patterns = {}
- self.groups = dict(
- all = Group(name='all'),
- ungrouped = Group(name='ungrouped')
- )
+ self.groups = groups
# Read in the hosts, groups, and variables defined in the
# inventory file.
- with open(filename) as fh:
- self._parse(fh.readlines())
-
- # Finally, add all top-level groups (including 'ungrouped') as
- # children of 'all'.
-
- for group in self.groups.values():
- if group.depth == 0 and group.name != 'all':
- self.groups['all'].add_child_group(group)
+ if loader:
+ (data, private) = loader._get_file_contents(filename)
+ else:
+ with open(filename) as fh:
+ data = to_unicode(fh.read())
+ data = data.split('\n')
- # Note: we could discard self.hosts after this point.
+ self._parse(data)
def _raise_error(self, message):
raise AnsibleError("%s:%d: " % (self.filename, self.lineno) + message)
@@ -178,6 +177,15 @@ class InventoryParser(object):
elif decl['state'] == 'children':
raise AnsibleError("%s:%d: Section [%s:children] includes undefined group: %s" % (self.filename, decl['line'], decl['parent'], decl['name']))
+ # Finally, add all top-level groups as children of 'all'.
+ # We exclude ungrouped here because it was already added as a child of
+ # 'all' at the time it was created.
+
+ for group in self.groups.values():
+ if group.depth == 0 and group.name not in ('all', 'ungrouped'):
+ self.groups['all'].add_child_group(group)
+
+
def _parse_group_name(self, line):
'''
Takes a single line and tries to parse it as a group name. Returns the
@@ -223,11 +231,13 @@ class InventoryParser(object):
# beta:2345 user=admin # we'll tell shlex
# gamma sudo=True user=root # to ignore comments
+ line = to_bytes(line)
try:
tokens = shlex.split(line, comments=True)
except ValueError as e:
self._raise_error("Error parsing host definition '%s': %s" % (varstring, e))
+ tokens = [ to_unicode(t) for t in tokens]
(hostnames, port) = self._expand_hostpattern(tokens[0])
hosts = self._Hosts(hostnames, port)
@@ -245,8 +255,8 @@ class InventoryParser(object):
for h in hosts:
for k in variables:
h.set_variable(k, variables[k])
- if k == 'ansible_ssh_host':
- h.ipv4_address = variables[k]
+ if k in ['ansible_host', 'ansible_ssh_host']:
+ h.address = variables[k]
return hosts
@@ -256,30 +266,20 @@ class InventoryParser(object):
optional port number that applies to all of them.
'''
- # Is a port number specified?
- #
- # This may be a mandatory :NN suffix on any square-bracketed expression
- # (IPv6 address, IPv4 address, host name, host pattern), or an optional
- # :NN suffix on an IPv4 address, host name, or pattern. IPv6 addresses
- # must be in square brackets if a port is specified.
-
- port = None
+ # Can the given hostpattern be parsed as a host with an optional port
+ # specification?
- for type in ['bracketed_hostport', 'hostport']:
- m = self.patterns[type].match(hostpattern)
- if m:
- (hostpattern, port) = m.groups()
- continue
+ (pattern, port) = parse_address(hostpattern, allow_ranges=True)
+ if not pattern:
+ self._raise_error("Can't parse '%s' as host[:port]" % hostpattern)
- # Now we're left with just the pattern, which results in a list of one
- # or more hostnames, depending on whether it contains any [x:y] ranges.
- #
- # FIXME: We could be more strict here about validation.
+        # Once we have separated the pattern, we expand it into a list of one or
+ # more hostnames, depending on whether it contains any [x:y] ranges.
- if detect_range(hostpattern):
- hostnames = expand_hostname_range(hostpattern)
+ if detect_range(pattern):
+ hostnames = expand_hostname_range(pattern)
else:
- hostnames = [hostpattern]
+ hostnames = [pattern]
return (hostnames, port)
@@ -306,8 +306,8 @@ class InventoryParser(object):
@staticmethod
def _parse_value(v):
'''
- Does something with something and returns something. Not for mere
- mortals such as myself to interpret.
+ Attempt to transform the string value from an ini file into a basic python object
+ (int, dict, list, unicode string, etc).
'''
if "#" not in v:
try:
@@ -365,29 +365,3 @@ class InventoryParser(object):
$ # end of the line
''', re.X
)
-
- # The following patterns match the various ways in which a port number
- # may be specified on an IPv6 address, IPv4 address, hostname, or host
- # pattern. All of the above may be enclosed in square brackets with a
- # mandatory :NN suffix; or all but the first may be given without any
- # brackets but with an :NN suffix.
-
- self.patterns['bracketed_hostport'] = re.compile(
- r'''^
- \[(.+)\] # [host identifier]
- :([0-9]+) # :port number
- $
- ''', re.X
- )
-
- self.patterns['hostport'] = re.compile(
- r'''^
- ((?: # We want to match:
- [^:\[\]] # (a non-range character
- | # ...or...
- \[[^\]]*\] # a complete bracketed expression)
- )*) # repeated as many times as possible
- :([0-9]+) # followed by a port number
- $
- ''', re.X
- )
diff --git a/lib/ansible/inventory/script.py b/lib/ansible/inventory/script.py
index 91549d78fb..3a59992afd 100644
--- a/lib/ansible/inventory/script.py
+++ b/lib/ansible/inventory/script.py
@@ -22,9 +22,10 @@ __metaclass__ = type
import os
import subprocess
import sys
-
from collections import Mapping
+from six import iteritems
+
from ansible import constants as C
from ansible.errors import *
from ansible.inventory.host import Host
@@ -35,9 +36,12 @@ from ansible.module_utils.basic import json_dict_bytes_to_unicode
class InventoryScript:
''' Host inventory parser for ansible using external inventory scripts. '''
- def __init__(self, loader, filename=C.DEFAULT_HOST_LIST):
+ def __init__(self, loader, groups=None, filename=C.DEFAULT_HOST_LIST):
+ if groups is None:
+ groups = dict()
self._loader = loader
+ self.groups = groups
# Support inventory scripts that are not prefixed with some
# path information but happen to be in the current working
@@ -46,7 +50,7 @@ class InventoryScript:
cmd = [ self.filename, "--list" ]
try:
sp = subprocess.Popen(cmd, stdout=subprocess.PIPE, stderr=subprocess.PIPE)
- except OSError, e:
+ except OSError as e:
raise AnsibleError("problem running %s (%s)" % (' '.join(cmd), e))
(stdout, stderr) = sp.communicate()
@@ -56,7 +60,7 @@ class InventoryScript:
self.data = stdout
# see comment about _meta below
self.host_vars_from_top = None
- self.groups = self._parse(stderr)
+ self._parse(stderr)
def _parse(self, err):
@@ -76,11 +80,7 @@ class InventoryScript:
self.raw = json_dict_bytes_to_unicode(self.raw)
- all = Group('all')
- groups = dict(all=all)
- group = None
-
-
+ group = None
for (group_name, data) in self.raw.items():
# in Ansible 1.3 and later, a "_meta" subelement may contain
@@ -94,10 +94,10 @@ class InventoryScript:
self.host_vars_from_top = data['hostvars']
continue
- if group_name != all.name:
- group = groups[group_name] = Group(group_name)
- else:
- group = all
+ if group_name not in self.groups:
+ group = self.groups[group_name] = Group(group_name)
+
+ group = self.groups[group_name]
host = None
if not isinstance(data, dict):
@@ -122,11 +122,8 @@ class InventoryScript:
raise AnsibleError("You defined a group \"%s\" with bad "
"data for variables:\n %s" % (group_name, data))
- for k, v in data['vars'].iteritems():
- if group.name == all.name:
- all.set_variable(k, v)
- else:
- group.set_variable(k, v)
+ for k, v in iteritems(data['vars']):
+ group.set_variable(k, v)
# Separate loop to ensure all groups are defined
for (group_name, data) in self.raw.items():
@@ -134,14 +131,16 @@ class InventoryScript:
continue
if isinstance(data, dict) and 'children' in data:
for child_name in data['children']:
- if child_name in groups:
- groups[group_name].add_child_group(groups[child_name])
+ if child_name in self.groups:
+ self.groups[group_name].add_child_group(self.groups[child_name])
- for group in groups.values():
- if group.depth == 0 and group.name != 'all':
- all.add_child_group(group)
+ # Finally, add all top-level groups as children of 'all'.
+ # We exclude ungrouped here because it was already added as a child of
+ # 'all' at the time it was created.
- return groups
+ for group in self.groups.values():
+ if group.depth == 0 and group.name not in ('all', 'ungrouped'):
+ self.groups['all'].add_child_group(group)
def get_host_variables(self, host):
""" Runs <script> --host <hostname> to determine additional host variables """
@@ -153,7 +152,7 @@ class InventoryScript:
cmd = [self.filename, "--host", host.name]
try:
sp = subprocess.Popen(cmd, stdout=subprocess.PIPE, stderr=subprocess.PIPE)
- except OSError, e:
+ except OSError as e:
raise AnsibleError("problem running %s (%s)" % (' '.join(cmd), e))
(out, err) = sp.communicate()
if out.strip() == '':
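For context, a minimal dynamic inventory script that produces output this parser accepts: a group-to-data mapping on --list, with an optional _meta block carrying per-host variables (all names here are illustrative):

    #!/usr/bin/env python
    # Minimal example of a dynamic inventory script compatible with the
    # parser above.
    import json
    import sys

    INVENTORY = {
        'webservers': {
            'hosts': ['web1.example.com', 'web2.example.com'],
            'vars': {'http_port': 80},
            'children': [],
        },
        '_meta': {
            'hostvars': {
                'web1.example.com': {'ansible_host': '10.0.0.1'},
            },
        },
    }

    if __name__ == '__main__':
        if len(sys.argv) > 1 and sys.argv[1] == '--host':
            print(json.dumps({}))   # host vars already served via _meta
        else:
            print(json.dumps(INVENTORY))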
diff --git a/lib/ansible/module_utils/cloudstack.py b/lib/ansible/module_utils/cloudstack.py
index 5bfd3d2db9..b4d7cd4db1 100644
--- a/lib/ansible/module_utils/cloudstack.py
+++ b/lib/ansible/module_utils/cloudstack.py
@@ -94,7 +94,8 @@ class AnsibleCloudStack(object):
method=api_http_method
)
else:
- self.cs = CloudStack(**read_config())
+ api_region = self.module.params.get('api_region', 'cloudstack')
+ self.cs = CloudStack(**read_config(api_region))
def get_or_fallback(self, key=None, fallback_key=None):
diff --git a/lib/ansible/module_utils/ec2.py b/lib/ansible/module_utils/ec2.py
index 4ba172cebe..f264ef8fdf 100644
--- a/lib/ansible/module_utils/ec2.py
+++ b/lib/ansible/module_utils/ec2.py
@@ -25,6 +25,13 @@
# INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
# LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE
# USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
+import os
+
+try:
+ import boto3
+ HAS_BOTO3 = True
+except:
+ HAS_BOTO3 = False
try:
from distutils.version import LooseVersion
@@ -37,14 +44,15 @@ def boto3_conn(module, conn_type=None, resource=None, region=None, endpoint=None
if conn_type not in ['both', 'resource', 'client']:
module.fail_json(msg='There is an issue in the code of the module. You must specify either both, resource or client to the conn_type parameter in the boto3_conn function call')
- resource = boto3.session.Session().resource(resource, region_name=region, endpoint_url=endpoint, **params)
- client = resource.meta.client
-
if conn_type == 'resource':
+ resource = boto3.session.Session().resource(resource, region_name=region, endpoint_url=endpoint, **params)
return resource
elif conn_type == 'client':
+ client = boto3.session.Session().client(resource, region_name=region, endpoint_url=endpoint, **params)
return client
else:
+ resource = boto3.session.Session().resource(resource, region_name=region, endpoint_url=endpoint, **params)
+ client = boto3.session.Session().client(resource, region_name=region, endpoint_url=endpoint, **params)
return client, resource
def aws_common_argument_spec():
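Each branch now constructs only the session object it actually returns, instead of always building a resource and deriving the client from it. What the branches boil down to in plain boto3 (region and endpoint values are placeholders):

    # Sketch of what each conn_type branch constructs, using boto3 directly.
    import boto3

    region = 'us-east-1'
    endpoint = None  # or a custom endpoint URL

    session = boto3.session.Session()
    client = session.client('ec2', region_name=region, endpoint_url=endpoint)
    resource = session.resource('ec2', region_name=region, endpoint_url=endpoint)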
@@ -72,7 +80,7 @@ def boto_supports_profile_name():
return hasattr(boto.ec2.EC2Connection, 'profile_name')
-def get_aws_connection_info(module, boto3=False):
+def get_aws_connection_info(module):
# Check module args for credentials, then check environment vars
# access_key
@@ -133,7 +141,7 @@ def get_aws_connection_info(module, boto3=False):
# in case security_token came in as empty string
security_token = None
- if boto3:
+ if HAS_BOTO3:
boto_params = dict(aws_access_key_id=access_key,
aws_secret_access_key=secret_key,
aws_session_token=security_token)
diff --git a/lib/ansible/module_utils/f5.py b/lib/ansible/module_utils/f5.py
index 097a6370af..e04e6b2f1e 100644
--- a/lib/ansible/module_utils/f5.py
+++ b/lib/ansible/module_utils/f5.py
@@ -1,3 +1,6 @@
+#!/usr/bin/python
+# -*- coding: utf-8 -*-
+
# This code is part of Ansible, but is an independent component.
# This particular file snippet, and this file snippet only, is BSD licensed.
# Modules you write using this snippet, which is embedded dynamically by Ansible
diff --git a/lib/ansible/module_utils/facts.py b/lib/ansible/module_utils/facts.py
index 064b5e1292..3d479c68c3 100644
--- a/lib/ansible/module_utils/facts.py
+++ b/lib/ansible/module_utils/facts.py
@@ -97,6 +97,7 @@ class Facts(object):
# For the most part, we assume that platform.dist() will tell the truth.
# This is the fallback to handle unknowns or exceptions
OSDIST_LIST = ( ('/etc/oracle-release', 'OracleLinux'),
+ ('/etc/slackware-version', 'Slackware'),
('/etc/redhat-release', 'RedHat'),
('/etc/vmware-release', 'VMwareESX'),
('/etc/openwrt_release', 'OpenWrt'),
@@ -255,9 +256,9 @@ class Facts(object):
RedHat = 'RedHat', Fedora = 'RedHat', CentOS = 'RedHat', Scientific = 'RedHat',
SLC = 'RedHat', Ascendos = 'RedHat', CloudLinux = 'RedHat', PSBM = 'RedHat',
OracleLinux = 'RedHat', OVS = 'RedHat', OEL = 'RedHat', Amazon = 'RedHat',
- XenServer = 'RedHat', Ubuntu = 'Debian', Debian = 'Debian', Raspbian = 'Debian', SLES = 'Suse',
+ XenServer = 'RedHat', Ubuntu = 'Debian', Debian = 'Debian', Raspbian = 'Debian', Slackware = 'Slackware', SLES = 'Suse',
SLED = 'Suse', openSUSE = 'Suse', SuSE = 'Suse', Gentoo = 'Gentoo', Funtoo = 'Gentoo',
- Archlinux = 'Archlinux', Mandriva = 'Mandrake', Mandrake = 'Mandrake',
+ Archlinux = 'Archlinux', Manjaro = 'Archlinux', Mandriva = 'Mandrake', Mandrake = 'Mandrake',
Solaris = 'Solaris', Nexenta = 'Solaris', OmniOS = 'Solaris', OpenIndiana = 'Solaris',
SmartOS = 'Solaris', AIX = 'AIX', Alpine = 'Alpine', MacOSX = 'Darwin',
FreeBSD = 'FreeBSD', HPUX = 'HP-UX'
@@ -314,6 +315,21 @@ class Facts(object):
# Once we determine the value is one of these distros
# we trust the values are always correct
break
+ elif name == 'Archlinux':
+ data = get_file_content(path)
+ if 'Arch Linux' in data:
+ self.facts['distribution'] = name
+ else:
+ self.facts['distribution'] = data.split()[0]
+ break
+ elif name == 'Slackware':
+ data = get_file_content(path)
+ if 'Slackware' in data:
+ self.facts['distribution'] = name
+ version = re.findall('\w+[.]\w+', data)
+ if version:
+ self.facts['distribution_version'] = version[0]
+ break
elif name == 'OracleLinux':
data = get_file_content(path)
if 'Oracle Linux' in data:
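The Slackware branch extracts the version with a simple digits.digits regex; a standalone check of that extraction:

    # Sketch of the Slackware version extraction used above.
    import re

    data = 'Slackware 14.1'   # typical /etc/slackware-version content
    if 'Slackware' in data:
        version = re.findall(r'\w+[.]\w+', data)
        print(version[0] if version else 'unknown')   # -> 14.1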
@@ -2339,7 +2355,7 @@ class AIXNetwork(GenericBsdIfconfigNetwork, Network):
return interface['v4'], interface['v6']
# AIX 'ifconfig -a' does not have three words in the interface line
- def get_interfaces_info(self, ifconfig_path, ifconfig_options):
+ def get_interfaces_info(self, ifconfig_path, ifconfig_options='-a'):
interfaces = {}
current_if = {}
ips = dict(
diff --git a/lib/ansible/module_utils/known_hosts.py b/lib/ansible/module_utils/known_hosts.py
index 99dbf2c03a..900b9fe80a 100644
--- a/lib/ansible/module_utils/known_hosts.py
+++ b/lib/ansible/module_utils/known_hosts.py
@@ -40,25 +40,38 @@ def add_git_host_key(module, url, accept_hostkey=True, create_dir=True):
""" idempotently add a git url hostkey """
- fqdn = get_fqdn(url)
-
- if fqdn:
- known_host = check_hostkey(module, fqdn)
- if not known_host:
- if accept_hostkey:
- rc, out, err = add_host_key(module, fqdn, create_dir=create_dir)
- if rc != 0:
- module.fail_json(msg="failed to add %s hostkey: %s" % (fqdn, out + err))
- else:
- module.fail_json(msg="%s has an unknown hostkey. Set accept_hostkey to True or manually add the hostkey prior to running the git module" % fqdn)
+ if is_ssh_url(url):
+
+ fqdn = get_fqdn(url)
+
+ if fqdn:
+ known_host = check_hostkey(module, fqdn)
+ if not known_host:
+ if accept_hostkey:
+ rc, out, err = add_host_key(module, fqdn, create_dir=create_dir)
+ if rc != 0:
+ module.fail_json(msg="failed to add %s hostkey: %s" % (fqdn, out + err))
+ else:
+ module.fail_json(msg="%s has an unknown hostkey. Set accept_hostkey to True or manually add the hostkey prior to running the git module" % fqdn)
+
+def is_ssh_url(url):
+
+ """ check if url is ssh """
+
+ if "@" in url and "://" not in url:
+ return True
+ for scheme in "ssh://", "git+ssh://", "ssh+git://":
+ if url.startswith(scheme):
+ return True
+ return False
def get_fqdn(repo_url):
- """ chop the hostname out of a giturl """
+ """ chop the hostname out of a url """
result = None
if "@" in repo_url and "://" not in repo_url:
- # most likely a git@ or ssh+git@ type URL
+ # most likely a user@host:path or user@host/path type URL
repo_url = repo_url.split("@", 1)[1]
if ":" in repo_url:
repo_url = repo_url.split(":")[0]
@@ -69,9 +82,6 @@ def get_fqdn(repo_url):
elif "://" in repo_url:
# this should be something we can parse with urlparse
parts = urlparse.urlparse(repo_url)
- if 'ssh' not in parts[0] and 'git' not in parts[0]:
- # don't try and scan a hostname that's not ssh
- return None
# parts[1] will be empty on python2.4 on ssh:// or git:// urls, so
# ensure we actually have a parts[1] before continuing.
if parts[1] != '':
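Taken together, is_ssh_url() now gates the hostkey handling and get_fqdn() scans any URL shape it is handed (a minimal sketch of the observable behaviour; the repository URLs are illustrative):

    from ansible.module_utils.known_hosts import is_ssh_url, get_fqdn

    is_ssh_url('git@github.com:ansible/ansible.git')    # True, scp-like syntax
    is_ssh_url('ssh://git@example.com/repo.git')        # True, explicit scheme
    is_ssh_url('https://github.com/ansible/ansible')    # False, no key to add

    get_fqdn('git@github.com:ansible/ansible.git')      # 'github.com'
    get_fqdn('https://github.com/ansible/ansible')      # 'github.com' (now scanned too)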
diff --git a/lib/ansible/module_utils/openstack.py b/lib/ansible/module_utils/openstack.py
index 4069449144..934d51a271 100644
--- a/lib/ansible/module_utils/openstack.py
+++ b/lib/ansible/module_utils/openstack.py
@@ -74,13 +74,13 @@ def openstack_full_argument_spec(**kwargs):
spec = dict(
cloud=dict(default=None),
auth_type=dict(default=None),
- auth=dict(default=None),
+ auth=dict(default=None, no_log=True),
region_name=dict(default=None),
availability_zone=dict(default=None),
verify=dict(default=True, aliases=['validate_certs']),
cacert=dict(default=None),
cert=dict(default=None),
- key=dict(default=None),
+ key=dict(default=None, no_log=True),
wait=dict(default=True, type='bool'),
timeout=dict(default=180, type='int'),
api_timeout=dict(default=None, type='int'),
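no_log=True tells AnsibleModule to censor those values in syslog and in the module invocation echoed back with results. The same pattern in a standalone argument spec (a minimal sketch; the parameter names are illustrative):

    from ansible.module_utils.basic import AnsibleModule

    module = AnsibleModule(argument_spec=dict(
        username=dict(required=True),
        api_key=dict(required=True, no_log=True),   # masked in logs and output
    ))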
diff --git a/lib/ansible/module_utils/powershell.ps1 b/lib/ansible/module_utils/powershell.ps1
index ee65916216..5756d360f1 100644
--- a/lib/ansible/module_utils/powershell.ps1
+++ b/lib/ansible/module_utils/powershell.ps1
@@ -26,6 +26,8 @@
# USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
#
+Set-StrictMode -Version Latest
+
# Ansible v2 will insert the module arguments below as a string containing
# JSON; assign them to an environment variable and redefine $args so existing
# modules will continue to work.
@@ -47,7 +49,14 @@ Function Set-Attr($obj, $name, $value)
$obj = New-Object psobject
}
- $obj | Add-Member -Force -MemberType NoteProperty -Name $name -Value $value
+ Try
+ {
+ $obj.$name = $value
+ }
+ Catch
+ {
+ $obj | Add-Member -Force -MemberType NoteProperty -Name $name -Value $value
+ }
}
# Helper function to convert a powershell object to JSON to echo it, exiting
@@ -78,7 +87,7 @@ Function Fail-Json($obj, $message = $null)
$obj = New-Object psobject
}
# If the first args is undefined or not an object, make it an object
- ElseIf (-not $obj.GetType -or $obj.GetType().Name -ne "PSCustomObject")
+ ElseIf (-not $obj -or -not $obj.GetType -or $obj.GetType().Name -ne "PSCustomObject")
{
$obj = New-Object psobject
}
@@ -94,24 +103,32 @@ Function Fail-Json($obj, $message = $null)
# slightly more pythonic
# Example: $attr = Get-Attr $response "code" -default "1"
#Note that if you use the failifempty option, you do need to specify resultobject as well.
-Function Get-Attr($obj, $name, $default = $null,$resultobj, $failifempty=$false, $emptyattributefailmessage)
+Function Get-Attr($obj, $name, $default = $null, $resultobj, $failifempty=$false, $emptyattributefailmessage)
{
- # Check if the provided Member $name exists in $obj and return it or the
- # default
- If ($obj.$name.GetType)
+ # Check if the provided Member $name exists in $obj and return it or the default.
+ Try
{
+ If (-not $obj.$name.GetType)
+ {
+ throw
+ }
$obj.$name
}
- Elseif($failifempty -eq $false)
- {
- $default
- }
- else
+ Catch
{
- if (!$emptyattributefailmessage) {$emptyattributefailmessage = "Missing required argument: $name"}
- Fail-Json -obj $resultobj -message $emptyattributefailmessage
+ If ($failifempty -eq $false)
+ {
+ $default
+ }
+ Else
+ {
+ If (!$emptyattributefailmessage)
+ {
+ $emptyattributefailmessage = "Missing required argument: $name"
+ }
+ Fail-Json -obj $resultobj -message $emptyattributefailmessage
+ }
}
- return
}
# Helper filter/pipeline function to convert a value to boolean following current
diff --git a/lib/ansible/module_utils/vca.py b/lib/ansible/module_utils/vca.py
new file mode 100644
index 0000000000..2d4cd39d5b
--- /dev/null
+++ b/lib/ansible/module_utils/vca.py
@@ -0,0 +1,303 @@
+#
+# This file is part of Ansible
+#
+# Ansible is free software: you can redistribute it and/or modify
+# it under the terms of the GNU General Public License as published by
+# the Free Software Foundation, either version 3 of the License, or
+# (at your option) any later version.
+#
+# Ansible is distributed in the hope that it will be useful,
+# but WITHOUT ANY WARRANTY; without even the implied warranty of
+# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+# GNU General Public License for more details.
+#
+# You should have received a copy of the GNU General Public License
+# along with Ansible. If not, see <http://www.gnu.org/licenses/>.
+
+import os
+
+from ansible.module_utils.basic import AnsibleModule
+
+try:
+ from pyvcloud.vcloudair import VCA
+ HAS_PYVCLOUD = True
+except ImportError:
+ HAS_PYVCLOUD = False
+
+SERVICE_MAP = {'vca': 'ondemand', 'vchs': 'subscription', 'vcd': 'vcd'}
+LOGIN_HOST = {'vca': 'vca.vmware.com', 'vchs': 'vchs.vmware.com'}
+
+DEFAULT_SERVICE_TYPE = 'vca'
+DEFAULT_VERSION = '5.7'
+
+class VcaError(Exception):
+
+ def __init__(self, msg, **kwargs):
+ self.kwargs = kwargs
+ super(VcaError, self).__init__(msg)
+
+def vca_argument_spec():
+ return dict(
+ username=dict(),
+ password=dict(),
+ org=dict(),
+ service_id=dict(),
+ instance_id=dict(),
+ host=dict(),
+ api_version=dict(default=DEFAULT_VERSION),
+ service_type=dict(default=DEFAULT_SERVICE_TYPE, choices=SERVICE_MAP.keys()),
+ vdc_name=dict(),
+ verify_certs=dict(type='bool', default=True),  # assumed default; read via params.get('verify_certs') below
+ gateway_name=dict(default='gateway')
+ )
+
+class VcaAnsibleModule(AnsibleModule):
+
+ def __init__(self, *args, **kwargs):
+ argument_spec = vca_argument_spec()
+ argument_spec.update(kwargs.get('argument_spec', dict()))
+ kwargs['argument_spec'] = argument_spec
+
+ super(VcaAnsibleModule, self).__init__(*args, **kwargs)
+
+ if not HAS_PYVCLOUD:
+ self.fail("python module pyvcloud is required for this module")
+
+ self._vca = self.create_instance()
+ self.login()
+
+ self._gateway = None
+ self._vdc = None
+
+ @property
+ def vca(self):
+ return self._vca
+
+ @property
+ def gateway(self):
+ if self._gateway is not None:
+ return self._gateway
+ vdc_name = self.params['vdc_name']
+ gateway_name = self.params['gateway_name']
+ _gateway = self.vca.get_gateway(vdc_name, gateway_name)
+ if not _gateway:
+ raise VcaError('vca instance has no gateway named %s' % gateway_name)
+ self._gateway = _gateway
+ return _gateway
+
+ @property
+ def vdc(self):
+ if self._vdc is not None:
+ return self._vdc
+ _vdc = self.vca.get_vdc(self.params['vdc_name'])
+ if not _vdc:
+ raise VcaError('vca instance has no vdc named %s' % self.params['vdc_name'])
+ self._vdc = _vdc
+ return _vdc
+
+ def create_instance(self):
+ service_type = self.params.get('service_type', DEFAULT_SERVICE_TYPE)
+ host = self.params.get('host', LOGIN_HOST.get(service_type))
+ username = self.params['username']
+
+ version = self.params.get('api_version')
+ if service_type == 'vchs':
+ version = '5.6'
+
+ verify = self.params.get('verify_certs')
+
+ return VCA(host=host, username=username,
+ service_type=SERVICE_MAP[service_type],
+ version=version, verify=verify)
+
+ def login(self):
+ service_type = self.params['service_type']
+ password = self.params['password']
+
+ if not self.vca.login(password=password):
+ self.fail('Login to VCA failed', response=self.vca.response.content)
+
+ try:
+ method_name = 'login_%s' % service_type
+ meth = getattr(self, method_name)
+ meth()
+ except AttributeError:
+ self.fail('no login method exists for service_type %s' % service_type)
+ except VcaError as e:
+ self.fail(e.message, response=self.vca.response.content, **e.kwargs)
+
+ def login_vca(self):
+ instance_id = self.params['instance_id']
+ if not instance_id:
+ raise VcaError('missing required instance_id for service_type vca')
+ self.vca.login_to_instance_sso(instance=instance_id)
+
+ def login_vchs(self):
+ service_id = self.params['service_id']
+ if not service_id:
+ raise VcaError('missing required service_id for service_type vchs')
+
+ org = self.params['org']
+ if not org:
+ raise VcaError('missing required org for service_type vchs')
+
+ self.vca.login_to_org(service_id, org)
+
+ def login_vcd(self):
+ org = self.params['org']
+ if not org:
+ raise VcaError('missing required org for service_type vcd')
+
+ if not self.vca.token:
+ raise VcaError('unable to get token for service_type vcd')
+
+ if not self.vca.vcloud_session.org_url:
+ raise VcaError('unable to get org_url for service_type vcd')
+
+ self.vca.login(token=self.vca.token, org=org,
+ org_url=self.vca.vcloud_session.org_url)
+
+ def save_services_config(self, blocking=True):
+ task = self.gateway.save_services_configuration()
+ if not task:
+ self.fail(msg='unable to save gateway services configuration')
+ if blocking:
+ self.vca.block_until_completed(task)
+
+ def fail(self, msg, **kwargs):
+ self.fail_json(msg=msg, **kwargs)
+
+ def exit(self, **kwargs):
+ self.exit_json(**kwargs)
+
+
+
+# -------------------------------------------------------------
+# 9/18/2015 @privateip
+# All of the functions below here were migrated from the original
+# vca_* modules. All functions below should be considered deprecated
+# and will be removed once all of the vca_* modules have been updated
+# to use the new instance module above
+# -------------------------------------------------------------
+
+VCA_REQ_ARGS = ['instance_id', 'vdc_name']
+VCHS_REQ_ARGS = ['service_id']
+VCD_REQ_ARGS = []  # assumed empty; referenced below and otherwise undefined
+
+
+def _validate_module(module):
+ if not HAS_PYVCLOUD:
+ module.fail_json("python module pyvcloud is needed for this module")
+
+ service_type = module.params.get('service_type', DEFAULT_SERVICE_TYPE)
+
+ if service_type == 'vca':
+ for arg in VCA_REQ_ARGS:
+ if module.params.get(arg) is None:
+ module.fail_json("argument %s is mandatory when service type "
+ "is vca" % arg)
+
+ if service_type == 'vchs':
+ for arg in VCHS_REQ_ARGS:
+ if module.params.get(arg) is None:
+ module.fail_json("argument %s is mandatory when service type "
+ "is vchs" % arg)
+
+ if service_type == 'vcd':
+ for arg in VCD_REQ_ARGS:
+ if module.params.get(arg) is None:
+ module.fail_json("argument %s is mandatory when service type "
+ "is vcd" % arg)
+
+
+def serialize_instances(instance_list):
+ instances = []
+ for i in instance_list:
+ instances.append(dict(apiUrl=i['apiUrl'], instance_id=i['id']))
+ return instances
+
+def _vca_login(vca, password, instance):
+ if not vca.login(password=password):
+ raise VcaError("Login Failed: Please check username or password",
+ error=vca.response.content)
+
+ if not vca.login_to_instance_sso(instance=instance):
+ s_json = serialize_instances(vca.instances)
+ raise VcaError("Login to Instance failed: Seems like instance_id provided "
+ "is wrong .. Please check", valid_instances=s_json)
+
+ return vca
+
+def _vchs_login(vca, password, service, org):
+ if not vca.login(password=password):
+ raise VcaError("Login Failed: Please check username or password",
+ error=vca.response.content)
+
+ if not vca.login_to_org(service, org):
+ raise VcaError("Failed to login to org, Please check the orgname",
+ error=vca.response.content)
+
+
+def _vcd_login(vca, password, org):
+ # TODO: this function needs to be refactored
+ if not vca.login(password=password, org=org):
+ raise VcaError("Login Failed: Please check username or password "
+ "or host parameters")
+
+ if not vca.login(password=password, org=org):
+ raise VcaError("Failed to get the token",
+ error=vca.response.content)
+
+ if not vca.login(token=vca.token, org=org, org_url=vca.vcloud_session.org_url):
+ raise VcaError("Failed to login to org", error=vca.response.content)
+
+def vca_login(module):
+ service_type = module.params.get('service_type')
+ username = module.params.get('username')
+ password = module.params.get('password')
+ instance = module.params.get('instance_id')
+ org = module.params.get('org')
+ vdc_name = module.params.get('vdc_name')
+ service = module.params.get('service_id')
+ version = module.params.get('api_version')
+ verify = module.params.get('verify_certs')
+
+ _validate_module(module)
+
+ if not vdc_name and service_type == 'vchs':
+ vdc_name = module.params.get('service_id')
+
+ if not org and service_type == 'vchs':
+ org = vdc_name or service
+
+ if service_type == 'vcd':
+ host = module.params.get('host')
+ else:
+ host = LOGIN_HOST[service_type]
+
+ username = os.environ.get('VCA_USER', username)
+ password = os.environ.get('VCA_PASS', password)
+
+ if not username or not password:
+ msg = "Either the username or password is not set, please check args"
+ module.fail_json(msg=msg)
+
+ if service_type == 'vchs':
+ version = '5.6'
+ elif service_type == 'vcd' and not version:
+ version = '5.6'
+
+ vca = VCA(host=host, username=username,
+ service_type=SERVICE_MAP[service_type],
+ version=version, verify=verify)
+
+ try:
+ if service_type == 'vca':
+ _vca_login(vca, password, instance)
+ elif service_type == 'vchs':
+ _vchs_login(vca, password, service, org)
+ elif service_type == 'vcd':
+ _vcd_login(vca, password, org)
+ except VcaError as e:
+ module.fail_json(msg=e.message, **e.kwargs)
+
+ return vca
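A module built on the new helper would look roughly like this (a hypothetical sketch, not one of the existing vca_* modules; power_on is an illustrative argument):

    from ansible.module_utils.vca import VcaAnsibleModule, VcaError

    def main():
        argument_spec = dict(power_on=dict(type='bool', default=False))
        module = VcaAnsibleModule(argument_spec=argument_spec,
                                  supports_check_mode=True)
        try:
            gateway = module.gateway   # resolved lazily from vdc/gateway params
            module.exit(changed=False, vdc=module.params['vdc_name'])
        except VcaError as e:
            module.fail(e.message, **e.kwargs)

    if __name__ == '__main__':
        main()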
diff --git a/lib/ansible/modules/core b/lib/ansible/modules/core
-Subproject 85ddb1b90232dbd68798e9b2d7dafa5689a1d30
+Subproject 59afecace4354e7fd3aba42e9a285242030585a
diff --git a/lib/ansible/modules/extras b/lib/ansible/modules/extras
-Subproject 7a0bfd91bbfd12dcfd0cd5263f193ede30804fe
+Subproject dee690d7f4a6c977a89b85279af1698eb4f515e
diff --git a/lib/ansible/parsing/__init__.py b/lib/ansible/parsing/__init__.py
index 024763feac..0c8e21fbd2 100644
--- a/lib/ansible/parsing/__init__.py
+++ b/lib/ansible/parsing/__init__.py
@@ -22,8 +22,10 @@ __metaclass__ = type
import copy
import json
import os
+import stat
+import subprocess
from yaml import load, YAMLError
+from six import text_type
-from ansible.errors import AnsibleParserError
+from ansible.errors import AnsibleError, AnsibleParserError
from ansible.errors.yaml_strings import YAML_SYNTAX_ERROR
@@ -55,11 +57,15 @@ class DataLoader():
ds = dl.load_from_file('/path/to/file')
'''
- def __init__(self, vault_password=None):
+ def __init__(self):
self._basedir = '.'
- self._vault_password = vault_password
self._FILE_CACHE = dict()
+ # initialize the vault stuff with an empty password
+ self.set_vault_password(None)
+
+ def set_vault_password(self, vault_password):
+ self._vault_password = vault_password
self._vault = VaultLib(password=vault_password)
def load(self, data, file_name='<string>', show_content=True):
@@ -80,7 +86,7 @@ class DataLoader():
# they are unable to cope with our subclass.
# Unwrap and re-wrap the unicode so we can keep track of line
# numbers
- new_data = unicode(data)
+ new_data = text_type(data)
else:
new_data = data
try:
@@ -119,7 +125,7 @@ class DataLoader():
def is_file(self, path):
path = self.path_dwim(path)
- return os.path.isfile(path)
+ return os.path.isfile(path) or path == os.devnull
def is_directory(self, path):
path = self.path_dwim(path)
@@ -129,6 +135,11 @@ class DataLoader():
path = self.path_dwim(path)
return os.listdir(path)
+ def is_executable(self, path):
+ '''is the given path executable?'''
+ path = self.path_dwim(path)
+ st_mode = os.stat(path)[stat.ST_MODE]
+ return bool(st_mode & (stat.S_IXUSR | stat.S_IXGRP | stat.S_IXOTH))
+
def _safe_load(self, stream, file_name=None):
''' Implements yaml.safe_load(), except using our custom loader class. '''
@@ -248,3 +259,29 @@ class DataLoader():
return candidate
+ def read_vault_password_file(self, vault_password_file):
+ """
+ Read a vault password from a file or, if the file is executable, execute
+ the script and retrieve the password from STDOUT
+ """
+
+ this_path = os.path.realpath(os.path.expanduser(vault_password_file))
+ if not os.path.exists(this_path):
+ raise AnsibleError("The vault password file %s was not found" % this_path)
+
+ if self.is_executable(this_path):
+ try:
+ # STDERR not captured to make it easier for users to prompt for input in their scripts
+ p = subprocess.Popen(this_path, stdout=subprocess.PIPE)
+ except OSError as e:
+ raise AnsibleError("Problem running vault password script %s (%s). If this is not a script, remove the executable bit from the file." % (' '.join(this_path), e))
+ stdout, stderr = p.communicate()
+ self.set_vault_password(stdout.strip('\r\n'))
+ else:
+ try:
+ f = open(this_path, "rb")
+ self.set_vault_password(f.read().strip())
+ f.close()
+ except (OSError, IOError) as e:
+ raise AnsibleError("Could not read vault password file %s: %s" % (this_path, e))
+
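With this helper the password source becomes interchangeable (a minimal sketch; both paths are illustrative):

    from ansible.parsing import DataLoader

    loader = DataLoader()
    # plain file: contents are stripped and used as the vault password
    loader.read_vault_password_file('~/.vault_pass.txt')
    # executable: run it and take the password from its stdout
    loader.read_vault_password_file('~/bin/get-vault-pass.sh')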
diff --git a/lib/ansible/parsing/mod_args.py b/lib/ansible/parsing/mod_args.py
index 6b10dd5308..a6ccaa0a56 100644
--- a/lib/ansible/parsing/mod_args.py
+++ b/lib/ansible/parsing/mod_args.py
@@ -148,13 +148,12 @@ class ModuleArgsParser:
else:
(action, args) = self._normalize_new_style_args(thing)
- # this can occasionally happen, simplify
- if args and 'args' in args:
- tmp_args = args['args']
- del args['args']
- if isinstance(tmp_args, string_types):
- tmp_args = parse_kv(tmp_args)
- args.update(tmp_args)
+ # this can occasionally happen, simplify
+ if args and 'args' in args:
+ tmp_args = args.pop('args')
+ if isinstance(tmp_args, string_types):
+ tmp_args = parse_kv(tmp_args)
+ args.update(tmp_args)
# finally, update the args we're going to return with the ones
# which were normalized above
@@ -254,16 +253,16 @@ class ModuleArgsParser:
action, args = self._normalize_parameters(thing, additional_args=additional_args)
# local_action
+ local_action = False
if 'local_action' in self._task_ds:
# local_action is similar but also implies a connection='local'
if action is not None:
raise AnsibleParserError("action and local_action are mutually exclusive", obj=self._task_ds)
thing = self._task_ds.get('local_action', '')
connection = 'local'
+ local_action = True
action, args = self._normalize_parameters(thing, additional_args=additional_args)
- # module: <stuff> is the more new-style invocation
-
# walk the input dictionary to see we recognize a module name
for (item, value) in iteritems(self._task_ds):
if item in module_loader or item == 'meta' or item == 'include':
@@ -288,4 +287,8 @@ class ModuleArgsParser:
# shell modules require special handling
(action, args) = self._handle_shell_weirdness(action, args)
+ # now add the local action flag to the args, if it was set
+ if local_action:
+ args['_local_action'] = local_action
+
return (action, args, connection)
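The args squashing kept above simply folds a nested 'args' entry (a dict or a k=v string) into the surrounding parameters, which is easy to see in isolation (a minimal sketch using a plain dict in place of the parsed task data):

    from ansible.parsing.splitter import parse_kv

    args = {'path': '/tmp/x', 'args': 'mode=0644 owner=root'}
    tmp_args = args.pop('args')
    if isinstance(tmp_args, str):
        tmp_args = parse_kv(tmp_args)
    args.update(tmp_args)
    # args == {'path': '/tmp/x', 'mode': '0644', 'owner': 'root'}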
diff --git a/lib/ansible/parsing/utils/addresses.py b/lib/ansible/parsing/utils/addresses.py
new file mode 100644
index 0000000000..387f05c627
--- /dev/null
+++ b/lib/ansible/parsing/utils/addresses.py
@@ -0,0 +1,215 @@
+# Copyright 2015 Abhijit Menon-Sen <ams@2ndQuadrant.com>
+#
+# This file is part of Ansible
+#
+# Ansible is free software: you can redistribute it and/or modify
+# it under the terms of the GNU General Public License as published by
+# the Free Software Foundation, either version 3 of the License, or
+# (at your option) any later version.
+#
+# Ansible is distributed in the hope that it will be useful,
+# but WITHOUT ANY WARRANTY; without even the implied warranty of
+# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+# GNU General Public License for more details.
+#
+# You should have received a copy of the GNU General Public License
+# along with Ansible. If not, see <http://www.gnu.org/licenses/>.
+
+# Make coding more python3-ish
+from __future__ import (absolute_import, division, print_function)
+__metaclass__ = type
+
+import re
+
+# Components that match a numeric or alphanumeric begin:end or begin:end:step
+# range expression inside square brackets.
+
+numeric_range = r'''
+ \[
+ (?:[0-9]+:[0-9]+) # numeric begin:end
+ (?::[0-9]+)? # numeric :step (optional)
+ \]
+'''
+
+hexadecimal_range = r'''
+ \[
+ (?:[0-9a-f]+:[0-9a-f]+) # hexadecimal begin:end
+ (?::[0-9]+)? # numeric :step (optional)
+ \]
+'''
+
+alphanumeric_range = r'''
+ \[
+ (?:
+ [a-z]:[a-z]| # one-char alphabetic range
+ [0-9]+:[0-9]+ # ...or a numeric one
+ )
+ (?::[0-9]+)? # numeric :step (optional)
+ \]
+'''
+
+# Components that match a 16-bit portion of an IPv6 address in hexadecimal
+# notation (0..ffff) or an 8-bit portion of an IPv4 address in decimal notation
+# (0..255) or an [x:y(:z)] numeric range.
+
+ipv6_component = r'''
+ (?:
+ [0-9a-f]{{1,4}}| # 0..ffff
+ {range} # or a numeric range
+ )
+'''.format(range=hexadecimal_range)
+
+ipv4_component = r'''
+ (?:
+ [01]?[0-9]{{1,2}}| # 0..199
+ 2[0-4][0-9]| # 200..249
+ 25[0-5]| # 250..255
+ {range} # or a numeric range
+ )
+'''.format(range=numeric_range)
+
+# A hostname label, e.g. 'foo' in 'foo.example.com'. Consists of alphanumeric
+# characters plus dashes (and underscores) or valid ranges. The label may not
+# start or end with a hyphen or an underscore. This is interpolated into the
+# hostname pattern below. We don't try to enforce the 63-char length limit.
+
+label = r'''
+ (?:[\w]|{range}) # Starts with an alphanumeric or a range
+ (?:[\w_-]|{range})* # Then zero or more of the same or [_-]
+ (?<![_-]) # ...as long as it didn't end with [_-]
+'''.format(range=alphanumeric_range)
+
+patterns = {
+ # This matches a square-bracketed expression with a port specification. What
+ # is inside the square brackets is validated later.
+
+ 'bracketed_hostport': re.compile(
+ r'''^
+ \[(.+)\] # [host identifier]
+ :([0-9]+) # :port number
+ $
+ ''', re.X
+ ),
+
+ # This matches a bare IPv4 address or hostname (or host pattern including
+ # [x:y(:z)] ranges) with a port specification.
+
+ 'hostport': re.compile(
+ r'''^
+ ((?: # We want to match:
+ [^:\[\]] # (a non-range character
+ | # ...or...
+ \[[^\]]*\] # a complete bracketed expression)
+ )*) # repeated as many times as possible
+ :([0-9]+) # followed by a port number
+ $
+ ''', re.X
+ ),
+
+ # This matches an IPv4 address, but also permits range expressions.
+
+ 'ipv4': re.compile(
+ r'''^
+ (?:{i4}\.){{3}}{i4} # Three parts followed by dots plus one
+ $
+ '''.format(i4=ipv4_component), re.X|re.I
+ ),
+
+ # This matches an IPv6 address, but also permits range expressions.
+ #
+ # This expression looks complex, but it really only spells out the various
+ # combinations in which the basic unit of an IPv6 address (0..ffff) can be
+ # written, from :: to 1:2:3:4:5:6:7:8, plus the IPv4-in-IPv6 variants such
+ # as ::ffff:192.0.2.3.
+ #
+ # Note that we can't just use ipaddress.ip_address() because we also have to
+ # accept ranges in place of each component.
+
+ 'ipv6': re.compile(
+ r'''^
+ (?:{0}:){{7}}{0}| # uncompressed: 1:2:3:4:5:6:7:8
+ (?:{0}:){{1,6}}:| # compressed variants, which are all
+ (?:{0}:)(?::{0}){{1,6}}| # a::b for various lengths of a,b
+ (?:{0}:){{2}}(?::{0}){{1,5}}|
+ (?:{0}:){{3}}(?::{0}){{1,4}}|
+ (?:{0}:){{4}}(?::{0}){{1,3}}|
+ (?:{0}:){{5}}(?::{0}){{1,2}}|
+ (?:{0}:){{6}}(?::{0})| # ...all with 2 <= a+b <= 7
+ :(?::{0}){{1,6}}| # ::ffff(:ffff...)
+ {0}?::| # ffff::, ::
+ # ipv4-in-ipv6 variants
+ (?:0:){{6}}(?:{0}\.){{3}}{0}|
+ ::(?:ffff:)?(?:{0}\.){{3}}{0}|
+ (?:0:){{5}}ffff:(?:{0}\.){{3}}{0}
+ $
+ '''.format(ipv6_component), re.X|re.I
+ ),
+
+ # This matches a hostname or host pattern including [x:y(:z)] ranges.
+ #
+ # We roughly follow DNS rules here, but also allow ranges (and underscores).
+ # In the past, no systematic rules were enforced about inventory hostnames,
+ # but the parsing context (e.g. shlex.split(), fnmatch.fnmatch()) excluded
+ # various metacharacters anyway.
+ #
+ # We don't enforce DNS length restrictions here (63 characters per label,
+ # 253 characters total) or make any attempt to process IDNs.
+
+ 'hostname': re.compile(
+ r'''^
+ {label} # We must have at least one label
+ (?:\.{label})* # Followed by zero or more .labels
+ $
+ '''.format(label=label), re.X|re.I|re.UNICODE
+ ),
+}
+
+def parse_address(address, allow_ranges=False):
+ """
+ Takes a string and returns a (host, port) tuple. If the host is None, then
+ the string could not be parsed as a host identifier with an optional port
+ specification. If the port is None, then no port was specified.
+
+ The host identifier may be a hostname (qualified or not), an IPv4 address,
+ or an IPv6 address. If allow_ranges is True, then any of those may contain
+ [x:y] range specifications, e.g. foo[1:3] or foo[0:5]-bar[x:z].
+
+ The port number is an optional :NN suffix on an IPv4 address or host name,
+ or a mandatory :NN suffix on any square-bracketed expression: IPv6 address,
+ IPv4 address, or host name. (This means the only way to specify a port for
+ an IPv6 address is to enclose it in square brackets.)
+ """
+
+ # First, we extract the port number if one is specified.
+
+ port = None
+ for matching in ['bracketed_hostport', 'hostport']:
+ m = patterns[matching].match(address)
+ if m:
+ (address, port) = m.groups()
+ port = int(port)
+ break
+
+ # What we're left with now must be an IPv4 or IPv6 address, possibly with
+ # numeric ranges, or a hostname with alphanumeric ranges.
+
+ host = None
+ for matching in ['ipv4', 'ipv6', 'hostname']:
+ m = patterns[matching].match(address)
+ if m:
+ host = address
+ break
+
+ # If it isn't any of the above, we don't understand it.
+
+ if not host:
+ return (None, None)
+
+ # If we get to this point, we know that any included ranges are valid. If
+ # the caller is prepared to handle them, all is well. Otherwise we treat
+ # it as a parse failure.
+
+ if not allow_ranges and '[' in host:
+ return (None, None)
+
+ return (host, port)
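Representative inputs and results for the parser (values follow from the patterns defined above):

    from ansible.parsing.utils.addresses import parse_address

    parse_address('example.com')            # ('example.com', None)
    parse_address('example.com:2222')       # ('example.com', 2222)
    parse_address('[2001:db8::1]:22')       # ('2001:db8::1', 22)
    parse_address('foo[1:3].example.com')   # (None, None) -- ranges rejected
    parse_address('foo[1:3].example.com', allow_ranges=True)
                                            # ('foo[1:3].example.com', None)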
diff --git a/lib/ansible/parsing/vault/__init__.py b/lib/ansible/parsing/vault/__init__.py
index 55346166b6..f1e544204a 100644
--- a/lib/ansible/parsing/vault/__init__.py
+++ b/lib/ansible/parsing/vault/__init__.py
@@ -12,11 +12,6 @@
#
# You should have received a copy of the GNU General Public License
# along with Ansible. If not, see <http://www.gnu.org/licenses/>.
-#
-# ansible-pull is a script that runs ansible in local mode
-# after checking out a playbooks directory from source repo. There is an
-# example playbook to bootstrap this script in the examples/ dir which
-# installs ansible and sets it up to run on cron.
# Make coding more python3-ish
from __future__ import (absolute_import, division, print_function)
@@ -25,6 +20,7 @@ __metaclass__ = type
import os
import shlex
import shutil
+import sys
import tempfile
from io import BytesIO
from subprocess import call
@@ -135,13 +131,13 @@ class VaultLib:
b_data = to_bytes(data, errors='strict', encoding='utf-8')
if self.is_encrypted(b_data):
- raise AnsibleError("data is already encrypted")
+ raise AnsibleError("input is already encrypted")
- if not self.cipher_name:
+ if not self.cipher_name or self.cipher_name not in CIPHER_WRITE_WHITELIST:
self.cipher_name = u"AES256"
cipher_class_name = u'Vault{0}'.format(self.cipher_name)
- if cipher_class_name in globals() and self.cipher_name in CIPHER_WHITELIST:
+ if cipher_class_name in globals():
Cipher = globals()[cipher_class_name]
this_cipher = Cipher()
else:
@@ -167,7 +163,7 @@ class VaultLib:
raise AnsibleError("A vault password must be specified to decrypt data")
if not self.is_encrypted(b_data):
- raise AnsibleError("data is not encrypted")
+ raise AnsibleError("input is not encrypted")
# clean out header
b_data = self._split_header(b_data)
@@ -230,18 +226,11 @@ class VaultLib:
class VaultEditor:
- # uses helper methods for write_file(self, filename, data)
- # to write a file so that code isn't duplicated for simple
- # file I/O, ditto read_file(self, filename) and launch_editor(self, filename)
- # ... "Don't Repeat Yourself", etc.
-
- def __init__(self, cipher_name, password, filename):
- # instantiates a member variable for VaultLib
- self.cipher_name = cipher_name
- self.password = password
- self.filename = filename
- def _edit_file_helper(self, existing_data=None, cipher=None, force_save=False):
+ def __init__(self, password):
+ self.vault = VaultLib(password)
+
+ def _edit_file_helper(self, filename, existing_data=None, force_save=False):
# make sure the umask is set to a sane value
old_umask = os.umask(0o077)
@@ -260,128 +249,104 @@ class VaultEditor:
os.remove(tmp_path)
return
- # create new vault
- this_vault = VaultLib(self.password)
- if cipher:
- this_vault.cipher_name = cipher
-
# encrypt new data and write out to tmp
- enc_data = this_vault.encrypt(tmpdata)
+ enc_data = self.vault.encrypt(tmpdata)
self.write_data(enc_data, tmp_path)
# shuffle tmp file into place
- self.shuffle_files(tmp_path, self.filename)
+ self.shuffle_files(tmp_path, filename)
# and restore umask
os.umask(old_umask)
- def create_file(self):
- """ create a new encrypted file """
+ def encrypt_file(self, filename, output_file=None):
check_prereqs()
- if os.path.isfile(self.filename):
- raise AnsibleError("%s exists, please use 'edit' instead" % self.filename)
+ plaintext = self.read_data(filename)
+ ciphertext = self.vault.encrypt(plaintext)
+ self.write_data(ciphertext, output_file or filename)
- # Let the user specify contents and save file
- self._edit_file_helper(cipher=self.cipher_name)
+ def decrypt_file(self, filename, output_file=None):
+
+ check_prereqs()
- def decrypt_file(self):
+ ciphertext = self.read_data(filename)
+ plaintext = self.vault.decrypt(ciphertext)
+ self.write_data(plaintext, output_file or filename)
+
+ def create_file(self, filename):
+ """ create a new encrypted file """
check_prereqs()
- if not os.path.isfile(self.filename):
- raise AnsibleError("%s does not exist" % self.filename)
+ # FIXME: If we can raise an error here, we can probably just make it
+ # behave like edit instead.
+ if os.path.isfile(filename):
+ raise AnsibleError("%s exists, please use 'edit' instead" % filename)
- tmpdata = self.read_data(self.filename)
- this_vault = VaultLib(self.password)
- if this_vault.is_encrypted(tmpdata):
- dec_data = this_vault.decrypt(tmpdata)
- if dec_data is None:
- raise AnsibleError("Decryption failed")
- else:
- self.write_data(dec_data, self.filename)
- else:
- raise AnsibleError("%s is not encrypted" % self.filename)
+ self._edit_file_helper(filename)
- def edit_file(self):
+ def edit_file(self, filename):
check_prereqs()
- # decrypt to tmpfile
- tmpdata = self.read_data(self.filename)
- this_vault = VaultLib(self.password)
- dec_data = this_vault.decrypt(tmpdata)
+ ciphertext = self.read_data(filename)
+ plaintext = self.vault.decrypt(ciphertext)
- # let the user edit the data and save
- if this_vault.cipher_name not in CIPHER_WRITE_WHITELIST:
+ if self.vault.cipher_name not in CIPHER_WRITE_WHITELIST:
# we want to get rid of files encrypted with the AES cipher
- self._edit_file_helper(existing_data=dec_data, cipher=None, force_save=True)
+ self._edit_file_helper(filename, existing_data=plaintext, force_save=True)
else:
- self._edit_file_helper(existing_data=dec_data, cipher=this_vault.cipher_name, force_save=False)
+ self._edit_file_helper(filename, existing_data=plaintext, force_save=False)
- def view_file(self):
+ def view_file(self, filename):
check_prereqs()
- # decrypt to tmpfile
- tmpdata = self.read_data(self.filename)
- this_vault = VaultLib(self.password)
- dec_data = this_vault.decrypt(tmpdata)
+ # FIXME: Why write this to a temporary file at all? It would be safer
+ # to feed it to the PAGER on stdin.
_, tmp_path = tempfile.mkstemp()
- self.write_data(dec_data, tmp_path)
+ ciphertext = self.read_data(filename)
+ plaintext = self.vault.decrypt(ciphertext)
+ self.write_data(plaintext, tmp_path)
# drop the user into pager on the tmp file
call(self._pager_shell_command(tmp_path))
os.remove(tmp_path)
- def encrypt_file(self):
-
- check_prereqs()
-
- if not os.path.isfile(self.filename):
- raise AnsibleError("%s does not exist" % self.filename)
-
- tmpdata = self.read_data(self.filename)
- this_vault = VaultLib(self.password)
- this_vault.cipher_name = self.cipher_name
- if not this_vault.is_encrypted(tmpdata):
- enc_data = this_vault.encrypt(tmpdata)
- self.write_data(enc_data, self.filename)
- else:
- raise AnsibleError("%s is already encrypted" % self.filename)
-
- def rekey_file(self, new_password):
+ def rekey_file(self, filename, new_password):
check_prereqs()
- # decrypt
- tmpdata = self.read_data(self.filename)
- this_vault = VaultLib(self.password)
- dec_data = this_vault.decrypt(tmpdata)
+ ciphertext = self.read_data(filename)
+ plaintext = self.vault.decrypt(ciphertext)
- # create new vault
new_vault = VaultLib(new_password)
-
- # we want to force cipher to the default
- #new_vault.cipher_name = this_vault.cipher_name
-
- # re-encrypt data and re-write file
- enc_data = new_vault.encrypt(dec_data)
- self.write_data(enc_data, self.filename)
+ new_ciphertext = new_vault.encrypt(plaintext)
+ self.write_data(new_ciphertext, filename)
def read_data(self, filename):
- f = open(filename, "rb")
- tmpdata = f.read()
- f.close()
- return tmpdata
+ try:
+ if filename == '-':
+ data = sys.stdin.read()
+ else:
+ with open(filename, "rb") as fh:
+ data = fh.read()
+ except Exception as e:
+ raise AnsibleError(str(e))
+
+ return data
def write_data(self, data, filename):
- if os.path.isfile(filename):
- os.remove(filename)
- f = open(filename, "wb")
- f.write(to_bytes(data, errors='strict'))
- f.close()
+ b_data = to_bytes(data, errors='strict')
+ if filename == '-':
+ sys.stdout.write(b_data)
+ else:
+ if os.path.isfile(filename):
+ os.remove(filename)
+ with open(filename, "wb") as fh:
+ fh.write(b_data)
def shuffle_files(self, src, dest):
# overwrite dest with src
@@ -483,39 +448,7 @@ class VaultAES:
""" Read plaintext data from in_file and write encrypted to out_file """
- # combine sha + data
- this_sha = to_bytes(sha256(data).hexdigest())
- tmp_data = this_sha + b"\n" + data
-
- in_file = BytesIO(tmp_data)
- in_file.seek(0)
- out_file = BytesIO()
-
- bs = AES.block_size
-
- # Get a block of random data. EL does not have Crypto.Random.new()
- # so os.urandom is used for cross platform purposes
- salt = os.urandom(bs - len(b'Salted__'))
-
- key, iv = self.aes_derive_key_and_iv(password, salt, key_length, bs)
- cipher = AES.new(key, AES.MODE_CBC, iv)
- full = to_bytes(b'Salted__' + salt)
- out_file.write(full)
- finished = False
- while not finished:
- chunk = in_file.read(1024 * bs)
- if len(chunk) == 0 or len(chunk) % bs != 0:
- padding_length = (bs - len(chunk) % bs) or bs
- chunk += to_bytes(padding_length * chr(padding_length), errors='strict', encoding='ascii')
- finished = True
- out_file.write(cipher.encrypt(chunk))
-
- out_file.seek(0)
- enc_data = out_file.read()
- tmp_data = hexlify(enc_data)
-
- return tmp_data
-
+ raise AnsibleError("Encryption disabled for deprecated VaultAES class")
def decrypt(self, data, password, key_length=32):
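After the refactor a single VaultEditor serves any number of files, and '-' stands in for stdin/stdout in read_data()/write_data() (a minimal sketch; filenames and passwords are illustrative):

    from ansible.parsing.vault import VaultEditor

    editor = VaultEditor('s3cret')
    editor.encrypt_file('group_vars/all.yml')               # in place
    editor.decrypt_file('group_vars/all.yml', 'plain.yml')  # to a second file
    editor.rekey_file('group_vars/all.yml', 'n3w-s3cret')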
diff --git a/lib/ansible/parsing/yaml/dumper.py b/lib/ansible/parsing/yaml/dumper.py
index dc498acd06..3c7bd3120b 100644
--- a/lib/ansible/parsing/yaml/dumper.py
+++ b/lib/ansible/parsing/yaml/dumper.py
@@ -20,6 +20,7 @@ from __future__ import (absolute_import, division, print_function)
__metaclass__ = type
import yaml
+from six import PY3
from ansible.parsing.yaml.objects import AnsibleUnicode
@@ -30,8 +31,13 @@ class AnsibleDumper(yaml.SafeDumper):
'''
pass
+if PY3:
+ represent_unicode = yaml.representer.SafeRepresenter.represent_str
+else:
+ represent_unicode = yaml.representer.SafeRepresenter.represent_unicode
+
AnsibleDumper.add_representer(
AnsibleUnicode,
- yaml.representer.SafeRepresenter.represent_unicode
+ represent_unicode,
)
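SafeRepresenter only provides represent_unicode on Python 2; on Python 3 every str is already unicode, so represent_str is the equivalent hook. Exercising the registered representer directly (a minimal sketch):

    import yaml
    from ansible.parsing.yaml.dumper import AnsibleDumper
    from ansible.parsing.yaml.objects import AnsibleUnicode

    print(yaml.dump(AnsibleUnicode(u'hello'), Dumper=AnsibleDumper))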
diff --git a/lib/ansible/playbook/attribute.py b/lib/ansible/playbook/attribute.py
index ec243abcd7..703d9dbca1 100644
--- a/lib/ansible/playbook/attribute.py
+++ b/lib/ansible/playbook/attribute.py
@@ -19,19 +19,39 @@
from __future__ import (absolute_import, division, print_function)
__metaclass__ = type
+
class Attribute:
- def __init__(self, isa=None, private=False, default=None, required=False, listof=None, priority=0):
+ def __init__(self, isa=None, private=False, default=None, required=False, listof=None, priority=0, always_post_validate=False):
+
+ self.isa = isa
+ self.private = private
+ self.default = default
+ self.required = required
+ self.listof = listof
+ self.priority = priority
+ self.always_post_validate = always_post_validate
+
+ def __eq__(self, other):
+ return other.priority == self.priority
+
+ def __ne__(self, other):
+ return other.priority != self.priority
+
+ # NB: higher priority numbers sort first
+
+ def __lt__(self, other):
+ return other.priority < self.priority
+
+ def __gt__(self, other):
+ return other.priority > self.priority
+
+ def __le__(self, other):
+ return other.priority <= self.priority
- self.isa = isa
- self.private = private
- self.default = default
- self.required = required
- self.listof = listof
- self.priority = priority
+ def __ge__(self, other):
+ return other.priority >= self.priority
- def __cmp__(self, other):
- return cmp(other.priority, self.priority)
class FieldAttribute(Attribute):
pass
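Because each comparison is written with the operands swapped, a plain sort puts the highest-priority attribute first (a minimal sketch):

    from ansible.playbook.attribute import Attribute

    attrs = [Attribute(priority=0), Attribute(priority=100), Attribute(priority=90)]
    print([a.priority for a in sorted(attrs)])   # [100, 90, 0]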
diff --git a/lib/ansible/playbook/base.py b/lib/ansible/playbook/base.py
index eba7d8fbc7..c4fa631b61 100644
--- a/lib/ansible/playbook/base.py
+++ b/lib/ansible/playbook/base.py
@@ -27,7 +27,7 @@ from functools import partial
from inspect import getmembers
from io import FileIO
-from six import iteritems, string_types
+from six import iteritems, string_types, text_type
from jinja2.exceptions import UndefinedError
@@ -37,7 +37,7 @@ from ansible.playbook.attribute import Attribute, FieldAttribute
from ansible.template import Templar
from ansible.utils.boolean import boolean
from ansible.utils.debug import debug
-from ansible.utils.vars import combine_vars
+from ansible.utils.vars import combine_vars, isidentifier
from ansible.template import template
class Base:
@@ -48,12 +48,18 @@ class Base:
_remote_user = FieldAttribute(isa='string')
# variables
- _vars = FieldAttribute(isa='dict', default=dict())
+ _vars = FieldAttribute(isa='dict', default=dict(), priority=100)
# flags and misc. settings
_environment = FieldAttribute(isa='list')
_no_log = FieldAttribute(isa='bool')
+ # param names which have been deprecated/removed
+ DEPRECATED_ATTRIBUTES = [
+ 'sudo', 'sudo_user', 'sudo_pass', 'sudo_exe', 'sudo_flags',
+ 'su', 'su_user', 'su_pass', 'su_exe', 'su_flags',
+ ]
+
def __init__(self):
# initialize the data loader and variable manager, which will be provided
@@ -225,6 +231,12 @@ class Base:
method = getattr(self, '_validate_%s' % name, None)
if method:
method(attribute, name, getattr(self, name))
+ else:
+ # and make sure the attribute is of the type it should be
+ value = getattr(self, name)
+ if value is not None:
+ if attribute.isa == 'string' and isinstance(value, (list, dict)):
+ raise AnsibleParserError("The field '%s' is supposed to be a string type, however the incoming data structure is a %s" % (name, type(value)), obj=self.get_ds())
def copy(self):
'''
@@ -266,6 +278,11 @@ class Base:
continue
else:
raise AnsibleParserError("the field '%s' is required but was not set" % name)
+ elif not attribute.always_post_validate and self.__class__.__name__ not in ('Task', 'Handler', 'PlayContext'):
+ # Intermediate objects like Play() won't have their fields validated by
+ # default, as their values are often inherited by other objects and validated
+ # later, so we don't want them to fail out early
+ continue
try:
# Run the post-validator if present. These methods are responsible for
@@ -286,11 +303,19 @@ class Base:
# and make sure the attribute is of the type it should be
if value is not None:
if attribute.isa == 'string':
- value = unicode(value)
+ value = text_type(value)
elif attribute.isa == 'int':
value = int(value)
+ elif attribute.isa == 'float':
+ value = float(value)
elif attribute.isa == 'bool':
value = boolean(value)
+ elif attribute.isa == 'percent':
+ # special value, which may be an integer or float
+ # with an optional '%' at the end
+ if isinstance(value, string_types) and '%' in value:
+ value = value.replace('%', '')
+ value = float(value)
elif attribute.isa == 'list':
if value is None:
value = []
@@ -371,14 +396,21 @@ class Base:
list into a single dictionary.
'''
+ def _validate_variable_keys(ds):
+ for key in ds:
+ if not isidentifier(key):
+ raise TypeError("%s is not a valid variable name" % key)
+
try:
if isinstance(ds, dict):
+ _validate_variable_keys(ds)
return ds
elif isinstance(ds, list):
all_vars = dict()
for item in ds:
if not isinstance(item, dict):
raise ValueError
+ _validate_variable_keys(item)
all_vars = combine_vars(all_vars, item)
return all_vars
elif ds is None:
@@ -387,6 +419,8 @@ class Base:
raise ValueError
except ValueError:
raise AnsibleParserError("Vars in a %s must be specified as a dictionary, or a list of dictionaries" % self.__class__.__name__, obj=ds)
+ except TypeError as e:
+ raise AnsibleParserError("Invalid variable name in vars specified for %s: %s" % (self.__class__.__name__, e), obj=ds)
def _extend_value(self, value, new_value):
'''
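The new key validation rejects any vars key that could not later be referenced as a Jinja2 variable (a minimal sketch of the helper imported above):

    from ansible.utils.vars import isidentifier

    isidentifier('http_port')   # True
    isidentifier('http-port')   # False -> AnsibleParserError at load time
    isidentifier('2nd_host')    # False, cannot start with a digit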
diff --git a/lib/ansible/playbook/block.py b/lib/ansible/playbook/block.py
index 6f49204414..c0a9ca9bcd 100644
--- a/lib/ansible/playbook/block.py
+++ b/lib/ansible/playbook/block.py
@@ -144,7 +144,7 @@ class Block(Base, Become, Conditional, Taggable):
# use_handlers=self._use_handlers,
# )
- def copy(self, exclude_parent=False):
+ def copy(self, exclude_parent=False, exclude_tasks=False):
def _dupe_task_list(task_list, new_block):
new_task_list = []
for task in task_list:
@@ -162,13 +162,14 @@ class Block(Base, Become, Conditional, Taggable):
new_me._use_handlers = self._use_handlers
new_me._dep_chain = self._dep_chain[:]
- new_me.block = _dupe_task_list(self.block or [], new_me)
- new_me.rescue = _dupe_task_list(self.rescue or [], new_me)
- new_me.always = _dupe_task_list(self.always or [], new_me)
+ if not exclude_tasks:
+ new_me.block = _dupe_task_list(self.block or [], new_me)
+ new_me.rescue = _dupe_task_list(self.rescue or [], new_me)
+ new_me.always = _dupe_task_list(self.always or [], new_me)
new_me._parent_block = None
if self._parent_block and not exclude_parent:
- new_me._parent_block = self._parent_block.copy()
+ new_me._parent_block = self._parent_block.copy(exclude_tasks=exclude_tasks)
new_me._role = None
if self._role:
@@ -197,6 +198,8 @@ class Block(Base, Become, Conditional, Taggable):
data['role'] = self._role.serialize()
if self._task_include is not None:
data['task_include'] = self._task_include.serialize()
+ if self._parent_block is not None:
+ data['parent_block'] = self._parent_block.copy(exclude_tasks=True).serialize()
return data
@@ -230,6 +233,12 @@ class Block(Base, Become, Conditional, Taggable):
ti.deserialize(ti_data)
self._task_include = ti
+ pb_data = data.get('parent_block')
+ if pb_data:
+ pb = Block()
+ pb.deserialize(pb_data)
+ self._parent_block = pb
+
def evaluate_conditional(self, templar, all_vars):
if len(self._dep_chain):
for dep in self._dep_chain:
@@ -325,16 +334,20 @@ class Block(Base, Become, Conditional, Taggable):
def evaluate_and_append_task(target):
tmp_list = []
for task in target:
- if task.action in ('meta', 'include') or task.evaluate_tags(play_context.only_tags, play_context.skip_tags, all_vars=all_vars):
+ if isinstance(task, Block):
+ tmp_list.append(evaluate_block(task))
+ elif task.action in ('meta', 'include') or task.evaluate_tags(play_context.only_tags, play_context.skip_tags, all_vars=all_vars):
tmp_list.append(task)
return tmp_list
- new_block = self.copy()
- new_block.block = evaluate_and_append_task(self.block)
- new_block.rescue = evaluate_and_append_task(self.rescue)
- new_block.always = evaluate_and_append_task(self.always)
+ def evaluate_block(block):
+ new_block = self.copy()
+ new_block.block = evaluate_and_append_task(block.block)
+ new_block.rescue = evaluate_and_append_task(block.rescue)
+ new_block.always = evaluate_and_append_task(block.always)
+ return new_block
- return new_block
+ return evaluate_block(self)
def has_tasks(self):
return len(self.block) > 0 or len(self.rescue) > 0 or len(self.always) > 0
diff --git a/lib/ansible/playbook/conditional.py b/lib/ansible/playbook/conditional.py
index f0acbbdb3f..acfd54e8a0 100644
--- a/lib/ansible/playbook/conditional.py
+++ b/lib/ansible/playbook/conditional.py
@@ -20,6 +20,7 @@ from __future__ import (absolute_import, division, print_function)
__metaclass__ = type
from jinja2.exceptions import UndefinedError
+from six import text_type
from ansible.errors import *
from ansible.playbook.attribute import FieldAttribute
@@ -59,14 +60,14 @@ class Conditional:
# associated with it, so we pull it out now in case we need it for
# error reporting below
ds = None
- if hasattr(self, 'get_ds'):
- ds = self.get_ds()
+ if hasattr(self, '_ds'):
+ ds = getattr(self, '_ds')
try:
for conditional in self.when:
if not self._check_conditional(conditional, templar, all_vars):
return False
- except Exception, e:
+ except Exception as e:
raise AnsibleError("The conditional check '%s' failed. The error was: %s" % (conditional, e), obj=ds)
return True
@@ -82,7 +83,7 @@ class Conditional:
if conditional is None or conditional == '':
return True
- if conditional in all_vars and '-' not in unicode(all_vars[conditional]):
+ if conditional in all_vars and '-' not in text_type(all_vars[conditional]):
conditional = all_vars[conditional]
# make sure the templar is using the variables specifed to this method
diff --git a/lib/ansible/playbook/helpers.py b/lib/ansible/playbook/helpers.py
index 98bef15e2a..958e72206c 100644
--- a/lib/ansible/playbook/helpers.py
+++ b/lib/ansible/playbook/helpers.py
@@ -20,8 +20,6 @@ __metaclass__ = type
import os
-from types import NoneType
-
from ansible.errors import AnsibleParserError
from ansible.parsing.yaml.objects import AnsibleBaseYAMLObject, AnsibleSequence
@@ -52,10 +50,12 @@ def load_list_of_blocks(ds, play, parent_block=None, role=None, task_include=Non
variable_manager=variable_manager,
loader=loader
)
- # Implicit blocks are created by bare tasks listed in a play withou
+ # Implicit blocks are created by bare tasks listed in a play without
# an explicit block statement. If we have two implicit blocks in a row,
# squash them down to a single block to save processing time later.
if b._implicit and len(block_list) > 0 and block_list[-1]._implicit:
+ for t in b.block:
+ t._block = block_list[-1]
block_list[-1].block.extend(b.block)
else:
block_list.append(b)
diff --git a/lib/ansible/playbook/included_file.py b/lib/ansible/playbook/included_file.py
index d8bc497dcf..8a7cd7c244 100644
--- a/lib/ansible/playbook/included_file.py
+++ b/lib/ansible/playbook/included_file.py
@@ -78,18 +78,20 @@ class IncludedFile:
parent_include = original_task._task_include
while parent_include is not None:
parent_include_dir = templar.template(os.path.dirname(parent_include.args.get('_raw_params')))
+ include_target = templar.template(include_result['include'])
if original_task._role:
new_basedir = os.path.join(original_task._role._role_path, 'tasks', parent_include_dir)
- include_file = loader.path_dwim_relative(new_basedir, 'tasks', include_result['include'])
+ include_file = loader.path_dwim_relative(new_basedir, 'tasks', include_target)
else:
- include_file = loader.path_dwim_relative(loader.get_basedir(), parent_include_dir, include_result['include'])
+ include_file = loader.path_dwim_relative(loader.get_basedir(), parent_include_dir, include_target)
if os.path.exists(include_file):
break
else:
parent_include = parent_include._task_include
elif original_task._role:
- include_file = loader.path_dwim_relative(original_task._role._role_path, 'tasks', include_result['include'])
+ include_target = templar.template(include_result['include'])
+ include_file = loader.path_dwim_relative(original_task._role._role_path, 'tasks', include_target)
else:
include_file = loader.path_dwim(res._task.args.get('_raw_params'))
else:
diff --git a/lib/ansible/playbook/play.py b/lib/ansible/playbook/play.py
index 7b3a862911..5324677651 100644
--- a/lib/ansible/playbook/play.py
+++ b/lib/ansible/playbook/play.py
@@ -31,10 +31,18 @@ from ansible.playbook.helpers import load_list_of_blocks, load_list_of_roles
from ansible.playbook.role import Role
from ansible.playbook.taggable import Taggable
from ansible.playbook.task import Task
+from ansible.vars import preprocess_vars
__all__ = ['Play']
+try:
+ from __main__ import display
+ display = display
+except ImportError:
+ from ansible.utils.display import Display
+ display = Display()
+
class Play(Base, Taggable, Become):
@@ -52,22 +60,22 @@ class Play(Base, Taggable, Become):
# Connection-Related Attributes
# TODO: generalize connection
- _accelerate = FieldAttribute(isa='bool', default=False)
- _accelerate_ipv6 = FieldAttribute(isa='bool', default=False)
- _accelerate_port = FieldAttribute(isa='int', default=5099) # should be alias of port
+ _accelerate = FieldAttribute(isa='bool', default=False, always_post_validate=True)
+ _accelerate_ipv6 = FieldAttribute(isa='bool', default=False, always_post_validate=True)
+ _accelerate_port = FieldAttribute(isa='int', default=5099, always_post_validate=True)
# Connection
- _gather_facts = FieldAttribute(isa='bool', default=None)
- _hosts = FieldAttribute(isa='list', default=[], required=True, listof=string_types)
- _name = FieldAttribute(isa='string', default='')
+ _gather_facts = FieldAttribute(isa='bool', default=None, always_post_validate=True)
+ _hosts = FieldAttribute(isa='list', default=[], required=True, listof=string_types, always_post_validate=True)
+ _name = FieldAttribute(isa='string', default='', always_post_validate=True)
# Variable Attributes
- _vars_files = FieldAttribute(isa='list', default=[])
- _vars_prompt = FieldAttribute(isa='list', default=[])
- _vault_password = FieldAttribute(isa='string')
+ _vars_files = FieldAttribute(isa='list', default=[], priority=99)
+ _vars_prompt = FieldAttribute(isa='list', default=[], always_post_validate=True)
+ _vault_password = FieldAttribute(isa='string', always_post_validate=True)
# Role Attributes
- _roles = FieldAttribute(isa='list', default=[], priority=100)
+ _roles = FieldAttribute(isa='list', default=[], priority=90)
# Block (Task) Lists Attributes
_handlers = FieldAttribute(isa='list', default=[])
@@ -76,11 +84,11 @@ class Play(Base, Taggable, Become):
_tasks = FieldAttribute(isa='list', default=[])
# Flag/Setting Attributes
- _any_errors_fatal = FieldAttribute(isa='bool', default=False)
- _force_handlers = FieldAttribute(isa='bool')
- _max_fail_percentage = FieldAttribute(isa='string', default='0')
- _serial = FieldAttribute(isa='int', default=0)
- _strategy = FieldAttribute(isa='string', default='linear')
+ _any_errors_fatal = FieldAttribute(isa='bool', default=False, always_post_validate=True)
+ _force_handlers = FieldAttribute(isa='bool', always_post_validate=True)
+ _max_fail_percentage = FieldAttribute(isa='percent', always_post_validate=True)
+ _serial = FieldAttribute(isa='int', default=0, always_post_validate=True)
+ _strategy = FieldAttribute(isa='string', default='linear', always_post_validate=True)
# =================================================================================
@@ -120,9 +128,6 @@ class Play(Base, Taggable, Become):
ds['remote_user'] = ds['user']
del ds['user']
- if 'vars_prompt' in ds and not isinstance(ds['vars_prompt'], list):
- ds['vars_prompt'] = [ ds['vars_prompt'] ]
-
return super(Play, self).preprocess_data(ds)
def _load_hosts(self, attr, ds):
@@ -191,30 +196,28 @@ class Play(Base, Taggable, Become):
roles.append(Role.load(ri, play=self))
return roles
- def _post_validate_vars(self, attr, value, templar):
- '''
- Override post validation of vars on the play, as we don't want to
- template these too early.
- '''
- return value
-
- def _post_validate_vars_files(self, attr, value, templar):
- '''
- Override post validation of vars_files on the play, as we don't want to
- template these too early.
- '''
- return value
-
- # disable validation on various fields which will be validated later in other objects
- def _post_validate_become(self, attr, value, templar):
- return value
- def _post_validate_become_user(self, attr, value, templar):
- return value
- def _post_validate_become_method(self, attr, value, templar):
- return value
+ def _load_vars_prompt(self, attr, ds):
+ new_ds = preprocess_vars(ds)
+ vars_prompts = []
+ for prompt_data in new_ds:
+ if 'name' not in prompt_data:
+ self._display.deprecated("Using the 'short form' for vars_prompt has been deprecated")
+ for vname, prompt in prompt_data.iteritems():
+ vars_prompts.append(dict(
+ name = vname,
+ prompt = prompt,
+ default = None,
+ private = None,
+ confirm = None,
+ encrypt = None,
+ salt_size = None,
+ salt = None,
+ ))
+ else:
+ vars_prompts.append(prompt_data)
+ return vars_prompts
# FIXME: post_validation needs to ensure that become/su/sudo have only 1 set
-
def _compile_roles(self):
'''
Handles the role compilation step, returning a flat list of tasks
diff --git a/lib/ansible/playbook/play_context.py b/lib/ansible/playbook/play_context.py
index e57648d24e..f843f44b47 100644
--- a/lib/ansible/playbook/play_context.py
+++ b/lib/ansible/playbook/play_context.py
@@ -24,6 +24,10 @@ __metaclass__ = type
import pipes
import random
import re
+import string
+
+from six import iteritems, string_types
+from six.moves import range
from ansible import constants as C
from ansible.errors import AnsibleError
@@ -35,40 +39,6 @@ from ansible.utils.unicode import to_unicode
__all__ = ['PlayContext']
-SU_PROMPT_LOCALIZATIONS = [
- 'Password',
- '암호',
- 'パスワード',
- 'Adgangskode',
- 'Contraseña',
- 'Contrasenya',
- 'Hasło',
- 'Heslo',
- 'Jelszó',
- 'Lösenord',
- 'Mật khẩu',
- 'Mot de passe',
- 'Parola',
- 'Parool',
- 'Pasahitza',
- 'Passord',
- 'Passwort',
- 'Salasana',
- 'Sandi',
- 'Senha',
- 'Wachtwoord',
- 'ססמה',
- 'Лозинка',
- 'Парола',
- 'Пароль',
- 'गुप्तशब्द',
- 'शब्दकूट',
- 'సంకేతపదము',
- 'හස්පදය',
- '密码',
- '密碼',
-]
-
# the magic variable mapping dictionary below is used to translate
# host/inventory variables to fields in the PlayContext
# object. The dictionary values are tuples, to account for aliases
@@ -161,6 +131,8 @@ class PlayContext(Base):
_private_key_file = FieldAttribute(isa='string', default=C.DEFAULT_PRIVATE_KEY_FILE)
_timeout = FieldAttribute(isa='int', default=C.DEFAULT_TIMEOUT)
_shell = FieldAttribute(isa='string')
+ _ssh_extra_args = FieldAttribute(isa='string')
+ _connection_lockfd = FieldAttribute(isa='int')
# privilege escalation fields
_become = FieldAttribute(isa='bool')
@@ -189,7 +161,7 @@ class PlayContext(Base):
_step = FieldAttribute(isa='bool', default=False)
_diff = FieldAttribute(isa='bool', default=False)
- def __init__(self, play=None, options=None, passwords=None):
+ def __init__(self, play=None, options=None, passwords=None, connection_lockfd=None):
super(PlayContext, self).__init__()
@@ -199,6 +171,9 @@ class PlayContext(Base):
self.password = passwords.get('conn_pass','')
self.become_pass = passwords.get('become_pass','')
+ # a file descriptor to be used during locking operations
+ self.connection_lockfd = connection_lockfd
+
# set options before play to allow play to override them
if options:
self.set_options(options)
@@ -246,6 +221,7 @@ class PlayContext(Base):
self.remote_user = options.remote_user
self.private_key_file = options.private_key_file
+ self.ssh_extra_args = options.ssh_extra_args
# privilege escalation
self.become = options.become
@@ -267,6 +243,8 @@ class PlayContext(Base):
self.start_at_task = to_unicode(options.start_at_task)
if hasattr(options, 'diff') and options.diff:
self.diff = boolean(options.diff)
+ if hasattr(options, 'timeout') and options.timeout:
+ self.timeout = int(options.timeout)
# get the tag info from options, converting a comma-separated list
# of values into a proper list if need be. We check to see if the
@@ -274,7 +252,7 @@ class PlayContext(Base):
if hasattr(options, 'tags'):
if isinstance(options.tags, list):
self.only_tags.update(options.tags)
- elif isinstance(options.tags, basestring):
+ elif isinstance(options.tags, string_types):
self.only_tags.update(options.tags.split(','))
if len(self.only_tags) == 0:
@@ -283,7 +261,7 @@ class PlayContext(Base):
if hasattr(options, 'skip_tags'):
if isinstance(options.skip_tags, list):
self.skip_tags.update(options.skip_tags)
- elif isinstance(options.skip_tags, basestring):
+ elif isinstance(options.skip_tags, string_types):
self.skip_tags.update(options.skip_tags.split(','))
def set_task_and_variable_override(self, task, variables):
@@ -302,11 +280,17 @@ class PlayContext(Base):
if attr_val is not None:
setattr(new_info, attr, attr_val)
- # finally, use the MAGIC_VARIABLE_MAPPING dictionary to update this
- # connection info object with 'magic' variables from the variable list
- for (attr, variable_names) in MAGIC_VARIABLE_MAPPING.iteritems():
+ # next, use the MAGIC_VARIABLE_MAPPING dictionary to update this
+ # connection info object with 'magic' variables from the variable list.
+ # If the value 'ansible_delegated_vars' is in the variables, it means
+ # we have a delegated-to host, so we check there first before looking
+ # at the variables in general
+ delegated_vars = variables.get('ansible_delegated_vars', dict())
+ for (attr, variable_names) in iteritems(MAGIC_VARIABLE_MAPPING):
for variable_name in variable_names:
- if variable_name in variables:
+ if isinstance(delegated_vars, dict) and variable_name in delegated_vars:
+ setattr(new_info, attr, delegated_vars[variable_name])
+ elif variable_name in variables:
setattr(new_info, attr, variables[variable_name])
# make sure we get port defaults if needed
@@ -320,6 +304,13 @@ class PlayContext(Base):
elif new_info.become_method == 'su' and new_info.su_pass:
setattr(new_info, 'become_pass', new_info.su_pass)
+
+ # finally, in the special instance that the task was specified
+ # as a local action, override the connection in case it was changed
+ # during some other step in the process
+ if task._local_action:
+ setattr(new_info, 'connection', 'local')
+
return new_info
def make_become_cmd(self, cmd, executable=None):
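The delegated-vars precedence implemented in the hunk above, reduced to a runnable sketch (the mapping below is an illustrative subset, not the real MAGIC_VARIABLE_MAPPING):

```python
# Hedged sketch of the lookup order above: values from the delegated-to
# host win over the task host's ordinary variables.
MAGIC_VARIABLE_MAPPING = {
    'remote_addr': ('ansible_host', 'ansible_ssh_host'),   # illustrative subset
    'port': ('ansible_port', 'ansible_ssh_port'),
}

def resolve(attr, variables):
    delegated_vars = variables.get('ansible_delegated_vars', dict())
    for name in MAGIC_VARIABLE_MAPPING[attr]:
        if isinstance(delegated_vars, dict) and name in delegated_vars:
            return delegated_vars[name]
        elif name in variables:
            return variables[name]
    return None

print(resolve('port', {
    'ansible_port': 22,
    'ansible_delegated_vars': {'ansible_port': 2222},
}))  # -> 2222, the delegated-to host's port
```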
@@ -327,6 +318,7 @@ class PlayContext(Base):
prompt = None
success_key = None
+ self.prompt = None
if executable is None:
executable = C.DEFAULT_EXECUTABLE
@@ -334,25 +326,38 @@ class PlayContext(Base):
if self.become:
becomecmd = None
- randbits = ''.join(chr(random.randint(ord('a'), ord('z'))) for x in xrange(32))
+ randbits = ''.join(random.choice(string.ascii_lowercase) for x in range(32))
success_key = 'BECOME-SUCCESS-%s' % randbits
success_cmd = pipes.quote('echo %s; %s' % (success_key, cmd))
+ # set executable to use for the privilege escalation method, with various overrides
+ exe = self.become_exe or \
+ getattr(self, '%s_exe' % self.become_method, None) or \
+ C.DEFAULT_BECOME_EXE or \
+ getattr(C, 'DEFAULT_%s_EXE' % self.become_method.upper(), None) or \
+ self.become_method
+
+ # set flags to use for the privilege escalation method, with various overrides
+ flags = self.become_flags or \
+ getattr(self, '%s_flags' % self.become_method, None) or \
+ C.DEFAULT_BECOME_FLAGS or \
+ getattr(C, 'DEFAULT_%s_FLAGS' % self.become_method.upper(), None) or \
+ ''
+
if self.become_method == 'sudo':
# Rather than detect if sudo wants a password this time, -k makes sudo always ask for
# a password if one is required. Passing a quoted compound command to sudo (or sudo -s)
# directly doesn't work, so we shellquote it with pipes.quote() and pass the quoted
# string to the user's shell. We loop reading output until we see the randomly-generated
# sudo prompt set with the -p option.
- prompt = '[sudo via ansible, key=%s] password: ' % randbits
- exe = self.become_exe or self.sudo_exe or 'sudo'
- flags = self.become_flags or self.sudo_flags or C.DEFAULT_SUDO_FLAGS
# force quick error if password is required but not supplied, should prevent sudo hangs.
- if not self.become_pass:
- flags += " -n "
+ if self.become_pass:
+ prompt = '[sudo via ansible, key=%s] password: ' % randbits
+ becomecmd = '%s %s -p "%s" -S -u %s %s -c %s' % (exe, flags, prompt, self.become_user, executable, success_cmd)
+ else:
+ becomecmd = '%s %s -n -S -u %s %s -c %s' % (exe, flags, self.become_user, executable, success_cmd)
- becomecmd = '%s %s -S -p "%s" -u %s %s -c %s' % (exe, flags, prompt, self.become_user, executable, success_cmd)
elif self.become_method == 'su':
@@ -361,21 +366,15 @@ class PlayContext(Base):
return bool(SU_PROMPT_LOCALIZATIONS_RE.match(data))
prompt = detect_su_prompt
- exe = self.become_exe or self.su_exe or 'su'
- flags = self.become_flags or self.su_flags or ''
becomecmd = '%s %s %s -c "%s -c %s"' % (exe, flags, self.become_user, executable, success_cmd)
elif self.become_method == 'pbrun':
prompt='assword:'
- exe = self.become_exe or 'pbrun'
- flags = self.become_flags or ''
becomecmd = '%s -b %s -u %s %s' % (exe, flags, self.become_user, success_cmd)
elif self.become_method == 'pfexec':
- exe = self.become_exe or 'pfexec'
- flags = self.become_flags or ''
# No user as it uses its own exec_attr to figure it out
becomecmd = '%s %s "%s"' % (exe, flags, success_cmd)
@@ -383,15 +382,12 @@ class PlayContext(Base):
raise AnsibleError("'runas' is not yet implemented")
#TODO: figure out prompt
# this is not for use with the winrm plugin, but for if ssh ever becomes native on Windows
- exe = self.become_exe or 'runas'
- flags = self.become_flags or ''
becomecmd = '%s %s /user:%s "%s"' % (exe, flags, self.become_user, success_cmd)
elif self.become_method == 'doas':
prompt = 'Password:'
exe = self.become_exe or 'doas'
- flags = self.become_flags or ''
if not self.become_pass:
flags += ' -n '
@@ -404,7 +400,8 @@ class PlayContext(Base):
else:
raise AnsibleError("Privilege escalation method not found: %s" % self.become_method)
- self.prompt = prompt
+ if self.become_pass:
+ self.prompt = prompt
self.success_key = success_key
return ('%s -c %s' % (executable, pipes.quote(becomecmd)))
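The exe/flags selection consolidated above is a chained `or` fallback; a minimal sketch of the same pattern, with hypothetical values:

```python
# Hedged sketch of the fallback chain: the first truthy candidate wins,
# ending at a hard default. All values here are hypothetical.
become_exe = None          # per-task override
sudo_exe = None            # legacy method-specific setting
DEFAULT_BECOME_EXE = None  # ansible.cfg-level default

exe = become_exe or sudo_exe or DEFAULT_BECOME_EXE or 'sudo'
print(exe)  # -> 'sudo', since nothing earlier in the chain was set
```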
diff --git a/lib/ansible/playbook/playbook_include.py b/lib/ansible/playbook/playbook_include.py
index 777f81c515..247cd3f517 100644
--- a/lib/ansible/playbook/playbook_include.py
+++ b/lib/ansible/playbook/playbook_include.py
@@ -21,6 +21,8 @@ __metaclass__ = type
import os
+from six import iteritems
+
from ansible.errors import AnsibleParserError
from ansible.parsing.splitter import split_args, parse_kv
from ansible.parsing.yaml.objects import AnsibleBaseYAMLObject, AnsibleMapping
@@ -95,7 +97,7 @@ class PlaybookInclude(Base, Conditional, Taggable):
if isinstance(ds, AnsibleBaseYAMLObject):
new_ds.ansible_pos = ds.ansible_pos
- for (k,v) in ds.iteritems():
+ for (k,v) in iteritems(ds):
if k == 'include':
self._preprocess_include(ds, new_ds, k, v)
else:
diff --git a/lib/ansible/playbook/role/__init__.py b/lib/ansible/playbook/role/__init__.py
index 1a6f99540c..5e529ca190 100644
--- a/lib/ansible/playbook/role/__init__.py
+++ b/lib/ansible/playbook/role/__init__.py
@@ -25,7 +25,6 @@ import inspect
import os
from hashlib import sha1
-from types import NoneType
from ansible.errors import AnsibleError, AnsibleParserError
from ansible.parsing import DataLoader
@@ -46,13 +45,13 @@ __all__ = ['Role', 'hash_params']
# FIXME: this should be a utility function, but can't be a member of
# the role due to the fact that it would require the use of self
# in a static method. This is also used in the base class for
-# strategies (ansible/plugins/strategies/__init__.py)
+# strategies (ansible/plugins/strategy/__init__.py)
def hash_params(params):
if not isinstance(params, dict):
return params
else:
s = set()
- for k,v in params.iteritems():
+ for k,v in iteritems(params):
if isinstance(v, dict):
s.update((k, hash_params(v)))
elif isinstance(v, list):
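hash_params exists because plain dicts cannot key the role cache; a hedged, standalone rendering of the idea:

```python
# Hedged standalone version of the hashing above: nested dicts and lists
# are folded into hashable frozensets/tuples so params can key a cache.
def hash_params(params):
    if not isinstance(params, dict):
        return params
    s = set()
    for k, v in params.items():
        if isinstance(v, dict):
            s.update((k, hash_params(v)))
        elif isinstance(v, list):
            s.update((k, tuple(hash_params(item) for item in v)))
        else:
            s.update((k, v))
    return frozenset(s)

params = {'tags': ['web'], 'nginx_port': 8080}
ROLE_CACHE = {hash_params(params): 'role object'}  # {params: ...} would raise TypeError
```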
@@ -106,7 +105,7 @@ class Role(Base, Become, Conditional, Taggable):
params['tags'] = role_include.tags
hashed_params = hash_params(params)
if role_include.role in play.ROLE_CACHE:
- for (entry, role_obj) in play.ROLE_CACHE[role_include.role].iteritems():
+ for (entry, role_obj) in iteritems(play.ROLE_CACHE[role_include.role]):
if hashed_params == entry:
if parent_role:
role_obj.add_parent(parent_role)
@@ -184,16 +183,16 @@ class Role(Base, Become, Conditional, Taggable):
# vars and default vars are regular dictionaries
self._role_vars = self._load_role_yaml('vars')
- if not isinstance(self._role_vars, (dict, NoneType)):
- raise AnsibleParserError("The vars/main.yml file for role '%s' must contain a dictionary of variables" % self._role_name)
- elif self._role_vars is None:
+ if self._role_vars is None:
self._role_vars = dict()
+ elif not isinstance(self._role_vars, dict):
+ raise AnsibleParserError("The vars/main.yml file for role '%s' must contain a dictionary of variables" % self._role_name)
self._default_vars = self._load_role_yaml('defaults')
- if not isinstance(self._default_vars, (dict, NoneType)):
- raise AnsibleParserError("The default/main.yml file for role '%s' must contain a dictionary of variables" % self._role_name)
- elif self._default_vars is None:
+ if self._default_vars is None:
self._default_vars = dict()
+ elif not isinstance(self._default_vars, dict):
+ raise AnsibleParserError("The default/main.yml file for role '%s' must contain a dictionary of variables" % self._role_name)
def _load_role_yaml(self, subdir):
file_path = os.path.join(self._role_path, subdir)
@@ -370,7 +369,7 @@ class Role(Base, Become, Conditional, Taggable):
def deserialize(self, data, include_deps=True):
self._role_name = data.get('_role_name', '')
self._role_path = data.get('_role_path', '')
- self._role_vars = data.get('_role_vars', dict())
+ self._role_vars = data.get('_role_vars', dict())
self._role_params = data.get('_role_params', dict())
self._default_vars = data.get('_default_vars', dict())
self._had_task_run = data.get('_had_task_run', dict())
diff --git a/lib/ansible/playbook/role/definition.py b/lib/ansible/playbook/role/definition.py
index a54febe1fe..2d46cb7ddd 100644
--- a/lib/ansible/playbook/role/definition.py
+++ b/lib/ansible/playbook/role/definition.py
@@ -119,7 +119,7 @@ class RoleDefinition(Base, Become, Conditional, Taggable):
# if we have the required datastructures, and if the role_name
# contains a variable, try and template it now
- if self._play and self._variable_manager:
+ if self._variable_manager:
all_vars = self._variable_manager.get_vars(loader=self._loader, play=self._play)
templar = Templar(loader=self._loader, variables=all_vars)
if templar._contains_vars(role_name):
@@ -178,10 +178,11 @@ class RoleDefinition(Base, Become, Conditional, Taggable):
role_def = dict()
role_params = dict()
+ base_attribute_names = frozenset(self._get_base_attributes().keys())
for (key, value) in iteritems(ds):
# use the list of FieldAttribute values to determine what is and is not
# an extra parameter for this role (or sub-class of this role)
- if key not in [attr_name for (attr_name, attr_value) in self._get_base_attributes().iteritems()]:
+ if key not in base_attribute_names:
# this key does not match a field attribute, so it must be a role param
role_params[key] = value
else:
diff --git a/lib/ansible/playbook/role/requirement.py b/lib/ansible/playbook/role/requirement.py
index 03ffc3d710..d7ae9a626a 100644
--- a/lib/ansible/playbook/role/requirement.py
+++ b/lib/ansible/playbook/role/requirement.py
@@ -54,26 +54,26 @@ class RoleRequirement(RoleDefinition):
assert type(ds) == dict or isinstance(ds, string_types)
- role_name = ''
+ role_name = None
role_params = dict()
new_ds = dict()
if isinstance(ds, string_types):
role_name = ds
else:
- ds = self._preprocess_role_spec(ds)
- (new_ds, role_params) = self._split_role_params(ds)
+ (new_ds, role_params) = self._split_role_params(self._preprocess_role_spec(ds))
# pull the role name out of the ds
- role_name = new_ds.get('role_name')
- del ds['role_name']
+ role_name = new_ds.pop('role_name', new_ds.pop('role', None))
+ if role_name is None:
+ raise AnsibleError("Role requirement did not contain a role name!", obj=ds)
return (new_ds, role_name, role_params)
def _preprocess_role_spec(self, ds):
if 'role' in ds:
# Old style: {role: "galaxy.role,version,name", other_vars: "here" }
- role_info = self._role_spec_parse(ds['role'])
+ role_info = role_spec_parse(ds['role'])
if isinstance(role_info, dict):
# Warning: Slight change in behaviour here. name may be being
# overloaded. Previously, name was only a parameter to the role.
@@ -96,7 +96,7 @@ class RoleRequirement(RoleDefinition):
ds["role"] = ds["name"]
del ds["name"]
else:
- ds["role"] = self._repo_url_to_role_name(ds["src"])
+ ds["role"] = repo_url_to_role_name(ds["src"])
# set some values to a default value, if none were specified
ds.setdefault('version', '')
@@ -104,63 +104,102 @@ class RoleRequirement(RoleDefinition):
return ds
- def _repo_url_to_role_name(self, repo_url):
- # gets the role name out of a repo like
- # http://git.example.com/repos/repo.git" => "repo"
-
- if '://' not in repo_url and '@' not in repo_url:
- return repo_url
- trailing_path = repo_url.split('/')[-1]
- if trailing_path.endswith('.git'):
- trailing_path = trailing_path[:-4]
- if trailing_path.endswith('.tar.gz'):
- trailing_path = trailing_path[:-7]
- if ',' in trailing_path:
- trailing_path = trailing_path.split(',')[0]
- return trailing_path
-
- def _role_spec_parse(self, role_spec):
- # takes a repo and a version like
- # git+http://git.example.com/repos/repo.git,v1.0
- # and returns a list of properties such as:
- # {
- # 'scm': 'git',
- # 'src': 'http://git.example.com/repos/repo.git',
- # 'version': 'v1.0',
- # 'name': 'repo'
- # }
-
- default_role_versions = dict(git='master', hg='tip')
-
- role_spec = role_spec.strip()
- role_version = ''
- if role_spec == "" or role_spec.startswith("#"):
- return (None, None, None, None)
-
- tokens = [s.strip() for s in role_spec.split(',')]
-
- # assume https://github.com URLs are git+https:// URLs and not
- # tarballs unless they end in '.zip'
- if 'github.com/' in tokens[0] and not tokens[0].startswith("git+") and not tokens[0].endswith('.tar.gz'):
- tokens[0] = 'git+' + tokens[0]
-
- if '+' in tokens[0]:
- (scm, role_url) = tokens[0].split('+')
- else:
- scm = None
- role_url = tokens[0]
-
- if len(tokens) >= 2:
- role_version = tokens[1]
+def repo_url_to_role_name(repo_url):
+ # gets the role name out of a repo like
+ # http://git.example.com/repos/repo.git" => "repo"
+
+ if '://' not in repo_url and '@' not in repo_url:
+ return repo_url
+ trailing_path = repo_url.split('/')[-1]
+ if trailing_path.endswith('.git'):
+ trailing_path = trailing_path[:-4]
+ if trailing_path.endswith('.tar.gz'):
+ trailing_path = trailing_path[:-7]
+ if ',' in trailing_path:
+ trailing_path = trailing_path.split(',')[0]
+ return trailing_path
+
+def role_spec_parse(role_spec):
+ # takes a repo and a version like
+ # git+http://git.example.com/repos/repo.git,v1.0
+# and returns a dict of properties such as:
+ # {
+ # 'scm': 'git',
+ # 'src': 'http://git.example.com/repos/repo.git',
+ # 'version': 'v1.0',
+ # 'name': 'repo'
+ # }
+
+ default_role_versions = dict(git='master', hg='tip')
+
+ role_spec = role_spec.strip()
+ role_version = ''
+ if role_spec == "" or role_spec.startswith("#"):
+ return (None, None, None, None)
+
+ tokens = [s.strip() for s in role_spec.split(',')]
+
+ # assume https://github.com URLs are git+https:// URLs and not
+ # tarballs unless they end in '.zip'
+ if 'github.com/' in tokens[0] and not tokens[0].startswith("git+") and not tokens[0].endswith('.tar.gz'):
+ tokens[0] = 'git+' + tokens[0]
+
+ if '+' in tokens[0]:
+ (scm, role_url) = tokens[0].split('+')
+ else:
+ scm = None
+ role_url = tokens[0]
+
+ if len(tokens) >= 2:
+ role_version = tokens[1]
+
+ if len(tokens) == 3:
+ role_name = tokens[2]
+ else:
+ role_name = repo_url_to_role_name(tokens[0])
+
+ if scm and not role_version:
+ role_version = default_role_versions.get(scm, '')
+
+ return dict(scm=scm, src=role_url, version=role_version, role_name=role_name)
+
+# FIXME: all of these methods need to be cleaned up/reorganized below this
+def get_opt(options, k, defval=""):
+ """
+ Returns an option from an Optparse values instance.
+ """
+ try:
+ data = getattr(options, k)
+ except:
+ return defval
+ if k == "roles_path":
+ if os.pathsep in data:
+ data = data.split(os.pathsep)[0]
+ return data
+
+def get_role_path(role_name, options):
+ """
+ Returns the role path based on the roles_path option
+ and the role name.
+ """
+ roles_path = get_opt(options,'roles_path')
+ roles_path = os.path.join(roles_path, role_name)
+ roles_path = os.path.expanduser(roles_path)
+ return roles_path
- if len(tokens) == 3:
- role_name = tokens[2]
+def get_role_metadata(role_name, options):
+ """
+ Returns the role metadata parsed from YAML, if the file 'meta/main.yml'
+ exists in the specified role_path
+ """
+ role_path = os.path.join(get_role_path(role_name, options), 'meta/main.yml')
+ try:
+ if os.path.isfile(role_path):
+ f = open(role_path, 'r')
+ meta_data = yaml.safe_load(f)
+ f.close()
+ return meta_data
else:
- role_name = self._repo_url_to_role_name(tokens[0])
-
- if scm and not role_version:
- role_version = default_role_versions.get(scm, '')
-
- return dict(scm=scm, src=role_url, version=role_version, role_name=role_name)
-
-
+ return None
+ except:
+ return None
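For reference, hedged examples of what the now module-level helpers above return (shapes per the code, results are assumptions):

```python
# Hedged examples of the module-level helpers above:
#
# repo_url_to_role_name('http://git.example.com/repos/repo.git')
#   -> 'repo'
#
# role_spec_parse('git+http://git.example.com/repos/repo.git,v1.0')
#   -> {'scm': 'git', 'src': 'http://git.example.com/repos/repo.git',
#       'version': 'v1.0', 'role_name': 'repo'}
```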
diff --git a/lib/ansible/playbook/task.py b/lib/ansible/playbook/task.py
index 2eda23a7d7..8f7e1b7715 100644
--- a/lib/ansible/playbook/task.py
+++ b/lib/ansible/playbook/task.py
@@ -19,7 +19,7 @@
from __future__ import (absolute_import, division, print_function)
__metaclass__ = type
-from six import string_types
+from six import iteritems, string_types
from ansible.errors import AnsibleError
@@ -39,6 +39,13 @@ from ansible.playbook.taggable import Taggable
__all__ = ['Task']
+try:
+ from __main__ import display
+ display = display
+except ImportError:
+ from ansible.utils.display import Display
+ display = Display()
+
class Task(Base, Conditional, Taggable, Become):
"""
@@ -88,6 +95,10 @@ class Task(Base, Conditional, Taggable, Become):
self._role = role
self._task_include = task_include
+ # special flag for local_action: tasks, to make sure their
+ # connection type of local isn't overridden incorrectly
+ self._local_action = False
+
super(Task, self).__init__()
def get_name(self):
@@ -111,7 +122,7 @@ class Task(Base, Conditional, Taggable, Become):
return ds
elif isinstance(ds, dict):
buf = ""
- for (k,v) in ds.iteritems():
+ for (k,v) in iteritems(ds):
if k.startswith('_'):
continue
buf = buf + "%s=%s " % (k,v)
@@ -123,6 +134,16 @@ class Task(Base, Conditional, Taggable, Become):
t = Task(block=block, role=role, task_include=task_include)
return t.load_data(data, variable_manager=variable_manager, loader=loader)
+ def load_data(self, ds, variable_manager=None, loader=None):
+ '''
+ We override load_data for tasks so that we can pull special flags
+ out of the task args and set them internally, so the user never
+ sees them.
+ '''
+ t = super(Task, self).load_data(ds=ds, variable_manager=variable_manager, loader=loader)
+ t._local_action = t.args.pop('_local_action', False)
+ return t
+
def __repr__(self):
''' returns a human readable representation of the task '''
return "TASK: %s" % self.get_name()
@@ -173,7 +194,7 @@ class Task(Base, Conditional, Taggable, Become):
else:
new_ds['vars'] = dict()
- for (k,v) in ds.iteritems():
+ for (k,v) in iteritems(ds):
if k in ('action', 'local_action', 'args', 'connection') or k == action or k == 'shell':
# we don't want to re-assign these values, which were
# determined by the ModuleArgsParser() above
@@ -185,7 +206,7 @@ class Task(Base, Conditional, Taggable, Become):
# top level of the task, so we move those into the 'vars' dictionary
# here, and show a deprecation message as we will remove this at
# some point in the future.
- if action == 'include' and k not in self._get_base_attributes():
+ if action == 'include' and k not in self._get_base_attributes() and k not in self.DEPRECATED_ATTRIBUTES:
self._display.deprecated("Specifying include variables at the top-level of the task is deprecated. Please see:\nhttp://docs.ansible.com/ansible/playbooks_roles.html#task-include-files-and-encouraging-reuse\n\nfor currently supported syntax regarding included files and variables")
new_ds['vars'][k] = v
else:
@@ -193,6 +214,14 @@ class Task(Base, Conditional, Taggable, Become):
return super(Task, self).preprocess_data(new_ds)
+ def _load_any_errors_fatal(self, attr, value):
+ '''
+ Exists only to show a deprecation warning, as this attribute is not valid
+ at the task level.
+ '''
+ display.deprecated("Setting any_errors_fatal on a task is no longer supported. This should be set at the play level only")
+ return None
+
def post_validate(self, templar):
'''
Override of base class post_validate, to also do final validation on
@@ -245,6 +274,7 @@ class Task(Base, Conditional, Taggable, Become):
def copy(self, exclude_block=False):
new_me = super(Task, self).copy()
+ new_me._local_action = self._local_action
new_me._block = None
if self._block and not exclude_block:
@@ -256,12 +286,13 @@ class Task(Base, Conditional, Taggable, Become):
new_me._task_include = None
if self._task_include:
- new_me._task_include = self._task_include.copy()
+ new_me._task_include = self._task_include.copy(exclude_block=exclude_block)
return new_me
def serialize(self):
data = super(Task, self).serialize()
+ data['_local_action'] = self._local_action
if self._block:
data['block'] = self._block.serialize()
@@ -280,6 +311,7 @@ class Task(Base, Conditional, Taggable, Become):
#from ansible.playbook.task_include import TaskInclude
block_data = data.get('block')
+ self._local_action = data.get('_local_action', False)
if block_data:
b = Block()
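Taken together with the PlayContext change earlier, the private flag flows roughly like this (a hedged sketch, names simplified):

```python
# Hedged end-to-end sketch of the _local_action flag: the args parser
# plants it, Task.load_data strips it, PlayContext forces the connection.
args = {'cmd': 'uptime', '_local_action': True}  # as left by the args parser

local_action = args.pop('_local_action', False)  # what load_data() does above

connection = 'ssh'
if local_action:
    connection = 'local'                         # the PlayContext override
print(connection)  # -> 'local'
```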
diff --git a/lib/ansible/plugins/__init__.py b/lib/ansible/plugins/__init__.py
index 4054c61633..06f2261138 100644
--- a/lib/ansible/plugins/__init__.py
+++ b/lib/ansible/plugins/__init__.py
@@ -40,18 +40,6 @@ except ImportError:
MODULE_CACHE = {}
PATH_CACHE = {}
PLUGIN_PATH_CACHE = {}
-_basedirs = []
-
-# FIXME: the _basedirs code may be dead, and no longer needed, as
-# we now use add_directory for all plugin types here instead
-# of relying on this global variable (which also causes problems
-# with forked processes). See the Playbook() and Role() classes
-# for how we now ue get_all_plugin_loaders() below.
-def push_basedir(basedir):
- # avoid pushing the same absolute dir more than once
- basedir = to_unicode(os.path.realpath(basedir))
- if basedir not in _basedirs:
- _basedirs.insert(0, basedir)
def get_all_plugin_loaders():
return [(name, obj) for (name, obj) in inspect.getmembers(sys.modules[__name__]) if isinstance(obj, PluginLoader)]
@@ -165,22 +153,6 @@ class PluginLoader:
return self._paths
ret = self._extra_dirs[:]
- for basedir in _basedirs:
- fullpath = os.path.realpath(os.path.join(basedir, self.subdir))
- if os.path.isdir(fullpath):
- files = glob.glob("%s/*" % fullpath)
-
- # allow directories to be two levels deep
- files2 = glob.glob("%s/*/*" % fullpath)
-
- if files2 is not None:
- files.extend(files2)
-
- for file in files:
- if os.path.isdir(file) and file not in ret:
- ret.append(file)
- if fullpath not in ret:
- ret.append(fullpath)
# look in any configured plugin paths, allow one level deep for subcategories
if self.config is not None:
@@ -345,7 +317,7 @@ callback_loader = PluginLoader(
connection_loader = PluginLoader(
'Connection',
- 'ansible.plugins.connections',
+ 'ansible.plugins.connection',
C.DEFAULT_CONNECTION_PLUGIN_PATH,
'connection_plugins',
aliases={'paramiko': 'paramiko_ssh'},
@@ -404,7 +376,7 @@ fragment_loader = PluginLoader(
strategy_loader = PluginLoader(
'StrategyModule',
- 'ansible.plugins.strategies',
+ 'ansible.plugins.strategy',
None,
'strategy_plugins',
required_base_class='StrategyBase',
diff --git a/lib/ansible/plugins/action/__init__.py b/lib/ansible/plugins/action/__init__.py
index 6cea019ce1..aa3c630ebb 100644
--- a/lib/ansible/plugins/action/__init__.py
+++ b/lib/ansible/plugins/action/__init__.py
@@ -30,7 +30,7 @@ import tempfile
import time
from ansible import constants as C
-from ansible.errors import AnsibleError
+from ansible.errors import AnsibleError, AnsibleConnectionFailure
from ansible.executor.module_common import modify_module
from ansible.parsing.utils.jsonify import jsonify
from ansible.utils.unicode import to_bytes
@@ -142,7 +142,7 @@ class ActionBase:
if tmp and "tmp" in tmp:
# tmp has already been created
return False
- if not self._connection.has_pipelining or not C.ANSIBLE_SSH_PIPELINING or C.DEFAULT_KEEP_REMOTE_FILES or self._play_context.become:
+ if not self._connection.has_pipelining or not C.ANSIBLE_SSH_PIPELINING or C.DEFAULT_KEEP_REMOTE_FILES or self._play_context.become_method == 'su':
# tmp is necessary to store the module source code
# or we want to keep the files on the target system
return True
@@ -166,7 +166,7 @@ class ActionBase:
tmp_mode = None
if self._play_context.remote_user != 'root' or self._play_context.become and self._play_context.become_user != 'root':
- tmp_mode = 0755
+ tmp_mode = 0o755
cmd = self._connection._shell.mkdtemp(basefile, use_system_tmp, tmp_mode)
self._display.debug("executing _low_level_execute_command to create the tmp path")
@@ -190,7 +190,7 @@ class ActionBase:
output = 'Authentication or permission failure. In some cases, you may have been able to authenticate and did not have permissions on the remote directory. Consider changing the remote temp path in ansible.cfg to a path rooted in "/tmp". Failed command was: %s, exited with result %d' % (cmd, result['rc'])
if 'stdout' in result and result['stdout'] != '':
output = output + ": %s" % result['stdout']
- raise AnsibleError(output)
+ raise AnsibleConnectionFailure(output)
# FIXME: do we still need to do this?
#rc = self._connection._shell.join_path(utils.last_non_blank_line(result['stdout']).strip(), '')
@@ -275,7 +275,7 @@ class ActionBase:
return data2.split()[0]
except IndexError:
self._display.warning("Calculating checksum failed unusually, please report this to " + \
- "the list so it can be fixed\ncommand: %s\n----\noutput: %s\n----\n") % (cmd, data)
+ "the list so it can be fixed\ncommand: %s\n----\noutput: %s\n----\n" % (cmd, data))
# this will signal that it changed and allow things to keep going
return "INVALIDCHECKSUM"
@@ -347,8 +347,6 @@ class ActionBase:
if self._play_context.no_log:
module_args['_ansible_no_log'] = True
- self._display.debug("in _execute_module (%s, %s)" % (module_name, module_args))
-
(module_style, shebang, module_data) = self._configure_module(module_name=module_name, module_args=module_args, task_vars=task_vars)
if not shebang:
raise AnsibleError("module is missing interpreter line")
@@ -455,7 +453,9 @@ class ActionBase:
self._display.debug("no command, exiting _low_level_execute_command()")
return dict(stdout='', stderr='')
- if sudoable and self._play_context.become:
+ allow_same_user = C.BECOME_ALLOW_SAME_USER
+ same_user = self._play_context.become_user == self._play_context.remote_user
+ if sudoable and self._play_context.become and (allow_same_user or not same_user):
self._display.debug("using become for this command")
cmd = self._play_context.make_become_cmd(cmd, executable=executable)
diff --git a/lib/ansible/plugins/action/add_host.py b/lib/ansible/plugins/action/add_host.py
index cf2dab1737..0e7d3187e5 100644
--- a/lib/ansible/plugins/action/add_host.py
+++ b/lib/ansible/plugins/action/add_host.py
@@ -23,6 +23,8 @@ __metaclass__ = type
import re
from ansible.plugins.action import ActionBase
+from ansible.parsing.utils.addresses import parse_address
+from ansible.errors import AnsibleError, AnsibleParserError
class ActionModule(ActionBase):
''' Create inventory hosts and groups in the memory inventory'''
@@ -40,9 +42,11 @@ class ActionModule(ActionBase):
new_name = self._task.args.get('name', self._task.args.get('hostname', None))
#vv("creating host via 'add_host': hostname=%s" % new_name)
- new_name, new_port = _parse_ip_host_and_port(new_name)
- if new_port:
- self._task.args['ansible_ssh_port'] = new_port
+ name, port = parse_address(new_name, allow_ranges=False)
+ if not name:
+ raise AnsibleError("Invalid inventory hostname: %s" % new_name)
+ if port:
+ self._task.args['ansible_ssh_port'] = port
groups = self._task.args.get('groupname', self._task.args.get('groups', self._task.args.get('group', '')))
# add it to the group if that was specified
@@ -58,28 +62,4 @@ class ActionModule(ActionBase):
if not k in [ 'name', 'hostname', 'groupname', 'groups' ]:
host_vars[k] = self._task.args[k]
- return dict(changed=True, add_host=dict(host_name=new_name, groups=new_groups, host_vars=host_vars))
-
-def _parse_ip_host_and_port(hostname):
- """
- Attempt to parse the hostname and port from a hostname, e.g.,
-
- some-host-name
- some-host-name:80
- 8.8.8.8
- 8.8.8.8:80
- 2001:db8:0:1
- [2001:db8:0:1]:80
- """
- if hostname.count(':') > 1:
- match = re.match(
- '\[(?P<ip>[^\]]+)\](:(?P<port>[0-9]+))?',
- hostname
- )
- if match:
- return match.group('ip'), match.group('port')
- else:
- return hostname, None
- elif ':' in hostname:
- return hostname.rsplit(':', 1)
- return hostname, None
+ return dict(changed=True, add_host=dict(host_name=name, groups=new_groups, host_vars=host_vars))
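A hedged usage sketch of parse_address as called above; it covers the bracketed-IPv6 forms the removed regex helper handled (the expected results in comments are assumptions):

```python
from ansible.parsing.utils.addresses import parse_address

for spec in ('some-host-name:80', '8.8.8.8', '[2001:db8::1]:80'):
    name, port = parse_address(spec, allow_ranges=False)
    print(name, port)
# some-host-name 80
# 8.8.8.8 None
# 2001:db8::1 80
```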
diff --git a/lib/ansible/plugins/action/async.py b/lib/ansible/plugins/action/async.py
index b2fcd8756d..af850b41cb 100644
--- a/lib/ansible/plugins/action/async.py
+++ b/lib/ansible/plugins/action/async.py
@@ -55,7 +55,7 @@ class ActionModule(ActionBase):
async_limit = self._task.async
async_jid = str(random.randint(0, 999999999999))
- async_cmd = " ".join([str(x) for x in [async_module_path, async_jid, async_limit, remote_module_path, argsfile]])
+ async_cmd = " ".join([str(x) for x in [env_string, async_module_path, async_jid, async_limit, remote_module_path, argsfile]])
result = self._low_level_execute_command(cmd=async_cmd, tmp=None)
# clean up after
diff --git a/lib/ansible/plugins/action/fetch.py b/lib/ansible/plugins/action/fetch.py
index 8c9a2ed16d..f963a07cb0 100644
--- a/lib/ansible/plugins/action/fetch.py
+++ b/lib/ansible/plugins/action/fetch.py
@@ -61,7 +61,11 @@ class ActionModule(ActionBase):
remote_data = None
if remote_checksum in ('1', '2') or self._play_context.become:
slurpres = self._execute_module(module_name='slurp', module_args=dict(src=source), task_vars=task_vars, tmp=tmp)
- if slurpres.get('rc') == 0:
+ if slurpres.get('failed'):
+ if remote_checksum == '1' and not fail_on_missing:
+ return dict(msg="the remote file does not exist, not transferring, ignored", file=source, changed=False)
+ return slurpres
+ else:
if slurpres['encoding'] == 'base64':
remote_data = base64.b64decode(slurpres['content'])
if remote_data is not None:
@@ -72,9 +76,6 @@ class ActionModule(ActionBase):
remote_source = slurpres.get('source')
if remote_source and remote_source != source:
source = remote_source
- else:
- # FIXME: should raise an error here? the old code did nothing
- pass
# calculate the destination name
if os.path.sep not in self._connection._shell.join_path('a', ''):
diff --git a/lib/ansible/plugins/action/include_vars.py b/lib/ansible/plugins/action/include_vars.py
index 31d93e7acc..e0c1c088b8 100644
--- a/lib/ansible/plugins/action/include_vars.py
+++ b/lib/ansible/plugins/action/include_vars.py
@@ -19,8 +19,6 @@ __metaclass__ = type
import os
-from types import NoneType
-
from ansible.errors import AnsibleError
from ansible.parsing import DataLoader
from ansible.plugins.action import ActionBase
diff --git a/lib/ansible/plugins/action/package.py b/lib/ansible/plugins/action/package.py
index 9488b9f108..fce79ee22e 100644
--- a/lib/ansible/plugins/action/package.py
+++ b/lib/ansible/plugins/action/package.py
@@ -39,11 +39,15 @@ class ActionModule(ActionBase):
if module == 'auto':
facts = self._execute_module(module_name='setup', module_args=dict(filter='ansible_pkg_mgr'), task_vars=task_vars)
- self._display.degug("Facts %s" % facts)
+ self._display.debug("Facts %s" % facts)
if not 'failed' in facts:
module = getattr(facts['ansible_facts'], 'ansible_pkg_mgr', 'auto')
if module != 'auto':
+
+ if module not in self._shared_loader_obj.module_loader:
+ return {'failed': True, 'msg': 'Could not find a module for %s.' % module}
+
# run the 'package' module
new_module_args = self._task.args.copy()
if 'use' in new_module_args:
diff --git a/lib/ansible/plugins/action/service.py b/lib/ansible/plugins/action/service.py
index fc1704c386..8cceb85e94 100644
--- a/lib/ansible/plugins/action/service.py
+++ b/lib/ansible/plugins/action/service.py
@@ -20,6 +20,7 @@ __metaclass__ = type
from ansible.plugins.action import ActionBase
+
class ActionModule(ActionBase):
TRANSFERS_FILES = False
@@ -43,7 +44,7 @@ class ActionModule(ActionBase):
if not 'failed' in facts:
module = getattr(facts['ansible_facts'], 'ansible_service_mgr', 'auto')
- if not module or module == 'auto':
+ if not module or module == 'auto' or module not in self._shared_loader_obj.module_loader:
module = 'service'
if module != 'auto':
diff --git a/lib/ansible/plugins/action/set_fact.py b/lib/ansible/plugins/action/set_fact.py
index 5822fb3f08..2e2c7bd74a 100644
--- a/lib/ansible/plugins/action/set_fact.py
+++ b/lib/ansible/plugins/action/set_fact.py
@@ -14,47 +14,17 @@
#
# You should have received a copy of the GNU General Public License
# along with Ansible. If not, see <http://www.gnu.org/licenses/>.
+
from __future__ import (absolute_import, division, print_function)
__metaclass__ = type
-import ast
-
-from six import string_types
+from six import iteritems
from ansible.errors import AnsibleError
from ansible.plugins.action import ActionBase
from ansible.utils.boolean import boolean
+from ansible.utils.vars import isidentifier
-def isidentifier(ident):
- """
- Determines, if string is valid Python identifier using the ast module.
- Orignally posted at: http://stackoverflow.com/a/29586366
- """
-
- if not isinstance(ident, string_types):
- return False
-
- try:
- root = ast.parse(ident)
- except SyntaxError:
- return False
-
- if not isinstance(root, ast.Module):
- return False
-
- if len(root.body) != 1:
- return False
-
- if not isinstance(root.body[0], ast.Expr):
- return False
-
- if not isinstance(root.body[0].value, ast.Name):
- return False
-
- if root.body[0].value.id != ident:
- return False
-
- return True
class ActionModule(ActionBase):
@@ -63,7 +33,7 @@ class ActionModule(ActionBase):
def run(self, tmp=None, task_vars=dict()):
facts = dict()
if self._task.args:
- for (k, v) in self._task.args.iteritems():
+ for (k, v) in iteritems(self._task.args):
k = self._templar.template(k)
if not isidentifier(k):
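Hedged behavior notes for the isidentifier() now imported from ansible.utils.vars, matching the removed ast-based version:

```python
# isidentifier('my_fact')   -> True
# isidentifier('2fast')     -> False   (cannot start with a digit)
# isidentifier('a b')       -> False   (not a single Name node)
```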
diff --git a/lib/ansible/plugins/action/synchronize.py b/lib/ansible/plugins/action/synchronize.py
index 79de84238d..f9a4b49bb7 100644
--- a/lib/ansible/plugins/action/synchronize.py
+++ b/lib/ansible/plugins/action/synchronize.py
@@ -48,9 +48,9 @@ class ActionModule(ActionBase):
if host not in C.LOCALHOST:
if user:
- return '%s@%s:%s' % (user, host, path)
+ return '%s@[%s]:%s' % (user, host, path)
else:
- return '%s:%s' % (host, path)
+ return '[%s]:%s' % (host, path)
if ':' not in path and not path.startswith('/'):
path = self._get_absolute_path(path=path)
@@ -60,9 +60,9 @@ class ActionModule(ActionBase):
transport = self._play_context.connection
if host not in C.LOCALHOST or transport != "local":
if user:
- return '%s@%s:%s' % (user, host, path)
+ return '%s@[%s]:%s' % (user, host, path)
else:
- return '%s:%s' % (host, path)
+ return '[%s]:%s' % (host, path)
if ':' not in path and not path.startswith('/'):
path = self._get_absolute_path(path=path)
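The brackets added above matter because "host:path" is ambiguous when the host is an IPv6 address, which itself contains colons; a hedged illustration:

```python
# Hedged illustration of the bracketed rsync/scp-style remote spec above.
def remote_spec(user, host, path):
    if user:
        return '%s@[%s]:%s' % (user, host, path)
    return '[%s]:%s' % (host, path)

print(remote_spec('deploy', '2001:db8::1', '/srv/app'))
# -> deploy@[2001:db8::1]:/srv/app
```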
@@ -77,12 +77,15 @@ class ActionModule(ActionBase):
# connection to the remote host
if 'ansible_syslog_facility' in task_vars:
del task_vars['ansible_syslog_facility']
- for key in task_vars:
+ for key in task_vars.keys():
if key.startswith("ansible_") and key.endswith("_interpreter"):
del task_vars[key]
# Add the definitions from localhost
- localhost = task_vars['hostvars']['127.0.0.1']
+ for host in C.LOCALHOST:
+ if host in task_vars['hostvars']:
+ localhost = task_vars['hostvars'][host]
+ break
if 'ansible_syslog_facility' in localhost:
task_vars['ansible_syslog_facility'] = localhost['ansible_syslog_facility']
for key in localhost:
diff --git a/lib/ansible/plugins/action/template.py b/lib/ansible/plugins/action/template.py
index 7a68375a44..6f24fb9c1c 100644
--- a/lib/ansible/plugins/action/template.py
+++ b/lib/ansible/plugins/action/template.py
@@ -111,14 +111,18 @@ class ActionModule(ActionBase):
time.localtime(os.path.getmtime(source))
)
- self._templar.environment.searchpath = [self._loader._basedir, os.path.dirname(source)]
+ # Create a new searchpath list to assign to the templar environment's file
+ # loader, so that it knows about the other paths to find template files
+ searchpath = [self._loader._basedir, os.path.dirname(source)]
if self._task._role is not None:
- self._templar.environment.searchpath.insert(1, C.DEFAULT_ROLES_PATH)
- self._templar.environment.searchpath.insert(1, self._task._role._role_path)
+ searchpath.insert(1, C.DEFAULT_ROLES_PATH)
+ searchpath.insert(1, self._task._role._role_path)
+
+ self._templar.environment.loader.searchpath = searchpath
old_vars = self._templar._available_variables
self._templar.set_available_variables(temp_vars)
- resultant = self._templar.template(template_data, preserve_trailing_newlines=True, convert_data=False)
+ resultant = self._templar.template(template_data, preserve_trailing_newlines=True, escape_backslashes=False, convert_data=False)
self._templar.set_available_variables(old_vars)
except Exception as e:
return dict(failed=True, msg=type(e).__name__ + ": " + str(e))
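A hedged sketch of the searchpath assignment above: replacing the FileSystemLoader's searchpath (rather than an attribute on the environment itself) is what actually affects template lookup. Paths here are hypothetical:

```python
import jinja2

env = jinja2.Environment(loader=jinja2.FileSystemLoader(['.']))
env.loader.searchpath = ['/path/to/playbook', '/path/to/role', '/etc/ansible/roles']
# env.get_template('motd.j2')  # would now be resolved against all three dirs
```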
diff --git a/lib/ansible/plugins/action/unarchive.py b/lib/ansible/plugins/action/unarchive.py
index b5be23069a..8151efe428 100644
--- a/lib/ansible/plugins/action/unarchive.py
+++ b/lib/ansible/plugins/action/unarchive.py
@@ -64,7 +64,7 @@ class ActionModule(ActionBase):
if self._task._role is not None:
source = self._loader.path_dwim_relative(self._task._role._role_path, 'files', source)
else:
- source = self._loader.path_dwim_relative(tself._loader.get_basedir(), 'files', source)
+ source = self._loader.path_dwim_relative(self._loader.get_basedir(), 'files', source)
remote_checksum = self._remote_checksum(tmp, dest, all_vars=task_vars)
if remote_checksum != '3':
diff --git a/lib/ansible/plugins/cache/__init__.py b/lib/ansible/plugins/cache/__init__.py
index 323d8c5ca3..0938e0983e 100644
--- a/lib/ansible/plugins/cache/__init__.py
+++ b/lib/ansible/plugins/cache/__init__.py
@@ -60,7 +60,7 @@ class FactCache(MutableMapping):
def copy(self):
""" Return a primitive copy of the keys and values from the cache. """
- return dict([(k, v) for (k, v) in self.iteritems()])
+ return dict(self)
def keys(self):
return self._plugin.keys()
diff --git a/lib/ansible/plugins/cache/jsonfile.py b/lib/ansible/plugins/cache/jsonfile.py
index 04e05f9b0c..3fc3458fb9 100644
--- a/lib/ansible/plugins/cache/jsonfile.py
+++ b/lib/ansible/plugins/cache/jsonfile.py
@@ -45,7 +45,7 @@ class CacheModule(BaseCacheModule):
if not os.path.exists(self._cache_dir):
try:
os.makedirs(self._cache_dir)
- except (OSError,IOError), e:
+ except (OSError,IOError) as e:
self._display.warning("error while trying to create cache dir %s : %s" % (self._cache_dir, str(e)))
return None
@@ -60,7 +60,7 @@ class CacheModule(BaseCacheModule):
cachefile = "%s/%s" % (self._cache_dir, key)
try:
f = codecs.open(cachefile, 'r', encoding='utf-8')
- except (OSError,IOError), e:
+ except (OSError,IOError) as e:
self._display.warning("error while trying to read %s : %s" % (cachefile, str(e)))
pass
else:
@@ -81,7 +81,7 @@ class CacheModule(BaseCacheModule):
cachefile = "%s/%s" % (self._cache_dir, key)
try:
f = codecs.open(cachefile, 'w', encoding='utf-8')
- except (OSError,IOError), e:
+ except (OSError,IOError) as e:
self._display.warning("error while trying to write to %s : %s" % (cachefile, str(e)))
pass
else:
@@ -94,7 +94,7 @@ class CacheModule(BaseCacheModule):
cachefile = "%s/%s" % (self._cache_dir, key)
try:
st = os.stat(cachefile)
- except (OSError,IOError), e:
+ except (OSError,IOError) as e:
if e.errno == errno.ENOENT:
return False
else:
@@ -126,7 +126,7 @@ class CacheModule(BaseCacheModule):
try:
st = os.stat(cachefile)
return True
- except (OSError,IOError), e:
+ except (OSError,IOError) as e:
if e.errno == errno.ENOENT:
return False
else:
@@ -137,7 +137,7 @@ class CacheModule(BaseCacheModule):
del self._cache[key]
try:
os.remove("%s/%s" % (self._cache_dir, key))
- except (OSError,IOError), e:
+ except (OSError,IOError) as e:
pass #TODO: only pass on non existing?
def flush(self):
diff --git a/lib/ansible/plugins/cache/redis.py b/lib/ansible/plugins/cache/redis.py
index 99ecbffcb4..d31dcad758 100644
--- a/lib/ansible/plugins/cache/redis.py
+++ b/lib/ansible/plugins/cache/redis.py
@@ -75,7 +75,7 @@ class CacheModule(BaseCacheModule):
def _expire_keys(self):
if self._timeout > 0:
expiry_age = time.time() - self._timeout
- self._cache.zremrangebyscore(self._keys_set, 0, expiry_age)
+ self._cache.zremrangebyscore(self._keys_set, 0, expiry_age)
def keys(self):
self._expire_keys()
diff --git a/lib/ansible/plugins/callback/__init__.py b/lib/ansible/plugins/callback/__init__.py
index 25bc1a72ac..3e71d5e3d4 100644
--- a/lib/ansible/plugins/callback/__init__.py
+++ b/lib/ansible/plugins/callback/__init__.py
@@ -200,10 +200,11 @@ class CallbackBase:
self.runner_on_ok(host, result._result)
def v2_runner_on_skipped(self, result):
- host = result._host.get_name()
- #FIXME, get item to pass through
- item = None
- self.runner_on_skipped(host, item)
+ if C.DISPLAY_SKIPPED_HOSTS:
+ host = result._host.get_name()
+ #FIXME, get item to pass through
+ item = None
+ self.runner_on_skipped(host, item)
def v2_runner_on_unreachable(self, result):
host = result._host.get_name()
diff --git a/lib/ansible/plugins/callback/default.py b/lib/ansible/plugins/callback/default.py
index c963137968..e7485f56da 100644
--- a/lib/ansible/plugins/callback/default.py
+++ b/lib/ansible/plugins/callback/default.py
@@ -19,6 +19,7 @@
from __future__ import (absolute_import, division, print_function)
__metaclass__ = type
+from ansible import constants as C
from ansible.plugins.callback import CallbackBase
class CallbackModule(CallbackBase):
@@ -77,10 +78,11 @@ class CallbackModule(CallbackBase):
self._handle_warnings(result._result)
def v2_runner_on_skipped(self, result):
- msg = "skipping: [%s]" % result._host.get_name()
- if (self._display.verbosity > 0 or '_ansible_verbose_always' in result._result) and not '_ansible_verbose_override' in result._result:
- msg += " => %s" % self._dump_results(result._result)
- self._display.display(msg, color='cyan')
+ if C.DISPLAY_SKIPPED_HOSTS:
+ msg = "skipping: [%s]" % result._host.get_name()
+ if (self._display.verbosity > 0 or '_ansible_verbose_always' in result._result) and not '_ansible_verbose_override' in result._result:
+ msg += " => %s" % self._dump_results(result._result)
+ self._display.display(msg, color='cyan')
def v2_runner_on_unreachable(self, result):
self._display.display("fatal: [%s]: UNREACHABLE! => %s" % (result._host.get_name(), self._dump_results(result._result)), color='red')
diff --git a/lib/ansible/plugins/callback/hipchat.py b/lib/ansible/plugins/callback/hipchat.py
index b0d1bfb67e..139b450866 100644
--- a/lib/ansible/plugins/callback/hipchat.py
+++ b/lib/ansible/plugins/callback/hipchat.py
@@ -17,7 +17,6 @@
import os
import urllib
-import urllib2
try:
import prettytable
@@ -26,6 +25,7 @@ except ImportError:
HAS_PRETTYTABLE = False
from ansible.plugins.callback import CallbackBase
+from ansible.module_utils.urls import open_url
class CallbackModule(CallbackBase):
"""This is an example ansible callback plugin that sends status
@@ -82,7 +82,7 @@ class CallbackModule(CallbackBase):
url = ('%s?auth_token=%s' % (self.msg_uri, self.token))
try:
- response = urllib2.urlopen(url, urllib.urlencode(params))
+ response = open_url(url, data=urllib.urlencode(params))
return response.read()
except:
self.display.warning('Could not submit message to hipchat')
diff --git a/lib/ansible/plugins/callback/profile_tasks.py b/lib/ansible/plugins/callback/profile_tasks.py
index f873b75ead..28aec1c7df 100644
--- a/lib/ansible/plugins/callback/profile_tasks.py
+++ b/lib/ansible/plugins/callback/profile_tasks.py
@@ -53,8 +53,8 @@ def tasktime():
time_current = time.strftime('%A %d %B %Y %H:%M:%S %z')
time_elapsed = secondsToStr(time.time() - tn)
time_total_elapsed = secondsToStr(time.time() - t0)
- display(filled('%s (%s)%s%s' % (time_current, time_elapsed, ' ' * 7, time_total_elapsed)))
tn = time.time()
+ return filled('%s (%s)%s%s' % (time_current, time_elapsed, ' ' * 7, time_total_elapsed))
class CallbackModule(CallbackBase):
@@ -77,7 +77,7 @@ class CallbackModule(CallbackBase):
"""
Logs the start of each task
"""
- tasktime()
+ self._display.display(tasktime())
timestamp(self)
# Record the start time of the current task
@@ -85,11 +85,11 @@ class CallbackModule(CallbackBase):
self.stats[self.current] = time.time()
def playbook_on_setup(self):
- tasktime()
+ self._display.display(tasktime())
def playbook_on_stats(self, stats):
- tasktime()
- display(filled("", fchar="="))
+ self._display.display(tasktime())
+ self._display.display(filled("", fchar="="))
timestamp(self)
@@ -105,7 +105,7 @@ class CallbackModule(CallbackBase):
# Print the timings
for name, elapsed in results:
- self.display.display(
+ self._display.display(
"{0:-<70}{1:->9}".format(
'{0} '.format(name),
' {0:.02f}s'.format(elapsed),
diff --git a/lib/ansible/plugins/connections/__init__.py b/lib/ansible/plugins/connection/__init__.py
index 1ad2876381..5dfcf4c344 100644
--- a/lib/ansible/plugins/connections/__init__.py
+++ b/lib/ansible/plugins/connection/__init__.py
@@ -155,3 +155,13 @@ class ConnectionBase(with_metaclass(ABCMeta, object)):
if incorrect_password in output:
raise AnsibleError('Incorrect %s password' % self._play_context.become_method)
+ def lock_connection(self):
+ f = self._play_context.connection_lockfd
+ self._display.vvvv('CONNECTION: pid %d waiting for lock on %d' % (os.getpid(), f))
+ fcntl.lockf(f, fcntl.LOCK_EX)
+ self._display.vvvv('CONNECTION: pid %d acquired lock on %d' % (os.getpid(), f))
+
+ def unlock_connection(self):
+ f = self._play_context.connection_lockfd
+ fcntl.lockf(f, fcntl.LOCK_UN)
+ self._display.vvvv('CONNECTION: pid %d released lock on %d' % (os.getpid(), f))
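The locking added above relies on forked workers inheriting the same file descriptor, so fcntl.lockf on it serializes them; a hedged, self-contained sketch:

```python
import fcntl
import os
import tempfile

lockfile = tempfile.TemporaryFile()
fd = lockfile.fileno()

fcntl.lockf(fd, fcntl.LOCK_EX)   # blocks until the exclusive lock is held
try:
    print('pid %d holds the connection lock' % os.getpid())
finally:
    fcntl.lockf(fd, fcntl.LOCK_UN)
```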
diff --git a/lib/ansible/plugins/connections/accelerate.py b/lib/ansible/plugins/connection/accelerate.py
index d0bd5ad3d1..d0bd5ad3d1 100644
--- a/lib/ansible/plugins/connections/accelerate.py
+++ b/lib/ansible/plugins/connection/accelerate.py
diff --git a/lib/ansible/plugins/connections/chroot.py b/lib/ansible/plugins/connection/chroot.py
index 5dfd712ef7..6ef3a61cb5 100644
--- a/lib/ansible/plugins/connections/chroot.py
+++ b/lib/ansible/plugins/connection/chroot.py
@@ -20,14 +20,16 @@ from __future__ import (absolute_import, division, print_function)
__metaclass__ = type
import distutils.spawn
-import traceback
import os
import shlex
import subprocess
+import traceback
+
+from ansible import constants as C
from ansible.errors import AnsibleError
-from ansible import utils
+from ansible.plugins.connection import ConnectionBase
+from ansible.utils.path import is_executable
from ansible.utils.unicode import to_bytes
-import ansible.constants as C
class Connection(ConnectionBase):
@@ -51,13 +53,17 @@ class Connection(ConnectionBase):
raise AnsibleError("%s is not a directory" % self.chroot)
chrootsh = os.path.join(self.chroot, 'bin/sh')
- if not utils.is_executable(chrootsh):
+ if not is_executable(chrootsh):
raise AnsibleError("%s does not look like a chrootable dir (/bin/sh missing)" % self.chroot)
self.chroot_cmd = distutils.spawn.find_executable('chroot')
if not self.chroot_cmd:
raise AnsibleError("chroot command not found in PATH")
+ @property
+ def transport(self):
+ ''' used to identify this connection object '''
+ return 'chroot'
def _connect(self, port=None):
''' connect to the chroot; nothing to do here '''
@@ -86,8 +92,8 @@ class Connection(ConnectionBase):
return the process's exit code immediately.
'''
- if sudoable and self.runner.become and self.runner.become_method not in self.become_methods_supported:
- raise AnsibleError("Internal Error: this module does not support running commands via %s" % self.runner.become_method)
+ if sudoable and self._play_context.become and self._play_context.become_method not in self.become_methods_supported:
+ raise AnsibleError("Internal Error: this module does not support running commands via %s" % self._play_context.become_method)
if in_data:
raise AnsibleError("Internal Error: this module does not support optimized module pipelining")
@@ -96,8 +102,9 @@ class Connection(ConnectionBase):
local_cmd = self._generate_cmd(executable, cmd)
self._display.vvv("EXEC %s" % (local_cmd), host=self.chroot)
+ # FIXME: cwd= needs to be set to the basedir of the playbook, which
+ # should come from loader, but is not in the connection plugins
p = subprocess.Popen(local_cmd, shell=False,
- cwd=self.runner.basedir,
stdin=stdin,
stdout=subprocess.PIPE, stderr=subprocess.PIPE)
@@ -119,7 +126,7 @@ class Connection(ConnectionBase):
try:
with open(in_path, 'rb') as in_file:
try:
- p = self._buffered_exec_command('dd of=%s bs=%s' % (out_path, BUFSIZE), None, stdin=in_file)
+ p = self._buffered_exec_command('dd of=%s bs=%s' % (out_path, self.BUFSIZE), None, stdin=in_file)
except OSError:
raise AnsibleError("chroot connection requires dd command in the chroot")
try:
@@ -138,16 +145,16 @@ class Connection(ConnectionBase):
self._display.vvv("FETCH %s TO %s" % (in_path, out_path), host=self.chroot)
try:
- p = self._buffered_exec_command('dd if=%s bs=%s' % (in_path, BUFSIZE), None)
+ p = self._buffered_exec_command('dd if=%s bs=%s' % (in_path, self.BUFSIZE), None)
except OSError:
raise AnsibleError("chroot connection requires dd command in the chroot")
with open(out_path, 'wb+') as out_file:
try:
- chunk = p.stdout.read(BUFSIZE)
+ chunk = p.stdout.read(self.BUFSIZE)
while chunk:
out_file.write(chunk)
- chunk = p.stdout.read(BUFSIZE)
+ chunk = p.stdout.read(self.BUFSIZE)
except:
traceback.print_exc()
raise AnsibleError("failed to transfer file %s to %s" % (in_path, out_path))
diff --git a/lib/ansible/plugins/connections/docker.py b/lib/ansible/plugins/connection/docker.py
index 0168f96fbe..1103327470 100644
--- a/lib/ansible/plugins/connections/docker.py
+++ b/lib/ansible/plugins/connection/docker.py
@@ -31,7 +31,7 @@ from distutils.version import LooseVersion
import ansible.constants as C
from ansible import errors
-from ansible.plugins.connections import ConnectionBase
+from ansible.plugins.connection import ConnectionBase
BUFSIZE = 65536
diff --git a/lib/ansible/plugins/connections/funcd.py b/lib/ansible/plugins/connection/funcd.py
index 92bda4bb34..92bda4bb34 100644
--- a/lib/ansible/plugins/connections/funcd.py
+++ b/lib/ansible/plugins/connection/funcd.py
diff --git a/lib/ansible/plugins/connections/jail.py b/lib/ansible/plugins/connection/jail.py
index d12318391c..d12318391c 100644
--- a/lib/ansible/plugins/connections/jail.py
+++ b/lib/ansible/plugins/connection/jail.py
diff --git a/lib/ansible/plugins/connections/libvirt_lxc.py b/lib/ansible/plugins/connection/libvirt_lxc.py
index 1905eb6a66..1905eb6a66 100644
--- a/lib/ansible/plugins/connections/libvirt_lxc.py
+++ b/lib/ansible/plugins/connection/libvirt_lxc.py
diff --git a/lib/ansible/plugins/connections/local.py b/lib/ansible/plugins/connection/local.py
index e4eddbd4cb..1b5a0efe29 100644
--- a/lib/ansible/plugins/connections/local.py
+++ b/lib/ansible/plugins/connection/local.py
@@ -28,7 +28,7 @@ import fcntl
import ansible.constants as C
from ansible.errors import AnsibleError, AnsibleFileNotFound
-from ansible.plugins.connections import ConnectionBase
+from ansible.plugins.connection import ConnectionBase
class Connection(ConnectionBase):
''' Local based connections '''
@@ -70,7 +70,7 @@ class Connection(ConnectionBase):
)
self._display.debug("done running command with Popen()")
- if self._play_context.prompt and self._play_context.become_pass and sudoable:
+ if self._play_context.prompt and sudoable:
fcntl.fcntl(p.stdout, fcntl.F_SETFL, fcntl.fcntl(p.stdout, fcntl.F_GETFL) | os.O_NONBLOCK)
fcntl.fcntl(p.stderr, fcntl.F_SETFL, fcntl.fcntl(p.stderr, fcntl.F_GETFL) | os.O_NONBLOCK)
become_output = ''
diff --git a/lib/ansible/plugins/connections/paramiko_ssh.py b/lib/ansible/plugins/connection/paramiko_ssh.py
index df97a6e3a5..1960ff8076 100644
--- a/lib/ansible/plugins/connections/paramiko_ssh.py
+++ b/lib/ansible/plugins/connection/paramiko_ssh.py
@@ -39,9 +39,11 @@ import sys
from termios import tcflush, TCIFLUSH
from binascii import hexlify
+from six import iteritems
+
from ansible import constants as C
from ansible.errors import AnsibleError, AnsibleConnectionFailure, AnsibleFileNotFound
-from ansible.plugins.connections import ConnectionBase
+from ansible.plugins.connection import ConnectionBase
from ansible.utils.path import makedirs_safe
AUTHENTICITY_MSG="""
@@ -71,16 +73,15 @@ class MyAddPolicy(object):
local L{HostKeys} object, and saving it. This is used by L{SSHClient}.
"""
- def __init__(self, new_stdin):
+ def __init__(self, new_stdin, connection):
self._new_stdin = new_stdin
+ self.connection = connection
def missing_host_key(self, client, hostname, key):
if C.HOST_KEY_CHECKING:
- # FIXME: need to fix lock file stuff
- #fcntl.lockf(self.runner.process_lockfile, fcntl.LOCK_EX)
- #fcntl.lockf(self.runner.output_lockfile, fcntl.LOCK_EX)
+ self.connection.lock_connection()
old_stdin = sys.stdin
sys.stdin = self._new_stdin
@@ -94,17 +95,11 @@ class MyAddPolicy(object):
inp = raw_input(AUTHENTICITY_MSG % (hostname, ktype, fingerprint))
sys.stdin = old_stdin
+ self.connection.unlock_connection()
+
if inp not in ['yes','y','']:
- # FIXME: lock file stuff
- #fcntl.flock(self.runner.output_lockfile, fcntl.LOCK_UN)
- #fcntl.flock(self.runner.process_lockfile, fcntl.LOCK_UN)
raise AnsibleError("host connection rejected by user")
- # FIXME: lock file stuff
- #fcntl.lockf(self.runner.output_lockfile, fcntl.LOCK_UN)
- #fcntl.lockf(self.runner.process_lockfile, fcntl.LOCK_UN)
-
-
key._added_by_ansible_this_time = True
# existing implementation below:
@@ -159,7 +154,7 @@ class Connection(ConnectionBase):
pass # file was not found, but not required to function
ssh.load_system_host_keys()
- ssh.set_missing_host_key_policy(MyAddPolicy(self._new_stdin))
+ ssh.set_missing_host_key_policy(MyAddPolicy(self._new_stdin, self))
allow_agent = True
@@ -229,33 +224,32 @@ class Connection(ConnectionBase):
try:
chan.exec_command(cmd)
if self._play_context.prompt:
- if self._play_context.become and self._play_context.become_pass:
- passprompt = False
- while True:
- self._display.debug('Waiting for Privilege Escalation input')
- if self.check_become_success(become_output):
- break
- elif self.check_password_prompt(become_output):
- passprompt = True
- break
-
- chunk = chan.recv(bufsize)
- self._display.debug("chunk is: %s" % chunk)
- if not chunk:
- if 'unknown user' in become_output:
- raise AnsibleError( 'user %s does not exist' % become_user)
- else:
- break
- #raise AnsibleError('ssh connection closed waiting for password prompt')
- become_output += chunk
- if passprompt:
- if self._play_context.become and self._play_context.become_pass:
- chan.sendall(self._play_context.become_pass + '\n')
+ passprompt = False
+ while True:
+ self._display.debug('Waiting for Privilege Escalation input')
+ if self.check_become_success(become_output):
+ break
+ elif self.check_password_prompt(become_output):
+ passprompt = True
+ break
+
+ chunk = chan.recv(bufsize)
+ self._display.debug("chunk is: %s" % chunk)
+ if not chunk:
+ if 'unknown user' in become_output:
+ raise AnsibleError( 'user %s does not exist' % become_user)
else:
- raise AnsibleError("A password is reqired but none was supplied")
+ break
+ #raise AnsibleError('ssh connection closed waiting for password prompt')
+ become_output += chunk
+ if passprompt:
+ if self._play_context.become and self._play_context.become_pass:
+ chan.sendall(self._play_context.become_pass + '\n')
else:
- no_prompt_out += become_output
- no_prompt_err += become_output
+ raise AnsibleError("A password is reqired but none was supplied")
+ else:
+ no_prompt_out += become_output
+ no_prompt_err += become_output
except socket.timeout:
raise AnsibleError('ssh timed out waiting for privilege escalation.\n' + become_output)
@@ -313,8 +307,8 @@ class Connection(ConnectionBase):
def _any_keys_added(self):
added_any = False
- for hostname, keys in self.ssh._host_keys.iteritems():
- for keytype, key in keys.iteritems():
+ for hostname, keys in iteritems(self.ssh._host_keys):
+ for keytype, key in iteritems(keys):
added_this_time = getattr(key, '_added_by_ansible_this_time', False)
if added_this_time:
return True
@@ -334,18 +328,18 @@ class Connection(ConnectionBase):
f = open(filename, 'w')
- for hostname, keys in self.ssh._host_keys.iteritems():
+ for hostname, keys in iteritems(self.ssh._host_keys):
- for keytype, key in keys.iteritems():
+ for keytype, key in iteritems(keys):
# was f.write
added_this_time = getattr(key, '_added_by_ansible_this_time', False)
if not added_this_time:
f.write("%s %s %s\n" % (hostname, keytype, key.get_base64()))
- for hostname, keys in self.ssh._host_keys.iteritems():
+ for hostname, keys in iteritems(self.ssh._host_keys):
- for keytype, key in keys.iteritems():
+ for keytype, key in iteritems(keys):
added_this_time = getattr(key, '_added_by_ansible_this_time', False)
if added_this_time:
f.write("%s %s %s\n" % (hostname, keytype, key.get_base64()))
@@ -365,6 +359,9 @@ class Connection(ConnectionBase):
if C.HOST_KEY_CHECKING and C.PARAMIKO_RECORD_HOST_KEYS and self._any_keys_added():
# add any new SSH host keys -- warning -- this could be slow
+ # (This doesn't acquire the connection lock because it needs
+ # to exclude only other known_hosts writers, not connections
+ # that are starting up.)
lockfile = self.keyfile.replace("known_hosts",".known_hosts.lock")
dirname = os.path.dirname(self.keyfile)
makedirs_safe(dirname)
diff --git a/lib/ansible/plugins/connections/ssh.py b/lib/ansible/plugins/connection/ssh.py
index 9c16168413..c113f538d1 100644
--- a/lib/ansible/plugins/connections/ssh.py
+++ b/lib/ansible/plugins/connection/ssh.py
@@ -36,7 +36,8 @@ from hashlib import sha1
from ansible import constants as C
from ansible.errors import AnsibleError, AnsibleConnectionFailure, AnsibleFileNotFound
-from ansible.plugins.connections import ConnectionBase
+from ansible.plugins.connection import ConnectionBase
+from ansible.utils.path import unfrackpath, makedirs_safe
class Connection(ConnectionBase):
''' ssh based connections '''
@@ -49,21 +50,40 @@ class Connection(ConnectionBase):
self._common_args = []
self.HASHED_KEY_MAGIC = "|1|"
- # FIXME: move the lockfile locations to ActionBase?
- #fcntl.lockf(self.runner.process_lockfile, fcntl.LOCK_EX)
- #self.cp_dir = utils.prepare_writeable_dir('$HOME/.ansible/cp',mode=0700)
- self._cp_dir = '/tmp'
- #fcntl.lockf(self.runner.process_lockfile, fcntl.LOCK_UN)
-
super(Connection, self).__init__(*args, **kwargs)
self.host = self._play_context.remote_addr
+ self.ssh_extra_args = ''
+ self.ssh_args = ''
+
+ def set_host_overrides(self, host):
+ v = host.get_vars()
+ if 'ansible_ssh_extra_args' in v:
+ self.ssh_extra_args = v['ansible_ssh_extra_args']
+ if 'ansible_ssh_args' in v:
+ self.ssh_args = v['ansible_ssh_args']
@property
def transport(self):
''' used to identify this connection object from other classes '''
return 'ssh'
+ def _split_args(self, argstring):
+ """
+ Takes a string like '-o Foo=1 -o Bar="foo bar"' and returns a
+ list ['-o', 'Foo=1', '-o', 'Bar=foo bar'] that can be added to
+ the argument list. The list will not contain any empty elements.
+ """
+ return [x.strip() for x in shlex.split(argstring) if x.strip()]
+
+ def add_args(self, explanation, args):
+ """
+ Adds the given args to _common_args and displays a
+ caller-supplied explanation of why they were added.
+ """
+ self._common_args += args
+ self._display.vvvvv('SSH: ' + explanation + ': (%s)' % ')('.join(args), host=self._play_context.remote_addr)
+
def _connect(self):
''' connect to the remote host '''
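_split_args is a thin wrapper over the standard library's shlex.split; a quick check reproduces the docstring's example:

    >>> import shlex
    >>> [x.strip() for x in shlex.split('-o Foo=1 -o Bar="foo bar"') if x.strip()]
    ['-o', 'Foo=1', '-o', 'Bar=foo bar']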
@@ -72,16 +92,25 @@ class Connection(ConnectionBase):
if self._connected:
return self
- extra_args = C.ANSIBLE_SSH_ARGS
- if extra_args is not None:
- # make sure there is no empty string added as this can produce weird errors
- self._common_args += [x.strip() for x in shlex.split(extra_args) if x.strip()]
+ # We start with ansible_ssh_args from the inventory if it's set,
+ # or [ssh_connection]ssh_args from ansible.cfg, or the default
+ # Control* settings.
+
+ if self.ssh_args:
+ args = self._split_args(self.ssh_args)
+ self.add_args("inventory set ansible_ssh_args", args)
+ elif C.ANSIBLE_SSH_ARGS:
+ args = self._split_args(C.ANSIBLE_SSH_ARGS)
+ self.add_args("ansible.cfg set ssh_args", args)
else:
- self._common_args += (
+ args = (
"-o", "ControlMaster=auto",
- "-o", "ControlPersist=60s",
- "-o", "ControlPath=\"{0}\"".format(C.ANSIBLE_SSH_CONTROL_PATH % dict(directory=self._cp_dir)),
+ "-o", "ControlPersist=60s"
)
+ self.add_args("default arguments", args)
+
+ # If any of the above have set ControlPersist but not a
+ # ControlPath, add one ourselves.
cp_in_use = False
cp_path_set = False
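The loop that sets these two flags is elided by the hunk boundary; a plausible sketch of the scan it performs over the accumulated arguments:

    # Sketch of the elided scan: which Control* options are already present?
    def control_flags(common_args):
        cp_in_use = any('controlpersist' in a.lower() for a in common_args)
        cp_path_set = any('controlpath' in a.lower() for a in common_args)
        return cp_in_use, cp_path_set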
@@ -92,27 +121,68 @@ class Connection(ConnectionBase):
cp_path_set = True
if cp_in_use and not cp_path_set:
- self._common_args += ("-o", "ControlPath=\"{0}\"".format(
+ self._cp_dir = unfrackpath('$HOME/.ansible/cp')
+
+ args = ("-o", "ControlPath=\"{0}\"".format(
C.ANSIBLE_SSH_CONTROL_PATH % dict(directory=self._cp_dir))
)
+ self.add_args("found only ControlPersist; added ControlPath", args)
+
+ # The directory must exist and be writable.
+ makedirs_safe(self._cp_dir, 0o700)
+ if not os.access(self._cp_dir, os.W_OK):
+ raise AnsibleError("Cannot write to ControlPath %s" % self._cp_dir)
if not C.HOST_KEY_CHECKING:
- self._common_args += ("-o", "StrictHostKeyChecking=no")
+ self.add_args(
+ "ANSIBLE_HOST_KEY_CHECKING/host_key_checking disabled",
+ ("-o", "StrictHostKeyChecking=no")
+ )
if self._play_context.port is not None:
- self._common_args += ("-o", "Port={0}".format(self._play_context.port))
- if self._play_context.private_key_file is not None:
- self._common_args += ("-o", "IdentityFile=\"{0}\"".format(os.path.expanduser(self._play_context.private_key_file)))
- if self._play_context.password:
- self._common_args += ("-o", "GSSAPIAuthentication=no",
- "-o", "PubkeyAuthentication=no")
- else:
- self._common_args += ("-o", "KbdInteractiveAuthentication=no",
- "-o", "PreferredAuthentications=gssapi-with-mic,gssapi-keyex,hostbased,publickey",
- "-o", "PasswordAuthentication=no")
- if self._play_context.remote_user is not None and self._play_context.remote_user != pwd.getpwuid(os.geteuid())[0]:
- self._common_args += ("-o", "User={0}".format(self._play_context.remote_user))
- self._common_args += ("-o", "ConnectTimeout={0}".format(self._play_context.timeout))
+ self.add_args(
+ "ANSIBLE_REMOTE_PORT/remote_port/ansible_ssh_port set",
+ ("-o", "Port={0}".format(self._play_context.port))
+ )
+
+ key = self._play_context.private_key_file
+ if key:
+ self.add_args(
+ "ANSIBLE_PRIVATE_KEY_FILE/private_key_file/ansible_ssh_private_key_file set",
+ ("-o", "IdentityFile=\"{0}\"".format(os.path.expanduser(key)))
+ )
+
+ if not self._play_context.password:
+ self.add_args(
+ "ansible_password/ansible_ssh_pass not set", (
+ "-o", "KbdInteractiveAuthentication=no",
+ "-o", "PreferredAuthentications=gssapi-with-mic,gssapi-keyex,hostbased,publickey",
+ "-o", "PasswordAuthentication=no"
+ )
+ )
+
+ user = self._play_context.remote_user
+ if user and user != pwd.getpwuid(os.geteuid())[0]:
+ self.add_args(
+ "ANSIBLE_REMOTE_USER/remote_user/ansible_ssh_user/user/-u set",
+ ("-o", "User={0}".format(self._play_context.remote_user))
+ )
+
+ self.add_args(
+ "ANSIBLE_TIMEOUT/timeout set",
+ ("-o", "ConnectTimeout={0}".format(self._play_context.timeout))
+ )
+
+ # If any extra SSH arguments are specified in the inventory for
+ # this host, or specified as an override on the command line,
+ # add them in.
+
+ if self._play_context.ssh_extra_args:
+ args = self._split_args(self._play_context.ssh_extra_args)
+ self.add_args("command-line added --ssh-extra-args", args)
+ elif self.ssh_extra_args:
+ args = self._split_args(self.ssh_extra_args)
+ self.add_args("inventory added ansible_ssh_extra_args", args)
self._connected = True
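Taken together, the method implements a first-match-wins precedence: inventory ansible_ssh_args beats ssh_args from ansible.cfg, which beats the built-in ControlMaster defaults, and --ssh-extra-args on the command line beats ansible_ssh_extra_args from inventory. A toy reduction of that pattern:

    # Toy sketch of the first-match-wins selection used above.
    def pick_ssh_args(inventory_args, cfg_args, defaults):
        if inventory_args:
            return 'inventory', inventory_args
        if cfg_args:
            return 'ansible.cfg', cfg_args
        return 'defaults', defaults

    print(pick_ssh_args('', '-o ControlMaster=no', '-o ControlMaster=auto'))
    # -> ('ansible.cfg', '-o ControlMaster=no')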
@@ -207,73 +277,17 @@ class Connection(ConnectionBase):
stdin.close()
return (p.returncode, stdout, stderr)
- def not_in_host_file(self, host):
- if 'USER' in os.environ:
- user_host_file = os.path.expandvars("~${USER}/.ssh/known_hosts")
- else:
- user_host_file = "~/.ssh/known_hosts"
- user_host_file = os.path.expanduser(user_host_file)
-
- host_file_list = []
- host_file_list.append(user_host_file)
- host_file_list.append("/etc/ssh/ssh_known_hosts")
- host_file_list.append("/etc/ssh/ssh_known_hosts2")
-
- hfiles_not_found = 0
- for hf in host_file_list:
- if not os.path.exists(hf):
- hfiles_not_found += 1
- continue
- try:
- host_fh = open(hf)
- except IOError as e:
- hfiles_not_found += 1
- continue
- else:
- data = host_fh.read()
- host_fh.close()
-
- for line in data.split("\n"):
- if line is None or " " not in line:
- continue
- tokens = line.split()
- if not tokens:
- continue
-
- if isinstance(tokens, list) and tokens: # skip invalid hostlines
- if tokens[0].find(self.HASHED_KEY_MAGIC) == 0:
- # this is a hashed known host entry
- try:
- (kn_salt,kn_host) = tokens[0][len(self.HASHED_KEY_MAGIC):].split("|",2)
- hash = hmac.new(kn_salt.decode('base64'), digestmod=sha1)
- hash.update(host)
- if hash.digest() == kn_host.decode('base64'):
- return False
- except:
- # invalid hashed host key, skip it
- continue
- else:
- # standard host file entry
- if host in tokens[0]:
- return False
-
- if (hfiles_not_found == len(host_file_list)):
- self._display.vvv("EXEC previous known host file not found for {0}".format(host))
- return True
-
def lock_host_keys(self, lock):
- if C.HOST_KEY_CHECKING and self.not_in_host_file(self.host):
- if lock:
- action = fcntl.LOCK_EX
- else:
- action = fcntl.LOCK_UN
+ # lock around the initial SSH connectivity so the user prompt about
+ # whether to add the host to known hosts is not intermingled with
+ # multiprocess output.
+ #
+ # This is a noop for now, pending further investigation. The lock file
+ # should be opened in TaskQueueManager and passed down through the
+ # PlayContext.
- # lock around the initial SSH connectivity so the user prompt about whether to add
- # the host to known hosts is not intermingled with multiprocess output.
- # FIXME: move the locations of these lock files, same as init above, these came from runner, probably need to be in task_executor
- # fcntl.lockf(self.process_lockfile, action)
- # fcntl.lockf(self.output_lockfile, action)
+ pass
def exec_command(self, *args, **kwargs):
"""
@@ -364,54 +378,53 @@ class Connection(ConnectionBase):
self._display.debug("Handling privilege escalation password prompt.")
- if self._play_context.become and self._play_context.become_pass:
-
- fcntl.fcntl(p.stdout, fcntl.F_SETFL, fcntl.fcntl(p.stdout, fcntl.F_GETFL) | os.O_NONBLOCK)
- fcntl.fcntl(p.stderr, fcntl.F_SETFL, fcntl.fcntl(p.stderr, fcntl.F_GETFL) | os.O_NONBLOCK)
-
- become_output = ''
- become_errput = ''
- passprompt = False
- while True:
- self._display.debug('Waiting for Privilege Escalation input')
-
- if self.check_become_success(become_output + become_errput):
- self._display.debug('Succeded!')
- break
- elif self.check_password_prompt(become_output) or self.check_password_prompt(become_errput):
- self._display.debug('Password prompt!')
- passprompt = True
- break
-
- self._display.debug('Read next chunks')
- rfd, wfd, efd = select.select([p.stdout, p.stderr], [], [p.stdout], self._play_context.timeout)
- if not rfd:
- # timeout. wrap up process communication
- stdout, stderr = p.communicate()
- raise AnsibleError('Connection error waiting for privilege escalation password prompt: %s' % become_output)
-
- elif p.stderr in rfd:
- chunk = p.stderr.read()
- become_errput += chunk
- self._display.debug('stderr chunk is: %s' % chunk)
- self.check_incorrect_password(become_errput)
-
- elif p.stdout in rfd:
- chunk = p.stdout.read()
- become_output += chunk
- self._display.debug('stdout chunk is: %s' % chunk)
-
-
- if not chunk:
- break
- #raise AnsibleError('Connection closed waiting for privilege escalation password prompt: %s ' % become_output)
-
- if passprompt:
- self._display.debug("Sending privilege escalation password.")
- stdin.write(self._play_context.become_pass + '\n')
- else:
- no_prompt_out = become_output
- no_prompt_err = become_errput
+
+ fcntl.fcntl(p.stdout, fcntl.F_SETFL, fcntl.fcntl(p.stdout, fcntl.F_GETFL) | os.O_NONBLOCK)
+ fcntl.fcntl(p.stderr, fcntl.F_SETFL, fcntl.fcntl(p.stderr, fcntl.F_GETFL) | os.O_NONBLOCK)
+
+ become_output = ''
+ become_errput = ''
+ passprompt = False
+ while True:
+ self._display.debug('Waiting for Privilege Escalation input')
+
+ if self.check_become_success(become_output + become_errput):
+ self._display.debug('Succeeded!')
+ break
+ elif self.check_password_prompt(become_output) or self.check_password_prompt(become_errput):
+ self._display.debug('Password prompt!')
+ passprompt = True
+ break
+
+ self._display.debug('Read next chunks')
+ rfd, wfd, efd = select.select([p.stdout, p.stderr], [], [p.stdout], self._play_context.timeout)
+ if not rfd:
+ # timeout. wrap up process communication
+ stdout, stderr = p.communicate()
+ raise AnsibleError('Connection error waiting for privilege escalation password prompt: %s' % become_output)
+
+ elif p.stderr in rfd:
+ chunk = p.stderr.read()
+ become_errput += chunk
+ self._display.debug('stderr chunk is: %s' % chunk)
+ self.check_incorrect_password(become_errput)
+
+ elif p.stdout in rfd:
+ chunk = p.stdout.read()
+ become_output += chunk
+ self._display.debug('stdout chunk is: %s' % chunk)
+
+
+ if not chunk:
+ break
+ #raise AnsibleError('Connection closed waiting for privilege escalation password prompt: %s ' % become_output)
+
+ if passprompt:
+ self._display.debug("Sending privilege escalation password.")
+ stdin.write(self._play_context.become_pass + '\n')
+ else:
+ no_prompt_out = become_output
+ no_prompt_err = become_errput
(returncode, stdout, stderr) = self._communicate(p, stdin, in_data, sudoable=sudoable)
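The rewritten block above reads both pipes without blocking, so a prompt arriving on either stream is caught. The same pattern in isolation, assuming a subprocess with piped stdout/stderr (names illustrative):

    # Sketch: poll both pipes with a timeout rather than blocking on one.
    import fcntl, os, select

    def read_until_quiet(p, timeout=10):
        for pipe in (p.stdout, p.stderr):
            flags = fcntl.fcntl(pipe, fcntl.F_GETFL)
            fcntl.fcntl(pipe, fcntl.F_SETFL, flags | os.O_NONBLOCK)
        output = ''
        while True:
            rfd, _, _ = select.select([p.stdout, p.stderr], [], [], timeout)
            if not rfd:
                break                     # timed out; caller decides what next
            chunk = rfd[0].read()
            if not chunk:
                break                     # EOF on whichever pipe was ready
            output += chunk
        return output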
@@ -501,17 +514,18 @@ class Connection(ConnectionBase):
raise AnsibleError("failed to transfer file from {0}:\n{1}\n{2}".format(in_path, stdout, stderr))
def close(self):
- ''' not applicable since we're executing openssh binaries '''
if self._connected:
- if 'ControlMaster' in self._common_args:
- cmd = ['ssh','-O','stop']
- cmd.extend(self._common_args)
- cmd.append(self._play_context.remote_addr)
+ # TODO: reenable once winrm issues are fixed
+ # temporarily disabled as we are forced to currently close connections after every task because of winrm
+ #if 'ControlMaster' in self._common_args:
+ # cmd = ['ssh','-O','stop']
+ # cmd.extend(self._common_args)
+ # cmd.append(self._play_context.remote_addr)
- p = subprocess.Popen(cmd, stdin=subprocess.PIPE, stdout=subprocess.PIPE, stderr=subprocess.PIPE)
- stdout, stderr = p.communicate()
+ # p = subprocess.Popen(cmd, stdin=subprocess.PIPE, stdout=subprocess.PIPE, stderr=subprocess.PIPE)
+ # stdout, stderr = p.communicate()
self._connected = False
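Until the commented-out block is re-enabled, a persisted master can still be shut down by hand; a sketch of the equivalent call, with an illustrative ControlPath:

    # Sketch: ask a running ControlMaster to exit. The ControlPath must
    # match the one used when the connection was established.
    import subprocess

    def stop_control_master(host, control_path):
        cmd = ['ssh', '-o', 'ControlPath=%s' % control_path, '-O', 'stop', host]
        p = subprocess.Popen(cmd, stdout=subprocess.PIPE, stderr=subprocess.PIPE)
        return p.communicate()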
diff --git a/lib/ansible/plugins/connections/winrm.py b/lib/ansible/plugins/connection/winrm.py
index 0e19b93ac2..6289318c03 100644
--- a/lib/ansible/plugins/connections/winrm.py
+++ b/lib/ansible/plugins/connection/winrm.py
@@ -19,13 +19,15 @@ from __future__ import (absolute_import, division, print_function)
__metaclass__ = type
import base64
+import inspect
import os
import re
import shlex
import traceback
-from six.moves.urllib import parse
+from six.moves.urllib.parse import urlunsplit
+from ansible.errors import AnsibleError
try:
from winrm import Response
from winrm.exceptions import WinRMTransportError
@@ -41,8 +43,8 @@ except ImportError:
pass
from ansible import constants as C
-from ansible.errors import AnsibleError, AnsibleConnectionFailure, AnsibleFileNotFound
-from ansible.plugins.connections import ConnectionBase
+from ansible.errors import AnsibleConnectionFailure, AnsibleFileNotFound
+from ansible.plugins.connection import ConnectionBase
from ansible.plugins import shell_loader
from ansible.utils.path import makedirs_safe
from ansible.utils.unicode import to_bytes, to_unicode
@@ -50,11 +52,6 @@ from ansible.utils.unicode import to_bytes, to_unicode
class Connection(ConnectionBase):
'''WinRM connections over HTTP/HTTPS.'''
- transport_schemes = {
- 'http': [('kerberos', 'http'), ('plaintext', 'http'), ('plaintext', 'https')],
- 'https': [('kerberos', 'https'), ('plaintext', 'https')],
- }
-
def __init__(self, *args, **kwargs):
self.has_pipelining = False
@@ -74,59 +71,82 @@ class Connection(ConnectionBase):
''' used to identify this connection object from other classes '''
return 'winrm'
- def _winrm_connect(self):
+ def set_host_overrides(self, host):
'''
- Establish a WinRM connection over HTTP/HTTPS.
+ Override WinRM-specific options from host variables.
'''
- port = self._play_context.port or 5986
- self._display.vvv("ESTABLISH WINRM CONNECTION FOR USER: %s on PORT %s TO %s" % \
- (self._play_context.remote_user, port, self._play_context.remote_addr), host=self._play_context.remote_addr)
- netloc = '%s:%d' % (self._play_context.remote_addr, port)
- exc = None
- for transport, scheme in self.transport_schemes['http' if port == 5985 else 'https']:
- if transport == 'kerberos' and (not HAVE_KERBEROS or not '@' in self._play_context.remote_user):
- continue
+ host_vars = host.get_vars()
- if transport == 'kerberos':
- realm = self._play_context.remote_user.split('@', 1)[1].strip() or None
- else:
- realm = None
+ self._winrm_host = self._play_context.remote_addr
+ self._winrm_port = int(self._play_context.port or 5986)
+ self._winrm_scheme = host_vars.get('ansible_winrm_scheme', 'http' if self._winrm_port == 5985 else 'https')
+ self._winrm_path = host_vars.get('ansible_winrm_path', '/wsman')
+ self._winrm_user = self._play_context.remote_user
+ self._winrm_pass = self._play_context.password
+
+ if '@' in self._winrm_user:
+ self._winrm_realm = self._winrm_user.split('@', 1)[1].strip() or None
+ else:
+ self._winrm_realm = None
+ self._winrm_realm = host_vars.get('ansible_winrm_realm', self._winrm_realm) or None
- endpoint = parse.urlunsplit((scheme, netloc, '/wsman', '', ''))
+ if HAVE_KERBEROS and ('@' in self._winrm_user or self._winrm_realm):
+ self._winrm_transport = 'kerberos,plaintext'
+ else:
+ self._winrm_transport = 'plaintext'
+ self._winrm_transport = host_vars.get('ansible_winrm_transport', self._winrm_transport)
+ if isinstance(self._winrm_transport, basestring):
+ self._winrm_transport = [x.strip() for x in self._winrm_transport.split(',') if x.strip()]
- self._display.vvvvv('WINRM CONNECT: transport=%s endpoint=%s' % (transport, endpoint), host=self._play_context.remote_addr)
- protocol = Protocol(
- endpoint,
- transport=transport,
- username=self._play_context.remote_user,
- password=self._play_context.password,
- realm=realm
- )
+ self._winrm_kwargs = dict(username=self._winrm_user, password=self._winrm_pass, realm=self._winrm_realm)
+ argspec = inspect.getargspec(Protocol.__init__)
+ for arg in argspec.args:
+ if arg in ('self', 'endpoint', 'transport', 'username', 'password', 'realm'):
+ continue
+ if 'ansible_winrm_%s' % arg in host_vars:
+ self._winrm_kwargs[arg] = host_vars['ansible_winrm_%s' % arg]
+ def _winrm_connect(self):
+ '''
+ Establish a WinRM connection over HTTP/HTTPS.
+ '''
+ self._display.vvv("ESTABLISH WINRM CONNECTION FOR USER: %s on PORT %s TO %s" % \
+ (self._winrm_user, self._winrm_port, self._winrm_host), host=self._winrm_host)
+ netloc = '%s:%d' % (self._winrm_host, self._winrm_port)
+ endpoint = urlunsplit((self._winrm_scheme, netloc, self._winrm_path, '', ''))
+ errors = []
+ for transport in self._winrm_transport:
+ if transport == 'kerberos' and not HAVE_KERBEROS:
+ errors.append('kerberos: the python kerberos library is not installed')
+ continue
+ self._display.vvvvv('WINRM CONNECT: transport=%s endpoint=%s' % (transport, endpoint), host=self._winrm_host)
try:
+ protocol = Protocol(endpoint, transport=transport, **self._winrm_kwargs)
protocol.send_message('')
return protocol
- except WinRMTransportError as exc:
- err_msg = str(exc)
+ except Exception as e:
+ err_msg = (str(e) or repr(e)).strip()
if re.search(r'Operation\s+?timed\s+?out', err_msg, re.I):
- raise AnsibleError("the connection attempt timed out")
+ raise AnsibleError('the connection attempt timed out')
m = re.search(r'Code\s+?(\d{3})', err_msg)
if m:
code = int(m.groups()[0])
if code == 401:
- raise AnsibleError("the username/password specified for this server was incorrect")
+ err_msg = 'the username/password specified for this server was incorrect'
elif code == 411:
return protocol
- self._display.vvvvv('WINRM CONNECTION ERROR: %s' % err_msg, host=self._play_context.remote_addr)
- continue
- if exc:
- raise AnsibleError(str(exc))
+ errors.append('%s: %s' % (transport, err_msg))
+ self._display.vvvvv('WINRM CONNECTION ERROR: %s\n%s' % (err_msg, traceback.format_exc()), host=self._winrm_host)
+ if errors:
+ raise AnsibleError(', '.join(errors))
+ else:
+ raise AnsibleError('No transport found for WinRM connection')
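The getargspec pass above forwards any ansible_winrm_* host variable whose suffix names a Protocol.__init__ parameter. The same filtering reduced to a standalone function, with a stand-in class instead of the real pywinrm Protocol:

    import inspect

    class FakeProtocol(object):
        # Stand-in for winrm.protocol.Protocol; parameters are illustrative.
        def __init__(self, endpoint, transport=None, username=None,
                     password=None, realm=None, ca_trust_path=None):
            pass

    def winrm_kwargs(host_vars, base):
        kwargs = dict(base)
        for arg in inspect.getargspec(FakeProtocol.__init__).args:
            if arg in ('self', 'endpoint', 'transport', 'username',
                       'password', 'realm'):
                continue
            key = 'ansible_winrm_%s' % arg
            if key in host_vars:
                kwargs[arg] = host_vars[key]
        return kwargs

    print(winrm_kwargs({'ansible_winrm_ca_trust_path': '/etc/pki'}, {}))
    # -> {'ca_trust_path': '/etc/pki'}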
def _winrm_exec(self, command, args=(), from_exec=False):
if from_exec:
- self._display.vvvvv("WINRM EXEC %r %r" % (command, args), host=self._play_context.remote_addr)
+ self._display.vvvvv("WINRM EXEC %r %r" % (command, args), host=self._winrm_host)
else:
- self._display.vvvvvv("WINRM EXEC %r %r" % (command, args), host=self._play_context.remote_addr)
+ self._display.vvvvvv("WINRM EXEC %r %r" % (command, args), host=self._winrm_host)
if not self.protocol:
self.protocol = self._winrm_connect()
if not self.shell_id:
@@ -136,11 +156,11 @@ class Connection(ConnectionBase):
command_id = self.protocol.run_command(self.shell_id, to_bytes(command), map(to_bytes, args))
response = Response(self.protocol.get_command_output(self.shell_id, command_id))
if from_exec:
- self._display.vvvvv('WINRM RESULT %r' % to_unicode(response), host=self._play_context.remote_addr)
+ self._display.vvvvv('WINRM RESULT %r' % to_unicode(response), host=self._winrm_host)
else:
- self._display.vvvvv('WINRM RESULT %r' % to_unicode(response), host=self._play_context.remote_addr)
- self._display.vvvvvv('WINRM STDOUT %s' % to_unicode(response.std_out), host=self._play_context.remote_addr)
- self._display.vvvvvv('WINRM STDERR %s' % to_unicode(response.std_err), host=self._play_context.remote_addr)
+ self._display.vvvvvv('WINRM RESULT %r' % to_unicode(response), host=self._winrm_host)
+ self._display.vvvvvv('WINRM STDOUT %s' % to_unicode(response.std_out), host=self._winrm_host)
+ self._display.vvvvvv('WINRM STDERR %s' % to_unicode(response.std_err), host=self._winrm_host)
return response
finally:
if command_id:
@@ -159,21 +179,21 @@ class Connection(ConnectionBase):
cmd_ext = cmd_parts and self._shell._unquote(cmd_parts[0]).lower()[-4:] or ''
# Support running .ps1 files (via script/raw).
if cmd_ext == '.ps1':
- script = ' '.join(['&'] + cmd_parts)
+ script = '& %s' % cmd
# Support running .bat/.cmd files; change back to the default system encoding instead of UTF-8.
elif cmd_ext in ('.bat', '.cmd'):
- script = ' '.join(['[System.Console]::OutputEncoding = [System.Text.Encoding]::Default;', '&'] + cmd_parts)
+ script = '[System.Console]::OutputEncoding = [System.Text.Encoding]::Default; & %s' % cmd
# Encode the command if not already encoded; supports running simple PowerShell commands via raw.
elif '-EncodedCommand' not in cmd_parts:
- script = ' '.join(cmd_parts)
+ script = cmd
if script:
- cmd_parts = self._shell._encode_script(script, as_list=True)
+ cmd_parts = self._shell._encode_script(script, as_list=True, strict_mode=False)
if '-EncodedCommand' in cmd_parts:
encoded_cmd = cmd_parts[cmd_parts.index('-EncodedCommand') + 1]
- decoded_cmd = to_unicode(base64.b64decode(encoded_cmd))
- self._display.vvv("EXEC %s" % decoded_cmd, host=self._play_context.remote_addr)
+ decoded_cmd = to_unicode(base64.b64decode(encoded_cmd).decode('utf-16-le'))
+ self._display.vvv("EXEC %s" % decoded_cmd, host=self._winrm_host)
else:
- self._display.vvv("EXEC %s" % cmd, host=self._play_context.remote_addr)
+ self._display.vvv("EXEC %s" % cmd, host=self._winrm_host)
try:
result = self._winrm_exec(cmd_parts[0], cmd_parts[1:], from_exec=True)
except Exception as e:
@@ -186,7 +206,7 @@ class Connection(ConnectionBase):
def put_file(self, in_path, out_path):
super(Connection, self).put_file(in_path, out_path)
out_path = self._shell._unquote(out_path)
- self._display.vvv('PUT "%s" TO "%s"' % (in_path, out_path), host=self._play_context.remote_addr)
+ self._display.vvv('PUT "%s" TO "%s"' % (in_path, out_path), host=self._winrm_host)
if not os.path.exists(in_path):
raise AnsibleFileNotFound('file or module does not exist: "%s"' % in_path)
with open(in_path) as in_file:
@@ -206,7 +226,7 @@ class Connection(ConnectionBase):
# windows command length), divide by 2.67 (UTF16LE base64 command
# encoding), then by 1.35 again (data base64 encoding).
buffer_size = int(((8190 - len(cmd)) / 2.67) / 1.35)
- for offset in xrange(0, in_size, buffer_size):
+ for offset in xrange(0, in_size or 1, buffer_size):
try:
out_data = in_file.read(buffer_size)
if offset == 0:
@@ -214,7 +234,7 @@ class Connection(ConnectionBase):
out_path = out_path + '.ps1'
b64_data = base64.b64encode(out_data)
script = script_template % (self._shell._escape(out_path), offset, b64_data, in_size)
- self._display.vvvvv('WINRM PUT "%s" to "%s" (offset=%d size=%d)' % (in_path, out_path, offset, len(out_data)), host=self._play_context.remote_addr)
+ self._display.vvvvv('WINRM PUT "%s" to "%s" (offset=%d size=%d)' % (in_path, out_path, offset, len(out_data)), host=self._winrm_host)
cmd_parts = self._shell._encode_script(script, as_list=True)
result = self._winrm_exec(cmd_parts[0], cmd_parts[1:])
if result.status_code != 0:
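The divisor chain in the comment above is straightforward to sanity-check: starting from the ~8190-character command-line limit, UTF-16LE plus base64 encoding of the wrapper costs roughly a factor of 2.67, and base64 of the payload another 1.35. With an empty cmd:

    >>> int(((8190 - 0) / 2.67) / 1.35)
    2272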
@@ -227,7 +247,7 @@ class Connection(ConnectionBase):
super(Connection, self).fetch_file(in_path, out_path)
in_path = self._shell._unquote(in_path)
out_path = out_path.replace('\\', '/')
- self._display.vvv('FETCH "%s" TO "%s"' % (in_path, out_path), host=self._play_context.remote_addr)
+ self._display.vvv('FETCH "%s" TO "%s"' % (in_path, out_path), host=self._winrm_host)
buffer_size = 2**19 # 0.5MB chunks
makedirs_safe(os.path.dirname(out_path))
out_file = None
@@ -256,7 +276,7 @@ class Connection(ConnectionBase):
Exit 1;
}
''' % dict(buffer_size=buffer_size, path=self._shell._escape(in_path), offset=offset)
- self._display.vvvvv('WINRM FETCH "%s" to "%s" (offset=%d)' % (in_path, out_path, offset), host=self._play_context.remote_addr)
+ self._display.vvvvv('WINRM FETCH "%s" to "%s" (offset=%d)' % (in_path, out_path, offset), host=self._winrm_host)
cmd_parts = self._shell._encode_script(script, as_list=True)
result = self._winrm_exec(cmd_parts[0], cmd_parts[1:])
if result.status_code != 0:
diff --git a/lib/ansible/plugins/connections/zone.py b/lib/ansible/plugins/connection/zone.py
index 82256742a1..82256742a1 100644
--- a/lib/ansible/plugins/connections/zone.py
+++ b/lib/ansible/plugins/connection/zone.py
diff --git a/lib/ansible/plugins/filter/core.py b/lib/ansible/plugins/filter/core.py
index 84e055b61f..f0bc536e82 100644
--- a/lib/ansible/plugins/filter/core.py
+++ b/lib/ansible/plugins/filter/core.py
@@ -19,6 +19,7 @@ from __future__ import absolute_import
import sys
import base64
+import itertools
import json
import os.path
import ntpath
@@ -37,11 +38,13 @@ import uuid
import yaml
from jinja2.filters import environmentfilter
from distutils.version import LooseVersion, StrictVersion
+from six import iteritems
from ansible import errors
from ansible.parsing.yaml.dumper import AnsibleDumper
from ansible.utils.hashing import md5s, checksum_s
from ansible.utils.unicode import unicode_wrap, to_unicode
+from ansible.utils.vars import merge_hash
try:
import passlib.hash
@@ -150,7 +153,7 @@ def version_compare(value, version, operator='eq', strict=False):
try:
method = getattr(py_operator, operator)
return method(Version(str(value)), Version(str(version)))
- except Exception, e:
+ except Exception as e:
raise errors.AnsibleFilterError('Version comparison: %s' % e)
def regex_escape(string):
@@ -231,6 +234,97 @@ def mandatory(a):
raise errors.AnsibleFilterError('Mandatory variable not defined.')
return a
+def combine(*terms, **kwargs):
+ recursive = kwargs.get('recursive', False)
+ if len(kwargs) > 1 or (len(kwargs) == 1 and 'recursive' not in kwargs):
+ raise errors.AnsibleFilterError("'recursive' is the only valid keyword argument")
+
+ for t in terms:
+ if not isinstance(t, dict):
+ raise errors.AnsibleFilterError("|combine expects dictionaries, got " + repr(t))
+
+ if recursive:
+ return reduce(merge_hash, terms)
+ else:
+ return dict(itertools.chain(*map(iteritems, terms)))
+
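Called directly in Python (rather than through Jinja2), the new combine filter behaves as follows: later dictionaries win, and recursive=True merges nested keys via merge_hash:

    print(combine({'a': 1, 'b': 2}, {'b': 3}))
    # -> {'a': 1, 'b': 3}
    print(combine({'a': {'x': 1}}, {'a': {'y': 2}}, recursive=True))
    # -> {'a': {'x': 1, 'y': 2}}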
+def comment(text, style='plain', **kw):
+ # Predefined comment types
+ comment_styles = {
+ 'plain': {
+ 'decoration': '# '
+ },
+ 'erlang': {
+ 'decoration': '% '
+ },
+ 'c': {
+ 'decoration': '// '
+ },
+ 'cblock': {
+ 'beginning': '/*',
+ 'decoration': ' * ',
+ 'end': ' */'
+ },
+ 'xml': {
+ 'beginning': '<!--',
+ 'decoration': ' - ',
+ 'end': '-->'
+ }
+ }
+
+ # Pointer to the right comment type
+ style_params = comment_styles[style]
+
+ if 'decoration' in kw:
+ prepostfix = kw['decoration']
+ else:
+ prepostfix = style_params['decoration']
+
+ # Default params
+ p = {
+ 'newline': '\n',
+ 'beginning': '',
+ 'prefix': (prepostfix).rstrip(),
+ 'prefix_count': 1,
+ 'decoration': '',
+ 'postfix': (prepostfix).rstrip(),
+ 'postfix_count': 1,
+ 'end': ''
+ }
+
+ # Update default params
+ p.update(style_params)
+ p.update(kw)
+
+ # Compose substrings for the final string
+ str_beginning = ''
+ if p['beginning']:
+ str_beginning = "%s%s" % (p['beginning'], p['newline'])
+ str_prefix = str(
+ "%s%s" % (p['prefix'], p['newline'])) * int(p['prefix_count'])
+ str_text = ("%s%s" % (
+ p['decoration'],
+ # Prepend each line of the text with the decorator
+ text.replace(
+ p['newline'], "%s%s" % (p['newline'], p['decoration'])))).replace(
+ # Remove trailing spaces when only decorator is on the line
+ "%s%s" % (p['decoration'], p['newline']),
+ "%s%s" % (p['decoration'].rstrip(), p['newline']))
+ str_postfix = p['newline'].join(
+ [''] + [p['postfix'] for x in range(p['postfix_count'])])
+ str_end = ''
+ if p['end']:
+ str_end = "%s%s" % (p['newline'], p['end'])
+
+ # Return the final string
+ return "%s%s%s%s%s" % (
+ str_beginning,
+ str_prefix,
+ str_text,
+ str_postfix,
+ str_end)
+
+
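A quick illustration of the comment filter's default output for the 'c' style (the prefix and postfix lines are the rstripped decoration):

    >>> print(comment('Hello world', style='c'))
    //
    // Hello world
    //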
class FilterModule(object):
''' Ansible core jinja2 filters '''
@@ -300,4 +394,10 @@ class FilterModule(object):
'shuffle': randomize_list,
# undefined
'mandatory': mandatory,
+
+ # merge dicts
+ 'combine': combine,
+
+ # comment-style decoration
+ 'comment': comment,
}
diff --git a/lib/ansible/plugins/filter/mathstuff.py b/lib/ansible/plugins/filter/mathstuff.py
index 516ef1c677..341c2aa2d8 100644
--- a/lib/ansible/plugins/filter/mathstuff.py
+++ b/lib/ansible/plugins/filter/mathstuff.py
@@ -80,14 +80,14 @@ def logarithm(x, base=math.e):
return math.log10(x)
else:
return math.log(x, base)
- except TypeError, e:
+ except TypeError as e:
raise errors.AnsibleFilterError('log() can only be used on numbers: %s' % str(e))
def power(x, y):
try:
return math.pow(x, y)
- except TypeError, e:
+ except TypeError as e:
raise errors.AnsibleFilterError('pow() can only be used on numbers: %s' % str(e))
@@ -97,7 +97,7 @@ def inversepower(x, base=2):
return math.sqrt(x)
else:
return math.pow(x, 1.0/float(base))
- except TypeError, e:
+ except TypeError as e:
raise errors.AnsibleFilterError('root() can only be used on numbers: %s' % str(e))
@@ -107,13 +107,13 @@ def human_readable(size, isbits=False, unit=None):
suffix = ''
ranges = (
- (1<<70L, 'Z'),
- (1<<60L, 'E'),
- (1<<50L, 'P'),
- (1<<40L, 'T'),
- (1<<30L, 'G'),
- (1<<20L, 'M'),
- (1<<10L, 'K'),
+ (1<<70, 'Z'),
+ (1<<60, 'E'),
+ (1<<50, 'P'),
+ (1<<40, 'T'),
+ (1<<30, 'G'),
+ (1<<20, 'M'),
+ (1<<10, 'K'),
(1, base)
)
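The dropped L suffixes above were Python 2 long literals; the shifted thresholds themselves are unchanged, each being a power of 1024:

    >>> 1 << 10, 1 << 20, 1 << 30
    (1024, 1048576, 1073741824)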
diff --git a/lib/ansible/plugins/lookup/consul_kv.py b/lib/ansible/plugins/lookup/consul_kv.py
index 5da1a5bef0..47eaa71bc8 100755
--- a/lib/ansible/plugins/lookup/consul_kv.py
+++ b/lib/ansible/plugins/lookup/consul_kv.py
@@ -67,7 +67,7 @@ except ImportError:
try:
import consul
HAS_CONSUL = True
-except ImportError, e:
+except ImportError as e:
HAS_CONSUL = False
@@ -104,7 +104,7 @@ class LookupModule(LookupBase):
values.append(r['Value'])
else:
values.append(results[1]['Value'])
- except Exception, e:
+ except Exception as e:
raise AnsibleError(
"Error locating '%s' in kv store. Error was %s" % (term, e))
@@ -127,7 +127,7 @@ class LookupModule(LookupBase):
name, value = param.split('=')
assert name in paramvals, "%s not a valid consul lookup parameter" % name
paramvals[name] = value
- except (ValueError, AssertionError), e:
+ except (ValueError, AssertionError) as e:
raise AnsibleError(e)
return paramvals
diff --git a/lib/ansible/plugins/lookup/credstash.py b/lib/ansible/plugins/lookup/credstash.py
index 9d548baea6..41cc6b894f 100644
--- a/lib/ansible/plugins/lookup/credstash.py
+++ b/lib/ansible/plugins/lookup/credstash.py
@@ -41,7 +41,7 @@ class LookupModule(LookupBase):
val = credstash.getSecret(term, **kwargs)
except credstash.ItemNotFound:
raise AnsibleError('Key {0} not found'.format(term))
- except Exception, e:
+ except Exception as e:
raise AnsibleError('Encountered exception while fetching {0}: {1}'.format(term, e.message))
ret.append(val)
diff --git a/lib/ansible/plugins/lookup/csvfile.py b/lib/ansible/plugins/lookup/csvfile.py
index 478f063a12..9ef3e5dded 100644
--- a/lib/ansible/plugins/lookup/csvfile.py
+++ b/lib/ansible/plugins/lookup/csvfile.py
@@ -30,7 +30,7 @@ class LookupModule(LookupBase):
try:
f = codecs.open(filename, 'r', encoding='utf-8')
- creader = csv.reader(f, delimiter=delimiter)
+ creader = csv.reader(f, delimiter=str(delimiter))
for row in creader:
if row[0] == key:
@@ -70,7 +70,7 @@ class LookupModule(LookupBase):
paramvals['delimiter'] = "\t"
lookupfile = self._loader.path_dwim_relative(basedir, 'files', paramvals['file'])
- var = self.read_csv(lookupfile, key, paramvals['delimiter'], paramvals['default'], paramvals['col'])
+ var = self.read_csv(lookupfile, key, str(paramvals['delimiter']), paramvals['default'], paramvals['col'])
if var is not None:
if type(var) is list:
for v in var:
diff --git a/lib/ansible/plugins/lookup/dig.py b/lib/ansible/plugins/lookup/dig.py
index acd73ddc19..a3ba43879b 100644
--- a/lib/ansible/plugins/lookup/dig.py
+++ b/lib/ansible/plugins/lookup/dig.py
@@ -141,7 +141,7 @@ class LookupModule(LookupBase):
try:
nsaddr = dns.resolver.query(ns)[0].address
nameservers.append(nsaddr)
- except Exception, e:
+ except Exception as e:
raise AnsibleError("dns lookup NS: ", str(e))
myres.nameservers = nameservers
continue
@@ -176,7 +176,7 @@ class LookupModule(LookupBase):
domain = n.to_text()
except dns.exception.SyntaxError:
pass
- except Exception, e:
+ except Exception as e:
raise AnsibleError("dns.reversename unhandled exception", str(e))
try:
@@ -196,7 +196,7 @@ class LookupModule(LookupBase):
rd['ttl'] = answers.rrset.ttl
ret.append(rd)
- except Exception, e:
+ except Exception as e:
ret.append(str(e))
except dns.resolver.NXDOMAIN:
@@ -205,7 +205,7 @@ class LookupModule(LookupBase):
ret.append("")
except dns.resolver.Timeout:
ret.append('')
- except dns.exception.DNSException, e:
+ except dns.exception.DNSException as e:
raise AnsibleError("dns.resolver unhandled exception", e)
return ret
diff --git a/lib/ansible/plugins/lookup/ini.py b/lib/ansible/plugins/lookup/ini.py
index 7ea8f92aaf..9c1adc90ca 100644
--- a/lib/ansible/plugins/lookup/ini.py
+++ b/lib/ansible/plugins/lookup/ini.py
@@ -47,7 +47,7 @@ class LookupModule(LookupBase):
# Retrieve a single value
try:
value = self.cp.get(section, key)
- except ConfigParser.NoOptionError, e:
+ except ConfigParser.NoOptionError as e:
return dflt
return value
@@ -76,7 +76,7 @@ class LookupModule(LookupBase):
name, value = param.split('=')
assert(name in paramvals)
paramvals[name] = value
- except (ValueError, AssertionError), e:
+ except (ValueError, AssertionError) as e:
raise errors.AnsibleError(e)
path = self._loader.path_dwim_relative(basedir, 'files', paramvals['file'])
diff --git a/lib/ansible/plugins/lookup/nested.py b/lib/ansible/plugins/lookup/nested.py
index ff865c28ee..2593aa5d9f 100644
--- a/lib/ansible/plugins/lookup/nested.py
+++ b/lib/ansible/plugins/lookup/nested.py
@@ -32,7 +32,7 @@ class LookupModule(LookupBase):
for x in terms:
try:
intermediate = listify_lookup_plugin_terms(x, templar=self._templar, loader=self._loader, fail_on_undefined=True)
- except UndefinedError, e:
+ except UndefinedError as e:
raise AnsibleUndefinedVariable("One of the nested variables was undefined. The error was: %s" % e)
results.append(intermediate)
return results
diff --git a/lib/ansible/plugins/lookup/password.py b/lib/ansible/plugins/lookup/password.py
index 3c80e6811f..7cfecb5e6a 100644
--- a/lib/ansible/plugins/lookup/password.py
+++ b/lib/ansible/plugins/lookup/password.py
@@ -20,7 +20,6 @@ from __future__ import (absolute_import, division, print_function)
__metaclass__ = type
import os
-import errno
import string
import random
@@ -29,10 +28,61 @@ from string import ascii_letters, digits
from ansible import constants as C
from ansible.errors import AnsibleError
from ansible.plugins.lookup import LookupBase
+from ansible.parsing.splitter import parse_kv
from ansible.utils.encrypt import do_encrypt
from ansible.utils.path import makedirs_safe
DEFAULT_LENGTH = 20
+VALID_PARAMS = frozenset(('length', 'encrypt', 'chars'))
+
+
+def _parse_parameters(term):
+ # Hacky parsing of params
+ # See https://github.com/ansible/ansible-modules-core/issues/1968#issuecomment-136842156
+ # and the first_found lookup for how we want to fix this later
+ first_split = term.split(' ', 1)
+ if len(first_split) <= 1:
+ # Only a single argument given, therefore it's a path
+ relpath = term
+ params = dict()
+ else:
+ relpath = first_split[0]
+ params = parse_kv(first_split[1])
+ if '_raw_params' in params:
+ # Spaces in the path?
+ relpath = ' '.join((relpath, params['_raw_params']))
+ del params['_raw_params']
+
+ # Check that we parsed the params correctly
+ if not term.startswith(relpath):
+ # Likely, the user had a non-parameter following a parameter.
+ # Reject this as a user typo
+ raise AnsibleError('Unrecognized value after key=value parameters given to password lookup')
+ # No _raw_params means we already found the complete path when
+ # we split it initially
+
+ # Check for invalid parameters. Probably a user typo
+ invalid_params = frozenset(params.keys()).difference(VALID_PARAMS)
+ if invalid_params:
+ raise AnsibleError('Unrecognized parameter(s) given to password lookup: %s' % ', '.join(invalid_params))
+
+ # Set defaults
+ params['length'] = int(params.get('length', DEFAULT_LENGTH))
+ params['encrypt'] = params.get('encrypt', None)
+
+ params['chars'] = params.get('chars', None)
+ if params['chars']:
+ tmp_chars = []
+ if ',,' in params['chars']:
+ tmp_chars.append(u',')
+ tmp_chars.extend(c for c in params['chars'].replace(',,', ',').split(',') if c)
+ params['chars'] = tmp_chars
+ else:
+ # Default chars for password
+ params['chars'] = ['ascii_letters', 'digits', ".,:-_"]
+
+ return relpath, params
+
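For a typical lookup term, _parse_parameters yields the path plus a normalized parameter dict (dict key order illustrative):

    >>> _parse_parameters('/tmp/testpass length=8 chars=ascii_letters,digits')
    ('/tmp/testpass',
     {'length': 8, 'encrypt': None, 'chars': ['ascii_letters', 'digits']})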
class LookupModule(LookupBase):
@@ -60,37 +110,7 @@ class LookupModule(LookupBase):
ret = []
for term in terms:
- # you can't have escaped spaces in yor pathname
- params = term.split()
- relpath = params[0]
-
- paramvals = {
- 'length': DEFAULT_LENGTH,
- 'encrypt': None,
- 'chars': ['ascii_letters','digits',".,:-_"],
- }
-
- # get non-default parameters if specified
- try:
- for param in params[1:]:
- name, value = param.split('=')
- assert(name in paramvals)
- if name == 'length':
- paramvals[name] = int(value)
- elif name == 'chars':
- use_chars=[]
- if ",," in value:
- use_chars.append(',')
- use_chars.extend(value.replace(',,',',').split(','))
- paramvals['chars'] = use_chars
- else:
- paramvals[name] = value
- except (ValueError, AssertionError) as e:
- raise AnsibleError(e)
-
- length = paramvals['length']
- encrypt = paramvals['encrypt']
- use_chars = paramvals['chars']
+ relpath, params = _parse_parameters(term)
# get password or create it if file doesn't exist
path = self._loader.path_dwim(relpath)
@@ -101,10 +121,10 @@ class LookupModule(LookupBase):
except OSError as e:
raise AnsibleError("cannot create the path for the password lookup: %s (error was %s)" % (pathdir, str(e)))
- chars = "".join([getattr(string,c,c) for c in use_chars]).replace('"','').replace("'",'')
- password = ''.join(random.choice(chars) for _ in range(length))
+ chars = "".join(getattr(string, c, c) for c in params['chars']).replace('"', '').replace("'", '')
+ password = ''.join(random.choice(chars) for _ in range(params['length']))
- if encrypt is not None:
+ if params['encrypt'] is not None:
salt = self.random_salt()
content = '%s salt=%s' % (password, salt)
else:
@@ -118,28 +138,27 @@ class LookupModule(LookupBase):
if sep >= 0:
password = content[:sep]
- salt = content[sep+1:].split('=')[1]
+ salt = content[sep + 1:].split('=')[1]
else:
password = content
salt = None
# crypt requested, add salt if missing
- if (encrypt is not None and not salt):
+ if (params['encrypt'] is not None and not salt):
salt = self.random_salt()
content = '%s salt=%s' % (password, salt)
with open(path, 'w') as f:
os.chmod(path, 0o600)
f.write(content + '\n')
# crypt not requested, remove salt if present
- elif (encrypt is None and salt):
+ elif (params['encrypt'] is None and salt):
with open(path, 'w') as f:
os.chmod(path, 0o600)
f.write(password + '\n')
- if encrypt:
- password = do_encrypt(password, encrypt, salt=salt)
+ if params['encrypt']:
+ password = do_encrypt(password, params['encrypt'], salt=salt)
ret.append(password)
return ret
-
diff --git a/lib/ansible/plugins/lookup/sequence.py b/lib/ansible/plugins/lookup/sequence.py
index a6d133b3f9..319173d5a9 100644
--- a/lib/ansible/plugins/lookup/sequence.py
+++ b/lib/ansible/plugins/lookup/sequence.py
@@ -186,7 +186,7 @@ class LookupModule(LookupBase):
try:
if not self.parse_simple_args(term):
self.parse_kv_args(parse_kv(term))
- except Exception, e:
+ except Exception as e:
raise AnsibleError("unknown error parsing with_sequence arguments: %r. Error was: %s" % (term, e))
self.sanity_check()
diff --git a/lib/ansible/plugins/lookup/shelvefile.py b/lib/ansible/plugins/lookup/shelvefile.py
index 89e393694b..8883dc06b9 100644
--- a/lib/ansible/plugins/lookup/shelvefile.py
+++ b/lib/ansible/plugins/lookup/shelvefile.py
@@ -55,7 +55,7 @@ class LookupModule(LookupBase):
assert(name in paramvals)
paramvals[name] = value
- except (ValueError, AssertionError), e:
+ except (ValueError, AssertionError) as e:
# In case "file" or "key" are not present
raise AnsibleError(e)
diff --git a/lib/ansible/plugins/lookup/template.py b/lib/ansible/plugins/lookup/template.py
index 16b86c2de5..5fc10d7f52 100644
--- a/lib/ansible/plugins/lookup/template.py
+++ b/lib/ansible/plugins/lookup/template.py
@@ -40,11 +40,12 @@ class LookupModule(LookupBase):
with open(lookupfile, 'r') as f:
template_data = f.read()
- self._templar.environment.searchpath = [self._loader._basedir, os.path.dirname(lookupfile)]
+ searchpath = [self._loader._basedir, os.path.dirname(lookupfile)]
if 'role_path' in variables:
- self._templar.environment.searchpath.insert(1, C.DEFAULT_ROLES_PATH)
- self._templar.environment.searchpath.insert(1, variables['role_path'])
+ searchpath.insert(1, C.DEFAULT_ROLES_PATH)
+ searchpath.insert(1, variables['role_path'])
+ self._templar.environment.loader.searchpath = searchpath
res = self._templar.template(template_data, preserve_trailing_newlines=True)
ret.append(res)
else:
diff --git a/lib/ansible/plugins/shell/csh.py b/lib/ansible/plugins/shell/csh.py
index 29751f73ee..1c383d133c 100644
--- a/lib/ansible/plugins/shell/csh.py
+++ b/lib/ansible/plugins/shell/csh.py
@@ -23,6 +23,7 @@ class ShellModule(ShModule):
# How to end lines in a python script one-liner
_SHELL_EMBEDDED_PY_EOL = '\\\n'
+ _SHELL_REDIRECT_ALLNULL = '>& /dev/null'
def env_prefix(self, **kwargs):
return 'env %s' % super(ShellModule, self).env_prefix(**kwargs)
diff --git a/lib/ansible/plugins/shell/powershell.py b/lib/ansible/plugins/shell/powershell.py
index 0e16d34e16..aba3183e76 100644
--- a/lib/ansible/plugins/shell/powershell.py
+++ b/lib/ansible/plugins/shell/powershell.py
@@ -112,12 +112,41 @@ class ShellModule(object):
cmd_parts.insert(0, '&')
elif shebang and shebang.startswith('#!'):
cmd_parts.insert(0, shebang[2:])
- catch = '''
- $_obj = @{ failed = $true; $msg = $_ }
- echo $_obj | ConvertTo-Json -Compress -Depth 99
- Exit 1
- '''
- script = 'Try { %s }\nCatch { %s }' % (' '.join(cmd_parts), 'throw')
+ script = '''
+ Try
+ {
+ %s
+ }
+ Catch
+ {
+ $_obj = @{ failed = $true }
+ If ($_.Exception.GetType)
+ {
+ $_obj.Add('msg', $_.Exception.Message)
+ }
+ Else
+ {
+ $_obj.Add('msg', $_.ToString())
+ }
+ If ($_.InvocationInfo.PositionMessage)
+ {
+ $_obj.Add('exception', $_.InvocationInfo.PositionMessage)
+ }
+ ElseIf ($_.ScriptStackTrace)
+ {
+ $_obj.Add('exception', $_.ScriptStackTrace)
+ }
+ Try
+ {
+ $_obj.Add('error_record', ($_ | ConvertTo-Json | ConvertFrom-Json))
+ }
+ Catch
+ {
+ }
+ Echo $_obj | ConvertTo-Json -Compress -Depth 99
+ Exit 1
+ }
+ ''' % (' '.join(cmd_parts))
if rm_tmp:
rm_tmp = self._escape(self._unquote(rm_tmp))
rm_cmd = 'Remove-Item "%s" -Force -Recurse -ErrorAction SilentlyContinue' % rm_tmp
@@ -149,9 +178,11 @@ class ShellModule(object):
replace = lambda m: substs[m.lastindex - 1]
return re.sub(pattern, replace, value)
- def _encode_script(self, script, as_list=False):
+ def _encode_script(self, script, as_list=False, strict_mode=True):
'''Convert a PowerShell script to a single base64-encoded command.'''
script = to_unicode(script)
+ if strict_mode:
+ script = u'Set-StrictMode -Version Latest\r\n%s' % script
script = '\n'.join([x.strip() for x in script.splitlines() if x.strip()])
encoded_script = base64.b64encode(script.encode('utf-16-le'))
cmd_parts = _common_args + ['-EncodedCommand', encoded_script]
diff --git a/lib/ansible/plugins/shell/sh.py b/lib/ansible/plugins/shell/sh.py
index 1464fd09fa..3b44683776 100644
--- a/lib/ansible/plugins/shell/sh.py
+++ b/lib/ansible/plugins/shell/sh.py
@@ -24,12 +24,15 @@ import ansible.constants as C
import time
import random
+from six import text_type
+
_USER_HOME_PATH_RE = re.compile(r'^~[_.A-Za-z0-9][-_.A-Za-z0-9]*$')
class ShellModule(object):
# How to end lines in a python script one-liner
_SHELL_EMBEDDED_PY_EOL = '\n'
+ _SHELL_REDIRECT_ALLNULL = '> /dev/null 2>&1'
def env_prefix(self, **kwargs):
'''Build command prefix with environment variables.'''
@@ -39,7 +42,7 @@ class ShellModule(object):
LC_MESSAGES = C.DEFAULT_MODULE_LANG,
)
env.update(kwargs)
- return ' '.join(['%s=%s' % (k, pipes.quote(unicode(v))) for k,v in env.items()])
+ return ' '.join(['%s=%s' % (k, pipes.quote(text_type(v))) for k,v in env.items()])
def join_path(self, *args):
return os.path.join(*args)
@@ -53,10 +56,10 @@ class ShellModule(object):
def remove(self, path, recurse=False):
path = pipes.quote(path)
+ cmd = 'rm -f '
if recurse:
- return "rm -rf %s >/dev/null 2>&1" % path
- else:
- return "rm -f %s >/dev/null 2>&1" % path
+ cmd += '-r '
+ return cmd + "%s %s" % (path, self._SHELL_REDIRECT_ALLNULL)
def mkdtemp(self, basefile=None, system=False, mode=None):
if not basefile:
@@ -70,7 +73,7 @@ class ShellModule(object):
# change the umask in a subshell to achieve the desired mode
# also for directories created with `mkdir -p`
if mode:
- tmp_umask = 0777 & ~mode
+ tmp_umask = 0o777 & ~mode
cmd = '(umask %o && %s)' % (tmp_umask, cmd)
return cmd
@@ -139,5 +142,5 @@ class ShellModule(object):
cmd_parts = [env_string.strip(), shebang.replace("#!", "").strip(), cmd]
new_cmd = " ".join(cmd_parts)
if rm_tmp:
- new_cmd = '%s; rm -rf "%s" >/dev/null 2>&1' % (new_cmd, rm_tmp)
+ new_cmd = '%s; rm -rf "%s" %s' % (new_cmd, rm_tmp, self._SHELL_REDIRECT_ALLNULL)
return new_cmd
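With _SHELL_REDIRECT_ALLNULL hoisted into a class attribute, the same remove() body produces shell-appropriate commands; a sketch of the two subclass values side by side:

    # Sketch: the redirect differs per shell family, the command shape doesn't.
    def remove_cmd(path, redirect, recurse=False):
        cmd = 'rm -f '
        if recurse:
            cmd += '-r '
        return cmd + '%s %s' % (path, redirect)

    print(remove_cmd('/tmp/x', '> /dev/null 2>&1'))   # sh: _SHELL_REDIRECT_ALLNULL
    print(remove_cmd('/tmp/x', '>& /dev/null'))       # csh: _SHELL_REDIRECT_ALLNULL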
diff --git a/lib/ansible/plugins/strategies/__init__.py b/lib/ansible/plugins/strategy/__init__.py
index f6bb3b03aa..0c3f1da9af 100644
--- a/lib/ansible/plugins/strategies/__init__.py
+++ b/lib/ansible/plugins/strategy/__init__.py
@@ -20,9 +20,12 @@ from __future__ import (absolute_import, division, print_function)
__metaclass__ = type
from six.moves import queue as Queue
+from six import iteritems, text_type
+
import time
-from ansible.errors import *
+from ansible import constants as C
+from ansible.errors import AnsibleError, AnsibleParserError
from ansible.executor.task_result import TaskResult
from ansible.inventory.host import Host
from ansible.inventory.group import Group
@@ -30,8 +33,9 @@ from ansible.playbook.handler import Handler
from ansible.playbook.helpers import load_list_of_blocks
from ansible.playbook.included_file import IncludedFile
from ansible.playbook.role import hash_params
-from ansible.plugins import _basedirs, filter_loader, lookup_loader, module_loader
+from ansible.plugins import action_loader, connection_loader, filter_loader, lookup_loader, module_loader
from ansible.template import Templar
+from ansible.vars.unsafe_proxy import UnsafeProxy
try:
from __main__ import display
@@ -50,7 +54,8 @@ class SharedPluginLoaderObj:
the forked processes over the queue easier
'''
def __init__(self):
- self.basedirs = _basedirs[:]
+ self.action_loader = action_loader
+ self.connection_loader = connection_loader
self.filter_loader = filter_loader
self.lookup_loader = lookup_loader
self.module_loader = module_loader
@@ -163,7 +168,7 @@ class StrategyBase:
while not self._final_q.empty() and not self._tqm._terminated:
try:
result = self._final_q.get(block=False)
- self._display.debug("got result from result worker: %s" % ([unicode(x) for x in result],))
+ self._display.debug("got result from result worker: %s" % ([text_type(x) for x in result],))
# all host status messages contain 2 entries: (msg, task_result)
if result[0] in ('host_task_ok', 'host_task_failed', 'host_task_skipped', 'host_unreachable'):
@@ -204,7 +209,7 @@ class StrategyBase:
if task_result._task._role is not None and result[0] in ('host_task_ok', 'host_task_failed'):
# lookup the role in the ROLE_CACHE to make sure we're dealing
# with the correct object and mark it as executed
- for (entry, role_obj) in iterator._play.ROLE_CACHE[task_result._task._role._role_name].iteritems():
+ for (entry, role_obj) in iteritems(iterator._play.ROLE_CACHE[task_result._task._role._role_name]):
if role_obj._uuid == task_result._task._role._uuid:
role_obj._had_task_run[host.name] = True
@@ -233,11 +238,36 @@ class StrategyBase:
elif result[0] == 'register_host_var':
# essentially the same as 'set_host_var' below, however we
- # never follow the delegate_to value for registered vars
+ # never follow the delegate_to value for registered vars and
+ # the variable goes in the fact_cache
host = result[1]
var_name = result[2]
var_value = result[3]
- self._variable_manager.set_host_variable(host, var_name, var_value)
+
+ def _wrap_var(v):
+ if isinstance(v, dict):
+ v = _wrap_dict(v)
+ elif isinstance(v, list):
+ v = _wrap_list(v)
+ else:
+ if v is not None and not isinstance(v, UnsafeProxy):
+ v = UnsafeProxy(v)
+ return v
+
+ def _wrap_dict(v):
+ for k in v.keys():
+ if v[k] is not None and not isinstance(v[k], UnsafeProxy):
+ v[k] = _wrap_var(v[k])
+ return v
+
+ def _wrap_list(v):
+ for idx, item in enumerate(v):
+ if item is not None and not isinstance(item, UnsafeProxy):
+ v[idx] = _wrap_var(item)
+ return v
+
+ var_value = _wrap_var(var_value)
+ self._variable_manager.set_nonpersistent_facts(host, {var_name: var_value})
elif result[0] in ('set_host_var', 'set_host_facts'):
host = result[1]
@@ -263,7 +293,10 @@ class StrategyBase:
self._variable_manager.set_host_variable(target_host, var_name, var_value)
elif result[0] == 'set_host_facts':
facts = result[4]
- self._variable_manager.set_host_facts(target_host, facts)
+ if task.action == 'set_fact':
+ self._variable_manager.set_nonpersistent_facts(target_host, facts)
+ else:
+ self._variable_manager.set_host_facts(target_host, facts)
else:
raise AnsibleError("unknown result message received: %s" % result[0])
@@ -355,7 +388,7 @@ class StrategyBase:
groups[group_name] = []
groups[group_name].append(host)
- for group_name, hosts in groups.iteritems():
+ for group_name, hosts in iteritems(groups):
new_group = self._inventory.get_group(group_name)
if not new_group:
# create the new group and add it to inventory
@@ -382,7 +415,7 @@ class StrategyBase:
data = self._loader.load_from_file(included_file._filename)
if data is None:
return []
- except AnsibleError, e:
+ except AnsibleError as e:
for host in included_file._hosts:
tr = TaskResult(host=host, task=included_file._task, return_data=dict(failed=True, reason=str(e)))
iterator.mark_host_failed(host)
@@ -406,6 +439,13 @@ class StrategyBase:
# set the vars for this task from those specified as params to the include
for b in block_list:
+ # first make a copy of the including task, so that each has a unique copy to modify
+ # FIXME: not sure if this is the best way to fix this, as we might be losing
+ # information in the copy. Previously we assigned the include params to
+ # the block variables directly, which caused other problems, so we may
+ # need to figure out a third option if this also presents problems.
+ b._task_include = b._task_include.copy(exclude_block=True)
+ # then we create a temporary set of vars to ensure the variable reference is unique
temp_vars = b._task_include.vars.copy()
temp_vars.update(included_file._args.copy())
b._task_include.vars = temp_vars
@@ -424,64 +464,77 @@ class StrategyBase:
# but this may take some work in the iterator and gets tricky when
# we consider the ability of meta tasks to flush handlers
for handler in handler_block.block:
- handler_name = handler.get_name()
- if handler_name in self._notified_handlers and len(self._notified_handlers[handler_name]):
- # FIXME: need to use iterator.get_failed_hosts() instead?
- #if not len(self.get_hosts_remaining(iterator._play)):
- # self._tqm.send_callback('v2_playbook_on_no_hosts_remaining')
- # result = False
- # break
- self._tqm.send_callback('v2_playbook_on_handler_task_start', handler)
- host_results = []
- for host in self._notified_handlers[handler_name]:
- if not handler.has_triggered(host) and (host.name not in self._tqm._failed_hosts or play_context.force_handlers):
- task_vars = self._variable_manager.get_vars(loader=self._loader, play=iterator._play, host=host, task=handler)
- task_vars = self.add_tqm_variables(task_vars, play=iterator._play)
- self._queue_task(host, handler, task_vars, play_context)
- #handler.flag_for_host(host)
- results = self._process_pending_results(iterator)
- host_results.extend(results)
- results = self._wait_on_pending_results(iterator)
- host_results.extend(results)
-
- # wipe the notification list
- self._notified_handlers[handler_name] = []
-
- try:
- included_files = IncludedFile.process_include_results(
- host_results,
- self._tqm,
- iterator=iterator,
- loader=self._loader,
- variable_manager=self._variable_manager
- )
- except AnsibleError, e:
- return False
-
- if len(included_files) > 0:
- for included_file in included_files:
- try:
- new_blocks = self._load_included_file(included_file, iterator=iterator, is_handler=True)
- # for every task in each block brought in by the include, add the list
- # of hosts which included the file to the notified_handlers dict
- for block in new_blocks:
- for task in block.block:
- if task.name in self._notified_handlers:
- for host in included_file._hosts:
- if host.name not in self._notified_handlers[task.name]:
- self._notified_handlers[task.name].append(host)
- else:
- self._notified_handlers[task.name] = included_file._hosts[:]
- # and add the new blocks to the list of handler blocks
- handler_block.block.extend(block.block)
- #iterator._play.handlers.extend(new_blocks)
- except AnsibleError, e:
- for host in included_file._hosts:
- iterator.mark_host_failed(host)
- self._tqm._failed_hosts[host.name] = True
- self._display.warning(str(e))
- continue
- self._display.debug("done running handlers, result is: %s" % result)
+ handler_vars = self._variable_manager.get_vars(loader=self._loader, play=iterator._play, task=handler)
+ templar = Templar(loader=self._loader, variables=handler_vars)
+ handler_name = templar.template(handler.get_name())
+ should_run = handler_name in self._notified_handlers and len(self._notified_handlers[handler_name])
+ if should_run:
+ result = self._do_handler_run(handler, handler_name, iterator=iterator, play_context=play_context)
+ if not result:
+ break
+ return result
+
+ def _do_handler_run(self, handler, handler_name, iterator, play_context, notified_hosts=None):
+
+ # FIXME: need to use iterator.get_failed_hosts() instead?
+ #if not len(self.get_hosts_remaining(iterator._play)):
+ # self._tqm.send_callback('v2_playbook_on_no_hosts_remaining')
+ # result = False
+ # break
+ self._tqm.send_callback('v2_playbook_on_handler_task_start', handler)
+
+ if notified_hosts is None:
+ notified_hosts = self._notified_handlers[handler_name]
+
+ host_results = []
+ for host in notified_hosts:
+ if not handler.has_triggered(host) and (host.name not in self._tqm._failed_hosts or play_context.force_handlers):
+ task_vars = self._variable_manager.get_vars(loader=self._loader, play=iterator._play, host=host, task=handler)
+ task_vars = self.add_tqm_variables(task_vars, play=iterator._play)
+ self._queue_task(host, handler, task_vars, play_context)
+
+ # collect the results from the handler run
+ host_results = self._wait_on_pending_results(iterator)
+
+ try:
+ included_files = IncludedFile.process_include_results(
+ host_results,
+ self._tqm,
+ iterator=iterator,
+ loader=self._loader,
+ variable_manager=self._variable_manager
+ )
+ except AnsibleError as e:
+ return False
+
+ result = True
+ if len(included_files) > 0:
+ for included_file in included_files:
+ try:
+ new_blocks = self._load_included_file(included_file, iterator=iterator, is_handler=True)
+ # for every task in each block brought in by the include, add the list
+ # of hosts which included the file to the notified_handlers dict
+ for block in new_blocks:
+ iterator._play.handlers.append(block)
+ for task in block.block:
+ result = self._do_handler_run(
+ handler=task,
+ iterator=iterator,
+ play_context=play_context,
+ notified_hosts=included_file._hosts[:],
+ )
+ if not result:
+ break
+ except AnsibleError as e:
+ for host in included_file._hosts:
+ iterator.mark_host_failed(host)
+ self._tqm._failed_hosts[host.name] = True
+ self._display.warning(str(e))
+ continue
+
+ # wipe the notification list
+ self._notified_handlers[handler_name] = []
+ self._display.debug("done running handlers, result is: %s" % result)
return result
def _take_step(self, task, host=None):
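In outline, the refactor above hinges on one piece of shared state: a dict mapping each handler name to the list of hosts that notified it, which _do_handler_run drains and then wipes. A minimal sketch of that bookkeeping, with hypothetical handler and host names:

    # Minimal sketch of the _notified_handlers bookkeeping (hypothetical names).
    notified_handlers = {'restart apache': []}

    def notify(handler_name, host):
        # duplicate notifications for the same host are collapsed
        if host not in notified_handlers[handler_name]:
            notified_handlers[handler_name].append(host)

    notify('restart apache', 'web1')
    notify('restart apache', 'web1')
    should_run = bool(notified_handlers['restart apache'])  # mirrors the check above
    if should_run:
        # ... queue the handler on each notified host, wait for results ...
        notified_handlers['restart apache'] = []            # wipe the notification list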
diff --git a/lib/ansible/plugins/strategies/free.py b/lib/ansible/plugins/strategy/free.py
index 5bc0d8db36..e44d872fec 100644
--- a/lib/ansible/plugins/strategies/free.py
+++ b/lib/ansible/plugins/strategy/free.py
@@ -23,7 +23,7 @@ import time
from ansible.errors import *
from ansible.playbook.included_file import IncludedFile
-from ansible.plugins.strategies import StrategyBase
+from ansible.plugins.strategy import StrategyBase
try:
from __main__ import display
@@ -55,7 +55,7 @@ class StrategyModule(StrategyBase):
work_to_do = True
while work_to_do and not self._tqm._terminated:
- hosts_left = self._inventory.get_hosts(iterator._play.hosts)
+ hosts_left = [host for host in self._inventory.get_hosts(iterator._play.hosts) if host.name not in self._tqm._unreachable_hosts]
if len(hosts_left) == 0:
self._tqm.send_callback('v2_playbook_on_no_hosts_remaining')
result = False
@@ -144,7 +144,7 @@ class StrategyModule(StrategyBase):
try:
included_files = IncludedFile.process_include_results(host_results, self._tqm, iterator=iterator, loader=self._loader, variable_manager=self._variable_manager)
- except AnsibleError, e:
+ except AnsibleError as e:
return False
if len(included_files) > 0:
@@ -153,7 +153,7 @@ class StrategyModule(StrategyBase):
# list of noop tasks, to make sure that they continue running in lock-step
try:
new_blocks = self._load_included_file(included_file, iterator=iterator)
- except AnsibleError, e:
+ except AnsibleError as e:
for host in included_file._hosts:
iterator.mark_host_failed(host)
self._display.warning(str(e))
diff --git a/lib/ansible/plugins/strategies/linear.py b/lib/ansible/plugins/strategy/linear.py
index 527843c692..d1e092b739 100644
--- a/lib/ansible/plugins/strategies/linear.py
+++ b/lib/ansible/plugins/strategy/linear.py
@@ -19,15 +19,24 @@
from __future__ import (absolute_import, division, print_function)
__metaclass__ = type
+from six import iteritems, text_type
+
from ansible.errors import AnsibleError
from ansible.executor.play_iterator import PlayIterator
from ansible.playbook.block import Block
from ansible.playbook.included_file import IncludedFile
from ansible.playbook.task import Task
from ansible.plugins import action_loader
-from ansible.plugins.strategies import StrategyBase
+from ansible.plugins.strategy import StrategyBase
from ansible.template import Templar
+try:
+ from __main__ import display
+except ImportError:
+ from ansible.utils.display import Display
+ display = Display()
+
+
class StrategyModule(StrategyBase):
def _get_next_task_lockstep(self, hosts, iterator):
@@ -43,8 +52,10 @@ class StrategyModule(StrategyBase):
noop_task.set_loader(iterator._play._loader)
host_tasks = {}
+ display.debug("building list of next tasks for hosts")
for host in hosts:
host_tasks[host.name] = iterator.get_next_task_for_host(host, peek=True)
+ display.debug("done building task lists")
num_setups = 0
num_tasks = 0
@@ -53,7 +64,8 @@ class StrategyModule(StrategyBase):
lowest_cur_block = len(iterator._blocks)
- for (k, v) in host_tasks.iteritems():
+ display.debug("counting tasks in each state of execution")
+ for (k, v) in iteritems(host_tasks):
if v is None:
continue
@@ -72,6 +84,7 @@ class StrategyModule(StrategyBase):
num_rescue += 1
elif s.run_state == PlayIterator.ITERATING_ALWAYS:
num_always += 1
+ display.debug("done counting tasks in each state of execution")
def _advance_selected_hosts(hosts, cur_block, cur_state):
'''
@@ -83,6 +96,7 @@ class StrategyModule(StrategyBase):
# we return the values in the order they were originally
# specified in the given hosts array
rvals = []
+ display.debug("starting to advance hosts")
for host in hosts:
host_state_task = host_tasks[host.name]
if host_state_task is None:
@@ -92,36 +106,39 @@ class StrategyModule(StrategyBase):
continue
if s.run_state == cur_state and s.cur_block == cur_block:
new_t = iterator.get_next_task_for_host(host)
- #if new_t != t:
- # raise AnsibleError("iterator error, wtf?") FIXME
rvals.append((host, t))
else:
rvals.append((host, noop_task))
+ display.debug("done advancing hosts to next task")
return rvals
-
# if any hosts are in ITERATING_SETUP, return the setup task
# while all other hosts get a noop
if num_setups:
+ display.debug("advancing hosts in ITERATING_SETUP")
return _advance_selected_hosts(hosts, lowest_cur_block, PlayIterator.ITERATING_SETUP)
# if any hosts are in ITERATING_TASKS, return the next normal
# task for these hosts, while all other hosts get a noop
if num_tasks:
+ display.debug("advancing hosts in ITERATING_TASKS")
return _advance_selected_hosts(hosts, lowest_cur_block, PlayIterator.ITERATING_TASKS)
# if any hosts are in ITERATING_RESCUE, return the next rescue
# task for these hosts, while all other hosts get a noop
if num_rescue:
+ display.debug("advancing hosts in ITERATING_RESCUE")
return _advance_selected_hosts(hosts, lowest_cur_block, PlayIterator.ITERATING_RESCUE)
# if any hosts are in ITERATING_ALWAYS, return the next always
# task for these hosts, while all other hosts get a noop
if num_always:
+ display.debug("advancing hosts in ITERATING_ALWAYS")
return _advance_selected_hosts(hosts, lowest_cur_block, PlayIterator.ITERATING_ALWAYS)
# at this point, everything must be ITERATING_COMPLETE, so we
# return None for all hosts in the list
+ display.debug("all hosts are done, so returning None's for all hosts")
return [(host, None) for host in hosts]
def run(self, iterator, play_context):
@@ -138,7 +155,7 @@ class StrategyModule(StrategyBase):
try:
self._display.debug("getting the remaining hosts for this loop")
- hosts_left = self._inventory.get_hosts(iterator._play.hosts)
+ hosts_left = [host for host in self._inventory.get_hosts(iterator._play.hosts) if host.name not in self._tqm._unreachable_hosts]
self._display.debug("done getting the remaining hosts for this loop")
# queue up this task for each host in the inventory
@@ -200,15 +217,22 @@ class StrategyModule(StrategyBase):
self._display.debug("done getting variables")
if not callback_sent:
- temp_task = task.copy()
+ display.debug("sending task start callback, copying the task so we can template it temporarily")
+ saved_name = task.name
+ display.debug("done copying, going to template now")
try:
- temp_task.name = unicode(templar.template(temp_task.name, fail_on_undefined=False))
+ task.name = text_type(templar.template(task.name, fail_on_undefined=False))
+ display.debug("done templating")
except:
# just ignore any errors during task name templating,
# we don't care if it just shows the raw name
+ display.debug("templating failed for some reason")
pass
- self._tqm.send_callback('v2_playbook_on_task_start', temp_task, is_conditional=False)
+ display.debug("here goes the callback...")
+ self._tqm.send_callback('v2_playbook_on_task_start', task, is_conditional=False)
+ task.name = saved_name
callback_sent = True
+ display.debug("sending task start callback")
self._blocked_hosts[host.get_name()] = True
self._queue_task(host, task, task_vars, play_context)
@@ -236,7 +260,7 @@ class StrategyModule(StrategyBase):
try:
included_files = IncludedFile.process_include_results(host_results, self._tqm, iterator=iterator, loader=self._loader, variable_manager=self._variable_manager)
- except AnsibleError, e:
+ except AnsibleError as e:
return False
if len(included_files) > 0:
@@ -251,7 +275,7 @@ class StrategyModule(StrategyBase):
# list of noop tasks, to make sure that they continue running in lock-step
try:
new_blocks = self._load_included_file(included_file, iterator=iterator)
- except AnsibleError, e:
+ except AnsibleError as e:
for host in included_file._hosts:
iterator.mark_host_failed(host)
self._display.warning(str(e))
@@ -274,7 +298,7 @@ class StrategyModule(StrategyBase):
iterator.add_tasks(host, all_blocks[host])
self._display.debug("results queue empty")
- except (IOError, EOFError), e:
+ except (IOError, EOFError) as e:
self._display.debug("got IOError/EOFError in task loop: %s" % e)
# most likely an abort, return failed
return False
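The debug statements added above bracket a simple priority scheme: hosts advance in lockstep by picking the most urgent iterator state any host occupies (setup, then tasks, then rescue, then always) and handing every other host a noop. A condensed sketch of that selection, not the real PlayIterator API:

    # Condensed sketch of the lockstep selection (stand-in state constants).
    SETUP, TASKS, RESCUE, ALWAYS = range(4)

    def advance_lockstep(host_states, next_task, noop_task):
        for state in (SETUP, TASKS, RESCUE, ALWAYS):
            if any(v == state for v in host_states.values()):
                # hosts in the chosen state advance; the rest run a noop
                return [(h, next_task(h) if v == state else noop_task)
                        for h, v in host_states.items()]
        return [(h, None) for h in host_states]   # everyone is complete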
diff --git a/lib/ansible/template/__init__.py b/lib/ansible/template/__init__.py
index 1a1465139a..dc74d2cb56 100644
--- a/lib/ansible/template/__init__.py
+++ b/lib/ansible/template/__init__.py
@@ -22,6 +22,7 @@ __metaclass__ = type
import ast
import re
+from six import string_types
from jinja2 import Environment
from jinja2.loaders import FileSystemLoader
from jinja2.exceptions import TemplateSyntaxError, UndefinedError
@@ -30,14 +31,13 @@ from jinja2.runtime import StrictUndefined
from ansible import constants as C
from ansible.errors import AnsibleError, AnsibleFilterError, AnsibleUndefinedVariable
-from ansible.plugins import _basedirs, filter_loader, lookup_loader, test_loader
+from ansible.plugins import filter_loader, lookup_loader, test_loader
from ansible.template.safe_eval import safe_eval
from ansible.template.template import AnsibleJ2Template
from ansible.template.vars import AnsibleJ2Vars
from ansible.utils.debug import debug
from numbers import Number
-from types import NoneType
__all__ = ['Templar']
@@ -49,6 +49,65 @@ NON_TEMPLATED_TYPES = ( bool, Number )
JINJA2_OVERRIDE = '#jinja2:'
+def _escape_backslashes(data, jinja_env):
+ """Double backslashes within jinja2 expressions
+
+ A user may enter something like this in a playbook::
+
+ debug:
+ msg: "Test Case 1\\3; {{ test1_name | regex_replace('^(.*)_name$', '\\1')}}"
+
+    The string inside of the {{ }} gets interpreted multiple times: first by
+    yaml, then by python, and finally by jinja2 as part of its variable.
+    Because it is processed by both python and jinja2, backslash-escaped
+    characters get unescaped twice, which means we would normally have to
+    use four backslashes to escape one. This is painful for playbook
+    authors, as they have to remember different rules inside vs outside of
+    a jinja2 expression (the backslashes outside of the "{{ }}" only get
+    processed by yaml and python, so they only need to be escaped once).
+    The following code fixes this by automatically performing the extra
+    quoting of backslashes inside of a jinja2 expression.
+
+ """
+ if '\\' in data and '{{' in data:
+ new_data = []
+ d2 = jinja_env.preprocess(data)
+ in_var = False
+
+ for token in jinja_env.lex(d2):
+ if token[1] == 'variable_begin':
+ in_var = True
+ new_data.append(token[2])
+ elif token[1] == 'variable_end':
+ in_var = False
+ new_data.append(token[2])
+ elif in_var and token[1] == 'string':
+ # Double backslashes only if we're inside of a jinja2 variable
+ new_data.append(token[2].replace('\\','\\\\'))
+ else:
+ new_data.append(token[2])
+
+ data = ''.join(new_data)
+
+ return data
+
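A standalone illustration of the token walk above (requires jinja2); only string tokens between variable_begin and variable_end get their backslashes doubled:

    # Standalone sketch of _escape_backslashes' token walk (requires jinja2).
    from jinja2 import Environment

    env = Environment()
    data = "Test 1\\3; {{ test1_name | regex_replace('^(.*)_name$', '\\1') }}"
    out, in_var = [], False
    for _lineno, token_type, value in env.lex(env.preprocess(data)):
        if token_type == 'variable_begin':
            in_var = True
        elif token_type == 'variable_end':
            in_var = False
        elif in_var and token_type == 'string':
            value = value.replace('\\', '\\\\')   # double only inside {{ }}
        out.append(value)
    print(''.join(out))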
+def _count_newlines_from_end(in_str):
+ '''
+ Counts the number of newlines at the end of a string. This is used during
+ the jinja2 templating to ensure the count matches the input, since some newlines
+ may be thrown away during the templating.
+ '''
+
+ try:
+ i = len(in_str)
+        j = i - 1
+ while in_str[j] == '\n':
+ j -= 1
+ return i - 1 - j
+ except IndexError:
+ # Uncommon cases: zero length string and string containing only newlines
+ return i
+
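The count feeds the parity fixup performed at the end of _do_template, which amounts to:

    # Sketch of the newline-parity fixup, reusing _count_newlines_from_end.
    def restore_trailing_newlines(data, res):
        missing = _count_newlines_from_end(data) - _count_newlines_from_end(res)
        if missing > 0:
            res += '\n' * missing    # pad back newlines jinja2 threw away
        return res

    assert restore_trailing_newlines("x\n\n", "x") == "x\n\n"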
class Templar:
'''
The main class for templating, with the main entry-point of template().
@@ -89,21 +148,6 @@ class Templar:
self.SINGLE_VAR = re.compile(r"^%s\s*(\w*)\s*%s$" % (self.environment.variable_start_string, self.environment.variable_end_string))
- def _count_newlines_from_end(self, in_str):
- '''
- Counts the number of newlines at the end of a string. This is used during
- the jinja2 templating to ensure the count matches the input, since some newlines
- may be thrown away during the templating.
- '''
-
- i = len(in_str)
- while i > 0:
- if in_str[i-1] != '\n':
- break
- i -= 1
-
- return len(in_str) - i
-
def _get_filters(self):
'''
Returns filter plugins, after loading and caching them if need be
@@ -163,18 +207,28 @@ class Templar:
assert isinstance(variables, dict)
self._available_variables = variables.copy()
- def template(self, variable, convert_bare=False, preserve_trailing_newlines=False, fail_on_undefined=None, overrides=None, convert_data=True):
+ def template(self, variable, convert_bare=False, preserve_trailing_newlines=True, escape_backslashes=True, fail_on_undefined=None, overrides=None, convert_data=True):
'''
Templates (possibly recursively) any given data as input. If convert_bare is
set to True, the given data will be wrapped as a jinja2 variable ('{{foo}}')
before being sent through the template engine.
'''
+ # Don't template unsafe variables, instead drop them back down to
+ # their constituent type.
+ if hasattr(variable, '__UNSAFE__'):
+ if isinstance(variable, unicode):
+ return unicode(variable)
+ elif isinstance(variable, str):
+ return str(variable)
+ else:
+ return variable
+
try:
if convert_bare:
variable = self._convert_bare_variable(variable)
- if isinstance(variable, basestring):
+ if isinstance(variable, string_types):
result = variable
if self._contains_vars(variable):
@@ -188,10 +242,10 @@ class Templar:
resolved_val = self._available_variables[var_name]
if isinstance(resolved_val, NON_TEMPLATED_TYPES):
return resolved_val
- elif isinstance(resolved_val, NoneType):
+ elif resolved_val is None:
return C.DEFAULT_NULL_REPRESENTATION
- result = self._do_template(variable, preserve_trailing_newlines=preserve_trailing_newlines, fail_on_undefined=fail_on_undefined, overrides=overrides)
+ result = self._do_template(variable, preserve_trailing_newlines=preserve_trailing_newlines, escape_backslashes=escape_backslashes, fail_on_undefined=fail_on_undefined, overrides=overrides)
if convert_data:
# if this looks like a dictionary or list, convert it to such using the safe_eval method
@@ -207,11 +261,13 @@ class Templar:
return result
elif isinstance(variable, (list, tuple)):
- return [self.template(v, convert_bare=convert_bare, preserve_trailing_newlines=preserve_trailing_newlines, fail_on_undefined=fail_on_undefined, overrides=overrides) for v in variable]
+ return [self.template(v, preserve_trailing_newlines=preserve_trailing_newlines, fail_on_undefined=fail_on_undefined, overrides=overrides) for v in variable]
elif isinstance(variable, dict):
d = {}
- for (k, v) in variable.iteritems():
- d[k] = self.template(v, convert_bare=convert_bare, preserve_trailing_newlines=preserve_trailing_newlines, fail_on_undefined=fail_on_undefined, overrides=overrides)
+ # we don't use iteritems() here to avoid problems if the underlying dict
+ # changes sizes due to the templating, which can happen with hostvars
+ for k in variable.keys():
+ d[k] = self.template(variable[k], preserve_trailing_newlines=preserve_trailing_newlines, fail_on_undefined=fail_on_undefined, overrides=overrides)
return d
else:
return variable
@@ -234,7 +290,7 @@ class Templar:
in jinja2 variable braces so that it is evaluated properly.
'''
- if isinstance(variable, basestring):
+ if isinstance(variable, string_types):
contains_filters = "|" in variable
first_part = variable.split("|")[0].split(".")[0].split("[")[0]
if (contains_filters or first_part in self._available_variables) and self.environment.variable_start_string not in variable:
@@ -261,17 +317,24 @@ class Templar:
ran = instance.run(loop_terms, variables=self._available_variables, **kwargs)
except (AnsibleUndefinedVariable, UndefinedError) as e:
raise AnsibleUndefinedVariable(e)
- except Exception, e:
+ except Exception as e:
if self._fail_on_lookup_errors:
raise
ran = None
+
if ran:
- ran = ",".join(ran)
+ from ansible.vars.unsafe_proxy import UnsafeProxy
+ ran = UnsafeProxy(",".join(ran))
+
return ran
else:
raise AnsibleError("lookup plugin (%s) not found" % name)
- def _do_template(self, data, preserve_trailing_newlines=False, fail_on_undefined=None, overrides=None):
+ def _do_template(self, data, preserve_trailing_newlines=True, escape_backslashes=True, fail_on_undefined=None, overrides=None):
+
+ # For preserving the number of input newlines in the output (used
+ # later in this method)
+ data_newlines = _count_newlines_from_end(data)
if fail_on_undefined is None:
fail_on_undefined = self._fail_on_undefined_errors
@@ -297,11 +360,16 @@ class Templar:
myenv.filters.update(self._get_filters())
myenv.tests.update(self._get_tests())
+ if escape_backslashes:
+ # Allow users to specify backslashes in playbooks as "\\"
+ # instead of as "\\\\".
+ data = _escape_backslashes(data, myenv)
+
try:
t = myenv.from_string(data)
- except TemplateSyntaxError, e:
+ except TemplateSyntaxError as e:
raise AnsibleError("template error while templating string: %s" % str(e))
- except Exception, e:
+ except Exception as e:
if 'recursion' in str(e):
raise AnsibleError("recursive loop detected in template string: %s" % data)
else:
@@ -317,7 +385,7 @@ class Templar:
try:
res = j2_concat(rf)
- except TypeError, te:
+ except TypeError as te:
if 'StrictUndefined' in str(te):
raise AnsibleUndefinedVariable(
"Unable to look up a name or access an attribute in template string. " + \
@@ -332,13 +400,20 @@ class Templar:
# characters at the end of the input data, so we
# calculate the difference in newlines and append them
# to the resulting output for parity
- res_newlines = self._count_newlines_from_end(res)
- data_newlines = self._count_newlines_from_end(data)
+ #
+ # jinja2 added a keep_trailing_newline option in 2.7 when
+ # creating an Environment. That would let us make this code
+ # better (remove a single newline if
+ # preserve_trailing_newlines is False). Once we can depend on
+ # that version being present, modify our code to set that when
+ # initializing self.environment and remove a single trailing
+            # newline here if preserve_trailing_newlines is False.
+ res_newlines = _count_newlines_from_end(res)
if data_newlines > res_newlines:
res += '\n' * (data_newlines - res_newlines)
return res
- except (UndefinedError, AnsibleUndefinedVariable), e:
+ except (UndefinedError, AnsibleUndefinedVariable) as e:
if fail_on_undefined:
raise AnsibleUndefinedVariable(e)
else:
diff --git a/lib/ansible/template/safe_eval.py b/lib/ansible/template/safe_eval.py
index 5e2d1e1fe3..8e06512d12 100644
--- a/lib/ansible/template/safe_eval.py
+++ b/lib/ansible/template/safe_eval.py
@@ -20,6 +20,7 @@ __metaclass__ = type
import ast
import sys
+from six import string_types
from six.moves import builtins
from ansible import constants as C
@@ -66,13 +67,21 @@ def safe_eval(expr, locals={}, include_exceptions=False):
)
# AST node types were expanded after 2.6
- if not sys.version.startswith('2.6'):
- SAFE_NODES.union(
+ if sys.version_info[:2] >= (2, 7):
+ SAFE_NODES.update(
set(
(ast.Set,)
)
)
+ # And in Python 3.4 too
+ if sys.version_info[:2] >= (3, 4):
+ SAFE_NODES.update(
+ set(
+ (ast.NameConstant,)
+ )
+ )
+
filter_list = []
for filter in filter_loader.all():
filter_list.extend(filter.filters().keys())
@@ -96,7 +105,7 @@ def safe_eval(expr, locals={}, include_exceptions=False):
for child_node in ast.iter_child_nodes(node):
self.generic_visit(child_node, inside_call)
- if not isinstance(expr, basestring):
+ if not isinstance(expr, string_types):
# already templated to a datastructure, perhaps?
if include_exceptions:
return (expr, None)
@@ -107,7 +116,7 @@ def safe_eval(expr, locals={}, include_exceptions=False):
parsed_tree = ast.parse(expr, mode='eval')
cnv.visit(parsed_tree)
compiled = compile(parsed_tree, expr, 'eval')
- result = eval(compiled, {}, locals)
+ result = eval(compiled, {}, dict(locals))
if include_exceptions:
return (result, None)
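The version gating above only widens an AST whitelist; the heart of safe_eval is refusing to evaluate any expression whose tree contains a node type outside that whitelist. A toy version of the idea (modern-Python node names, not the module's actual SAFE_NODES):

    # Toy whitelist evaluator: parse, verify every node type, then eval.
    import ast

    ALLOWED = (ast.Expression, ast.Constant, ast.List, ast.Tuple, ast.Dict,
               ast.UnaryOp, ast.USub, ast.Load)

    def tiny_safe_eval(expr):
        tree = ast.parse(expr, mode='eval')
        for node in ast.walk(tree):
            if not isinstance(node, ALLOWED):
                raise ValueError("invalid expression: %s" % type(node).__name__)
        return eval(compile(tree, '<expr>', 'eval'), {}, {})

    tiny_safe_eval("[1, -2, {'a': 3}]")    # ok
    # tiny_safe_eval("__import__('os')")   # ValueError: invalid expression: Call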
diff --git a/lib/ansible/template/vars.py b/lib/ansible/template/vars.py
index 8f9af9506b..08a5ce1a8e 100644
--- a/lib/ansible/template/vars.py
+++ b/lib/ansible/template/vars.py
@@ -19,6 +19,7 @@
from __future__ import (absolute_import, division, print_function)
__metaclass__ = type
+from six import iteritems
from jinja2.utils import missing
__all__ = ['AnsibleJ2Vars']
@@ -46,7 +47,7 @@ class AnsibleJ2Vars:
self._extras = extras
self._locals = dict()
if isinstance(locals, dict):
- for key, val in locals.iteritems():
+ for key, val in iteritems(locals):
if key[:2] == 'l_' and val is not missing:
self._locals[key[2:]] = val
diff --git a/lib/ansible/utils/listify.py b/lib/ansible/utils/listify.py
index 1be5f6da6e..63bd84df5a 100644
--- a/lib/ansible/utils/listify.py
+++ b/lib/ansible/utils/listify.py
@@ -20,6 +20,9 @@ from __future__ import (absolute_import, division, print_function)
__metaclass__ = type
from collections import Iterable
+
+from six import string_types
+
from ansible.template import Templar
from ansible.template.safe_eval import safe_eval
@@ -28,14 +31,14 @@ __all__ = ['listify_lookup_plugin_terms']
#FIXME: probably just move this into lookup plugin base class
def listify_lookup_plugin_terms(terms, templar, loader, fail_on_undefined=False, convert_bare=True):
- if isinstance(terms, basestring):
+ if isinstance(terms, string_types):
stripped = terms.strip()
#FIXME: warn/deprecation on bare vars in with_ so we can eventually remove fail on undefined override
terms = templar.template(terms, convert_bare=convert_bare, fail_on_undefined=fail_on_undefined)
else:
terms = templar.template(terms, fail_on_undefined=fail_on_undefined)
- if isinstance(terms, basestring) or not isinstance(terms, Iterable):
+ if isinstance(terms, string_types) or not isinstance(terms, Iterable):
terms = [ terms ]
return terms
diff --git a/lib/ansible/utils/module_docs.py b/lib/ansible/utils/module_docs.py
index f1a2bbbd1a..ebb1f52779 100644..100755
--- a/lib/ansible/utils/module_docs.py
+++ b/lib/ansible/utils/module_docs.py
@@ -26,6 +26,12 @@ import traceback
from collections import MutableMapping, MutableSet, MutableSequence
from ansible.plugins import fragment_loader
+try:
+ from __main__ import display
+except ImportError:
+ from ansible.utils.display import Display
+ display = Display()
+
# modules that are ok that they do not have documentation strings
BLACKLIST_MODULES = [
'async_wrapper', 'accelerate', 'async_status'
@@ -56,7 +62,8 @@ def get_docstring(filename, verbose=False):
try:
theid = t.id
except AttributeError as e:
- continue #TODO: should log these to figure out why this happens
+                        # skip this child node; such errors can happen when parsing normal code
+ continue
if 'DOCUMENTATION' in theid:
doc = yaml.safe_load(child.value.s)
@@ -110,9 +117,8 @@ def get_docstring(filename, verbose=False):
elif 'RETURN' in theid:
returndocs = child.value.s[1:]
except:
- traceback.print_exc() # temp
+ display.error("unable to parse %s" % filename)
if verbose == True:
- traceback.print_exc()
- print "unable to parse %s" % filename
+ raise
return doc, plainexamples, returndocs
diff --git a/lib/ansible/utils/module_docs_fragments/cloudstack.py b/lib/ansible/utils/module_docs_fragments/cloudstack.py
index bafb7b4c15..3826f7aba2 100644
--- a/lib/ansible/utils/module_docs_fragments/cloudstack.py
+++ b/lib/ansible/utils/module_docs_fragments/cloudstack.py
@@ -48,18 +48,25 @@ options:
- HTTP timeout.
required: false
default: 10
+ api_region:
+ description:
+    - Name of the ini section in the C(cloudstack.ini) file.
+ required: false
+ default: cloudstack
requirements:
- "python >= 2.6"
- - cs
+ - "cs >= 0.6.10"
notes:
- Ansible uses the C(cs) library's configuration method if credentials are not
- provided by the options C(api_url), C(api_key), C(api_secret).
+ provided by the arguments C(api_url), C(api_key), C(api_secret).
Configuration is read from several locations, in the following order.
- The C(CLOUDSTACK_ENDPOINT), C(CLOUDSTACK_KEY), C(CLOUDSTACK_SECRET),
C(CLOUDSTACK_METHOD) and C(CLOUDSTACK_TIMEOUT) environment variables.
- A C(CLOUDSTACK_CONFIG) environment variable pointing to an C(.ini) file,
- A C(cloudstack.ini) file in the current working directory.
- A C(.cloudstack.ini) file in the user's home directory.
- See https://github.com/exoscale/cs for more information.
+ Optionally multiple credentials and endpoints can be specified using ini sections in C(cloudstack.ini).
+ Use the argument C(api_region) to select the section name, default section is C(cloudstack).
+ See https://github.com/exoscale/cs for more information.
- This module supports check mode.
'''
diff --git a/lib/ansible/utils/module_docs_fragments/openstack.py b/lib/ansible/utils/module_docs_fragments/openstack.py
index 84322078ec..be314411d6 100644
--- a/lib/ansible/utils/module_docs_fragments/openstack.py
+++ b/lib/ansible/utils/module_docs_fragments/openstack.py
@@ -102,7 +102,7 @@ requirements:
- shade
notes:
- The standard OpenStack environment variables, such as C(OS_USERNAME)
- may be user instead of providing explicit values.
+ may be used instead of providing explicit values.
- Auth information is driven by os-client-config, which means that values
can come from a yaml config file in /etc/ansible/openstack.yaml,
/etc/openstack/clouds.yaml or ~/.config/openstack/clouds.yaml, then from
diff --git a/lib/ansible/utils/path.py b/lib/ansible/utils/path.py
index b271e7ed4b..ffac578243 100644
--- a/lib/ansible/utils/path.py
+++ b/lib/ansible/utils/path.py
@@ -22,11 +22,7 @@ import stat
from time import sleep
from errno import EEXIST
-__all__ = ['is_executable', 'unfrackpath']
-
-def is_executable(path):
- '''is the given path executable?'''
- return (stat.S_IXUSR & os.stat(path)[stat.ST_MODE] or stat.S_IXGRP & os.stat(path)[stat.ST_MODE] or stat.S_IXOTH & os.stat(path)[stat.ST_MODE])
+__all__ = ['unfrackpath']
def unfrackpath(path):
'''
@@ -45,6 +41,6 @@ def makedirs_safe(path, mode=None):
os.makedirs(path, mode)
else:
os.makedirs(path)
- except OSError, e:
+ except OSError as e:
if e.errno != EEXIST:
raise
diff --git a/lib/ansible/utils/unicode.py b/lib/ansible/utils/unicode.py
index 2cff2e5e45..a63c1960e1 100644
--- a/lib/ansible/utils/unicode.py
+++ b/lib/ansible/utils/unicode.py
@@ -215,7 +215,7 @@ def to_bytes(obj, encoding='utf-8', errors='replace', nonstring=None):
return obj
elif nonstring == 'simplerepr':
try:
- simple = binary_type(obj)
+ simple = str(obj)
except UnicodeError:
try:
simple = obj.__str__()
diff --git a/lib/ansible/utils/vars.py b/lib/ansible/utils/vars.py
index bfbc9d1a82..c1de70a1ce 100644
--- a/lib/ansible/utils/vars.py
+++ b/lib/ansible/utils/vars.py
@@ -19,35 +19,66 @@
from __future__ import (absolute_import, division, print_function)
__metaclass__ = type
+import ast
+from json import JSONEncoder
+from collections import MutableMapping
+
+from six import iteritems, string_types
from ansible import constants as C
+from ansible.errors import AnsibleError
from ansible.parsing.splitter import parse_kv
from ansible.utils.unicode import to_unicode
+def _validate_mutable_mappings(a, b):
+ """
+ Internal convenience function to ensure arguments are MutableMappings
+
+ This checks that all arguments are MutableMappings or raises an error
+
+ :raises AnsibleError: if one of the arguments is not a MutableMapping
+ """
+
+ # If this becomes generally needed, change the signature to operate on
+ # a variable number of arguments instead.
+
+ if not (isinstance(a, MutableMapping) and isinstance(b, MutableMapping)):
+ raise AnsibleError("failed to combine variables, expected dicts but"
+ " got a '{0}' and a '{1}'".format(
+ a.__class__.__name__, b.__class__.__name__))
+
def combine_vars(a, b):
+ """
+    Return a combined copy of the two variable dictionaries, based on the configured hash behavior
+ """
if C.DEFAULT_HASH_BEHAVIOUR == "merge":
return merge_hash(a, b)
else:
- return dict(a.items() + b.items())
+ # HASH_BEHAVIOUR == 'replace'
+ _validate_mutable_mappings(a, b)
+ result = a.copy()
+ result.update(b)
+ return result
def merge_hash(a, b):
- ''' recursively merges hash b into a
- keys from b take precedence over keys from a '''
-
- result = {}
-
- for dicts in a, b:
- # next, iterate over b keys and values
- for k, v in dicts.iteritems():
- # if there's already such key in a
- # and that key contains dict
- if k in result and isinstance(result[k], dict):
- # merge those dicts recursively
- result[k] = merge_hash(a[k], v)
- else:
- # otherwise, just copy a value from b to a
- result[k] = v
+ """
+ Recursively merges hash b into a so that keys from b take precedence over keys from a
+ """
+
+ _validate_mutable_mappings(a, b)
+ result = a.copy()
+
+ # next, iterate over b keys and values
+ for k, v in iteritems(b):
+ # if there's already such key in a
+ # and that key contains a MutableMapping
+ if k in result and isinstance(result[k], MutableMapping):
+ # merge those dicts recursively
+ result[k] = merge_hash(result[k], v)
+ else:
+ # otherwise, just copy the value from b to a
+ result[k] = v
return result
@@ -66,3 +97,35 @@ def load_extra_vars(loader, options):
data = parse_kv(extra_vars_opt)
extra_vars = combine_vars(extra_vars, data)
return extra_vars
+
+def isidentifier(ident):
+ """
+    Determines if a string is a valid Python identifier using the ast module.
+    Originally posted at: http://stackoverflow.com/a/29586366
+ """
+
+ if not isinstance(ident, string_types):
+ return False
+
+ try:
+ root = ast.parse(ident)
+ except SyntaxError:
+ return False
+
+ if not isinstance(root, ast.Module):
+ return False
+
+ if len(root.body) != 1:
+ return False
+
+ if not isinstance(root.body[0], ast.Expr):
+ return False
+
+ if not isinstance(root.body[0].value, ast.Name):
+ return False
+
+ if root.body[0].value.id != ident:
+ return False
+
+ return True
+
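Given those checks, only a bare name whose ast.Name id round-trips exactly passes; the expected behaviour:

    assert isidentifier("role_path")     # a single bare ast.Name
    assert not isidentifier("foo.bar")   # parses to an Attribute, not a Name
    assert not isidentifier("class")     # keyword: fails to parse
    assert not isidentifier("foo bar")   # syntax error
    assert not isidentifier(42)          # not a string at all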
diff --git a/lib/ansible/vars/__init__.py b/lib/ansible/vars/__init__.py
index 27f7221252..d31fd43581 100644
--- a/lib/ansible/vars/__init__.py
+++ b/lib/ansible/vars/__init__.py
@@ -33,27 +33,50 @@ except ImportError:
from ansible import constants as C
from ansible.cli import CLI
-from ansible.errors import *
+from ansible.errors import AnsibleError
+from ansible.inventory.host import Host
from ansible.parsing import DataLoader
from ansible.plugins.cache import FactCache
from ansible.template import Templar
from ansible.utils.debug import debug
+from ansible.utils.vars import combine_vars
from ansible.vars.hostvars import HostVars
+from ansible.vars.unsafe_proxy import UnsafeProxy
CACHED_VARS = dict()
+def preprocess_vars(a):
+ '''
+    Ensures that the vars contained in the parameter passed in are
+    returned as a list of dictionaries, so that, for instance, vars
+    loaded from a file conform to an expected state.
+ '''
+
+ if a is None:
+ return None
+ elif not isinstance(a, list):
+ data = [ a ]
+ else:
+ data = a
+
+ for item in data:
+ if not isinstance(item, MutableMapping):
+ raise AnsibleError("variable files must contain either a dictionary of variables, or a list of dictionaries. Got: %s (%s)" % (a, type(a)))
+
+ return data
+
class VariableManager:
def __init__(self):
- self._fact_cache = FactCache()
- self._vars_cache = defaultdict(dict)
- self._extra_vars = defaultdict(dict)
- self._host_vars_files = defaultdict(dict)
+ self._fact_cache = FactCache()
+ self._nonpersistent_fact_cache = defaultdict(dict)
+ self._vars_cache = defaultdict(dict)
+ self._extra_vars = defaultdict(dict)
+ self._host_vars_files = defaultdict(dict)
self._group_vars_files = defaultdict(dict)
- self._inventory = None
-
- self._omit_token = '__omit_place_holder__%s' % sha1(os.urandom(64)).hexdigest()
+ self._inventory = None
+ self._omit_token = '__omit_place_holder__%s' % sha1(os.urandom(64)).hexdigest()
def _get_cache_entry(self, play=None, host=None, task=None):
play_id = "NONE"
@@ -104,50 +127,8 @@ class VariableManager:
return data
- def _validate_both_dicts(self, a, b):
- '''
- Validates that both arguments are dictionaries, or an error is raised.
- '''
- if not (isinstance(a, MutableMapping) and isinstance(b, MutableMapping)):
- raise AnsibleError("failed to combine variables, expected dicts but got a '%s' and a '%s'" % (type(a).__name__, type(b).__name__))
- def _combine_vars(self, a, b):
- '''
- Combines dictionaries of variables, based on the hash behavior
- '''
-
- self._validate_both_dicts(a, b)
-
- if C.DEFAULT_HASH_BEHAVIOUR == "merge":
- return self._merge_dicts(a, b)
- else:
- return dict(a.items() + b.items())
-
- def _merge_dicts(self, a, b):
- '''
- Recursively merges dict b into a, so that keys
- from b take precedence over keys from a.
- '''
-
- result = dict()
-
- self._validate_both_dicts(a, b)
-
- for dicts in a, b:
- # next, iterate over b keys and values
- for k, v in dicts.iteritems():
- # if there's already such key in a
- # and that key contains dict
- if k in result and isinstance(result[k], dict):
- # merge those dicts recursively
- result[k] = self._merge_dicts(a[k], v)
- else:
- # otherwise, just copy a value from b to a
- result[k] = v
-
- return result
-
- def get_vars(self, loader, play=None, host=None, task=None, include_hostvars=True, use_cache=True):
+ def get_vars(self, loader, play=None, host=None, task=None, include_hostvars=True, include_delegate_to=True, use_cache=True):
'''
Returns the variables, with optional "context" given via the parameters
for the play, host, and task (which could possibly result in different
@@ -159,11 +140,11 @@ class VariableManager:
- host_vars_files[host] (if there is a host context)
- host->get_vars (if there is a host context)
- fact_cache[host] (if there is a host context)
- - vars_cache[host] (if there is a host context)
- play vars (if there is a play context)
- play vars_files (if there's no host context, ignore
file names that cannot be templated)
- task->get_vars (if there is a task context)
+ - vars_cache[host] (if there is a host context)
- extra vars
'''
@@ -179,49 +160,64 @@ class VariableManager:
# first we compile any vars specified in defaults/main.yml
# for all roles within the specified play
for role in play.get_roles():
- all_vars = self._combine_vars(all_vars, role.get_default_vars())
+ all_vars = combine_vars(all_vars, role.get_default_vars())
+
+ # if we have a task in this context, and that task has a role, make
+ # sure it sees its defaults above any other roles, as we previously
+            # (v1) made sure each task had a copy of its role's default vars
+ if task and task._role is not None:
+ all_vars = combine_vars(all_vars, task._role.get_default_vars())
if host:
# next, if a host is specified, we load any vars from group_vars
# files and then any vars from host_vars files which may apply to
# this host or the groups it belongs to
- # we merge in the special 'all' group_vars first, if they exist
+ # we merge in vars from groups specified in the inventory (INI or script)
+ all_vars = combine_vars(all_vars, host.get_group_vars())
+
+            # then we merge in the special 'all' group_vars, if they exist
if 'all' in self._group_vars_files:
- data = self._preprocess_vars(self._group_vars_files['all'])
+ data = preprocess_vars(self._group_vars_files['all'])
for item in data:
- all_vars = self._combine_vars(all_vars, item)
+ all_vars = combine_vars(all_vars, item)
for group in host.get_groups():
- all_vars = self._combine_vars(all_vars, group.get_vars())
if group.name in self._group_vars_files and group.name != 'all':
- data = self._preprocess_vars(self._group_vars_files[group.name])
- for item in data:
- all_vars = self._combine_vars(all_vars, item)
+ for data in self._group_vars_files[group.name]:
+ data = preprocess_vars(data)
+ for item in data:
+ all_vars = combine_vars(all_vars, item)
+ # then we merge in vars from the host specified in the inventory (INI or script)
+ all_vars = combine_vars(all_vars, host.get_vars())
+
+ # then we merge in the host_vars/<hostname> file, if it exists
host_name = host.get_name()
if host_name in self._host_vars_files:
- data = self._preprocess_vars(self._host_vars_files[host_name])
- for item in data:
- all_vars = self._combine_vars(all_vars, self._host_vars_files[host_name])
-
- # then we merge in vars specified for this host
- all_vars = self._combine_vars(all_vars, host.get_vars())
+ for data in self._host_vars_files[host_name]:
+ data = preprocess_vars(data)
+ for item in data:
+ all_vars = combine_vars(all_vars, item)
- # next comes the facts cache and the vars cache, respectively
+            # finally, the facts cache for this host, if it exists
try:
- all_vars = self._combine_vars(all_vars, self._fact_cache.get(host.name, dict()))
+ host_facts = self._fact_cache.get(host.name, dict())
+ for k in host_facts.keys():
+ if host_facts[k] is not None and not isinstance(host_facts[k], UnsafeProxy):
+ host_facts[k] = UnsafeProxy(host_facts[k])
+ all_vars = combine_vars(all_vars, host_facts)
except KeyError:
pass
if play:
- all_vars = self._combine_vars(all_vars, play.get_vars())
+ all_vars = combine_vars(all_vars, play.get_vars())
for vars_file_item in play.get_vars_files():
try:
# create a set of temporary vars here, which incorporate the
# extra vars so we can properly template the vars_files entries
- temp_vars = self._combine_vars(all_vars, self._extra_vars)
+ temp_vars = combine_vars(all_vars, self._extra_vars)
templar = Templar(loader=loader, variables=temp_vars)
# we assume each item in the list is itself a list, as we
@@ -235,29 +231,30 @@ class VariableManager:
# as soon as we read one from the list. If none are found, we
# raise an error, which is silently ignored at this point.
for vars_file in vars_file_list:
- data = self._preprocess_vars(loader.load_from_file(vars_file))
+ data = preprocess_vars(loader.load_from_file(vars_file))
if data is not None:
for item in data:
- all_vars = self._combine_vars(all_vars, item)
+ all_vars = combine_vars(all_vars, item)
break
else:
raise AnsibleError("vars file %s was not found" % vars_file_item)
- except UndefinedError, e:
+ except UndefinedError:
continue
if not C.DEFAULT_PRIVATE_ROLE_VARS:
for role in play.get_roles():
- all_vars = self._combine_vars(all_vars, role.get_vars())
+ all_vars = combine_vars(all_vars, role.get_vars())
if task:
if task._role:
- all_vars = self._combine_vars(all_vars, task._role.get_vars())
- all_vars = self._combine_vars(all_vars, task.get_vars())
+ all_vars = combine_vars(all_vars, task._role.get_vars())
+ all_vars = combine_vars(all_vars, task.get_vars())
if host:
- all_vars = self._combine_vars(all_vars, self._vars_cache.get(host.get_name(), dict()))
+ all_vars = combine_vars(all_vars, self._vars_cache.get(host.get_name(), dict()))
+ all_vars = combine_vars(all_vars, self._nonpersistent_fact_cache.get(host.name, dict()))
- all_vars = self._combine_vars(all_vars, self._extra_vars)
+ all_vars = combine_vars(all_vars, self._extra_vars)
# FIXME: make sure all special vars are here
# Finally, we create special vars
@@ -265,10 +262,13 @@ class VariableManager:
all_vars['playbook_dir'] = loader.get_basedir()
if host:
- all_vars['groups'] = [group.name for group in host.get_groups()]
+ all_vars['group_names'] = [group.name for group in host.get_groups()]
if self._inventory is not None:
- all_vars['groups'] = self._inventory.groups_list()
+ all_vars['groups'] = dict()
+ for (group_name, group) in self._inventory.groups.iteritems():
+ all_vars['groups'][group_name] = [h.name for h in group.get_hosts()]
+
if include_hostvars:
hostvars = HostVars(vars_manager=self, play=play, inventory=self._inventory, loader=loader)
all_vars['hostvars'] = hostvars
@@ -277,6 +277,38 @@ class VariableManager:
if task._role:
all_vars['role_path'] = task._role._role_path
+ # if we have a task and we're delegating to another host, figure out the
+ # variables for that host now so we don't have to rely on hostvars later
+ if task.delegate_to is not None and include_delegate_to:
+ # we unfortunately need to template the delegate_to field here,
+ # as we're fetching vars before post_validate has been called on
+ # the task that has been passed in
+ templar = Templar(loader=loader, variables=all_vars)
+ delegated_host_name = templar.template(task.delegate_to)
+
+ # now try to find the delegated-to host in inventory, or failing that,
+ # create a new host on the fly so we can fetch variables for it
+ delegated_host = None
+ if self._inventory is not None:
+ delegated_host = self._inventory.get_host(delegated_host_name)
+ # try looking it up based on the address field, and finally
+ # fall back to creating a host on the fly to use for the var lookup
+ if delegated_host is None:
+ for h in self._inventory.get_hosts(ignore_limits_and_restrictions=True):
+ # check if the address matches, or if both the delegated_to host
+ # and the current host are in the list of localhost aliases
+ if h.address == delegated_host_name or h.name in C.LOCALHOST and delegated_host_name in C.LOCALHOST:
+ delegated_host = h
+ break
+ else:
+ delegated_host = Host(name=delegated_host_name)
+ else:
+ delegated_host = Host(name=delegated_host_name)
+
+ # now we go fetch the vars for the delegated-to host and save them in our
+ # master dictionary of variables to be used later in the TaskExecutor/PlayContext
+ all_vars['ansible_delegated_vars'] = self.get_vars(loader=loader, play=play, host=delegated_host, task=task, include_delegate_to=False, include_hostvars=False)
+
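Condensed, the resolution above is a three-step fallback: exact inventory name, then an address or mutual-localhost match across all hosts, then a throwaway Host used purely for the variable lookup:

    # Condensed restatement of the delegate_to host resolution above.
    def resolve_delegated(inventory, name, localhost_aliases):
        if inventory is None:
            return Host(name=name)
        host = inventory.get_host(name)
        if host is None:
            for h in inventory.get_hosts(ignore_limits_and_restrictions=True):
                if h.address == name or (h.name in localhost_aliases and
                                         name in localhost_aliases):
                    return h
            host = Host(name=name)
        return host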
if self._inventory is not None:
all_vars['inventory_dir'] = self._inventory.basedir()
if play:
@@ -288,10 +320,8 @@ class VariableManager:
all_vars['play_hosts'] = host_list
all_vars['ansible_play_hosts'] = host_list
-
# the 'omit' value allows params to be left out if the variable they are based on is undefined
all_vars['omit'] = self._omit_token
-
all_vars['ansible_version'] = CLI.version_info(gitinfo=False)
if 'hostvars' in all_vars and host:
@@ -304,7 +334,7 @@ class VariableManager:
def _get_inventory_basename(self, path):
'''
- Returns the bsaename minus the extension of the given path, so the
+ Returns the basename minus the extension of the given path, so the
bare filename can be matched against host/group names later
'''
@@ -337,14 +367,14 @@ class VariableManager:
for p in paths:
_found, results = self._load_inventory_file(path=p, loader=loader)
if results is not None:
- data = self._combine_vars(data, results)
+ data = combine_vars(data, results)
else:
file_name, ext = os.path.splitext(path)
data = None
- if not ext:
- for ext in C.YAML_FILENAME_EXTENSIONS:
- new_path = path + ext
+ if not ext or ext not in C.YAML_FILENAME_EXTENSIONS:
+ for test_ext in C.YAML_FILENAME_EXTENSIONS:
+ new_path = path + test_ext
if loader.path_exists(new_path):
data = loader.load_from_file(new_path)
break
@@ -364,7 +394,9 @@ class VariableManager:
(name, data) = self._load_inventory_file(path, loader)
if data:
- self._host_vars_files[name] = data
+ if name not in self._host_vars_files:
+ self._host_vars_files[name] = []
+ self._host_vars_files[name].append(data)
return data
else:
return dict()
@@ -378,7 +410,9 @@ class VariableManager:
(name, data) = self._load_inventory_file(path, loader)
if data:
- self._group_vars_files[name] = data
+ if name not in self._group_vars_files:
+ self._group_vars_files[name] = []
+ self._group_vars_files[name].append(data)
return data
else:
return dict()
@@ -398,6 +432,21 @@ class VariableManager:
except KeyError:
self._fact_cache[host.name] = facts
+ def set_nonpersistent_facts(self, host, facts):
+ '''
+        Sets or updates the given facts for a host in the non-persistent fact cache.
+ '''
+
+ assert isinstance(facts, dict)
+
+ if host.name not in self._nonpersistent_fact_cache:
+ self._nonpersistent_fact_cache[host.name] = facts
+ else:
+ try:
+ self._nonpersistent_fact_cache[host.name].update(facts)
+ except KeyError:
+ self._nonpersistent_fact_cache[host.name] = facts
+
def set_host_variable(self, host, varname, value):
'''
Sets a value in the vars_cache for a host.
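Seen end to end, the get_vars rework keeps precedence as a chain of combine_vars calls in which later layers override earlier ones; schematically (layer names are illustrative placeholders, not actual attributes):

    # Schematic of get_vars precedence; each layer is combined on top of
    # the accumulated result, so later layers win.
    all_vars = {}
    for layer in (role_defaults, inventory_group_vars, group_vars_files,
                  inventory_host_vars, host_vars_files, host_facts,
                  play_vars, role_vars, task_vars, vars_cache, extra_vars):
        all_vars = combine_vars(all_vars, layer)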
diff --git a/lib/ansible/vars/hostvars.py b/lib/ansible/vars/hostvars.py
index 39c6dfa26a..abacfd31bf 100644
--- a/lib/ansible/vars/hostvars.py
+++ b/lib/ansible/vars/hostvars.py
@@ -43,7 +43,7 @@ class HostVars(collections.Mapping):
# in inventory
restriction = inventory._restriction
inventory.remove_restriction()
- hosts = inventory.get_hosts()
+ hosts = inventory.get_hosts(ignore_limits_and_restrictions=True)
inventory.restrict_to_hosts(restriction)
# check to see if localhost is in the hosts list, as we
@@ -62,7 +62,7 @@ class HostVars(collections.Mapping):
new_host = Host(name='localhost')
new_host.set_variable("ansible_python_interpreter", sys.executable)
new_host.set_variable("ansible_connection", "local")
- new_host.ipv4_address = '127.0.0.1'
+ new_host.address = '127.0.0.1'
hosts.append(new_host)
for host in hosts:
diff --git a/lib/ansible/vars/unsafe_proxy.py b/lib/ansible/vars/unsafe_proxy.py
new file mode 100644
index 0000000000..a69bfb262d
--- /dev/null
+++ b/lib/ansible/vars/unsafe_proxy.py
@@ -0,0 +1,151 @@
+# PYTHON SOFTWARE FOUNDATION LICENSE VERSION 2
+# --------------------------------------------
+#
+# 1. This LICENSE AGREEMENT is between the Python Software Foundation
+# ("PSF"), and the Individual or Organization ("Licensee") accessing and
+# otherwise using this software ("Python") in source or binary form and
+# its associated documentation.
+#
+# 2. Subject to the terms and conditions of this License Agreement, PSF hereby
+# grants Licensee a nonexclusive, royalty-free, world-wide license to reproduce,
+# analyze, test, perform and/or display publicly, prepare derivative works,
+# distribute, and otherwise use Python alone or in any derivative version,
+# provided, however, that PSF's License Agreement and PSF's notice of copyright,
+# i.e., "Copyright (c) 2001, 2002, 2003, 2004, 2005, 2006, 2007, 2008, 2009, 2010,
+# 2011, 2012, 2013, 2014 Python Software Foundation; All Rights Reserved" are
+# retained in Python alone or in any derivative version prepared by Licensee.
+#
+# 3. In the event Licensee prepares a derivative work that is based on
+# or incorporates Python or any part thereof, and wants to make
+# the derivative work available to others as provided herein, then
+# Licensee hereby agrees to include in any such work a brief summary of
+# the changes made to Python.
+#
+# 4. PSF is making Python available to Licensee on an "AS IS"
+# basis. PSF MAKES NO REPRESENTATIONS OR WARRANTIES, EXPRESS OR
+# IMPLIED. BY WAY OF EXAMPLE, BUT NOT LIMITATION, PSF MAKES NO AND
+# DISCLAIMS ANY REPRESENTATION OR WARRANTY OF MERCHANTABILITY OR FITNESS
+# FOR ANY PARTICULAR PURPOSE OR THAT THE USE OF PYTHON WILL NOT
+# INFRINGE ANY THIRD PARTY RIGHTS.
+#
+# 5. PSF SHALL NOT BE LIABLE TO LICENSEE OR ANY OTHER USERS OF PYTHON
+# FOR ANY INCIDENTAL, SPECIAL, OR CONSEQUENTIAL DAMAGES OR LOSS AS
+# A RESULT OF MODIFYING, DISTRIBUTING, OR OTHERWISE USING PYTHON,
+# OR ANY DERIVATIVE THEREOF, EVEN IF ADVISED OF THE POSSIBILITY THEREOF.
+#
+# 6. This License Agreement will automatically terminate upon a material
+# breach of its terms and conditions.
+#
+# 7. Nothing in this License Agreement shall be deemed to create any
+# relationship of agency, partnership, or joint venture between PSF and
+# Licensee. This License Agreement does not grant permission to use PSF
+# trademarks or trade name in a trademark sense to endorse or promote
+# products or services of Licensee, or any third party.
+#
+# 8. By copying, installing or otherwise using Python, Licensee
+# agrees to be bound by the terms and conditions of this License
+# Agreement.
+#
+# Original Python Recipe for Proxy:
+# http://code.activestate.com/recipes/496741-object-proxying/
+# Author: Tomer Filiba
+
+class UnsafeProxy(object):
+ __slots__ = ["_obj", "__weakref__"]
+ def __init__(self, obj):
+ object.__setattr__(self, "_obj", obj)
+
+ #
+ # proxying (special cases)
+ #
+ def __getattribute__(self, name):
+ if name == '_obj':
+ return object.__getattribute__(self, "_obj")
+ elif name == '__reduce_ex__':
+ return object.__getattribute__(self, "__reduce_ex__")
+ elif name == '__UNSAFE__':
+ return True
+ else:
+ return getattr(object.__getattribute__(self, "_obj"), name)
+
+ def __eq__(self, obj):
+ '''
+        special handling for ==, since int objects do not define it;
+        trying to guess whether we should or should not override
+        object.__eq__ with the wrapped class's version causes problems
+ '''
+ return object.__getattribute__(self, "_obj") == obj
+
+ def __delattr__(self, name):
+ delattr(object.__getattribute__(self, "_obj"), name)
+ def __setattr__(self, name, value):
+ setattr(object.__getattribute__(self, "_obj"), name, value)
+
+ def __nonzero__(self):
+ return bool(object.__getattribute__(self, "_obj"))
+ def __str__(self):
+ return str(object.__getattribute__(self, "_obj"))
+ def __unicode__(self):
+ return unicode(object.__getattribute__(self, "_obj"))
+ def __repr__(self):
+ return repr(object.__getattribute__(self, "_obj"))
+
+ def __reduce_ex__(self, protocol):
+ return (UnsafeProxy, (self._obj,))
+
+ #
+ # factories
+ #
+ _special_names = [
+ '__abs__', '__add__', '__and__', '__call__', '__cmp__', '__coerce__',
+ '__contains__', '__delitem__', '__delslice__', '__div__', '__divmod__',
+ '__eq__', '__float__', '__floordiv__', '__ge__', '__getitem__',
+ '__getslice__', '__gt__', '__hash__', '__hex__', '__iadd__', '__iand__',
+ '__idiv__', '__idivmod__', '__ifloordiv__', '__ilshift__', '__imod__',
+ '__imul__', '__int__', '__invert__', '__ior__', '__ipow__', '__irshift__',
+ '__isub__', '__iter__', '__itruediv__', '__ixor__', '__le__', '__len__',
+ '__long__', '__lshift__', '__lt__', '__mod__', '__mul__', '__ne__',
+ '__neg__', '__oct__', '__or__', '__pos__', '__pow__', '__radd__', '__rand__',
+ '__rdiv__', '__rdivmod__', '__repr__', '__reversed__', '__rfloordiv__',
+ '__rlshift__', '__rmod__', '__rmul__', '__ror__', '__rpow__', '__rrshift__',
+ '__rshift__', '__rsub__', '__rtruediv__', '__rxor__', '__setitem__',
+ '__setslice__', '__sub__', '__truediv__', '__xor__', 'next',
+ ]
+
+ @classmethod
+ def _create_class_proxy(cls, theclass):
+ """creates a proxy for the given class"""
+
+ def make_method(name):
+ def method(self, *args, **kw):
+ return getattr(object.__getattribute__(self, "_obj"), name)(*args, **kw)
+ return method
+
+ namespace = {}
+ for name in cls._special_names:
+ if hasattr(theclass, name) and not hasattr(cls, name):
+ namespace[name] = make_method(name)
+ return type("%s(%s)" % (cls.__name__, theclass.__name__), (cls,), namespace)
+
+ def __new__(cls, obj, *args, **kwargs):
+ """
+        creates a proxy instance referencing `obj`. (obj, *args, **kwargs) are
+ passed to this class' __init__, so deriving classes can define an
+ __init__ method of their own.
+ note: _class_proxy_cache is unique per deriving class (each deriving
+ class must hold its own cache)
+ """
+ try:
+ cache = cls.__dict__["_class_proxy_cache"]
+ except KeyError:
+ cls._class_proxy_cache = cache = {}
+ try:
+ theclass = cache[obj.__class__]
+ except KeyError:
+ cache[obj.__class__] = theclass = cls._create_class_proxy(obj.__class__)
+ ins = object.__new__(theclass)
+ return ins
+
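The proxy's only contract with the templar is the __UNSAFE__ marker checked at the top of Templar.template(); a quick sketch:

    # Sketch of the UnsafeProxy contract: wrapped values are skipped by
    # Templar.template() but otherwise behave like the underlying object.
    val = UnsafeProxy("{{ injected }}")
    assert hasattr(val, '__UNSAFE__')         # the marker template() checks for
    assert val == "{{ injected }}"            # comparisons hit the wrapped str
    assert val.upper() == "{{ INJECTED }}"    # attribute access is delegated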
diff --git a/packaging/port/sysutils/ansible/Makefile b/packaging/port/sysutils/ansible/Makefile
index 607f2f4a47..10016f9908 100644
--- a/packaging/port/sysutils/ansible/Makefile
+++ b/packaging/port/sysutils/ansible/Makefile
@@ -1,27 +1,66 @@
# $FreeBSD$
-PORTNAME= ansible
-PORTVERSION= 1.0
-PORTREVISION= 1
-CATEGORIES= python net-mgmt
-MASTER_SITES= http://releases.ansible.com/ansible/
-DISTNAME= ${PORTNAME}-${PORTVERSION}
+PORTNAME= ansible
+PORTVERSION= 2.0
+PORTREVISION= 1
+CATEGORIES= python net-mgmt sysutils
+MASTER_SITES= http://releases.ansible.com/ansible/
+DISTNAME= ${PORTNAME}-${PORTVERSION}
-MAINTAINER= briancoca+ansible@gmail.com
-COMMENT= Ansible ssh based config management framework
+MAINTAINER= bcoca@ansible.com
+COMMENT= Ansible ssh based config management framework
-RUN_DEPENDS= python>2.5:${PORTSDIR}/lang/python ${PORTSDIR}/devel/py-Jinja2 ${PORTSDIR}/devel/py-yaml \
- ${PORTSDIR}/security/py-paramiko ${PORTSDIR}/devel/py-simplejson
+LICENSE= GPLv3
+LICENSE_FILE= ${WRKSRC}/COPYING
+
+RUN_DEPENDS= python>2.5:${PORTSDIR}/lang/python \
+ ${PORTSDIR}/devel/py-Jinja2 \
+ ${PORTSDIR}/devel/py-yaml \
+ ${PORTSDIR}/security/py-paramiko \
+ ${PORTSDIR}/devel/py-simplejson \
+ ${PORTSDIR}/security/py-pycrypto
+
+OPTIONS_DEFINE= EXAMPLES
FETCH_ARGS= -pRr
-USE_PYTHON= yes
+USE_PYTHON= yes
USE_PYDISTUTILS= yes
-#TODO:add optional dependencies (fireball mode i.e)
+CPE_VENDOR= Ansible Inc
+
+SUB_FILES= pkg-message
+
+PLIST_FILES+= man/man1/ansible-doc.1.gz \
+ man/man1/ansible-galaxy.1.gz \
+ man/man1/ansible-playbook.1.gz \
+ man/man1/ansible-pull.1.gz \
+ man/man1/ansible-vault.1.gz \
+ man/man1/ansible.1.gz
+
+PORTEXAMPLES= ansible.cfg hosts
+
+post-patch:
+.for f in bin/ansible-galaxy docs/man/man1/ansible-galaxy.1 \
+ docs/man/man1/ansible-playbook.1 docs/man/man1/ansible.1 \
+ examples/ansible.cfg examples/hosts lib/ansible/constants.py \
+ lib/ansible/module_utils/urls.py lib/ansible/modules/core/system/setup.py \
+ lib/ansible/playbook/__init__.py lib/ansible/runner/__init__.py
+ @${REINPLACE_CMD} -e 's|/etc/ansible|${ETCDIR}|' ${WRKSRC}/${f}
+.endfor
+
+.for f in docs/man/man1/ansible-playbook.1 docs/man/man1/ansible.1 \
+ examples/ansible.cfg lib/ansible/constants.py \
+	lib/ansible/playbook/__init__.py lib/ansible/runner/__init__.py
+ @${REINPLACE_CMD} -e 's|/usr/share/ansible|${DATADIR}|' ${WRKSRC}/${f}
+.endfor
-# extracts with github name + short hash, needs to be updated with new releases
-#pre-configure:
-# @${MV} ${WRKDIR}/ansible ${WRKSRC}
+post-install:
+ @${MKDIR} ${STAGEDIR}${MAN1PREFIX}/man/man1
+ ${INSTALL_MAN} ${WRKSRC}/docs/man/man1/*.1 \
+ ${STAGEDIR}${MAN1PREFIX}/man/man1
+ @${MKDIR} ${STAGEDIR}${EXAMPLESDIR}
+ ${INSTALL_DATA} ${WRKSRC}/examples/ansible.cfg ${STAGEDIR}${EXAMPLESDIR}
+ ${INSTALL_DATA} ${WRKSRC}/examples/hosts ${STAGEDIR}${EXAMPLESDIR}
.include <bsd.port.mk>
diff --git a/packaging/rpm/ansible.spec b/packaging/rpm/ansible.spec
index ddda6eeb79..cdfd413d49 100644
--- a/packaging/rpm/ansible.spec
+++ b/packaging/rpm/ansible.spec
@@ -93,6 +93,17 @@ are transferred to managed machines automatically.
%install
%{__python} setup.py install -O1 --prefix=%{_prefix} --root=%{buildroot}
+
+# Amazon Linux doesn't install to dist-packages but python_sitelib expands to
+# that location and the python interpreter expects things to be there.
+if expr x'%{python_sitelib}' : 'x.*dist-packages/\?' ; then
+ DEST_DIR='%{buildroot}%{python_sitelib}'
+ SOURCE_DIR=$(echo "$DEST_DIR" | sed 's/dist-packages/site-packages/g')
+ if test -d "$SOURCE_DIR" -a ! -d "$DEST_DIR" ; then
+ mv $SOURCE_DIR $DEST_DIR
+ fi
+fi
+
mkdir -p %{buildroot}/etc/ansible/
cp examples/hosts %{buildroot}/etc/ansible/
cp examples/ansible.cfg %{buildroot}/etc/ansible/
diff --git a/samples/include.yml b/samples/include.yml
index 121c4ce079..ae62b79bb0 100644
--- a/samples/include.yml
+++ b/samples/include.yml
@@ -2,5 +2,5 @@
tags:
- included
#- debug: msg="this is the second debug in the include"
-#- debug: msg="this is the third debug in the include, and a is still {{a}}"
+- debug: msg="this is the third debug in the include, and a is still {{a}}"
diff --git a/samples/test_block.yml b/samples/test_block.yml
index 25c9003082..626fef5e33 100644
--- a/samples/test_block.yml
+++ b/samples/test_block.yml
@@ -1,5 +1,4 @@
- hosts: all
- connection: local
gather_facts: yes
tasks:
- block:
diff --git a/samples/test_blocks_of_blocks.yml b/samples/test_blocks_of_blocks.yml
index 7933cb6183..5be603cc96 100644
--- a/samples/test_blocks_of_blocks.yml
+++ b/samples/test_blocks_of_blocks.yml
@@ -5,9 +5,13 @@
- block:
- block:
- block:
- - debug: msg="are we there yet?"
+ - debug: msg="are we there yet {{foo}}?"
always:
- debug: msg="a random always block"
- fail:
rescue:
- debug: msg="rescuing from the fail"
+ when: not skip_me|default(False)
+ - debug: msg="i am the last task"
+ vars:
+ foo: bar
diff --git a/setup.py b/setup.py
index 60c7d73ffc..7ada3f7f97 100644
--- a/setup.py
+++ b/setup.py
@@ -24,7 +24,7 @@ setup(name='ansible',
package_dir={ '': 'lib' },
packages=find_packages('lib'),
package_data={
- '': ['module_utils/*.ps1', 'modules/core/windows/*.ps1', 'modules/extras/windows/*.ps1'],
+ '': ['module_utils/*.ps1', 'modules/core/windows/*.ps1', 'modules/extras/windows/*.ps1', 'galaxy/data/*'],
},
classifiers=[
'Development Status :: 5 - Production/Stable',
diff --git a/test/code-smell/replace-urlopen.sh b/test/code-smell/replace-urlopen.sh
new file mode 100755
index 0000000000..410b2e565e
--- /dev/null
+++ b/test/code-smell/replace-urlopen.sh
@@ -0,0 +1,12 @@
+#!/bin/sh
+
+BASEDIR=${1-"."}
+
+URLLIB_USERS=$(find "$BASEDIR" -name '*.py' -exec grep -H urlopen \{\} \;)
+URLLIB_USERS=$(echo "$URLLIB_USERS" | sed '/\(\n\|lib\/ansible\/module_utils\/urls.py\)/d')
+if test -n "$URLLIB_USERS" ; then
+    printf '%s\n' "$URLLIB_USERS"
+ exit 1
+else
+ exit 0
+fi
diff --git a/test/integration/cleanup_ec2.py b/test/integration/cleanup_ec2.py
index 1935f0bdc1..559a5c0ae1 100644
--- a/test/integration/cleanup_ec2.py
+++ b/test/integration/cleanup_ec2.py
@@ -57,7 +57,7 @@ def delete_aws_eips(get_func, attr, opts):
try:
eip_log = open(opts.eip_log, 'r').read().splitlines()
except IOError:
- print opts.eip_log, 'not found.'
+ print('%s not found.' % opts.eip_log)
return
for item in get_func():
@@ -175,5 +175,5 @@ if __name__ == '__main__':
filters = {"tag:Name":opts.match_re.replace('^',''), "instance-state-name": ['running', 'pending', 'stopped' ]}
delete_aws_instances(aws.get_all_instances(filters=filters), opts)
- except KeyboardInterrupt, e:
- print "\nExiting on user command."
+ except KeyboardInterrupt as e:
+ print("\nExiting on user command.")
diff --git a/test/integration/cleanup_gce.py b/test/integration/cleanup_gce.py
index e0cf0bc043..c807ebb81a 100644
--- a/test/integration/cleanup_gce.py
+++ b/test/integration/cleanup_gce.py
@@ -73,5 +73,5 @@ if __name__ == '__main__':
delete_gce_resources(get_snapshots, 'name', opts)
# Delete matching disks
delete_gce_resources(gce.list_volumes, 'name', opts)
- except KeyboardInterrupt, e:
- print "\nExiting on user command."
+ except KeyboardInterrupt as e:
+ print("\nExiting on user command.")
diff --git a/test/integration/cleanup_rax.py b/test/integration/cleanup_rax.py
index f872e9458d..5c757f53c5 100644..100755
--- a/test/integration/cleanup_rax.py
+++ b/test/integration/cleanup_rax.py
@@ -54,8 +54,8 @@ def authenticate():
def prompt_and_delete(item, prompt, assumeyes):
if not assumeyes:
assumeyes = raw_input(prompt).lower() == 'y'
- assert (hasattr(item, 'delete') or hasattr(item, 'terminate'),
- "Class <%s> has no delete or terminate attribute" % item.__class__)
+ assert hasattr(item, 'delete') or hasattr(item, 'terminate'), \
+ "Class <%s> has no delete or terminate attribute" % item.__class__
if assumeyes:
if hasattr(item, 'delete'):
item.delete()
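The assert fix above is worth pausing on: the old version wrapped the condition and message in one set of parentheses, so Python saw a single two-element tuple, and a non-empty tuple is always truthy, meaning the assertion could never fail. Moving the message after a real boolean expression restores the check; a tiny demonstration:

    # broken: a non-empty tuple is always truthy, so this never raises,
    # even though the comparison inside it is false
    assert (1 == 2, "this message is never shown")

    # fixed: boolean expression first, message second -- this one fires
    try:
        assert 1 == 2, "now the assertion actually fails"
    except AssertionError as e:
        print(e)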
diff --git a/test/integration/cloudstack.yml b/test/integration/cloudstack.yml
index 486bd6e158..be36cc615a 100644
--- a/test/integration/cloudstack.yml
+++ b/test/integration/cloudstack.yml
@@ -7,6 +7,7 @@
vars:
cs_resource_prefix: "{{ resource_prefix | default('cs-') }}"
roles:
+ - { role: test_cs_user, tags: test_cs_user }
- { role: test_cs_project, tags: test_cs_project }
- { role: test_cs_iso, tags: test_cs_iso }
- { role: test_cs_domain, tags: test_cs_domain }
diff --git a/test/integration/consul_running.py b/test/integration/consul_running.py
index 9fdff9ef59..f64aaeecc3 100644
--- a/test/integration/consul_running.py
+++ b/test/integration/consul_running.py
@@ -6,6 +6,6 @@ if __name__ == '__main__':
import consul
consul = consul.Consul(host='0.0.0.0', port=8500)
consul.catalog.nodes()
- print "True"
+ print("True")
except:
pass
diff --git a/test/integration/galaxy_roles.yml b/test/integration/galaxy_roles.yml
index 76b385191c..5a1f8eba86 100644
--- a/test/integration/galaxy_roles.yml
+++ b/test/integration/galaxy_roles.yml
@@ -1,3 +1,7 @@
+# change these to some Ansible-owned test roles
+- src: briancoca.oracle_java7
+ name: oracle_java7
+
- src: git+http://bitbucket.org/willthames/git-ansible-galaxy
version: v1.4
diff --git a/test/integration/roles/test_cs_user/meta/main.yml b/test/integration/roles/test_cs_user/meta/main.yml
new file mode 100644
index 0000000000..03e38bd4f7
--- /dev/null
+++ b/test/integration/roles/test_cs_user/meta/main.yml
@@ -0,0 +1,3 @@
+---
+dependencies:
+ - test_cs_common
diff --git a/test/integration/roles/test_cs_user/tasks/main.yml b/test/integration/roles/test_cs_user/tasks/main.yml
new file mode 100644
index 0000000000..c5aaed8d58
--- /dev/null
+++ b/test/integration/roles/test_cs_user/tasks/main.yml
@@ -0,0 +1,276 @@
+---
+- name: setup
+ cs_user: username={{ cs_resource_prefix }}_user state=absent
+ register: user
+- name: verify setup
+ assert:
+ that:
+ - user|success
+
+- name: test fail if missing username
+ action: cs_user
+ register: user
+ ignore_errors: true
+- name: verify results of fail if missing params
+ assert:
+ that:
+ - user|failed
+ - 'user.msg == "missing required arguments: username"'
+
+- name: test fail if missing params if state=present
+ cs_user:
+ username: "{{ cs_resource_prefix }}_user"
+ register: user
+ ignore_errors: true
+- name: verify results of fail if missing params if state=present
+ assert:
+ that:
+ - user|failed
+ - 'user.msg == "missing required arguments: account,email,password,first_name,last_name"'
+
+- name: test create user
+ cs_user:
+ username: "{{ cs_resource_prefix }}_user"
+ password: "{{ cs_resource_prefix }}_password"
+ last_name: "{{ cs_resource_prefix }}_last_name"
+ first_name: "{{ cs_resource_prefix }}_first_name"
+ email: "{{ cs_resource_prefix }}@example.com"
+ account: "admin"
+ register: user
+- name: verify results of create user
+ assert:
+ that:
+ - user|success
+ - user|changed
+ - user.username == "{{ cs_resource_prefix }}_user"
+ - user.first_name == "{{ cs_resource_prefix }}_first_name"
+ - user.last_name == "{{ cs_resource_prefix }}_last_name"
+ - user.email == "{{ cs_resource_prefix }}@example.com"
+ - user.account_type == "root_admin"
+ - user.account == "admin"
+ - user.state == "enabled"
+ - user.domain == "ROOT"
+
+- name: test create user idempotence
+ cs_user:
+ username: "{{ cs_resource_prefix }}_user"
+ password: "{{ cs_resource_prefix }}_password"
+ last_name: "{{ cs_resource_prefix }}_last_name"
+ first_name: "{{ cs_resource_prefix }}_first_name"
+ email: "{{ cs_resource_prefix }}@example.com"
+ account: "admin"
+ register: user
+- name: verify results of create user idempotence
+ assert:
+ that:
+ - user|success
+ - not user|changed
+ - user.username == "{{ cs_resource_prefix }}_user"
+ - user.first_name == "{{ cs_resource_prefix }}_first_name"
+ - user.last_name == "{{ cs_resource_prefix }}_last_name"
+ - user.email == "{{ cs_resource_prefix }}@example.com"
+ - user.account_type == "root_admin"
+ - user.account == "admin"
+ - user.state == "enabled"
+ - user.domain == "ROOT"
+
+- name: test update user
+ cs_user:
+ username: "{{ cs_resource_prefix }}_user"
+ password: "{{ cs_resource_prefix }}_password"
+ last_name: "{{ cs_resource_prefix }}_last_name1"
+ first_name: "{{ cs_resource_prefix }}_first_name1"
+ email: "{{ cs_resource_prefix }}@example.com1"
+ account: "admin"
+ register: user
+- name: verify results of update user
+ assert:
+ that:
+ - user|success
+ - user|changed
+ - user.username == "{{ cs_resource_prefix }}_user"
+ - user.first_name == "{{ cs_resource_prefix }}_first_name1"
+ - user.last_name == "{{ cs_resource_prefix }}_last_name1"
+ - user.email == "{{ cs_resource_prefix }}@example.com1"
+ - user.account_type == "root_admin"
+ - user.account == "admin"
+ - user.state == "enabled"
+ - user.domain == "ROOT"
+
+- name: test update user idempotence
+ cs_user:
+ username: "{{ cs_resource_prefix }}_user"
+ password: "{{ cs_resource_prefix }}_password"
+ last_name: "{{ cs_resource_prefix }}_last_name1"
+ first_name: "{{ cs_resource_prefix }}_first_name1"
+ email: "{{ cs_resource_prefix }}@example.com1"
+ account: "admin"
+ register: user
+- name: verify results of update user idempotence
+ assert:
+ that:
+ - user|success
+ - not user|changed
+ - user.username == "{{ cs_resource_prefix }}_user"
+ - user.first_name == "{{ cs_resource_prefix }}_first_name1"
+ - user.last_name == "{{ cs_resource_prefix }}_last_name1"
+ - user.email == "{{ cs_resource_prefix }}@example.com1"
+ - user.account_type == "root_admin"
+ - user.account == "admin"
+ - user.state == "enabled"
+ - user.domain == "ROOT"
+
+- name: test lock user
+ cs_user:
+ username: "{{ cs_resource_prefix }}_user"
+ state: locked
+ register: user
+- name: verify results of lock user
+ assert:
+ that:
+ - user|success
+ - user|changed
+ - user.username == "{{ cs_resource_prefix }}_user"
+ - user.account_type == "root_admin"
+ - user.account == "admin"
+ - user.state == "locked"
+ - user.domain == "ROOT"
+
+- name: test lock user idempotence
+ cs_user:
+ username: "{{ cs_resource_prefix }}_user"
+ state: locked
+ register: user
+- name: verify results of lock user idempotence
+ assert:
+ that:
+ - user|success
+ - not user|changed
+ - user.username == "{{ cs_resource_prefix }}_user"
+ - user.account_type == "root_admin"
+ - user.account == "admin"
+ - user.state == "locked"
+ - user.domain == "ROOT"
+
+- name: test disable user
+ cs_user:
+ username: "{{ cs_resource_prefix }}_user"
+ state: disabled
+ register: user
+- name: verify results of disable user
+ assert:
+ that:
+ - user|success
+ - user|changed
+ - user.username == "{{ cs_resource_prefix }}_user"
+ - user.account_type == "root_admin"
+ - user.account == "admin"
+ - user.state == "disabled"
+ - user.domain == "ROOT"
+
+- name: test disable user idempotence
+ cs_user:
+ username: "{{ cs_resource_prefix }}_user"
+ state: disabled
+ register: user
+- name: verify results of disable user idempotence
+ assert:
+ that:
+ - user|success
+ - not user|changed
+ - user.username == "{{ cs_resource_prefix }}_user"
+ - user.account_type == "root_admin"
+ - user.account == "admin"
+ - user.state == "disabled"
+ - user.domain == "ROOT"
+
+- name: test lock disabled user
+ cs_user:
+ username: "{{ cs_resource_prefix }}_user"
+ state: locked
+ register: user
+- name: verify results of lock disabled user
+ assert:
+ that:
+ - user|success
+ - user|changed
+ - user.username == "{{ cs_resource_prefix }}_user"
+ - user.account_type == "root_admin"
+ - user.account == "admin"
+ - user.state == "locked"
+ - user.domain == "ROOT"
+
+- name: test lock disabled user idempotence
+ cs_user:
+ username: "{{ cs_resource_prefix }}_user"
+ state: locked
+ register: user
+- name: verify results of lock disabled user idempotence
+ assert:
+ that:
+ - user|success
+ - not user|changed
+ - user.username == "{{ cs_resource_prefix }}_user"
+ - user.account_type == "root_admin"
+ - user.account == "admin"
+ - user.state == "locked"
+ - user.domain == "ROOT"
+
+- name: test enable user
+ cs_user:
+ username: "{{ cs_resource_prefix }}_user"
+ state: enabled
+ register: user
+- name: verify results of enable user
+ assert:
+ that:
+ - user|success
+ - user|changed
+ - user.username == "{{ cs_resource_prefix }}_user"
+ - user.account_type == "root_admin"
+ - user.account == "admin"
+ - user.state == "enabled"
+ - user.domain == "ROOT"
+
+- name: test enable user idempotence
+ cs_user:
+ username: "{{ cs_resource_prefix }}_user"
+ state: enabled
+ register: user
+- name: verify results of enable user idempotence
+ assert:
+ that:
+ - user|success
+ - not user|changed
+ - user.username == "{{ cs_resource_prefix }}_user"
+ - user.account_type == "root_admin"
+ - user.account == "admin"
+ - user.state == "enabled"
+ - user.domain == "ROOT"
+
+- name: test remove user
+ cs_user:
+ username: "{{ cs_resource_prefix }}_user"
+ state: absent
+ register: user
+- name: verify results of remove user
+ assert:
+ that:
+ - user|success
+ - user|changed
+ - user.username == "{{ cs_resource_prefix }}_user"
+ - user.account_type == "root_admin"
+ - user.account == "admin"
+ - user.state == "enabled"
+ - user.domain == "ROOT"
+
+- name: test remove user idempotence
+ cs_user:
+ username: "{{ cs_resource_prefix }}_user"
+ state: absent
+ register: user
+- name: verify results of remove user idempotence
+ assert:
+ that:
+ - user|success
+ - not user|changed
diff --git a/test/integration/roles/test_fetch/tasks/main.yml b/test/integration/roles/test_fetch/tasks/main.yml
index 47669d6c6d..e14dc2fdad 100644
--- a/test/integration/roles/test_fetch/tasks/main.yml
+++ b/test/integration/roles/test_fetch/tasks/main.yml
@@ -38,5 +38,34 @@
that:
'diff.stdout == ""'
-
+- name: attempt to fetch a non-existent file - do not fail on missing
+ fetch: src={{ output_dir }}/doesnotexist dest={{ output_dir }}/fetched
+ register: fetch_missing_nofail
+- name: check fetch missing no fail result
+ assert:
+ that:
+ - "fetch_missing_nofail.msg"
+ - "not fetch_missing_nofail|changed"
+
+- name: attempt to fetch a non-existent file - fail on missing
+ fetch: src={{ output_dir }}/doesnotexist dest={{ output_dir }}/fetched fail_on_missing=yes
+ register: fetch_missing
+ ignore_errors: true
+
+- name: check fetch missing with failure
+ assert:
+ that:
+ - "fetch_missing|failed"
+ - "fetch_missing.msg"
+ - "not fetch_missing|changed"
+
+- name: attempt to fetch a directory - should not fail but return a message
+ fetch: src={{ output_dir }} dest={{ output_dir }}/somedir
+ register: fetch_dir
+
+- name: check fetch directory result
+ assert:
+ that:
+ - "not fetch_dir|changed"
+ - "fetch_dir.msg"
diff --git a/test/integration/roles/test_includes/tasks/main.yml b/test/integration/roles/test_includes/tasks/main.yml
index b4808412be..33aefe8959 100644
--- a/test/integration/roles/test_includes/tasks/main.yml
+++ b/test/integration/roles/test_includes/tasks/main.yml
@@ -31,11 +31,6 @@
b: 102
c: 103
-# Params specified via k=v values are strings, while those
-# that come from variables will keep the type they were previously.
-# Prior to v2.0, facts took priority over include params, however
-# this is no longer the case.
-
- include: included_task1.yml a={{a}} b={{b}} c=103
- name: verify variable include params
@@ -43,10 +38,10 @@
that:
- "ca == 101"
- "cb == 102"
- - "cc == '103'"
+ - "cc == 103"
# Test that strings are not turned into numbers
-- set_fact:
+- set_fact:
a: "101"
b: "102"
c: "103"
@@ -54,7 +49,7 @@
- include: included_task1.yml a={{a}} b={{b}} c=103
- name: verify variable include params
- assert:
+ assert:
that:
- "ca == '101'"
- "cb == '102'"
diff --git a/test/integration/roles/test_service/files/ansible_test_service b/test/integration/roles/test_service/files/ansible_test_service
index 66c3a3a2d4..5e8691f2f1 100755
--- a/test/integration/roles/test_service/files/ansible_test_service
+++ b/test/integration/roles/test_service/files/ansible_test_service
@@ -20,7 +20,7 @@ else:
def createDaemon():
try:
pid = os.fork()
- except OSError, e:
+ except OSError as e:
raise Exception, "%s [%d]" % (e.strerror, e.errno)
if (pid == 0):
@@ -28,7 +28,7 @@ def createDaemon():
try:
pid = os.fork()
- except OSError, e:
+ except OSError as e:
raise Exception, "%s [%d]" % (e.strerror, e.errno)
if (pid == 0):
diff --git a/lib/ansible/galaxy/data/__init__.py b/test/integration/roles/test_win_copy/files/empty.txt
index e69de29bb2..e69de29bb2 100644
--- a/lib/ansible/galaxy/data/__init__.py
+++ b/test/integration/roles/test_win_copy/files/empty.txt
diff --git a/test/integration/roles/test_win_copy/tasks/main.yml b/test/integration/roles/test_win_copy/tasks/main.yml
index 48df427380..3d29775894 100644
--- a/test/integration/roles/test_win_copy/tasks/main.yml
+++ b/test/integration/roles/test_win_copy/tasks/main.yml
@@ -19,6 +19,41 @@
- name: record the output directory
set_fact: output_file={{win_output_dir}}/foo.txt
+- name: copy an empty file
+ win_copy:
+ src: empty.txt
+ dest: "{{win_output_dir}}/empty.txt"
+ register: copy_empty_result
+
+- name: check copy empty result
+ assert:
+ that:
+ - copy_empty_result|changed
+ - copy_empty_result.checksum == 'da39a3ee5e6b4b0d3255bfef95601890afd80709'
+
+- name: stat the empty file
+ win_stat:
+ path: "{{win_output_dir}}/empty.txt"
+ register: stat_empty_result
+
+- name: check that empty file really was created
+ assert:
+ that:
+ - stat_empty_result.stat.exists
+ - stat_empty_result.stat.size == 0
+
+- name: copy an empty file again
+ win_copy:
+ src: empty.txt
+ dest: "{{win_output_dir}}/empty.txt"
+ register: copy_empty_again_result
+
+- name: check copy empty again result
+ assert:
+ that:
+ - not copy_empty_again_result|changed
+ - copy_empty_again_result.checksum == 'da39a3ee5e6b4b0d3255bfef95601890afd80709'
+
- name: initiate a basic copy
#- name: initiate a basic copy, and also test the mode
# win_copy: src=foo.txt dest={{output_file}} mode=0444
diff --git a/test/integration/roles/test_win_group/defaults/main.yml b/test/integration/roles/test_win_group/defaults/main.yml
new file mode 100644
index 0000000000..2ab744e822
--- /dev/null
+++ b/test/integration/roles/test_win_group/defaults/main.yml
@@ -0,0 +1,4 @@
+---
+
+test_win_group_name: Ansiblings
+test_win_group_description: Test group for Ansible
diff --git a/test/integration/roles/test_win_group/tasks/main.yml b/test/integration/roles/test_win_group/tasks/main.yml
new file mode 100644
index 0000000000..e380ab18ec
--- /dev/null
+++ b/test/integration/roles/test_win_group/tasks/main.yml
@@ -0,0 +1,101 @@
+# test code for the win_group module when using winrm connection
+# (c) 2015, Chris Church <cchurch@ansible.com>
+
+# This file is part of Ansible
+#
+# Ansible is free software: you can redistribute it and/or modify
+# it under the terms of the GNU General Public License as published by
+# the Free Software Foundation, either version 3 of the License, or
+# (at your option) any later version.
+#
+# Ansible is distributed in the hope that it will be useful,
+# but WITHOUT ANY WARRANTY; without even the implied warranty of
+# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+# GNU General Public License for more details.
+#
+# You should have received a copy of the GNU General Public License
+# along with Ansible. If not, see <http://www.gnu.org/licenses/>.
+
+- name: remove test group if it exists
+ win_group:
+ name: "{{test_win_group_name}}"
+ state: absent
+
+- name: create test group with missing name parameter
+ win_group:
+ description: "{{test_win_group_description}}"
+ register: win_group_create_noname
+ ignore_errors: true
+
+- name: check create result without name parameter
+ assert:
+ that:
+ - "win_group_create_noname|failed"
+
+- name: create test group with invalid state parameter
+ win_group:
+ name: "{{test_win_group_name}}"
+ state: "unknown"
+ register: win_group_create_invalid_state
+ ignore_errors: true
+
+- name: check create result with invalid state parameter
+ assert:
+ that:
+ - "win_group_create_invalid_state|failed"
+
+- name: create test group
+ win_group:
+ name: "{{test_win_group_name}}"
+ description: "{{test_win_group_description}}"
+ register: win_group_create
+
+- name: check create group results
+ assert:
+ that:
+ - "win_group_create|changed"
+
+- name: create test group again with same options
+ win_group:
+ name: "{{test_win_group_name}}"
+ description: "{{test_win_group_description}}"
+ state: "present"
+ register: win_group_create_again
+
+- name: check create group again results
+ assert:
+ that:
+ - "not win_group_create_again|changed"
+
+- name: create test group again but change description
+ win_group:
+ name: "{{test_win_group_name}}"
+ description: "{{test_win_group_description}} updated"
+ register: win_group_create_new_description
+
+- name: check create group results after updating description
+ assert:
+ that:
+ - "win_group_create_new_description|changed"
+
+- name: remove test group
+ win_group:
+ name: "{{test_win_group_name}}"
+ state: absent
+ register: win_group_remove
+
+- name: check remove group result
+ assert:
+ that:
+ - "win_group_remove|changed"
+
+- name: remove test group again
+ win_group:
+ name: "{{test_win_group_name}}"
+ state: absent
+ register: win_group_remove_again
+
+- name: check remove group again result
+ assert:
+ that:
+ - "not win_group_remove_again|changed"
diff --git a/test/integration/roles/test_win_lineinfile/files/test.txt b/test/integration/roles/test_win_lineinfile/files/test.txt
new file mode 100644
index 0000000000..8187db9f02
--- /dev/null
+++ b/test/integration/roles/test_win_lineinfile/files/test.txt
@@ -0,0 +1,5 @@
+This is line 1
+This is line 2
+REF this is a line for backrefs REF
+This is line 4
+This is line 5
diff --git a/v1/ansible/callback_plugins/__init__.py b/test/integration/roles/test_win_lineinfile/files/test_quoting.txt
index e69de29bb2..e69de29bb2 100644
--- a/v1/ansible/callback_plugins/__init__.py
+++ b/test/integration/roles/test_win_lineinfile/files/test_quoting.txt
diff --git a/v1/ansible/inventory/vars_plugins/__init__.py b/test/integration/roles/test_win_lineinfile/files/testempty.txt
index e69de29bb2..e69de29bb2 100644
--- a/v1/ansible/inventory/vars_plugins/__init__.py
+++ b/test/integration/roles/test_win_lineinfile/files/testempty.txt
diff --git a/test/integration/roles/test_win_lineinfile/files/testnoeof.txt b/test/integration/roles/test_win_lineinfile/files/testnoeof.txt
new file mode 100644
index 0000000000..152780b9ff
--- /dev/null
+++ b/test/integration/roles/test_win_lineinfile/files/testnoeof.txt
@@ -0,0 +1,2 @@
+This is line 1
+This is line 2 \ No newline at end of file
diff --git a/test/integration/roles/test_win_lineinfile/meta/main.yml b/test/integration/roles/test_win_lineinfile/meta/main.yml
new file mode 100644
index 0000000000..55200b3fc6
--- /dev/null
+++ b/test/integration/roles/test_win_lineinfile/meta/main.yml
@@ -0,0 +1,3 @@
+dependencies:
+ - prepare_win_tests
+
diff --git a/test/integration/roles/test_win_lineinfile/tasks/main.yml b/test/integration/roles/test_win_lineinfile/tasks/main.yml
new file mode 100644
index 0000000000..45d0ae96a5
--- /dev/null
+++ b/test/integration/roles/test_win_lineinfile/tasks/main.yml
@@ -0,0 +1,641 @@
+# Test code for the win_lineinfile module, adapted from the standard lineinfile module tests
+#
+# This file is part of Ansible
+#
+# Ansible is free software: you can redistribute it and/or modify
+# it under the terms of the GNU General Public License as published by
+# the Free Software Foundation, either version 3 of the License, or
+# (at your option) any later version.
+#
+# Ansible is distributed in the hope that it will be useful,
+# but WITHOUT ANY WARRANTY; without even the implied warranty of
+# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+# GNU General Public License for more details.
+#
+# You should have received a copy of the GNU General Public License
+# along with Ansible. If not, see <http://www.gnu.org/licenses/>.
+
+
+- name: deploy the test file for lineinfile
+ win_copy: src=test.txt dest={{win_output_dir}}/test.txt
+ register: result
+
+- name: assert that the test file was deployed
+ assert:
+ that:
+ - "result.changed == true"
+
+- name: stat the test file
+ win_stat: path={{win_output_dir}}/test.txt
+ register: result
+
+- name: check win_stat file result
+ assert:
+ that:
+ - "result.stat.exists"
+ - "not result.stat.isdir"
+ - "result.stat.checksum == '5feac65e442c91f557fc90069ce6efc4d346ab51'"
+ - "not result|failed"
+ - "not result|changed"
+
+
+- name: insert a line at the beginning of the file, and back it up
+ win_lineinfile: dest={{win_output_dir}}/test.txt state=present line="New line at the beginning" insertbefore="BOF" backup=yes
+ register: result
+
+- name: assert that the line was inserted at the head of the file
+ assert:
+ that:
+ - "result.changed == true"
+ - "result.msg == 'line added'"
+ - "result.backup != ''"
+
+- name: stat the backup file
+ win_stat: path={{result.backup}}
+ register: result
+
+- name: assert the backup file matches the previous hash
+ assert:
+ that:
+ - "result.stat.checksum == '5feac65e442c91f557fc90069ce6efc4d346ab51'"
+
+- name: stat the test after the insert at the head
+ win_stat: path={{win_output_dir}}/test.txt
+ register: result
+
+- name: assert test hash is what we expect for the file with the insert at the head
+ assert:
+ that:
+ - "result.stat.checksum == 'b526e2e044defc64dfb0fad2f56e105178f317d8'"
+
+- name: insert a line at the end of the file
+ win_lineinfile: dest={{win_output_dir}}/test.txt state=present line="New line at the end" insertafter="EOF"
+ register: result
+
+- name: assert that the line was inserted at the end of the file
+ assert:
+ that:
+ - "result.changed == true"
+ - "result.msg == 'line added'"
+
+- name: stat the test after the insert at the end
+ win_stat: path={{win_output_dir}}/test.txt
+ register: result
+
+- name: assert test checksum matches after the insert at the end
+ assert:
+ that:
+ - "result.stat.checksum == 'dd5e207e28ce694ab18e41c2b16deb74fde93b14'"
+
+- name: insert a line after the first line
+ win_lineinfile: dest={{win_output_dir}}/test.txt state=present line="New line after line 1" insertafter="^This is line 1$"
+ register: result
+
+- name: assert that the line was inserted after the first line
+ assert:
+ that:
+ - "result.changed == true"
+ - "result.msg == 'line added'"
+
+- name: stat the test after insert after the first line
+ win_stat: path={{win_output_dir}}/test.txt
+ register: result
+
+- name: assert test checksum matches after the insert after the first line
+ assert:
+ that:
+ - "result.stat.checksum == '604b17405f2088e6868af9680b7834087acdc8f4'"
+
+- name: insert a line before the last line
+ win_lineinfile: dest={{win_output_dir}}/test.txt state=present line="New line before line 5" insertbefore="^This is line 5$"
+ register: result
+
+- name: assert that the line was inserted before the last line
+ assert:
+ that:
+ - "result.changed == true"
+ - "result.msg == 'line added'"
+
+- name: stat the test after the insert before the last line
+ win_stat: path={{win_output_dir}}/test.txt
+ register: result
+
+- name: assert test checksum matches after the insert before the last line
+ assert:
+ that:
+ - "result.stat.checksum == '8f5b30e8f01578043d782e5a68d4c327e75a6e34'"
+
+- name: replace a line with backrefs
+ win_lineinfile: dest={{win_output_dir}}/test.txt state=present line="This is line 3" backrefs=yes regexp="^(REF).*$"
+ register: result
+
+- name: assert that the line with backrefs was changed
+ assert:
+ that:
+ - "result.changed == true"
+ - "result.msg == 'line replaced'"
+
+- name: stat the test after the backref line was replaced
+ win_stat: path={{win_output_dir}}/test.txt
+ register: result
+
+- name: assert test checksum matches after backref line was replaced
+ assert:
+ that:
+ - "result.stat.checksum == 'ef6b02645908511a2cfd2df29d50dd008897c580'"
+
+- name: remove the middle line
+ win_lineinfile: dest={{win_output_dir}}/test.txt state=absent regexp="^This is line 3$"
+ register: result
+
+- name: assert that the line was removed
+ assert:
+ that:
+ - "result.changed == true"
+ - "result.msg == '1 line(s) removed'"
+
+- name: stat the test after the middle line was removed
+ win_stat: path={{win_output_dir}}/test.txt
+ register: result
+
+- name: assert test checksum matches after the middle line was removed
+ assert:
+ that:
+ - "result.stat.checksum == '11695efa472be5c31c736bc43e055f8ac90eabdf'"
+
+- name: run a validation script that succeeds
+ win_lineinfile: dest={{win_output_dir}}/test.txt state=absent regexp="^This is line 5$" validate="sort.exe %s"
+ register: result
+
+- name: assert that the file validated after removing a line
+ assert:
+ that:
+ - "result.changed == true"
+ - "result.msg == '1 line(s) removed'"
+
+- name: stat the test after the validation succeeded
+ win_stat: path={{win_output_dir}}/test.txt
+ register: result
+
+- name: assert test checksum matches after the validation succeeded
+ assert:
+ that:
+ - "result.stat.checksum == '39c38a30aa6ac6af9ec41f54c7ed7683f1249347'"
+
+- name: run a validation script that fails
+ win_lineinfile: dest={{win_output_dir}}/test.txt state=absent regexp="^This is line 1$" validate="sort.exe %s.foo"
+ register: result
+ ignore_errors: yes
+
+- name: assert that the validate failed
+ assert:
+ that:
+ - "result.failed == true"
+
+- name: stat the test after the validation failed
+ win_stat: path={{win_output_dir}}/test.txt
+ register: result
+
+- name: assert test checksum matches the previous after the validation failed
+ assert:
+ that:
+ - "result.stat.checksum == '39c38a30aa6ac6af9ec41f54c7ed7683f1249347'"
+
+- name: use create=yes
+ win_lineinfile: dest={{win_output_dir}}/new_test.txt create=yes insertbefore=BOF state=present line="This is a new file"
+ register: result
+
+- name: assert that the new file was created
+ assert:
+ that:
+ - "result.changed == true"
+ - "result.msg == 'line added'"
+
+- name: validate that the newly created file exists
+ win_stat: path={{win_output_dir}}/new_test.txt
+ register: result
+ ignore_errors: yes
+
+- name: assert the newly created test checksum matches
+ assert:
+ that:
+ - "result.stat.checksum == '84faac1183841c57434693752fc3debc91b9195d'"
+
+# Test EOF in cases where file has no newline at EOF
+- name: testnoeof deploy the file for lineinfile
+ win_copy: src=testnoeof.txt dest={{win_output_dir}}/testnoeof.txt
+ register: result
+
+- name: testnoeof insert a line at the end of the file
+ win_lineinfile: dest={{win_output_dir}}/testnoeof.txt state=present line="New line at the end" insertafter="EOF"
+ register: result
+
+- name: testnoeof assert that the line was inserted at the end of the file
+ assert:
+ that:
+ - "result.changed == true"
+ - "result.msg == 'line added'"
+
+- name: testnoeof stat the no newline EOF test after the insert at the end
+ win_stat: path={{win_output_dir}}/testnoeof.txt
+ register: result
+
+- name: testnoeof assert test checksum matches after the insert at the end
+ assert:
+ that:
+ - "result.stat.checksum == '229852b09f7e9921fbcbb0ee0166ba78f7f7f261'"
+
+- name: add multiple lines at the end of the file
+ win_lineinfile: dest={{win_output_dir}}/test.txt state=present line="This is a line\r\nwith newline character" insertafter="EOF"
+ register: result
+
+- name: assert that the multiple lines were inserted
+ assert:
+ that:
+ - "result.changed == true"
+ - "result.msg == 'line added'"
+
+- name: stat file after adding multiple lines
+ win_stat: path={{win_output_dir}}/test.txt
+ register: result
+
+- name: assert test checksum matches after inserting multiple lines
+ assert:
+ that:
+ - "result.stat.checksum == '1401413cd4eac732be66cd6aceddd334c4240f86'"
+
+
+
+# Test EOF with empty file to make sure no unnecessary newline is added
+- name: testempty deploy the testempty file for lineinfile
+ win_copy: src=testempty.txt dest={{win_output_dir}}/testempty.txt
+ register: result
+
+- name: testempty insert a line at the end of the file
+ win_lineinfile: dest={{win_output_dir}}/testempty.txt state=present line="New line at the end" insertafter="EOF"
+ register: result
+
+- name: testempty assert that the line was inserted at the end of the file
+ assert:
+ that:
+ - "result.changed == true"
+ - "result.msg == 'line added'"
+
+- name: testempty stat the test after the insert at the end
+ win_stat: path={{win_output_dir}}/testempty.txt
+ register: result
+
+- name: testempty assert test checksum matches after the insert at the end
+ assert:
+ that:
+ - "result.stat.checksum == 'd3d34f11edda51be7ca5dcb0757cf3e1257c0bfe'"
+
+
+
+- name: replace a line with backrefs included in the line
+ win_lineinfile: dest={{win_output_dir}}/test.txt state=present line="New $1 created with the backref" backrefs=yes regexp="^This is (line 4)$"
+ register: result
+
+- name: assert that the line with backrefs was changed
+ assert:
+ that:
+ - "result.changed == true"
+ - "result.msg == 'line replaced'"
+
+- name: stat the test after the backref line was replaced
+ win_stat: path={{win_output_dir}}/test.txt
+ register: result
+
+- name: assert test checksum matches after backref line was replaced
+ assert:
+ that:
+ - "result.stat.checksum == 'e6ff42e926dac2274c93dff0b8a323e07ae09149'"
+
+###################################################################
+# issue 8535
+
+- name: create a new file for testing quoting issues
+ win_copy: src=test_quoting.txt dest={{win_output_dir}}/test_quoting.txt
+ register: result
+
+- name: assert the new file was created
+ assert:
+ that:
+ - result.changed
+
+- name: use with_items to add code-like strings to the quoting txt file
+ win_lineinfile: >
+ dest={{win_output_dir}}/test_quoting.txt
+ line="{{ item }}"
+ insertbefore="BOF"
+ with_items:
+ - "'foo'"
+ - "dotenv.load();"
+ - "var dotenv = require('dotenv');"
+ register: result
+
+- name: assert the quote test file was modified correctly
+ assert:
+ that:
+ - result.results|length == 3
+ - result.results[0].changed
+ - result.results[0].item == "'foo'"
+ - result.results[1].changed
+ - result.results[1].item == "dotenv.load();"
+ - result.results[2].changed
+ - result.results[2].item == "var dotenv = require('dotenv');"
+
+- name: stat the quote test file
+ win_stat: path={{win_output_dir}}/test_quoting.txt
+ register: result
+
+- name: assert test checksum matches for quote test file
+ assert:
+ that:
+ - "result.stat.checksum == 'f3bccdbdfa1d7176c497ef87d04957af40ab48d2'"
+
+- name: append a line into the quoted file with a single quote
+ win_lineinfile: dest={{win_output_dir}}/test_quoting.txt line="import g'"
+ register: result
+
+- name: assert that the quoted file was changed
+ assert:
+ that:
+ - result.changed
+
+- name: stat the quote test file
+ win_stat: path={{win_output_dir}}/test_quoting.txt
+ register: result
+
+- name: assert test checksum matches adding line with single quote
+ assert:
+ that:
+ - "result.stat.checksum == 'dabf4cbe471e1797d8dcfc773b6b638c524d5237'"
+
+- name: insert a line into the quoted file with many double quotation strings
+ win_lineinfile: dest={{win_output_dir}}/test_quoting.txt line='"quote" and "unquote"'
+ register: result
+
+- name: assert that the quoted file was changed
+ assert:
+ that:
+ - result.changed
+
+- name: stat the quote test file
+ win_stat: path={{win_output_dir}}/test_quoting.txt
+ register: result
+
+- name: assert test checksum matches quoted line added
+ assert:
+ that:
+ - "result.stat.checksum == '9dc1fc1ff19942e2936564102ad37134fa83b91d'"
+
+
+# Windows vs. Unix line separator test cases
+
+- name: Create windows test file with initial line
+ win_lineinfile: dest={{win_output_dir}}/test_windows_sep.txt create=yes insertbefore=BOF state=present line="This is a new file"
+ register: result
+
+- name: assert that the new file was created
+ assert:
+ that:
+ - "result.changed == true"
+ - "result.msg == 'line added'"
+
+- name: validate that the newly created file exists
+ win_stat: path={{win_output_dir}}/test_windows_sep.txt
+ register: result
+
+- name: assert the newly created file checksum matches
+ assert:
+ that:
+ - "result.stat.checksum == '84faac1183841c57434693752fc3debc91b9195d'"
+
+- name: Test appending to the file using the default (windows) line separator
+ win_lineinfile: dest={{win_output_dir}}/test_windows_sep.txt insertbefore=EOF state=present line="This is the last line"
+ register: result
+
+- name: assert that the new line was added
+ assert:
+ that:
+ - "result.changed == true"
+ - "result.msg == 'line added'"
+
+- name: stat the file
+ win_stat: path={{win_output_dir}}/test_windows_sep.txt
+ register: result
+
+- name: assert the file checksum matches expected checksum
+ assert:
+ that:
+ - "result.stat.checksum == '71a17ddd1d57ed7c7912e4fd11ecb2ead0b27033'"
+
+
+- name: Create unix test file with initial line
+ win_lineinfile: dest={{win_output_dir}}/test_unix_sep.txt create=yes insertbefore=BOF state=present line="This is a new file"
+ register: result
+
+- name: assert that the new file was created
+ assert:
+ that:
+ - "result.changed == true"
+ - "result.msg == 'line added'"
+
+- name: validate that the newly created file exists
+ win_stat: path={{win_output_dir}}/test_unix_sep.txt
+ register: result
+
+- name: assert the newly created file checksum matches
+ assert:
+ that:
+ - "result.stat.checksum == '84faac1183841c57434693752fc3debc91b9195d'"
+
+- name: Test appending to the file using unix line separator
+ win_lineinfile: dest={{win_output_dir}}/test_unix_sep.txt insertbefore=EOF state=present line="This is the last line" newline="unix"
+ register: result
+
+- name: assert that the new line was added
+ assert:
+ that:
+ - "result.changed == true"
+ - "result.msg == 'line added'"
+
+- name: stat the file
+ win_stat: path={{win_output_dir}}/test_unix_sep.txt
+ register: result
+
+- name: assert the file checksum matches expected checksum
+ assert:
+ that:
+ - "result.stat.checksum == 'f1f634a37ab1c73efb77a71a5ad2cc87b61b17ae'"
+
+
+# Encoding management test cases
+
+# Default (auto) encoding should use utf-8 with no BOM
+- name: Test create file without explicit encoding results in utf-8 without BOM
+ win_lineinfile: dest={{win_output_dir}}/test_auto_utf8.txt create=yes insertbefore=BOF state=present line="This is a new utf-8 file"
+ register: result
+
+- name: assert that the new file was created
+ assert:
+ that:
+ - "result.changed == true"
+ - "result.msg == 'line added'"
+ - "result.encoding == 'utf-8'"
+
+- name: validate that the newly created file exists
+ win_stat: path={{win_output_dir}}/test_auto_utf8.txt
+ register: result
+
+- name: assert the newly created file checksum matches
+ assert:
+ that:
+ - "result.stat.checksum == 'b69fcbacca8291a4668f57fba91d7c022f1c3dc7'"
+
+- name: Test appending to the utf-8 without BOM file - should autodetect UTF-8 no BOM
+ win_lineinfile: dest={{win_output_dir}}/test_auto_utf8.txt insertbefore=EOF state=present line="This is the last line"
+ register: result
+
+- name: assert that the new line was added and encoding did not change
+ assert:
+ that:
+ - "result.changed == true"
+ - "result.msg == 'line added'"
+ - "result.encoding == 'utf-8'"
+
+- name: stat the file
+ win_stat: path={{win_output_dir}}/test_auto_utf8.txt
+ register: result
+
+- name: assert the file checksum matches
+ assert:
+ that:
+ - "result.stat.checksum == '64d747f1ebf8c9d793dbfd27126e4152d39a3848'"
+
+
+# UTF-8 explicit (with BOM)
+- name: Test create file with explicit utf-8 encoding results in utf-8 with a BOM
+ win_lineinfile: dest={{win_output_dir}}/test_utf8.txt create=yes encoding="utf-8" insertbefore=BOF state=present line="This is a new utf-8 file"
+ register: result
+
+- name: assert that the new file was created
+ assert:
+ that:
+ - "result.changed == true"
+ - "result.msg == 'line added'"
+ - "result.encoding == 'utf-8'"
+
+- name: validate that the newly created file exists
+ win_stat: path={{win_output_dir}}/test_utf8.txt
+ register: result
+
+- name: assert the newly created file checksum matches
+ assert:
+ that:
+ - "result.stat.checksum == 'd45344b2b3bf1cf90eae851b40612f5f37a88bbb'"
+
+- name: Test appending to the utf-8 with BOM file - should autodetect utf-8 with BOM encoding
+ win_lineinfile: dest={{win_output_dir}}/test_utf8.txt insertbefore=EOF state=present line="This is the last line"
+ register: result
+
+- name: assert that the new line was added and encoding did not change
+ assert:
+ that:
+ - "result.changed == true"
+ - "result.msg == 'line added'"
+ - "result.encoding == 'utf-8'"
+
+- name: stat the file
+ win_stat: path={{win_output_dir}}/test_utf8.txt
+ register: result
+
+- name: assert the file checksum matches
+ assert:
+ that:
+ - "result.stat.checksum == '9b84254489f40f258871a4c6573cacc65895ee1a'"
+
+
+# UTF-16 explicit
+- name: Test create file with explicit utf-16 encoding
+ win_lineinfile: dest={{win_output_dir}}/test_utf16.txt create=yes encoding="utf-16" insertbefore=BOF state=present line="This is a new utf-16 file"
+ register: result
+
+- name: assert that the new file was created
+ assert:
+ that:
+ - "result.changed == true"
+ - "result.msg == 'line added'"
+ - "result.encoding == 'utf-16'"
+
+- name: validate that the newly created file exists
+ win_stat: path={{win_output_dir}}/test_utf16.txt
+ register: result
+
+- name: assert the newly created file checksum matches
+ assert:
+ that:
+ - "result.stat.checksum == '785b0693cec13b60e2c232782adeda2f8a967434'"
+
+- name: Test appending to the utf-16 file - should autodetect utf-16 encoding
+ win_lineinfile: dest={{win_output_dir}}/test_utf16.txt insertbefore=EOF state=present line="This is the last line"
+ register: result
+
+- name: assert that the new line was added and encoding did not change
+ assert:
+ that:
+ - "result.changed == true"
+ - "result.msg == 'line added'"
+ - "result.encoding == 'utf-16'"
+
+- name: stat the file
+ win_stat: path={{win_output_dir}}/test_utf16.txt
+ register: result
+
+- name: assert the file checksum matches
+ assert:
+ that:
+ - "result.stat.checksum == '70e4eb3ba795e1ba94d262db47e4fd17c64b2e73'"
+
+# UTF-32 explicit
+- name: Test create file with explicit utf-32 encoding
+ win_lineinfile: dest={{win_output_dir}}/test_utf32.txt create=yes encoding="utf-32" insertbefore=BOF state=present line="This is a new utf-32 file"
+ register: result
+
+- name: assert that the new file was created
+ assert:
+ that:
+ - "result.changed == true"
+ - "result.msg == 'line added'"
+ - "result.encoding == 'utf-32'"
+
+- name: validate that the newly created file exists
+ win_stat: path={{win_output_dir}}/test_utf32.txt
+ register: result
+
+- name: assert the newly created file checksum matches
+ assert:
+ that:
+ - "result.stat.checksum == '7a6e3f3604c0def431aaa813173a4ddaa10fd1fb'"
+
+- name: Test appending to the utf-32 file - should autodetect utf-32 encoding
+ win_lineinfile: dest={{win_output_dir}}/test_utf32.txt insertbefore=EOF state=present line="This is the last line"
+ register: result
+
+- name: assert that the new line was added and encoding did not change
+ assert:
+ that:
+ - "result.changed == true"
+ - "result.msg == 'line added'"
+ - "result.encoding == 'utf-32'"
+
+- name: stat the file
+ win_stat: path={{win_output_dir}}/test_utf32.txt
+ register: result
+
+- name: assert the file checksum matches
+ assert:
+ that:
+ - "result.stat.checksum == '66a72e71f42c4775f4326da95cfe82c8830e5022'"
+
diff --git a/test/integration/roles/test_win_msi/defaults/main.yml b/test/integration/roles/test_win_msi/defaults/main.yml
new file mode 100644
index 0000000000..6e79dcd146
--- /dev/null
+++ b/test/integration/roles/test_win_msi/defaults/main.yml
@@ -0,0 +1,6 @@
+---
+
+msi_url: http://downloads.sourceforge.net/project/sevenzip/7-Zip/9.22/7z922-x64.msi
+msi_download_path: "C:\\Program Files\\7z922-x64.msi"
+msi_install_path: "C:\\Program Files\\7-Zip"
+msi_product_code: "{23170F69-40C1-2702-0922-000001000000}"
diff --git a/test/integration/roles/test_win_msi/tasks/main.yml b/test/integration/roles/test_win_msi/tasks/main.yml
index 85c9957a1d..1ac467212a 100644
--- a/test/integration/roles/test_win_msi/tasks/main.yml
+++ b/test/integration/roles/test_win_msi/tasks/main.yml
@@ -17,21 +17,44 @@
# along with Ansible. If not, see <http://www.gnu.org/licenses/>.
- name: use win_get_url module to download msi
- win_get_url: url=http://downloads.sourceforge.net/project/sevenzip/7-Zip/9.22/7z922-x64.msi dest='C:\\7z922-x64.msi'
+ win_get_url:
+ url: "{{msi_url}}"
+ dest: "{{msi_download_path}}"
register: win_get_url_result
-- name: install 7zip msi
- win_msi: path="{{ win_get_url_result.win_get_url.dest }}"
+- name: make sure msi is uninstalled
+ win_msi:
+ path: "{{msi_product_code|default(msi_download_path,true)}}"
+ state: absent
+ ignore_errors: true
+
+- name: install msi
+ win_msi:
+ path: "{{msi_download_path}}"
register: win_msi_install_result
- name: check win_msi install result
- assert:
+ assert:
that:
- "not win_msi_install_result|failed"
- "win_msi_install_result|changed"
-- name: uninstall 7zip msi
- win_msi: path="{{ win_get_url_result.win_get_url.dest }}" state=absent
+- name: install msi again with creates argument
+ win_msi:
+ path: "{{msi_download_path}}"
+ creates: "{{msi_install_path}}"
+ register: win_msi_install_again_result
+
+- name: check win_msi install again result
+ assert:
+ that:
+ - "not win_msi_install_again_result|failed"
+ - "not win_msi_install_again_result|changed"
+
+- name: uninstall msi
+ win_msi:
+ path: "{{msi_download_path}}"
+ state: absent
register: win_msi_uninstall_result
- name: check win_msi uninstall result
diff --git a/test/integration/roles/test_win_ping/library/win_ping_set_attr.ps1 b/test/integration/roles/test_win_ping/library/win_ping_set_attr.ps1
new file mode 100644
index 0000000000..8279b4b414
--- /dev/null
+++ b/test/integration/roles/test_win_ping/library/win_ping_set_attr.ps1
@@ -0,0 +1,31 @@
+#!powershell
+# This file is part of Ansible
+#
+# Ansible is free software: you can redistribute it and/or modify
+# it under the terms of the GNU General Public License as published by
+# the Free Software Foundation, either version 3 of the License, or
+# (at your option) any later version.
+#
+# Ansible is distributed in the hope that it will be useful,
+# but WITHOUT ANY WARRANTY; without even the implied warranty of
+# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+# GNU General Public License for more details.
+#
+# You should have received a copy of the GNU General Public License
+# along with Ansible. If not, see <http://www.gnu.org/licenses/>.
+
+# POWERSHELL_COMMON
+
+$params = Parse-Args $args $true;
+
+$data = Get-Attr $params "data" "pong";
+
+$result = New-Object psobject @{
+ changed = $false
+ ping = "pong"
+};
+
+# Test that Set-Attr will replace an existing attribute.
+Set-Attr $result "ping" $data
+
+Exit-Json $result;
diff --git a/v1/ansible/runner/shell_plugins/fish.py b/test/integration/roles/test_win_ping/library/win_ping_strict_mode_error.ps1
index 137c013c12..d498cbcf17 100644
--- a/v1/ansible/runner/shell_plugins/fish.py
+++ b/test/integration/roles/test_win_ping/library/win_ping_strict_mode_error.ps1
@@ -1,6 +1,5 @@
-# (c) 2014, Chris Church <chris@ninemoreminutes.com>
-#
-# This file is part of Ansible.
+#!powershell
+# This file is part of Ansible
#
# Ansible is free software: you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
@@ -15,9 +14,17 @@
# You should have received a copy of the GNU General Public License
# along with Ansible. If not, see <http://www.gnu.org/licenses/>.
-from ansible.runner.shell_plugins.sh import ShellModule as ShModule
+# POWERSHELL_COMMON
+
+$params = Parse-Args $args $true;
+
+$x = $params.thisPropertyDoesNotExist
+
+$data = Get-Attr $params "data" "pong";
-class ShellModule(ShModule):
+$result = New-Object psobject @{
+ changed = $false
+ ping = $data
+};
- def env_prefix(self, **kwargs):
- return 'env %s' % super(ShellModule, self).env_prefix(**kwargs)
+Exit-Json $result;
diff --git a/test/integration/roles/test_win_ping/library/win_ping_syntax_error.ps1 b/test/integration/roles/test_win_ping/library/win_ping_syntax_error.ps1
new file mode 100644
index 0000000000..6bfe621a80
--- /dev/null
+++ b/test/integration/roles/test_win_ping/library/win_ping_syntax_error.ps1
@@ -0,0 +1,30 @@
+#!powershell
+# This file is part of Ansible
+#
+# Ansible is free software: you can redistribute it and/or modify
+# it under the terms of the GNU General Public License as published by
+# the Free Software Foundation, either version 3 of the License, or
+# (at your option) any later version.
+#
+# Ansible is distributed in the hope that it will be useful,
+# but WITHOUT ANY WARRANTY; without even the implied warranty of
+# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+# GNU General Public License for more details.
+#
+# You should have received a copy of the GNU General Public License
+# along with Ansible. If not, see <http://www.gnu.org/licenses/>.
+
+# POWERSHELL_COMMON
+
+$blah = 'I can't quote my strings correctly.'
+
+$params = Parse-Args $args $true;
+
+$data = Get-Attr $params "data" "pong";
+
+$result = New-Object psobject @{
+ changed = $false
+ ping = $data
+};
+
+Exit-Json $result;
diff --git a/v1/ansible/module_utils/__init__.py b/test/integration/roles/test_win_ping/library/win_ping_throw.ps1
index 266d06a613..f0b32186d8 100644
--- a/v1/ansible/module_utils/__init__.py
+++ b/test/integration/roles/test_win_ping/library/win_ping_throw.ps1
@@ -1,5 +1,4 @@
-# 2013, Michael DeHaan <michael.dehaan@gmail.com>
-#
+#!powershell
# This file is part of Ansible
#
# Ansible is free software: you can redistribute it and/or modify
@@ -15,3 +14,17 @@
# You should have received a copy of the GNU General Public License
# along with Ansible. If not, see <http://www.gnu.org/licenses/>.
+# POWERSHELL_COMMON
+
+throw
+
+$params = Parse-Args $args $true;
+
+$data = Get-Attr $params "data" "pong";
+
+$result = New-Object psobject @{
+ changed = $false
+ ping = $data
+};
+
+Exit-Json $result;
diff --git a/v1/hacking/get_library.py b/test/integration/roles/test_win_ping/library/win_ping_throw_string.ps1
index 571183b688..e1f3ca6065 100755..100644
--- a/v1/hacking/get_library.py
+++ b/test/integration/roles/test_win_ping/library/win_ping_throw_string.ps1
@@ -1,7 +1,4 @@
-#!/usr/bin/env python
-
-# (c) 2014, Will Thames <will@thames.id.au>
-#
+#!powershell
# This file is part of Ansible
#
# Ansible is free software: you can redistribute it and/or modify
@@ -16,14 +13,18 @@
#
# You should have received a copy of the GNU General Public License
# along with Ansible. If not, see <http://www.gnu.org/licenses/>.
-#
-import ansible.constants as C
-import sys
+# POWERSHELL_COMMON
+
+throw "no ping for you"
+
+$params = Parse-Args $args $true;
+
+$data = Get-Attr $params "data" "pong";
-def main():
- print C.DEFAULT_MODULE_PATH
- return 0
+$result = New-Object psobject @{
+ changed = $false
+ ping = $data
+};
-if __name__ == '__main__':
- sys.exit(main())
+Exit-Json $result;
diff --git a/test/integration/roles/test_win_ping/tasks/main.yml b/test/integration/roles/test_win_ping/tasks/main.yml
index f17a4a9227..aa5d03c908 100644
--- a/test/integration/roles/test_win_ping/tasks/main.yml
+++ b/test/integration/roles/test_win_ping/tasks/main.yml
@@ -79,3 +79,68 @@
- "not win_ping_extra_args_result|failed"
- "not win_ping_extra_args_result|changed"
- "win_ping_extra_args_result.ping == 'bloop'"
+
+- name: test modified win_ping that throws an exception
+ action: win_ping_throw
+ register: win_ping_throw_result
+ ignore_errors: true
+
+- name: check win_ping_throw result
+ assert:
+ that:
+ - "win_ping_throw_result|failed"
+ - "not win_ping_throw_result|changed"
+ - "win_ping_throw_result.msg == 'ScriptHalted'"
+ - "win_ping_throw_result.exception"
+ - "win_ping_throw_result.error_record"
+
+- name: test modified win_ping that throws a string exception
+ action: win_ping_throw_string
+ register: win_ping_throw_string_result
+ ignore_errors: true
+
+- name: check win_ping_throw_string result
+ assert:
+ that:
+ - "win_ping_throw_string_result|failed"
+ - "not win_ping_throw_string_result|changed"
+ - "win_ping_throw_string_result.msg == 'no ping for you'"
+ - "win_ping_throw_string_result.exception"
+ - "win_ping_throw_string_result.error_record"
+
+- name: test modified win_ping that has a syntax error
+ action: win_ping_syntax_error
+ register: win_ping_syntax_error_result
+ ignore_errors: true
+
+- name: check win_ping_syntax_error result
+ assert:
+ that:
+ - "win_ping_syntax_error_result|failed"
+ - "not win_ping_syntax_error_result|changed"
+ - "win_ping_syntax_error_result.msg"
+ - "win_ping_syntax_error_result.exception"
+
+- name: test modified win_ping that has an error that only surfaces when strict mode is on
+ action: win_ping_strict_mode_error
+ register: win_ping_strict_mode_error_result
+ ignore_errors: true
+
+- name: check win_ping_strict_mode_error result
+ assert:
+ that:
+ - "win_ping_strict_mode_error_result|failed"
+ - "not win_ping_strict_mode_error_result|changed"
+ - "win_ping_strict_mode_error_result.msg"
+ - "win_ping_strict_mode_error_result.exception"
+
+- name: test modified win_ping to verify a Set-Attr fix
+ action: win_ping_set_attr data="fixed"
+ register: win_ping_set_attr_result
+
+- name: check win_ping_set_attr_result result
+ assert:
+ that:
+ - "not win_ping_set_attr_result|failed"
+ - "not win_ping_set_attr_result|changed"
+ - "win_ping_set_attr_result.ping == 'fixed'"
diff --git a/test/integration/roles/test_win_raw/tasks/main.yml b/test/integration/roles/test_win_raw/tasks/main.yml
index 8a5412c381..6351c516be 100644
--- a/test/integration/roles/test_win_raw/tasks/main.yml
+++ b/test/integration/roles/test_win_raw/tasks/main.yml
@@ -92,3 +92,12 @@
assert:
that:
- "raw_result.stdout_lines[0] == 'wwe=raw'"
+
+- name: run a raw command with unicode chars and quoted args (from https://github.com/ansible/ansible-modules-core/issues/1929)
+ raw: Write-Host --% icacls D:\somedir\ /grant "! ЗАО. Руководство":F
+ register: raw_result2
+
+- name: make sure raw passes command as-is and doesn't split/rejoin args
+ assert:
+ that:
+ - "raw_result2.stdout_lines[0] == '--% icacls D:\\\\somedir\\\\ /grant \"! ЗАО. Руководство\":F'"
diff --git a/test/integration/roles/test_win_script/files/test_script_bool.ps1 b/test/integration/roles/test_win_script/files/test_script_bool.ps1
new file mode 100644
index 0000000000..0484af70e5
--- /dev/null
+++ b/test/integration/roles/test_win_script/files/test_script_bool.ps1
@@ -0,0 +1,6 @@
+Param(
+[bool]$boolvariable
+)
+
+Write-Host $boolvariable.GetType()
+Write-Host $boolvariable
diff --git a/test/integration/roles/test_win_script/tasks/main.yml b/test/integration/roles/test_win_script/tasks/main.yml
index 313569face..46f91f13f8 100644
--- a/test/integration/roles/test_win_script/tasks/main.yml
+++ b/test/integration/roles/test_win_script/tasks/main.yml
@@ -171,3 +171,13 @@
- "not test_cmd_result.stderr"
- "not test_cmd_result|failed"
- "test_cmd_result|changed"
+
+- name: run test script that takes a boolean parameter
+ script: test_script_bool.ps1 $true
+ register: test_script_bool_result
+
+- name: check that the script ran and the parameter was treated as a boolean
+ assert:
+ that:
+ - "test_script_bool_result.stdout_lines[0] == 'System.Boolean'"
+ - "test_script_bool_result.stdout_lines[1] == 'True'"
diff --git a/test/integration/setup_gce.py b/test/integration/setup_gce.py
index 0248d7684d..8aa8babb2d 100644
--- a/test/integration/setup_gce.py
+++ b/test/integration/setup_gce.py
@@ -38,5 +38,5 @@ if __name__ == '__main__':
gce.create_volume_snapshot(base_volume, name=prefix+'-snapshot')
gce.create_volume(
size=10, name=prefix+'-extra', location='us-central1-a')
- except KeyboardInterrupt, e:
- print "\nExiting on user command."
+ except KeyboardInterrupt as e:
+ print("\nExiting on user command.")
diff --git a/test/integration/test_winrm.yml b/test/integration/test_winrm.yml
index b249224cb8..f11171faf8 100644
--- a/test/integration/test_winrm.yml
+++ b/test/integration/test_winrm.yml
@@ -32,6 +32,9 @@
- { role: test_win_service, tags: test_win_service }
- { role: test_win_feature, tags: test_win_feature }
- { role: test_win_user, tags: test_win_user }
+ - { role: test_win_group, tags: test_win_group }
- { role: test_win_file, tags: test_win_file }
- { role: test_win_copy, tags: test_win_copy }
- { role: test_win_template, tags: test_win_template }
+ - { role: test_win_lineinfile, tags: test_win_lineinfile }
+
diff --git a/test/integration/unicode.yml b/test/integration/unicode.yml
index 1044c25270..6e8e073a79 100644
--- a/test/integration/unicode.yml
+++ b/test/integration/unicode.yml
@@ -4,10 +4,9 @@
connection: local
vars:
test_var: 'Ī ī Ĭ ĭ Į į İ ı IJ ij Ĵ ĵ Ķ ķ ĸ Ĺ ĺ Ļ ļ Ľ ľ Ŀ ŀ Ł ł Ń ń Ņ ņ Ň ň ʼn Ŋ ŋ Ō ō Ŏ ŏ Ő ő Œ'
- num_hosts: 5
hostnames:
- - 'host-#ϬϭϮϯϰ'
- - 'host-ͰͱͲͳʹ͵'
+ - 'host-ϬϭϮϯϰ'
+ - 'host-fóöbär'
- 'host-ΙΚΛΜΝΞ'
- 'host-στυφχψ'
- 'host-ϬϭϮϯϰϱ'
@@ -29,11 +28,11 @@
- 'ā Ă ă Ą ą Ć ć Ĉ ĉ Ċ ċ Č č Ď ď Đ đ Ē ē Ĕ ĕ Ė ė Ę ę Ě ě Ĝ ĝ Ğ ğ Ġ ġ Ģ ģ Ĥ ĥ Ħ ħ Ĩ ĩ'
- add_host:
- name: '{{hostnames}}.{{item}}'
+ name: '{{item}}'
groups: 'ĪīĬĭ'
+ ansible_ssh_host: 127.0.0.1
ansible_connection: local
- host_id: '{{item}}'
- with_sequence: start=1 end={{num_hosts}} format=%d
+ with_items: hostnames
- name: 'A task with unicode extra vars'
debug: var=extra_var
diff --git a/test/units/executor/test_play_iterator.py b/test/units/executor/test_play_iterator.py
index 3b30f9c11c..411b0ed291 100644
--- a/test/units/executor/test_play_iterator.py
+++ b/test/units/executor/test_play_iterator.py
@@ -68,12 +68,16 @@ class TestPlayIterator(unittest.TestCase):
inventory.get_hosts.return_value = hosts
inventory.filter_hosts.return_value = hosts
+ mock_var_manager = MagicMock()
+ mock_var_manager._fact_cache = dict()
+
play_context = PlayContext(play=p._entries[0])
itr = PlayIterator(
inventory=inventory,
play=p._entries[0],
play_context=play_context,
+ variable_manager=mock_var_manager,
all_vars=dict(),
)
diff --git a/test/units/executor/test_task_executor.py b/test/units/executor/test_task_executor.py
index 0300b7ad07..eab640d650 100644
--- a/test/units/executor/test_task_executor.py
+++ b/test/units/executor/test_task_executor.py
@@ -220,6 +220,11 @@ class TestTaskExecutor(unittest.TestCase):
mock_task.changed_when = None
mock_task.failed_when = None
mock_task.post_validate.return_value = None
+ # mock_task.async cannot be left unset, because on Python 3
+ # MagicMock() > 0 raises a TypeError. There are two reasons for using
+ # the value 1 here: on Python 2, comparing MagicMock() > 0 returns
+ # True, and if the value were 0, the test would fail. ;)
+ mock_task.async = 1
mock_play_context = MagicMock()
mock_play_context.post_validate.return_value = None
@@ -282,7 +287,9 @@ class TestTaskExecutor(unittest.TestCase):
mock_action = MagicMock()
- shared_loader = None
+ shared_loader = MagicMock()
+ shared_loader.action_loader = action_loader
+
new_stdin = None
job_vars = dict(omit="XXXXXXXXXXXXXXXXXXX")
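The mock_task.async comment earlier in this file leans on a Python 2/3 difference in how MagicMock handles ordering comparisons; a sketch of the behaviour it describes (observed behaviour per that comment, not a documented mock guarantee):

    try:
        from unittest.mock import MagicMock  # Python 3
    except ImportError:
        from mock import MagicMock           # Python 2 backport

    m = MagicMock()
    try:
        # py2: arbitrary objects are orderable, so this is just True/False;
        # py3: unorderable types raise, hence mock_task.async = 1 above
        m > 0
    except TypeError:
        print("ordering comparison rejected on this interpreter")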
diff --git a/v1/ansible/__init__.py b/test/units/inventory/__init__.py
index ba5ca83b72..785fc45992 100644
--- a/v1/ansible/__init__.py
+++ b/test/units/inventory/__init__.py
@@ -14,5 +14,8 @@
#
# You should have received a copy of the GNU General Public License
# along with Ansible. If not, see <http://www.gnu.org/licenses/>.
-__version__ = '2.0.0'
-__author__ = 'Michael DeHaan'
+
+# Make coding more python3-ish
+from __future__ import (absolute_import, division, print_function)
+__metaclass__ = type
+
diff --git a/test/units/inventory/test_inventory.py b/test/units/inventory/test_inventory.py
new file mode 100644
index 0000000000..e7bcceb85d
--- /dev/null
+++ b/test/units/inventory/test_inventory.py
@@ -0,0 +1,100 @@
+# Copyright 2015 Abhijit Menon-Sen <ams@2ndQuadrant.com>
+#
+# This file is part of Ansible
+#
+# Ansible is free software: you can redistribute it and/or modify
+# it under the terms of the GNU General Public License as published by
+# the Free Software Foundation, either version 3 of the License, or
+# (at your option) any later version.
+#
+# Ansible is distributed in the hope that it will be useful,
+# but WITHOUT ANY WARRANTY; without even the implied warranty of
+# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+# GNU General Public License for more details.
+#
+# You should have received a copy of the GNU General Public License
+# along with Ansible. If not, see <http://www.gnu.org/licenses/>.
+
+# Make coding more python3-ish
+from __future__ import (absolute_import, division, print_function)
+__metaclass__ = type
+
+import string
+
+from ansible.compat.tests import unittest
+from ansible.compat.tests.mock import patch, MagicMock
+
+from ansible.errors import AnsibleError, AnsibleParserError
+from ansible.inventory import Inventory
+from ansible.vars import VariableManager
+
+from units.mock.loader import DictDataLoader
+
+class TestInventory(unittest.TestCase):
+
+ patterns = {
+ 'a': ['a'],
+ 'a, b': ['a', 'b'],
+ 'a , b': ['a', 'b'],
+ ' a,b ,c[1:2] ': ['a', 'b', 'c[1:2]'],
+ '9a01:7f8:191:7701::9': ['9a01:7f8:191:7701::9'],
+ '9a01:7f8:191:7701::9,9a01:7f8:191:7701::9': ['9a01:7f8:191:7701::9', '9a01:7f8:191:7701::9'],
+ '9a01:7f8:191:7701::9,9a01:7f8:191:7701::9,foo': ['9a01:7f8:191:7701::9', '9a01:7f8:191:7701::9','foo'],
+ 'foo[1:2]': ['foo[1:2]'],
+ 'a::b': ['a::b'],
+ 'a:b': ['a', 'b'],
+ ' a : b ': ['a', 'b'],
+ 'foo:bar:baz[1:2]': ['foo', 'bar', 'baz[1:2]'],
+ }
+
+ pattern_lists = [
+ [['a'], ['a']],
+ [['a', 'b'], ['a', 'b']],
+ [['a, b'], ['a', 'b']],
+ [['9a01:7f8:191:7701::9', '9a01:7f8:191:7701::9,foo'],
+ ['9a01:7f8:191:7701::9', '9a01:7f8:191:7701::9','foo']]
+ ]
+
+ # pattern_string: [ ('base_pattern', (a,b)), ['x','y','z'] ]
+ # a,b are the bounds of the subscript; x..z are the results of the subscript
+ # when applied to string.ascii_letters.
+
+ subscripts = {
+ 'a': [('a',None), list(string.ascii_letters)],
+ 'a[0]': [('a', (0, None)), ['a']],
+ 'a[1]': [('a', (1, None)), ['b']],
+ 'a[2:3]': [('a', (2, 3)), ['c', 'd']],
+ 'a[-1]': [('a', (-1, None)), ['Z']],
+ 'a[-2]': [('a', (-2, None)), ['Y']],
+ 'a[48:]': [('a', (48, -1)), ['W', 'X', 'Y', 'Z']],
+ 'a[49:]': [('a', (49, -1)), ['X', 'Y', 'Z']],
+ 'a[1:]': [('a', (1, -1)), list(string.ascii_letters[1:])],
+ }
+
+ def setUp(self):
+ v = VariableManager()
+ fake_loader = DictDataLoader({})
+
+ self.i = Inventory(loader=fake_loader, variable_manager=v, host_list='')
+
+ def test_split_patterns(self):
+
+ for p in self.patterns:
+ r = self.patterns[p]
+ self.assertEqual(r, self.i._split_pattern(p))
+
+ for p, r in self.pattern_lists:
+ self.assertEqual(r, self.i._split_pattern(p))
+
+ def test_ranges(self):
+
+ for s in self.subscripts:
+ r = self.subscripts[s]
+ self.assertEqual(r[0], self.i._split_subscript(s))
+ self.assertEqual(
+ r[1],
+ self.i._apply_subscript(
+ list(string.ascii_letters),
+ r[0][1]
+ )
+ )
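The subscript table above encodes Ansible's inclusive range semantics: a single index like a[1] selects one element, a[2:3] selects elements 2 through 3, and an end bound of -1 marks an open-ended range. A hypothetical re-implementation (not the Inventory method itself) of the behavior the assertions pin down:

    import string

    def apply_subscript(hosts, bounds):
        # Illustrative only; mirrors the semantics tested above.
        if bounds is None:
            return hosts
        start, end = bounds
        if end is None:                # single index, e.g. a[1]
            return [hosts[start]]
        if end == -1:                  # open-ended, e.g. a[48:]
            return hosts[start:]
        return hosts[start:end + 1]    # inclusive end, e.g. a[2:3]

    letters = list(string.ascii_letters)
    assert apply_subscript(letters, (2, 3)) == ['c', 'd']
    assert apply_subscript(letters, (48, -1)) == ['W', 'X', 'Y', 'Z']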
diff --git a/test/units/mock/loader.py b/test/units/mock/loader.py
index 88f3970913..db69b862a7 100644
--- a/test/units/mock/loader.py
+++ b/test/units/mock/loader.py
@@ -57,6 +57,10 @@ class DictDataLoader(DataLoader):
def list_directory(self, path):
return [x for x in self._known_directories]
+ def is_executable(self, path):
+ # FIXME: figure out a way to make paths return true for this
+ return False
+
def _add_known_directory(self, directory):
if directory not in self._known_directories:
self._known_directories.append(directory)
diff --git a/test/units/module_utils/test_basic.py b/test/units/module_utils/test_basic.py
index e1e3399b93..1a2fbefd43 100644
--- a/test/units/module_utils/test_basic.py
+++ b/test/units/module_utils/test_basic.py
@@ -314,7 +314,7 @@ class TestModuleUtilsBasic(unittest.TestCase):
base_params = dict(
path = '/path/to/file',
- mode = 0600,
+ mode = 0o600,
owner = 'root',
group = 'root',
seuser = '_default',
@@ -711,9 +711,9 @@ class TestModuleUtilsBasic(unittest.TestCase):
)
mock_stat1 = MagicMock()
- mock_stat1.st_mode = 0444
+ mock_stat1.st_mode = 0o444
mock_stat2 = MagicMock()
- mock_stat2.st_mode = 0660
+ mock_stat2.st_mode = 0o660
with patch('os.lstat', side_effect=[mock_stat1]):
self.assertEqual(am.set_mode_if_different('/path/to/file', None, True), True)
@@ -723,13 +723,13 @@ class TestModuleUtilsBasic(unittest.TestCase):
with patch('os.lstat') as m:
with patch('os.lchmod', return_value=None, create=True) as m_os:
m.side_effect = [mock_stat1, mock_stat2, mock_stat2]
- self.assertEqual(am.set_mode_if_different('/path/to/file', 0660, False), True)
- m_os.assert_called_with('/path/to/file', 0660)
+ self.assertEqual(am.set_mode_if_different('/path/to/file', 0o660, False), True)
+ m_os.assert_called_with('/path/to/file', 0o660)
m.side_effect = [mock_stat1, mock_stat2, mock_stat2]
- am._symbolic_mode_to_octal = MagicMock(return_value=0660)
+ am._symbolic_mode_to_octal = MagicMock(return_value=0o660)
self.assertEqual(am.set_mode_if_different('/path/to/file', 'o+w,g+w,a-r', False), True)
- m_os.assert_called_with('/path/to/file', 0660)
+ m_os.assert_called_with('/path/to/file', 0o660)
m.side_effect = [mock_stat1, mock_stat2, mock_stat2]
am._symbolic_mode_to_octal = MagicMock(side_effect=Exception)
@@ -737,7 +737,7 @@ class TestModuleUtilsBasic(unittest.TestCase):
m.side_effect = [mock_stat1, mock_stat2, mock_stat2]
am.check_mode = True
- self.assertEqual(am.set_mode_if_different('/path/to/file', 0660, False), True)
+ self.assertEqual(am.set_mode_if_different('/path/to/file', 0o660, False), True)
am.check_mode = False
# FIXME: this isn't working yet
@@ -746,11 +746,11 @@ class TestModuleUtilsBasic(unittest.TestCase):
# del m_os.lchmod
# with patch('os.path.islink', return_value=False):
# with patch('os.chmod', return_value=None) as m_chmod:
- # self.assertEqual(am.set_mode_if_different('/path/to/file/no_lchmod', 0660, False), True)
- # m_chmod.assert_called_with('/path/to/file', 0660)
+ # self.assertEqual(am.set_mode_if_different('/path/to/file/no_lchmod', 0o660, False), True)
+ # m_chmod.assert_called_with('/path/to/file', 0o660)
# with patch('os.path.islink', return_value=True):
# with patch('os.chmod', return_value=None) as m_chmod:
# with patch('os.stat', return_value=mock_stat2):
- # self.assertEqual(am.set_mode_if_different('/path/to/file', 0660, False), True)
- # m_chmod.assert_called_with('/path/to/file', 0660)
+ # self.assertEqual(am.set_mode_if_different('/path/to/file', 0o660, False), True)
+ # m_chmod.assert_called_with('/path/to/file', 0o660)
diff --git a/test/units/parsing/test_addresses.py b/test/units/parsing/test_addresses.py
new file mode 100644
index 0000000000..bb6e51b22a
--- /dev/null
+++ b/test/units/parsing/test_addresses.py
@@ -0,0 +1,70 @@
+# -*- coding: utf-8 -*-
+
+import unittest
+
+from ansible.parsing.utils.addresses import parse_address
+
+class TestParseAddress(unittest.TestCase):
+
+ tests = {
+ # IPv4 addresses
+ '192.0.2.3': ['192.0.2.3', None],
+ '192.0.2.3:23': ['192.0.2.3', 23],
+
+ # IPv6 addresses
+ '::': ['::', None],
+ '::1': ['::1', None],
+ '[::1]:442': ['::1', 442],
+ 'abcd:ef98:7654:3210:abcd:ef98:7654:3210': ['abcd:ef98:7654:3210:abcd:ef98:7654:3210', None],
+ '[abcd:ef98:7654:3210:abcd:ef98:7654:3210]:42': ['abcd:ef98:7654:3210:abcd:ef98:7654:3210', 42],
+
+ # Hostnames
+ 'some-host': ['some-host', None],
+ 'some-host:80': ['some-host', 80],
+ 'some.host.com:492': ['some.host.com', 492],
+ '[some.host.com]:493': ['some.host.com', 493],
+ 'a-b.3foo_bar.com:23': ['a-b.3foo_bar.com', 23],
+ u'fóöbär': [u'fóöbär', None],
+ u'fóöbär:32': [u'fóöbär', 32],
+ u'fóöbär.éxàmplê.com:632': [u'fóöbär.éxàmplê.com', 632],
+
+ # Various errors
+ '': [None, None],
+ 'some..host': [None, None],
+ 'some.': [None, None],
+ '[example.com]': [None, None],
+ 'some-': [None, None],
+ 'some-.foo.com': [None, None],
+ 'some.-foo.com': [None, None],
+ }
+
+ range_tests = {
+ '192.0.2.[3:10]': ['192.0.2.[3:10]', None],
+ '192.0.2.[3:10]:23': ['192.0.2.[3:10]', 23],
+ 'abcd:ef98::7654:[1:9]': ['abcd:ef98::7654:[1:9]', None],
+ '[abcd:ef98::7654:[6:32]]:2222': ['abcd:ef98::7654:[6:32]', 2222],
+ '[abcd:ef98::7654:[9ab3:fcb7]]:2222': ['abcd:ef98::7654:[9ab3:fcb7]', 2222],
+ u'fóöb[a:c]r.éxàmplê.com:632': [u'fóöb[a:c]r.éxàmplê.com', 632],
+ '[a:b]foo.com': ['[a:b]foo.com', None],
+ 'foo[a:b].com': ['foo[a:b].com', None],
+ 'foo[a:b]:42': ['foo[a:b]', 42],
+ 'foo[a-b]-.com': [None, None],
+ 'foo[a-b]:32': [None, None],
+ 'foo[x-y]': [None, None],
+ }
+
+ def test_without_ranges(self):
+ for t in self.tests:
+ test = self.tests[t]
+
+ (host, port) = parse_address(t)
+ assert host == test[0]
+ assert port == test[1]
+
+ def test_with_ranges(self):
+ for t in self.range_tests:
+ test = self.range_tests[t]
+
+ (host, port) = parse_address(t, allow_ranges=True)
+ assert host == test[0]
+ assert port == test[1]
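The call shape, as pinned by the tables above: parse_address() returns a (host, port) tuple, (None, None) for unparseable input, and accepts range expressions only when allow_ranges=True:

    from ansible.parsing.utils.addresses import parse_address

    assert parse_address('[::1]:442') == ('::1', 442)
    assert parse_address('some..host') == (None, None)
    assert parse_address('192.0.2.[3:10]:23', allow_ranges=True) == ('192.0.2.[3:10]', 23)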
diff --git a/test/units/parsing/test_data_loader.py b/test/units/parsing/test_data_loader.py
index b9c37cdd0c..ecb10b09b8 100644
--- a/test/units/parsing/test_data_loader.py
+++ b/test/units/parsing/test_data_loader.py
@@ -19,7 +19,7 @@
from __future__ import (absolute_import, division, print_function)
__metaclass__ = type
-from six import PY2
+from six import PY3
from yaml.scanner import ScannerError
from ansible.compat.tests import unittest
@@ -66,7 +66,8 @@ class TestDataLoader(unittest.TestCase):
class TestDataLoaderWithVault(unittest.TestCase):
def setUp(self):
- self._loader = DataLoader(vault_password='ansible')
+ self._loader = DataLoader()
+ self._loader.set_vault_password('ansible')
def tearDown(self):
pass
@@ -80,10 +81,10 @@ class TestDataLoaderWithVault(unittest.TestCase):
3135306561356164310a343937653834643433343734653137383339323330626437313562306630
3035
"""
- if PY2:
- builtins_name = '__builtin__'
- else:
+ if PY3:
builtins_name = 'builtins'
+ else:
+ builtins_name = '__builtin__'
with patch(builtins_name + '.open', mock_open(read_data=vaulted_data)):
output = self._loader.load_from_file('dummy_vault.txt')
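The PY3-first ordering reads more naturally now that Python 2 is the legacy case: the module of built-ins is named builtins on Python 3 and __builtin__ on Python 2, so the patch target has to be computed. In isolation:

    from six import PY3

    builtins_name = 'builtins' if PY3 else '__builtin__'
    print(builtins_name + '.open')   # the patch target used above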
diff --git a/test/units/parsing/test_mod_args.py b/test/units/parsing/test_mod_args.py
index bce31d6f1f..1d5f817cb0 100644
--- a/test/units/parsing/test_mod_args.py
+++ b/test/units/parsing/test_mod_args.py
@@ -112,7 +112,7 @@ class TestModArgsDwim(unittest.TestCase):
mod, args, connection = m.parse()
self._debug(mod, args, connection)
self.assertEqual(mod, 'copy')
- self.assertEqual(args, dict(src='a', dest='b'))
+ self.assertEqual(args, dict(src='a', dest='b', _local_action=True))
self.assertIs(connection, 'local')
def test_multiple_actions(self):
diff --git a/test/units/parsing/vault/test_vault.py b/test/units/parsing/vault/test_vault.py
index b627158c79..ac5a500173 100644
--- a/test/units/parsing/vault/test_vault.py
+++ b/test/units/parsing/vault/test_vault.py
@@ -104,9 +104,10 @@ class TestVaultLib(unittest.TestCase):
raise SkipTest
v = VaultLib('ansible')
v.cipher_name = u'AES'
- enc_data = v.encrypt("foobar")
+ # AES encryption code has been removed, so this is old output for
+ # AES-encrypted 'foobar' with password 'ansible'.
+ enc_data = '$ANSIBLE_VAULT;1.1;AES\n53616c7465645f5fc107ce1ef4d7b455e038a13b053225776458052f8f8f332d554809d3f150bfa3\nfe3db930508b65e0ff5947e4386b79af8ab094017629590ef6ba486814cf70f8e4ab0ed0c7d2587e\n786a5a15efeb787e1958cbdd480d076c\n'
dec_data = v.decrypt(enc_data)
- assert enc_data != "foobar", "encryption failed"
assert dec_data == "foobar", "decryption failed"
def test_encrypt_decrypt_aes256(self):
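With the AES cipher removed, legacy v1.0 data is covered only by the pinned ciphertext above; new data round-trips through AES256. A sketch following the same VaultLib API this module uses (import path assumed from the test's location):

    from ansible.parsing.vault import VaultLib

    v = VaultLib('ansible')
    enc_data = v.encrypt("foobar")      # written with AES256 now
    assert enc_data != "foobar", "encryption failed"
    assert v.decrypt(enc_data) == "foobar", "decryption failed"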
diff --git a/test/units/parsing/vault/test_vault_editor.py b/test/units/parsing/vault/test_vault_editor.py
index 2ddf3de27a..e943b00868 100644
--- a/test/units/parsing/vault/test_vault_editor.py
+++ b/test/units/parsing/vault/test_vault_editor.py
@@ -81,7 +81,7 @@ class TestVaultEditor(unittest.TestCase):
pass
def test_methods_exist(self):
- v = VaultEditor(None, None, None)
+ v = VaultEditor(None)
slots = ['create_file',
'decrypt_file',
'edit_file',
@@ -103,8 +103,8 @@ class TestVaultEditor(unittest.TestCase):
tmp_file = tempfile.NamedTemporaryFile()
os.unlink(tmp_file.name)
- ve = VaultEditor(None, "ansible", tmp_file.name)
- ve.create_file()
+ ve = VaultEditor("ansible")
+ ve.create_file(tmp_file.name)
self.assertTrue(os.path.exists(tmp_file.name))
@@ -120,12 +120,12 @@ class TestVaultEditor(unittest.TestCase):
with v10_file as f:
f.write(to_bytes(v10_data))
- ve = VaultEditor(None, "ansible", v10_file.name)
+ ve = VaultEditor("ansible")
# make sure the password functions for the cipher
error_hit = False
try:
- ve.decrypt_file()
+ ve.decrypt_file(v10_file.name)
except errors.AnsibleError as e:
error_hit = True
@@ -148,12 +148,12 @@ class TestVaultEditor(unittest.TestCase):
with v11_file as f:
f.write(to_bytes(v11_data))
- ve = VaultEditor(None, "ansible", v11_file.name)
+ ve = VaultEditor("ansible")
# make sure the password functions for the cipher
error_hit = False
try:
- ve.decrypt_file()
+ ve.decrypt_file(v11_file.name)
except errors.AnsibleError as e:
error_hit = True
@@ -180,12 +180,12 @@ class TestVaultEditor(unittest.TestCase):
with v10_file as f:
f.write(to_bytes(v10_data))
- ve = VaultEditor(None, "ansible", v10_file.name)
+ ve = VaultEditor("ansible")
# make sure the password functions for the cipher
error_hit = False
try:
- ve.rekey_file('ansible2')
+ ve.rekey_file(v10_file.name, 'ansible2')
except errors.AnsibleError as e:
error_hit = True
diff --git a/test/units/playbook/test_attribute.py b/test/units/playbook/test_attribute.py
new file mode 100644
index 0000000000..10217a7ce9
--- /dev/null
+++ b/test/units/playbook/test_attribute.py
@@ -0,0 +1,55 @@
+# (c) 2015, Marius Gedminas <marius@gedmin.as>
+#
+# This file is part of Ansible
+#
+# Ansible is free software: you can redistribute it and/or modify
+# it under the terms of the GNU General Public License as published by
+# the Free Software Foundation, either version 3 of the License, or
+# (at your option) any later version.
+#
+# Ansible is distributed in the hope that it will be useful,
+# but WITHOUT ANY WARRANTY; without even the implied warranty of
+# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+# GNU General Public License for more details.
+#
+# You should have received a copy of the GNU General Public License
+# along with Ansible. If not, see <http://www.gnu.org/licenses/>.
+
+from ansible.compat.tests import unittest
+from ansible.playbook.attribute import Attribute
+
+
+class TestAttribute(unittest.TestCase):
+
+ def setUp(self):
+ self.one = Attribute(priority=100)
+ self.two = Attribute(priority=0)
+
+ def test_eq(self):
+ self.assertTrue(self.one == self.one)
+ self.assertFalse(self.one == self.two)
+
+ def test_ne(self):
+ self.assertFalse(self.one != self.one)
+ self.assertTrue(self.one != self.two)
+
+ def test_lt(self):
+ self.assertFalse(self.one < self.one)
+ self.assertTrue(self.one < self.two)
+ self.assertFalse(self.two < self.one)
+
+ def test_gt(self):
+ self.assertFalse(self.one > self.one)
+ self.assertFalse(self.one > self.two)
+ self.assertTrue(self.two > self.one)
+
+ def test_le(self):
+ self.assertTrue(self.one <= self.one)
+ self.assertTrue(self.one <= self.two)
+ self.assertFalse(self.two <= self.one)
+
+ def test_ge(self):
+ self.assertTrue(self.one >= self.one)
+ self.assertFalse(self.one >= self.two)
+ self.assertTrue(self.two >= self.one)
+
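Worth noting: the ordering is deliberately inverted, so an Attribute with a higher priority compares as less than one with a lower priority. Sorting a list ascending therefore puts the highest-priority attribute first:

    from ansible.playbook.attribute import Attribute

    attrs = [Attribute(priority=0), Attribute(priority=100), Attribute(priority=50)]
    assert [a.priority for a in sorted(attrs)] == [100, 50, 0]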
diff --git a/test/units/playbook/test_play_context.py b/test/units/playbook/test_play_context.py
index cfa2705fa9..2a3aab23ce 100644
--- a/test/units/playbook/test_play_context.py
+++ b/test/units/playbook/test_play_context.py
@@ -92,6 +92,7 @@ class TestPlayContext(unittest.TestCase):
mock_task.become_user = 'mocktaskroot'
mock_task.become_pass = 'mocktaskpass'
mock_task.no_log = False
+ mock_task._local_action = False
all_vars = dict(
ansible_connection = 'mock_inventory',
@@ -115,10 +116,10 @@ class TestPlayContext(unittest.TestCase):
default_cmd = "/bin/foo"
default_exe = "/bin/bash"
- sudo_exe = C.DEFAULT_SUDO_EXE
- sudo_flags = C.DEFAULT_SUDO_FLAGS + " -n "
- su_exe = C.DEFAULT_SU_EXE
- su_flags = C.DEFAULT_SU_FLAGS
+ sudo_exe = C.DEFAULT_SUDO_EXE or 'sudo'
+ sudo_flags = C.DEFAULT_SUDO_FLAGS
+ su_exe = C.DEFAULT_SU_EXE or 'su'
+ su_flags = C.DEFAULT_SU_FLAGS or ''
pbrun_exe = 'pbrun'
pbrun_flags = ''
pfexec_exe = 'pfexec'
@@ -134,7 +135,12 @@ class TestPlayContext(unittest.TestCase):
play_context.become_method = 'sudo'
cmd = play_context.make_become_cmd(cmd=default_cmd, executable="/bin/bash")
- self.assertEqual(cmd, """%s -c '%s %s -S -p "%s" -u %s %s -c '"'"'echo %s; %s'"'"''""" % (default_exe, sudo_exe, sudo_flags, play_context.prompt, play_context.become_user, default_exe, play_context.success_key, default_cmd))
+ self.assertEqual(cmd, """%s -c '%s %s -n -S -u %s %s -c '"'"'echo %s; %s'"'"''""" % (default_exe, sudo_exe, sudo_flags, play_context.become_user, default_exe, play_context.success_key, default_cmd))
+ play_context.become_pass = 'testpass'
+ cmd = play_context.make_become_cmd(cmd=default_cmd, executable=default_exe)
+ self.assertEqual(cmd, """%s -c '%s %s -p "%s" -S -u %s %s -c '"'"'echo %s; %s'"'"''""" % (default_exe, sudo_exe, sudo_flags, play_context.prompt, play_context.become_user, default_exe, play_context.success_key, default_cmd))
+
+ play_context.become_pass = None
play_context.become_method = 'su'
cmd = play_context.make_become_cmd(cmd=default_cmd, executable="/bin/bash")
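The updated expectations encode the new sudo flag handling: with no become password the wrapper runs sudo non-interactively (-n), and the -p "prompt" flag appears only once become_pass is set. A sketch mirroring the calls above (setup abbreviated; assumes the same defaults this test relies on):

    from ansible.playbook.play_context import PlayContext

    pc = PlayContext()
    pc.become = True
    pc.become_method = 'sudo'
    pc.become_user = 'root'

    cmd = pc.make_become_cmd(cmd='/bin/foo', executable='/bin/bash')
    assert ' -n ' in cmd       # no password: non-interactive sudo

    pc.become_pass = 'testpass'
    cmd = pc.make_become_cmd(cmd='/bin/foo', executable='/bin/bash')
    assert '-p "' in cmd       # password set: expect a prompt, fed via -S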
diff --git a/test/units/plugins/action/test_action.py b/test/units/plugins/action/test_action.py
new file mode 100644
index 0000000000..24fd325993
--- /dev/null
+++ b/test/units/plugins/action/test_action.py
@@ -0,0 +1,57 @@
+# (c) 2015, Florian Apolloner <florian@apolloner.eu>
+#
+# This file is part of Ansible
+#
+# Ansible is free software: you can redistribute it and/or modify
+# it under the terms of the GNU General Public License as published by
+# the Free Software Foundation, either version 3 of the License, or
+# (at your option) any later version.
+#
+# Ansible is distributed in the hope that it will be useful,
+# but WITHOUT ANY WARRANTY; without even the implied warranty of
+# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+# GNU General Public License for more details.
+#
+# You should have received a copy of the GNU General Public License
+# along with Ansible. If not, see <http://www.gnu.org/licenses/>.
+
+# Make coding more python3-ish
+from __future__ import (absolute_import, division, print_function)
+__metaclass__ = type
+
+
+from ansible import constants as C
+from ansible.compat.tests import unittest
+from ansible.compat.tests.mock import Mock
+from ansible.playbook.play_context import PlayContext
+from ansible.plugins.action import ActionBase
+
+
+class TestActionBase(unittest.TestCase):
+
+ def test_sudo_only_if_user_differs(self):
+ play_context = PlayContext()
+ action_base = ActionBase(None, None, play_context, None, None, None)
+ action_base._connection = Mock(exec_command=Mock(return_value=(0, '', '', '')))
+
+ play_context.become = True
+ play_context.become_user = play_context.remote_user = 'root'
+ play_context.make_become_cmd = Mock(return_value='CMD')
+
+ action_base._low_level_execute_command('ECHO', '/tmp', sudoable=True)
+ play_context.make_become_cmd.assert_not_called()
+
+ play_context.remote_user = 'apo'
+ action_base._low_level_execute_command('ECHO', '/tmp', sudoable=True)
+ play_context.make_become_cmd.assert_called_once_with('ECHO', executable=None)
+
+ play_context.make_become_cmd.reset_mock()
+
+ become_allow_same_user = C.BECOME_ALLOW_SAME_USER
+ C.BECOME_ALLOW_SAME_USER = True
+ try:
+ play_context.remote_user = 'root'
+ action_base._low_level_execute_command('ECHO SAME', '/tmp', sudoable=True)
+ play_context.make_become_cmd.assert_called_once_with('ECHO SAME', executable=None)
+ finally:
+ C.BECOME_ALLOW_SAME_USER = become_allow_same_user
diff --git a/test/units/plugins/action/test_add_host.py b/test/units/plugins/action/test_add_host.py
deleted file mode 100644
index c694d387a3..0000000000
--- a/test/units/plugins/action/test_add_host.py
+++ /dev/null
@@ -1,47 +0,0 @@
-import unittest
-
-from ansible.plugins.action import add_host
-
-
-class TestAddHost(unittest.TestCase):
-
- def test_hostname(self):
- host, port = add_host._parse_ip_host_and_port('some-remote-host')
- assert host == 'some-remote-host'
- assert port is None
-
- def test_hostname_with_port(self):
- host, port = add_host._parse_ip_host_and_port('some-remote-host:80')
- assert host == 'some-remote-host'
- assert port == '80'
-
- def test_parse_ip_host_and_port_v4(self):
- host, port = add_host._parse_ip_host_and_port('8.8.8.8')
- assert host == '8.8.8.8'
- assert port is None
-
- def test_parse_ip_host_and_port_v4_and_port(self):
- host, port = add_host._parse_ip_host_and_port('8.8.8.8:80')
- assert host == '8.8.8.8'
- assert port == '80'
-
- def test_parse_ip_host_and_port_v6(self):
- host, port = add_host._parse_ip_host_and_port(
- 'dead:beef:dead:beef:dead:beef:dead:beef'
- )
- assert host == 'dead:beef:dead:beef:dead:beef:dead:beef'
- assert port is None
-
- def test_parse_ip_host_and_port_v6_with_brackets(self):
- host, port = add_host._parse_ip_host_and_port(
- '[dead:beef:dead:beef:dead:beef:dead:beef]'
- )
- assert host == 'dead:beef:dead:beef:dead:beef:dead:beef'
- assert port is None
-
- def test_parse_ip_host_and_port_v6_with_brackets_and_port(self):
- host, port = add_host._parse_ip_host_and_port(
- '[dead:beef:dead:beef:dead:beef:dead:beef]:80'
- )
- assert host == 'dead:beef:dead:beef:dead:beef:dead:beef'
- assert port == '80'
diff --git a/test/units/plugins/cache/test_cache.py b/test/units/plugins/cache/test_cache.py
index f3cfe6a38c..af1d924910 100644
--- a/test/units/plugins/cache/test_cache.py
+++ b/test/units/plugins/cache/test_cache.py
@@ -19,7 +19,8 @@
from __future__ import (absolute_import, division, print_function)
__metaclass__ = type
-from ansible.compat.tests import unittest
+from ansible.compat.tests import unittest, mock
+from ansible.plugins.cache import FactCache
from ansible.plugins.cache.base import BaseCacheModule
from ansible.plugins.cache.memory import CacheModule as MemoryCache
@@ -42,6 +43,20 @@ else:
from ansible.plugins.cache.redis import CacheModule as RedisCache
+class TestFactCache(unittest.TestCase):
+
+ def setUp(self):
+ with mock.patch('ansible.constants.CACHE_PLUGIN', 'memory'):
+ self.cache = FactCache()
+
+ def test_copy(self):
+ self.cache['avocado'] = 'fruit'
+ self.cache['daisy'] = 'flower'
+ a_copy = self.cache.copy()
+ self.assertEqual(type(a_copy), dict)
+ self.assertEqual(a_copy, dict(avocado='fruit', daisy='flower'))
+
+
class TestAbstractClass(unittest.TestCase):
def setUp(self):
diff --git a/test/units/plugins/connections/test_connection.py b/test/units/plugins/connections/test_connection.py
index aba94b5a61..10fa44216d 100644
--- a/test/units/plugins/connections/test_connection.py
+++ b/test/units/plugins/connections/test_connection.py
@@ -24,16 +24,16 @@ from six import StringIO
from ansible.compat.tests import unittest
from ansible.playbook.play_context import PlayContext
-from ansible.plugins.connections import ConnectionBase
-#from ansible.plugins.connections.accelerate import Connection as AccelerateConnection
-#from ansible.plugins.connections.chroot import Connection as ChrootConnection
-#from ansible.plugins.connections.funcd import Connection as FuncdConnection
-#from ansible.plugins.connections.jail import Connection as JailConnection
-#from ansible.plugins.connections.libvirt_lxc import Connection as LibvirtLXCConnection
-from ansible.plugins.connections.local import Connection as LocalConnection
-from ansible.plugins.connections.paramiko_ssh import Connection as ParamikoConnection
-from ansible.plugins.connections.ssh import Connection as SSHConnection
-#from ansible.plugins.connections.winrm import Connection as WinRmConnection
+from ansible.plugins.connection import ConnectionBase
+#from ansible.plugins.connection.accelerate import Connection as AccelerateConnection
+#from ansible.plugins.connection.chroot import Connection as ChrootConnection
+#from ansible.plugins.connection.funcd import Connection as FuncdConnection
+#from ansible.plugins.connection.jail import Connection as JailConnection
+#from ansible.plugins.connection.libvirt_lxc import Connection as LibvirtLXCConnection
+from ansible.plugins.connection.local import Connection as LocalConnection
+from ansible.plugins.connection.paramiko_ssh import Connection as ParamikoConnection
+from ansible.plugins.connection.ssh import Connection as SSHConnection
+#from ansible.plugins.connection.winrm import Connection as WinRmConnection
class TestConnectionBaseClass(unittest.TestCase):
diff --git a/test/units/plugins/lookup/test_password.py b/test/units/plugins/lookup/test_password.py
new file mode 100644
index 0000000000..46a5bd2be6
--- /dev/null
+++ b/test/units/plugins/lookup/test_password.py
@@ -0,0 +1,136 @@
+# -*- coding: utf-8 -*-
+# (c) 2015, Toshio Kuratomi <tkuratomi@ansible.com>
+#
+# This file is part of Ansible
+#
+# Ansible is free software: you can redistribute it and/or modify
+# it under the terms of the GNU General Public License as published by
+# the Free Software Foundation, either version 3 of the License, or
+# (at your option) any later version.
+#
+# Ansible is distributed in the hope that it will be useful,
+# but WITHOUT ANY WARRANTY; without even the implied warranty of
+# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+# GNU General Public License for more details.
+#
+# You should have received a copy of the GNU General Public License
+# along with Ansible. If not, see <http://www.gnu.org/licenses/>.
+
+# Make coding more python3-ish
+from __future__ import (absolute_import, division, print_function)
+__metaclass__ = type
+
+from ansible.compat.tests import unittest
+
+from ansible.plugins.lookup.password import LookupModule, _parse_parameters, DEFAULT_LENGTH
+
+DEFAULT_CHARS = sorted([u'ascii_letters', u'digits', u".,:-_"])
+
+class TestPasswordLookup(unittest.TestCase):
+
+ # Currently there isn't a new-style parameter syntax, so all of these are old-style params.
+ old_style_params_data = (
+ # Simple case
+ dict(term=u'/path/to/file',
+ filename=u'/path/to/file',
+ params=dict(length=DEFAULT_LENGTH, encrypt=None, chars=DEFAULT_CHARS)
+ ),
+
+ # Special characters in path
+ dict(term=u'/path/with/embedded spaces and/file',
+ filename=u'/path/with/embedded spaces and/file',
+ params=dict(length=DEFAULT_LENGTH, encrypt=None, chars=DEFAULT_CHARS)
+ ),
+ dict(term=u'/path/with/equals/cn=com.ansible',
+ filename=u'/path/with/equals/cn=com.ansible',
+ params=dict(length=DEFAULT_LENGTH, encrypt=None, chars=DEFAULT_CHARS)
+ ),
+ dict(term=u'/path/with/unicode/くらとみ/file',
+ filename=u'/path/with/unicode/くらとみ/file',
+ params=dict(length=DEFAULT_LENGTH, encrypt=None, chars=DEFAULT_CHARS)
+ ),
+ # Mix several special chars
+ dict(term=u'/path/with/utf 8 and spaces/くらとみ/file',
+ filename=u'/path/with/utf 8 and spaces/くらとみ/file',
+ params=dict(length=DEFAULT_LENGTH, encrypt=None, chars=DEFAULT_CHARS)
+ ),
+ dict(term=u'/path/with/encoding=unicode/くらとみ/file',
+ filename=u'/path/with/encoding=unicode/くらとみ/file',
+ params=dict(length=DEFAULT_LENGTH, encrypt=None, chars=DEFAULT_CHARS)
+ ),
+ dict(term=u'/path/with/encoding=unicode/くらとみ/and spaces file',
+ filename=u'/path/with/encoding=unicode/くらとみ/and spaces file',
+ params=dict(length=DEFAULT_LENGTH, encrypt=None, chars=DEFAULT_CHARS)
+ ),
+
+ # Simple parameters
+ dict(term=u'/path/to/file length=42',
+ filename=u'/path/to/file',
+ params=dict(length=42, encrypt=None, chars=DEFAULT_CHARS)
+ ),
+ dict(term=u'/path/to/file encrypt=pbkdf2_sha256',
+ filename=u'/path/to/file',
+ params=dict(length=DEFAULT_LENGTH, encrypt='pbkdf2_sha256', chars=DEFAULT_CHARS)
+ ),
+ dict(term=u'/path/to/file chars=abcdefghijklmnop',
+ filename=u'/path/to/file',
+ params=dict(length=DEFAULT_LENGTH, encrypt=None, chars=[u'abcdefghijklmnop'])
+ ),
+ dict(term=u'/path/to/file chars=digits,abc,def',
+ filename=u'/path/to/file',
+ params=dict(length=DEFAULT_LENGTH, encrypt=None, chars=sorted([u'digits', u'abc', u'def']))
+ ),
+ # Including comma in chars
+ dict(term=u'/path/to/file chars=abcdefghijklmnop,,digits',
+ filename=u'/path/to/file',
+ params=dict(length=DEFAULT_LENGTH, encrypt=None, chars=sorted([u'abcdefghijklmnop', u',', u'digits']))
+ ),
+ dict(term=u'/path/to/file chars=,,',
+ filename=u'/path/to/file',
+ params=dict(length=DEFAULT_LENGTH, encrypt=None, chars=[u','])
+ ),
+
+ # Including = in chars
+ dict(term=u'/path/to/file chars=digits,=,,',
+ filename=u'/path/to/file',
+ params=dict(length=DEFAULT_LENGTH, encrypt=None, chars=sorted([u'digits', u'=', u',']))
+ ),
+ dict(term=u'/path/to/file chars=digits,abc=def',
+ filename=u'/path/to/file',
+ params=dict(length=DEFAULT_LENGTH, encrypt=None, chars=sorted([u'digits', u'abc=def']))
+ ),
+
+ # Including unicode in chars
+ dict(term=u'/path/to/file chars=digits,くらとみ,,',
+ filename=u'/path/to/file',
+ params=dict(length=DEFAULT_LENGTH, encrypt=None, chars=sorted([u'digits', u'くらとみ', u',']))
+ ),
+
+ # Including special chars in both path and chars
+ # Special characters in path
+ dict(term=u'/path/with/embedded spaces and/file chars=abc=def',
+ filename=u'/path/with/embedded spaces and/file',
+ params=dict(length=DEFAULT_LENGTH, encrypt=None, chars=[u'abc=def'])
+ ),
+ dict(term=u'/path/with/equals/cn=com.ansible chars=abc=def',
+ filename=u'/path/with/equals/cn=com.ansible',
+ params=dict(length=DEFAULT_LENGTH, encrypt=None, chars=[u'abc=def'])
+ ),
+ dict(term=u'/path/with/unicode/くらとみ/file chars=くらとみ',
+ filename=u'/path/with/unicode/くらとみ/file',
+ params=dict(length=DEFAULT_LENGTH, encrypt=None, chars=[u'くらとみ'])
+ ),
+ )
+
+ def setUp(self):
+ pass
+
+ def tearDown(self):
+ pass
+
+ def test_parse_parameters(self):
+ for testcase in self.old_style_params_data:
+ filename, params = _parse_parameters(testcase['term'])
+ params['chars'].sort()
+ self.assertEqual(filename, testcase['filename'])
+ self.assertEqual(params, testcase['params'])
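Combining two of the single-parameter cases above into one term (assuming multiple k=v pairs combine as usual) shows the parser's output shape, a filename plus a params dict with length, encrypt, and chars keys:

    from ansible.plugins.lookup.password import _parse_parameters

    filename, params = _parse_parameters(u'/path/to/file length=42 chars=digits,abc=def')
    assert filename == u'/path/to/file'
    assert params['length'] == 42
    assert sorted(params['chars']) == sorted([u'digits', u'abc=def'])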
diff --git a/test/units/plugins/strategies/test_strategy_base.py b/test/units/plugins/strategies/test_strategy_base.py
index eb85b9c771..bf01cf6fcc 100644
--- a/test/units/plugins/strategies/test_strategy_base.py
+++ b/test/units/plugins/strategies/test_strategy_base.py
@@ -23,7 +23,7 @@ from ansible.compat.tests import unittest
from ansible.compat.tests.mock import patch, MagicMock
from ansible.errors import AnsibleError, AnsibleParserError
-from ansible.plugins.strategies import StrategyBase
+from ansible.plugins.strategy import StrategyBase
from ansible.executor.task_queue_manager import TaskQueueManager
from ansible.executor.task_result import TaskResult
diff --git a/test/units/plugins/test_plugins.py b/test/units/plugins/test_plugins.py
index 0d0fe400d0..76a2600f53 100644
--- a/test/units/plugins/test_plugins.py
+++ b/test/units/plugins/test_plugins.py
@@ -26,7 +26,7 @@ from ansible.compat.tests import BUILTINS
from ansible.compat.tests.mock import mock_open, patch, MagicMock
-from ansible.plugins import MODULE_CACHE, PATH_CACHE, PLUGIN_PATH_CACHE, _basedirs, push_basedir, PluginLoader
+from ansible.plugins import MODULE_CACHE, PATH_CACHE, PLUGIN_PATH_CACHE, PluginLoader
class TestErrors(unittest.TestCase):
diff --git a/test/units/template/test_safe_eval.py b/test/units/template/test_safe_eval.py
index 785fc45992..531244d15a 100644
--- a/test/units/template/test_safe_eval.py
+++ b/test/units/template/test_safe_eval.py
@@ -19,3 +19,33 @@
from __future__ import (absolute_import, division, print_function)
__metaclass__ = type
+import sys
+from collections import defaultdict
+
+from ansible.compat.tests import unittest
+from ansible.compat.tests.mock import patch, MagicMock
+
+from ansible.template.safe_eval import safe_eval
+
+class TestSafeEval(unittest.TestCase):
+
+ def setUp(self):
+ pass
+
+ def tearDown(self):
+ pass
+
+ def test_safe_eval_usage(self):
+ # test safe eval calls with different possible types for the
+ # locals dictionary, to ensure we don't run into problems like
+ # ansible/ansible/issues/12206 again
+ for locals_vars in (dict(), defaultdict(dict)):
+ self.assertEqual(safe_eval('True', locals=locals_vars), True)
+ self.assertEqual(safe_eval('False', locals=locals_vars), False)
+ self.assertEqual(safe_eval('0', locals=locals_vars), 0)
+ self.assertEqual(safe_eval('[]', locals=locals_vars), [])
+ self.assertEqual(safe_eval('{}', locals=locals_vars), {})
+
+ @unittest.skipUnless(sys.version_info[:2] >= (2, 7), "Python 2.6 has no set literals")
+ def test_set_literals(self):
+ self.assertEqual(safe_eval('{0}'), set([0]))
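The regression this guards against, in isolation: safe_eval has to accept any mapping for locals, including a defaultdict:

    from collections import defaultdict
    from ansible.template.safe_eval import safe_eval

    for locals_vars in (dict(), defaultdict(dict)):
        assert safe_eval('[1, 2, 3]', locals=locals_vars) == [1, 2, 3]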
diff --git a/test/units/template/test_templar.py b/test/units/template/test_templar.py
index e4627c2c61..a029754f6e 100644
--- a/test/units/template/test_templar.py
+++ b/test/units/template/test_templar.py
@@ -25,7 +25,7 @@ from ansible.compat.tests.mock import patch, MagicMock
from ansible import constants as C
from ansible.errors import *
from ansible.plugins import filter_loader, lookup_loader, module_loader
-from ansible.plugins.strategies import SharedPluginLoaderObj
+from ansible.plugins.strategy import SharedPluginLoaderObj
from ansible.template import Templar
from units.mock.loader import DictDataLoader
@@ -33,12 +33,6 @@ from units.mock.loader import DictDataLoader
class TestTemplar(unittest.TestCase):
def setUp(self):
- pass
-
- def tearDown(self):
- pass
-
- def test_templar_simple(self):
fake_loader = DictDataLoader({
"/path/to/my_file.txt": "foo\n",
})
@@ -54,12 +48,19 @@ class TestTemplar(unittest.TestCase):
var_list=[1],
recursive="{{recursive}}",
)
- templar = Templar(loader=fake_loader, variables=variables)
+ self.templar = Templar(loader=fake_loader, variables=variables)
+ def tearDown(self):
+ pass
+
+ def test_templar_simple(self):
+
+ templar = self.templar
# test some basic templating
self.assertEqual(templar.template("{{foo}}"), "bar")
- self.assertEqual(templar.template("{{foo}}\n"), "bar")
+ self.assertEqual(templar.template("{{foo}}\n"), "bar\n")
self.assertEqual(templar.template("{{foo}}\n", preserve_trailing_newlines=True), "bar\n")
+ self.assertEqual(templar.template("{{foo}}\n", preserve_trailing_newlines=False), "bar")
self.assertEqual(templar.template("foo", convert_bare=True), "bar")
self.assertEqual(templar.template("{{bam}}"), "bar")
self.assertEqual(templar.template("{{num}}"), 1)
@@ -88,6 +89,20 @@ class TestTemplar(unittest.TestCase):
# variables must be a dict() for set_available_variables()
self.assertRaises(AssertionError, templar.set_available_variables, "foo=bam")
+ def test_templar_escape_backslashes(self):
+ # Rule of thumb: if escape_backslashes is True, you should end up with
+ # the same number of backslashes as you started with.
+ self.assertEqual(self.templar.template("\t{{foo}}", escape_backslashes=True), "\tbar")
+ self.assertEqual(self.templar.template("\t{{foo}}", escape_backslashes=False), "\tbar")
+ self.assertEqual(self.templar.template("\\{{foo}}", escape_backslashes=True), "\\bar")
+ self.assertEqual(self.templar.template("\\{{foo}}", escape_backslashes=False), "\\bar")
+ self.assertEqual(self.templar.template("\\{{foo + '\t' }}", escape_backslashes=True), "\\bar\t")
+ self.assertEqual(self.templar.template("\\{{foo + '\t' }}", escape_backslashes=False), "\\bar\t")
+ self.assertEqual(self.templar.template("\\{{foo + '\\t' }}", escape_backslashes=True), "\\bar\\t")
+ self.assertEqual(self.templar.template("\\{{foo + '\\t' }}", escape_backslashes=False), "\\bar\t")
+ self.assertEqual(self.templar.template("\\{{foo + '\\\\t' }}", escape_backslashes=True), "\\bar\\\\t")
+ self.assertEqual(self.templar.template("\\{{foo + '\\\\t' }}", escape_backslashes=False), "\\bar\\t")
+
def test_template_jinja2_extensions(self):
fake_loader = DictDataLoader({})
templar = Templar(loader=fake_loader)
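That rule of thumb in isolation: backslashes inside a Jinja2 expression survive templating when escape_backslashes=True and are interpreted as escape sequences when it is False. A sketch using the same fixtures as this module:

    from ansible.template import Templar
    from units.mock.loader import DictDataLoader

    templar = Templar(loader=DictDataLoader({}), variables=dict(foo='bar'))
    assert templar.template("\\{{foo + '\\t' }}", escape_backslashes=True) == "\\bar\\t"
    assert templar.template("\\{{foo + '\\t' }}", escape_backslashes=False) == "\\bar\t"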
diff --git a/test/units/template/test_template_utilities.py b/test/units/template/test_template_utilities.py
new file mode 100644
index 0000000000..da0ed0db5e
--- /dev/null
+++ b/test/units/template/test_template_utilities.py
@@ -0,0 +1,114 @@
+# (c) 2015 Toshio Kuratomi <tkuratomi@ansible.com>
+#
+# This file is part of Ansible
+#
+# Ansible is free software: you can redistribute it and/or modify
+# it under the terms of the GNU General Public License as published by
+# the Free Software Foundation, either version 3 of the License, or
+# (at your option) any later version.
+#
+# Ansible is distributed in the hope that it will be useful,
+# but WITHOUT ANY WARRANTY; without even the implied warranty of
+# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+# GNU General Public License for more details.
+#
+# You should have received a copy of the GNU General Public License
+# along with Ansible. If not, see <http://www.gnu.org/licenses/>.
+
+# Make coding more python3-ish
+from __future__ import (absolute_import, division, print_function)
+__metaclass__ = type
+
+import jinja2
+from ansible.compat.tests import unittest
+
+from ansible.template import _escape_backslashes, _count_newlines_from_end
+
+# These are internal utility functions only needed for templating. They're
+# algorithmic, so they are good candidates for unit testing by themselves.
+
+class TestBackslashEscape(unittest.TestCase):
+
+ test_data = (
+ # Test backslashes in a filter arg are double escaped
+ dict(
+ template=u"{{ 'test2 %s' | format('\\1') }}",
+ intermediate=u"{{ 'test2 %s' | format('\\\\1') }}",
+ expectation=u"test2 \\1",
+ args=dict()
+ ),
+ # Test backslashes inside the jinja2 var itself are double
+ # escaped
+ dict(
+ template=u"Test 2\\3: {{ '\\1 %s' | format('\\2') }}",
+ intermediate=u"Test 2\\3: {{ '\\\\1 %s' | format('\\\\2') }}",
+ expectation=u"Test 2\\3: \\1 \\2",
+ args=dict()
+ ),
+ # Test backslashes outside of the jinja2 var are not double
+ # escaped
+ dict(
+ template=u"Test 2\\3: {{ 'test2 %s' | format('\\1') }}; \\done",
+ intermediate=u"Test 2\\3: {{ 'test2 %s' | format('\\\\1') }}; \\done",
+ expectation=u"Test 2\\3: test2 \\1; \\done",
+ args=dict()
+ ),
+ # Test backslashes in a variable sent to a filter are handled
+ dict(
+ template=u"{{ 'test2 %s' | format(var1) }}",
+ intermediate=u"{{ 'test2 %s' | format(var1) }}",
+ expectation=u"test2 \\1",
+ args=dict(var1=u'\\1')
+ ),
+ # Test backslashes in a variable expanded by jinja2 are double
+ # escaped
+ dict(
+ template=u"Test 2\\3: {{ var1 | format('\\2') }}",
+ intermediate=u"Test 2\\3: {{ var1 | format('\\\\2') }}",
+ expectation=u"Test 2\\3: \\1 \\2",
+ args=dict(var1=u'\\1 %s')
+ ),
+ )
+ def setUp(self):
+ self.env = jinja2.Environment()
+
+ def tearDown(self):
+ pass
+
+ def test_backslash_escaping(self):
+
+ for test in self.test_data:
+ intermediate = _escape_backslashes(test['template'], self.env)
+ self.assertEqual(intermediate, test['intermediate'])
+ template = jinja2.Template(intermediate)
+ args = test['args']
+ self.assertEqual(template.render(**args), test['expectation'])
+
+class TestCountNewlines(unittest.TestCase):
+
+ def setUp(self):
+ pass
+
+ def tearDown(self):
+ pass
+
+ def test_zero_length_string(self):
+ self.assertEqual(_count_newlines_from_end(u''), 0)
+
+ def test_short_string(self):
+ self.assertEqual(_count_newlines_from_end(u'The quick\n'), 1)
+
+ def test_one_newline(self):
+ self.assertEqual(_count_newlines_from_end(u'The quick brown fox jumped over the lazy dog' * 1000 + u'\n'), 1)
+
+ def test_multiple_newlines(self):
+ self.assertEqual(_count_newlines_from_end(u'The quick brown fox jumped over the lazy dog' * 1000 + u'\n\n\n'), 3)
+
+ def test_zero_newlines(self):
+ self.assertEqual(_count_newlines_from_end(u'The quick brown fox jumped over the lazy dog' * 1000), 0)
+
+ def test_all_newlines(self):
+ self.assertEqual(_count_newlines_from_end(u'\n' * 10), 10)
+
+ def test_mostly_newlines(self):
+ self.assertEqual(_count_newlines_from_end(u'The quick brown fox jumped over the lazy dog' + u'\n' * 1000), 1000)
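Both helpers in isolation, using cases drawn from the tables above:

    import jinja2
    from ansible.template import _escape_backslashes, _count_newlines_from_end

    env = jinja2.Environment()
    intermediate = _escape_backslashes(u"{{ 'test2 %s' | format('\\1') }}", env)
    assert intermediate == u"{{ 'test2 %s' | format('\\\\1') }}"

    assert _count_newlines_from_end(u'The quick\n\n\n') == 3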
diff --git a/v1/ansible/modules/__init__.py b/test/units/utils/__init__.py
index e69de29bb2..e69de29bb2 100644
--- a/v1/ansible/modules/__init__.py
+++ b/test/units/utils/__init__.py
diff --git a/test/units/utils/test_vars.py b/test/units/utils/test_vars.py
new file mode 100644
index 0000000000..aba05c41d4
--- /dev/null
+++ b/test/units/utils/test_vars.py
@@ -0,0 +1,98 @@
+# (c) 2012-2014, Michael DeHaan <michael.dehaan@gmail.com>
+# (c) 2015, Toshio Kuratomi <tkuratomi@ansible.com>
+#
+# This file is part of Ansible
+#
+# Ansible is free software: you can redistribute it and/or modify
+# it under the terms of the GNU General Public License as published by
+# the Free Software Foundation, either version 3 of the License, or
+# (at your option) any later version.
+#
+# Ansible is distributed in the hope that it will be useful,
+# but WITHOUT ANY WARRANTY; without even the implied warranty of
+# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+# GNU General Public License for more details.
+#
+# You should have received a copy of the GNU General Public License
+# along with Ansible. If not, see <http://www.gnu.org/licenses/>.
+
+# Make coding more python3-ish
+from __future__ import (absolute_import, division, print_function)
+__metaclass__ = type
+
+from collections import defaultdict
+
+from ansible.compat.tests import mock, unittest
+from ansible.errors import AnsibleError
+
+from ansible.utils.vars import combine_vars, merge_hash
+
+class TestVariableUtils(unittest.TestCase):
+
+ test_merge_data = (
+ dict(
+ a=dict(a=1),
+ b=dict(b=2),
+ result=dict(a=1, b=2)
+ ),
+ dict(
+ a=dict(a=1, c=dict(foo='bar')),
+ b=dict(b=2, c=dict(baz='bam')),
+ result=dict(a=1, b=2, c=dict(foo='bar', baz='bam'))
+ ),
+ dict(
+ a=defaultdict(a=1, c=defaultdict(foo='bar')),
+ b=dict(b=2, c=dict(baz='bam')),
+ result=defaultdict(a=1, b=2, c=defaultdict(foo='bar', baz='bam'))
+ ),
+ )
+ test_replace_data = (
+ dict(
+ a=dict(a=1),
+ b=dict(b=2),
+ result=dict(a=1, b=2)
+ ),
+ dict(
+ a=dict(a=1, c=dict(foo='bar')),
+ b=dict(b=2, c=dict(baz='bam')),
+ result=dict(a=1, b=2, c=dict(baz='bam'))
+ ),
+ dict(
+ a=defaultdict(a=1, c=dict(foo='bar')),
+ b=dict(b=2, c=defaultdict(baz='bam')),
+ result=defaultdict(a=1, b=2, c=defaultdict(baz='bam'))
+ ),
+ )
+
+ def setUp(self):
+ pass
+
+ def tearDown(self):
+ pass
+
+ def test_merge_hash(self):
+ for test in self.test_merge_data:
+ self.assertEqual(merge_hash(test['a'], test['b']), test['result'])
+
+ def test_improper_args(self):
+ with mock.patch('ansible.constants.DEFAULT_HASH_BEHAVIOUR', 'replace'):
+ with self.assertRaises(AnsibleError):
+ combine_vars([1, 2, 3], dict(a=1))
+ with self.assertRaises(AnsibleError):
+ combine_vars(dict(a=1), [1, 2, 3])
+
+ with mock.patch('ansible.constants.DEFAULT_HASH_BEHAVIOUR', 'merge'):
+ with self.assertRaises(AnsibleError):
+ combine_vars([1, 2, 3], dict(a=1))
+ with self.assertRaises(AnsibleError):
+ combine_vars(dict(a=1), [1, 2, 3])
+
+ def test_combine_vars_replace(self):
+ with mock.patch('ansible.constants.DEFAULT_HASH_BEHAVIOUR', 'replace'):
+ for test in self.test_replace_data:
+ self.assertEqual(combine_vars(test['a'], test['b']), test['result'])
+
+ def test_combine_vars_merge(self):
+ with mock.patch('ansible.constants.DEFAULT_HASH_BEHAVIOUR', 'merge'):
+ for test in self.test_merge_data:
+ self.assertEqual(combine_vars(test['a'], test['b']), test['result'])
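The distinction the two tables capture, in isolation: merge_hash always deep-merges nested dicts, while combine_vars consults the configured hash behaviour and, under the default 'replace', overwrites conflicting top-level keys wholesale:

    from ansible.compat.tests import mock
    from ansible.utils.vars import combine_vars, merge_hash

    a = dict(a=1, c=dict(foo='bar'))
    b = dict(b=2, c=dict(baz='bam'))

    assert merge_hash(a, b) == dict(a=1, b=2, c=dict(foo='bar', baz='bam'))

    with mock.patch('ansible.constants.DEFAULT_HASH_BEHAVIOUR', 'replace'):
        assert combine_vars(a, b) == dict(a=1, b=2, c=dict(baz='bam'))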
diff --git a/test/units/vars/test_variable_manager.py b/test/units/vars/test_variable_manager.py
index 9d500d04d8..688426cfef 100644
--- a/test/units/vars/test_variable_manager.py
+++ b/test/units/vars/test_variable_manager.py
@@ -19,9 +19,13 @@
from __future__ import (absolute_import, division, print_function)
__metaclass__ = type
+from collections import defaultdict
+from six import iteritems
+
from ansible.compat.tests import unittest
from ansible.compat.tests.mock import patch, MagicMock
-
+from ansible.inventory import Inventory
+from ansible.playbook.play import Play
from ansible.vars import VariableManager
from units.mock.loader import DictDataLoader
@@ -48,20 +52,6 @@ class TestVariableManager(unittest.TestCase):
self.assertEqual(vars, dict(playbook_dir='.'))
- self.assertEqual(
- v._merge_dicts(
- dict(a=1),
- dict(b=2)
- ), dict(a=1, b=2)
- )
- self.assertEqual(
- v._merge_dicts(
- dict(a=1, c=dict(foo='bar')),
- dict(b=2, c=dict(baz='bam'))
- ), dict(a=1, b=2, c=dict(foo='bar', baz='bam'))
- )
-
-
def test_variable_manager_extra_vars(self):
fake_loader = DictDataLoader({})
@@ -71,7 +61,7 @@ class TestVariableManager(unittest.TestCase):
vars = v.get_vars(loader=fake_loader, use_cache=False)
- for (key, val) in extra_vars.iteritems():
+ for (key, val) in iteritems(extra_vars):
self.assertEqual(vars.get(key), val)
self.assertIsNot(v.extra_vars, extra_vars)
@@ -80,20 +70,33 @@ class TestVariableManager(unittest.TestCase):
fake_loader = DictDataLoader({
"host_vars/hostname1.yml": """
foo: bar
- """
+ """,
+ "other_path/host_vars/hostname1.yml": """
+ foo: bam
+ baa: bat
+ """,
+ "host_vars/host.name.yml": """
+ host_with_dots: true
+ """,
})
v = VariableManager()
v.add_host_vars_file("host_vars/hostname1.yml", loader=fake_loader)
+ v.add_host_vars_file("other_path/host_vars/hostname1.yml", loader=fake_loader)
self.assertIn("hostname1", v._host_vars_files)
- self.assertEqual(v._host_vars_files["hostname1"], dict(foo="bar"))
+ self.assertEqual(v._host_vars_files["hostname1"], [dict(foo="bar"), dict(foo="bam", baa="bat")])
mock_host = MagicMock()
mock_host.get_name.return_value = "hostname1"
mock_host.get_vars.return_value = dict()
mock_host.get_groups.return_value = ()
+ mock_host.get_group_vars.return_value = dict()
- self.assertEqual(v.get_vars(loader=fake_loader, host=mock_host, use_cache=False).get("foo"), "bar")
+ self.assertEqual(v.get_vars(loader=fake_loader, host=mock_host, use_cache=False).get("foo"), "bam")
+ self.assertEqual(v.get_vars(loader=fake_loader, host=mock_host, use_cache=False).get("baa"), "bat")
+
+ v.add_host_vars_file("host_vars/host.name", loader=fake_loader)
+ self.assertEqual(v._host_vars_files["host.name"], [dict(host_with_dots=True)])
def test_variable_manager_group_vars_file(self):
fake_loader = DictDataLoader({
@@ -102,15 +105,22 @@ class TestVariableManager(unittest.TestCase):
""",
"group_vars/somegroup.yml": """
bam: baz
- """
+ """,
+ "other_path/group_vars/somegroup.yml": """
+ baa: bat
+ """,
+ "group_vars/some.group.yml": """
+ group_with_dots: true
+ """,
})
v = VariableManager()
v.add_group_vars_file("group_vars/all.yml", loader=fake_loader)
v.add_group_vars_file("group_vars/somegroup.yml", loader=fake_loader)
+ v.add_group_vars_file("other_path/group_vars/somegroup.yml", loader=fake_loader)
self.assertIn("somegroup", v._group_vars_files)
- self.assertEqual(v._group_vars_files["all"], dict(foo="bar"))
- self.assertEqual(v._group_vars_files["somegroup"], dict(bam="baz"))
+ self.assertEqual(v._group_vars_files["all"], [dict(foo="bar")])
+ self.assertEqual(v._group_vars_files["somegroup"], [dict(bam="baz"), dict(baa="bat")])
mock_group = MagicMock()
mock_group.name = "somegroup"
@@ -121,10 +131,14 @@ class TestVariableManager(unittest.TestCase):
mock_host.get_name.return_value = "hostname1"
mock_host.get_vars.return_value = dict()
mock_host.get_groups.return_value = (mock_group,)
+ mock_host.get_group_vars.return_value = dict()
vars = v.get_vars(loader=fake_loader, host=mock_host, use_cache=False)
self.assertEqual(vars.get("foo"), "bar")
- self.assertEqual(vars.get("bam"), "baz")
+ self.assertEqual(vars.get("baa"), "bat")
+
+ v.add_group_vars_file("group_vars/some.group", loader=fake_loader)
+ self.assertEqual(v._group_vars_files["some.group"], [dict(group_with_dots=True)])
def test_variable_manager_play_vars(self):
fake_loader = DictDataLoader({})
@@ -162,3 +176,109 @@ class TestVariableManager(unittest.TestCase):
v = VariableManager()
self.assertEqual(v.get_vars(loader=fake_loader, task=mock_task, use_cache=False).get("foo"), "bar")
+ def test_variable_manager_precedence(self):
+ '''
+ Tests complex variations and combinations of get_vars() with different
+ objects to modify the context under which variables are merged.
+ '''
+
+ v = VariableManager()
+ v._fact_cache = defaultdict(dict)
+
+ fake_loader = DictDataLoader({
+ # inventory1
+ '/etc/ansible/inventory1': """
+ [group2:children]
+ group1
+
+ [group1]
+ host1 host_var=host_var_from_inventory_host1
+
+ [group1:vars]
+ group_var = group_var_from_inventory_group1
+
+ [group2:vars]
+ group_var = group_var_from_inventory_group2
+ """,
+
+ # role defaults_only1
+ '/etc/ansible/roles/defaults_only1/defaults/main.yml': """
+ default_var: "default_var_from_defaults_only1"
+ host_var: "host_var_from_defaults_only1"
+ group_var: "group_var_from_defaults_only1"
+ group_var_all: "group_var_all_from_defaults_only1"
+ extra_var: "extra_var_from_defaults_only1"
+ """,
+ '/etc/ansible/roles/defaults_only1/tasks/main.yml': """
+ - debug: msg="here i am"
+ """,
+
+ # role defaults_only2
+ '/etc/ansible/roles/defaults_only2/defaults/main.yml': """
+ default_var: "default_var_from_defaults_only2"
+ host_var: "host_var_from_defaults_only2"
+ group_var: "group_var_from_defaults_only2"
+ group_var_all: "group_var_all_from_defaults_only2"
+ extra_var: "extra_var_from_defaults_only2"
+ """,
+ })
+
+ inv1 = Inventory(loader=fake_loader, variable_manager=v, host_list='/etc/ansible/inventory1')
+ inv1.set_playbook_basedir('./')
+
+ play1 = Play.load(dict(
+ hosts=['all'],
+ roles=['defaults_only1', 'defaults_only2'],
+ ), loader=fake_loader, variable_manager=v)
+
+ # first we assert that the defaults, viewed as a whole, are the merged results
+ # of the defaults from each role, with the last-defined role "winning" when
+ # there is a variable naming conflict
+ res = v.get_vars(loader=fake_loader, play=play1)
+ self.assertEqual(res['default_var'], 'default_var_from_defaults_only2')
+
+ # next, we assert that when vars are viewed from the context of a task within a
+ # role, that task will see its own role defaults before any other role's
+ blocks = play1.compile()
+ task = blocks[1].block[0]
+ res = v.get_vars(loader=fake_loader, play=play1, task=task)
+ self.assertEqual(res['default_var'], 'default_var_from_defaults_only1')
+
+ # next we assert the precedence of inventory variables
+ v.set_inventory(inv1)
+ h1 = inv1.get_host('host1')
+
+ res = v.get_vars(loader=fake_loader, play=play1, host=h1)
+ self.assertEqual(res['group_var'], 'group_var_from_inventory_group1')
+ self.assertEqual(res['host_var'], 'host_var_from_inventory_host1')
+
+ # next we test with group_vars/ files loaded
+ fake_loader.push("/etc/ansible/group_vars/all", """
+ group_var_all: group_var_all_from_group_vars_all
+ """)
+ fake_loader.push("/etc/ansible/group_vars/group1", """
+ group_var: group_var_from_group_vars_group1
+ """)
+ fake_loader.push("/etc/ansible/group_vars/group3", """
+ # this is a dummy, which should not be used anywhere
+ group_var: group_var_from_group_vars_group3
+ """)
+ fake_loader.push("/etc/ansible/host_vars/host1", """
+ host_var: host_var_from_host_vars_host1
+ """)
+
+ v.add_group_vars_file("/etc/ansible/group_vars/all", loader=fake_loader)
+ v.add_group_vars_file("/etc/ansible/group_vars/group1", loader=fake_loader)
+ v.add_group_vars_file("/etc/ansible/group_vars/group2", loader=fake_loader)
+ v.add_host_vars_file("/etc/ansible/host_vars/host1", loader=fake_loader)
+
+ res = v.get_vars(loader=fake_loader, play=play1, host=h1)
+ self.assertEqual(res['group_var'], 'group_var_from_group_vars_group1')
+ self.assertEqual(res['group_var_all'], 'group_var_all_from_group_vars_all')
+ self.assertEqual(res['host_var'], 'host_var_from_host_vars_host1')
+
+ # add in the fact cache
+ v._fact_cache['host1'] = dict(fact_cache_var="fact_cache_var_from_fact_cache")
+
+ res = v.get_vars(loader=fake_loader, play=play1, host=h1)
+ self.assertEqual(res['fact_cache_var'], 'fact_cache_var_from_fact_cache')
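Taken together, the assertions pin these pairwise precedence relations (each left side loses to its right side; facts from the cache are layered in last here):

    # role defaults         < inventory group vars and inventory host vars
    # parent group vars     < child group vars
    # inventory group vars  < group_vars/ files
    # inventory host vars   < host_vars/ files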
diff --git a/tox.ini b/tox.ini
index ad3d37b521..02a3d71f2b 100644
--- a/tox.ini
+++ b/tox.ini
@@ -1,5 +1,5 @@
[tox]
-envlist = {py26,py27}
+envlist = {py26,py27,py34}
[testenv]
commands = make tests
@@ -9,7 +9,7 @@ whitelist_externals = make
[testenv:py26]
commands =
python --version
- python -m compileall -fq -x 'test|samples|contrib/inventory/vagrant.py' .
+ python -m compileall -fq -x 'test|samples|contrib/inventory/vagrant.py' lib test contrib
make tests
deps = -r{toxinidir}/test-requirements.txt
whitelist_externals =
@@ -18,7 +18,7 @@ whitelist_externals =
[testenv:py27]
commands =
python --version
- python -m compileall -fq -x 'test|samples' .
+ python -m compileall -fq -x 'test|samples' lib test contrib
make tests
deps = -r{toxinidir}/test-requirements.txt
whitelist_externals = make
@@ -26,7 +26,8 @@ whitelist_externals = make
[testenv:py34]
commands =
python --version
- python -m compileall -fq -x 'lib/ansible/module_utils' lib
- make tests
+ python -m compileall -fq -x 'lib/ansible/module_utils|lib/ansible/modules' lib test contrib
+ # Unittests need lots of work to make the code python3 compatible
+ #make tests
deps = -r{toxinidir}/test-requirements.txt
whitelist_externals = make
diff --git a/v1/README.md b/v1/README.md
deleted file mode 100644
index 011851da06..0000000000
--- a/v1/README.md
+++ /dev/null
@@ -1,11 +0,0 @@
-This is dead code; it is here as a convenience for those testing current devel, so they can ascertain whether a bug was introduced in the v2 rewrite or was preexisting in the 1.x codebase.
-Using this code should be equivalent to checking out the v1_last tag, which was devel at a point between the 1.9.1 and 1.9.2 releases.
-The stable-1.9 branch is the maintenance branch for the 1.9.x code, which may continue to diverge from the v1/ tree as bugs get fixed.
-
-DO NOT:
-
- * use this code as reference
- * make PRs against this code
- * expect this code to be shipped with the 2.0 version of ansible
-
-
diff --git a/v1/ansible/cache/__init__.py b/v1/ansible/cache/__init__.py
deleted file mode 100644
index 4100861c14..0000000000
--- a/v1/ansible/cache/__init__.py
+++ /dev/null
@@ -1,61 +0,0 @@
-# (c) 2014, Michael DeHaan <michael.dehaan@gmail.com>
-#
-# This file is part of Ansible
-#
-# Ansible is free software: you can redistribute it and/or modify
-# it under the terms of the GNU General Public License as published by
-# the Free Software Foundation, either version 3 of the License, or
-# (at your option) any later version.
-#
-# Ansible is distributed in the hope that it will be useful,
-# but WITHOUT ANY WARRANTY; without even the implied warranty of
-# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
-# GNU General Public License for more details.
-#
-# You should have received a copy of the GNU General Public License
-# along with Ansible. If not, see <http://www.gnu.org/licenses/>.
-
-from collections import MutableMapping
-
-from ansible import utils
-from ansible import constants as C
-from ansible import errors
-
-
-class FactCache(MutableMapping):
-
- def __init__(self, *args, **kwargs):
- self._plugin = utils.plugins.cache_loader.get(C.CACHE_PLUGIN)
- if self._plugin is None:
- return
-
- def __getitem__(self, key):
- if key not in self:
- raise KeyError
- return self._plugin.get(key)
-
- def __setitem__(self, key, value):
- self._plugin.set(key, value)
-
- def __delitem__(self, key):
- self._plugin.delete(key)
-
- def __contains__(self, key):
- return self._plugin.contains(key)
-
- def __iter__(self):
- return iter(self._plugin.keys())
-
- def __len__(self):
- return len(self._plugin.keys())
-
- def copy(self):
- """ Return a primitive copy of the keys and values from the cache. """
- return dict([(k, v) for (k, v) in self.iteritems()])
-
- def keys(self):
- return self._plugin.keys()
-
- def flush(self):
- """ Flush the fact cache of all keys. """
- self._plugin.flush()
diff --git a/v1/ansible/cache/base.py b/v1/ansible/cache/base.py
deleted file mode 100644
index b6254cdfd4..0000000000
--- a/v1/ansible/cache/base.py
+++ /dev/null
@@ -1,41 +0,0 @@
-# (c) 2014, Brian Coca, Josh Drake, et al
-#
-# This file is part of Ansible
-#
-# Ansible is free software: you can redistribute it and/or modify
-# it under the terms of the GNU General Public License as published by
-# the Free Software Foundation, either version 3 of the License, or
-# (at your option) any later version.
-#
-# Ansible is distributed in the hope that it will be useful,
-# but WITHOUT ANY WARRANTY; without even the implied warranty of
-# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
-# GNU General Public License for more details.
-#
-# You should have received a copy of the GNU General Public License
-# along with Ansible. If not, see <http://www.gnu.org/licenses/>.
-
-import exceptions
-
-class BaseCacheModule(object):
-
- def get(self, key):
- raise exceptions.NotImplementedError
-
- def set(self, key, value):
- raise exceptions.NotImplementedError
-
- def keys(self):
- raise exceptions.NotImplementedError
-
- def contains(self, key):
- raise exceptions.NotImplementedError
-
- def delete(self, key):
- raise exceptions.NotImplementedError
-
- def flush(self):
- raise exceptions.NotImplementedError
-
- def copy(self):
- raise exceptions.NotImplementedError
diff --git a/v1/ansible/cache/jsonfile.py b/v1/ansible/cache/jsonfile.py
deleted file mode 100644
index 0bade893a8..0000000000
--- a/v1/ansible/cache/jsonfile.py
+++ /dev/null
@@ -1,143 +0,0 @@
-# (c) 2014, Brian Coca, Josh Drake, et al
-#
-# This file is part of Ansible
-#
-# Ansible is free software: you can redistribute it and/or modify
-# it under the terms of the GNU General Public License as published by
-# the Free Software Foundation, either version 3 of the License, or
-# (at your option) any later version.
-#
-# Ansible is distributed in the hope that it will be useful,
-# but WITHOUT ANY WARRANTY; without even the implied warranty of
-# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
-# GNU General Public License for more details.
-#
-# You should have received a copy of the GNU General Public License
-# along with Ansible. If not, see <http://www.gnu.org/licenses/>.
-
-import os
-import time
-import errno
-import codecs
-
-try:
- import simplejson as json
-except ImportError:
- import json
-
-from ansible import constants as C
-from ansible import utils
-from ansible.cache.base import BaseCacheModule
-
-class CacheModule(BaseCacheModule):
- """
- A caching module backed by json files.
- """
- def __init__(self, *args, **kwargs):
-
- self._timeout = float(C.CACHE_PLUGIN_TIMEOUT)
- self._cache = {}
- self._cache_dir = C.CACHE_PLUGIN_CONNECTION # expects a dir path
- if not self._cache_dir:
- utils.exit("error, fact_caching_connection is not set, cannot use fact cache")
-
- if not os.path.exists(self._cache_dir):
- try:
- os.makedirs(self._cache_dir)
- except (OSError,IOError), e:
- utils.warning("error while trying to create cache dir %s : %s" % (self._cache_dir, str(e)))
- return None
-
- def get(self, key):
-
- if key in self._cache:
- return self._cache.get(key)
-
- if self.has_expired(key):
- raise KeyError
-
- cachefile = "%s/%s" % (self._cache_dir, key)
- try:
- f = codecs.open(cachefile, 'r', encoding='utf-8')
- except (OSError,IOError), e:
- utils.warning("error while trying to read %s : %s" % (cachefile, str(e)))
- else:
- value = json.load(f)
- self._cache[key] = value
- return value
- finally:
- f.close()
-
- def set(self, key, value):
-
- self._cache[key] = value
-
- cachefile = "%s/%s" % (self._cache_dir, key)
- try:
- f = codecs.open(cachefile, 'w', encoding='utf-8')
- except (OSError,IOError), e:
- utils.warning("error while trying to write to %s : %s" % (cachefile, str(e)))
- else:
- f.write(utils.jsonify(value))
- finally:
- f.close()
-
- def has_expired(self, key):
-
- cachefile = "%s/%s" % (self._cache_dir, key)
- try:
- st = os.stat(cachefile)
- except (OSError,IOError), e:
- if e.errno == errno.ENOENT:
- return False
- else:
- utils.warning("error while trying to stat %s : %s" % (cachefile, str(e)))
-
- if time.time() - st.st_mtime <= self._timeout:
- return False
-
- if key in self._cache:
- del self._cache[key]
- return True
-
- def keys(self):
- keys = []
- for k in os.listdir(self._cache_dir):
- if not (k.startswith('.') or self.has_expired(k)):
- keys.append(k)
- return keys
-
- def contains(self, key):
- cachefile = "%s/%s" % (self._cache_dir, key)
-
- if key in self._cache:
- return True
-
- if self.has_expired(key):
- return False
- try:
- st = os.stat(cachefile)
- return True
- except (OSError,IOError), e:
- if e.errno == errno.ENOENT:
- return False
- else:
- utils.warning("error while trying to stat %s : %s" % (cachefile, str(e)))
-
- def delete(self, key):
- del self._cache[key]
- try:
- os.remove("%s/%s" % (self._cache_dir, key))
- except (OSError,IOError), e:
- pass # TODO: only ignore the error when the file does not exist?
-
- def flush(self):
- self._cache = {}
- for key in self.keys():
- self.delete(key)
-
- def copy(self):
- ret = dict()
- for key in self.keys():
- ret[key] = self.get(key)
- return ret
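
Note a latent bug in the get() and set() methods above: if codecs.open() raises, `f` is never bound, so the `finally: f.close()` clause raises NameError instead of reaching the intended warning path. A `with` block avoids the problem entirely; a minimal sketch of the read path under the same file-per-key, JSON-payload layout:

    import json
    import os

    def read_cached_fact(cache_dir, key):
        """Load one JSON fact file; raise KeyError if missing or unreadable."""
        cachefile = os.path.join(cache_dir, key)
        try:
            # 'with' closes the handle even if json.load() fails, and closes
            # nothing if open() itself raises -- no NameError possible
            with open(cachefile, encoding='utf-8') as f:
                return json.load(f)
        except (OSError, ValueError) as e:
            raise KeyError(key) from e
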
diff --git a/v1/ansible/cache/memcached.py b/v1/ansible/cache/memcached.py
deleted file mode 100644
index ea922434b5..0000000000
--- a/v1/ansible/cache/memcached.py
+++ /dev/null
@@ -1,191 +0,0 @@
-# (c) 2014, Brian Coca, Josh Drake, et al
-#
-# This file is part of Ansible
-#
-# Ansible is free software: you can redistribute it and/or modify
-# it under the terms of the GNU General Public License as published by
-# the Free Software Foundation, either version 3 of the License, or
-# (at your option) any later version.
-#
-# Ansible is distributed in the hope that it will be useful,
-# but WITHOUT ANY WARRANTY; without even the implied warranty of
-# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
-# GNU General Public License for more details.
-#
-# You should have received a copy of the GNU General Public License
-# along with Ansible. If not, see <http://www.gnu.org/licenses/>.
-
-import collections
-import os
-import sys
-import time
-import threading
-from itertools import chain
-
-from ansible import constants as C
-from ansible.cache.base import BaseCacheModule
-
-try:
- import memcache
-except ImportError:
- print 'python-memcached is required for the memcached fact cache'
- sys.exit(1)
-
-
-class ProxyClientPool(object):
- """
- Memcached connection pooling for thread/fork safety. Inspired by py-redis
- connection pool.
-
- Available connections are maintained in a deque and released in a FIFO manner.
- """
-
- def __init__(self, *args, **kwargs):
- self.max_connections = kwargs.pop('max_connections', 1024)
- self.connection_args = args
- self.connection_kwargs = kwargs
- self.reset()
-
- def reset(self):
- self.pid = os.getpid()
- self._num_connections = 0
- self._available_connections = collections.deque(maxlen=self.max_connections)
- self._locked_connections = set()
- self._lock = threading.Lock()
-
- def _check_safe(self):
- if self.pid != os.getpid():
- with self._lock:
- if self.pid == os.getpid():
- # bail out - another thread already acquired the lock
- return
- self.disconnect_all()
- self.reset()
-
- def get_connection(self):
- self._check_safe()
- try:
- connection = self._available_connections.popleft()
- except IndexError:
- connection = self.create_connection()
- self._locked_connections.add(connection)
- return connection
-
- def create_connection(self):
- if self._num_connections >= self.max_connections:
- raise RuntimeError("Too many memcached connections")
- self._num_connections += 1
- return memcache.Client(*self.connection_args, **self.connection_kwargs)
-
- def release_connection(self, connection):
- self._check_safe()
- self._locked_connections.remove(connection)
- self._available_connections.append(connection)
-
- def disconnect_all(self):
- for conn in chain(self._available_connections, self._locked_connections):
- conn.disconnect_all()
-
- def __getattr__(self, name):
- def wrapped(*args, **kwargs):
- return self._proxy_client(name, *args, **kwargs)
- return wrapped
-
- def _proxy_client(self, name, *args, **kwargs):
- conn = self.get_connection()
-
- try:
- return getattr(conn, name)(*args, **kwargs)
- finally:
- self.release_connection(conn)
-
-
-class CacheModuleKeys(collections.MutableSet):
- """
- A set subclass that keeps track of insertion time and persists
- the set in memcached.
- """
- PREFIX = 'ansible_cache_keys'
-
- def __init__(self, cache, *args, **kwargs):
- self._cache = cache
- self._keyset = dict(*args, **kwargs)
-
- def __contains__(self, key):
- return key in self._keyset
-
- def __iter__(self):
- return iter(self._keyset)
-
- def __len__(self):
- return len(self._keyset)
-
- def add(self, key):
- self._keyset[key] = time.time()
- self._cache.set(self.PREFIX, self._keyset)
-
- def discard(self, key):
- del self._keyset[key]
- self._cache.set(self.PREFIX, self._keyset)
-
- def remove_by_timerange(self, s_min, s_max):
- for k in self._keyset.keys():
- t = self._keyset[k]
- if s_min < t < s_max:
- del self._keyset[k]
- self._cache.set(self.PREFIX, self._keyset)
-
-
-class CacheModule(BaseCacheModule):
-
- def __init__(self, *args, **kwargs):
- if C.CACHE_PLUGIN_CONNECTION:
- connection = C.CACHE_PLUGIN_CONNECTION.split(',')
- else:
- connection = ['127.0.0.1:11211']
-
- self._timeout = C.CACHE_PLUGIN_TIMEOUT
- self._prefix = C.CACHE_PLUGIN_PREFIX
- self._cache = ProxyClientPool(connection, debug=0)
- self._keys = CacheModuleKeys(self._cache, self._cache.get(CacheModuleKeys.PREFIX) or [])
-
- def _make_key(self, key):
- return "{0}{1}".format(self._prefix, key)
-
- def _expire_keys(self):
- if self._timeout > 0:
- expiry_age = time.time() - self._timeout
- self._keys.remove_by_timerange(0, expiry_age)
-
- def get(self, key):
- value = self._cache.get(self._make_key(key))
- # guard against the key not being removed from the keyset;
- # this could happen in cases where the timeout value is changed
- # between invocations
- if value is None:
- self.delete(key)
- raise KeyError
- return value
-
- def set(self, key, value):
- self._cache.set(self._make_key(key), value, time=self._timeout, min_compress_len=1)
- self._keys.add(key)
-
- def keys(self):
- self._expire_keys()
- return list(iter(self._keys))
-
- def contains(self, key):
- self._expire_keys()
- return key in self._keys
-
- def delete(self, key):
- self._cache.delete(self._make_key(key))
- self._keys.discard(key)
-
- def flush(self):
- for key in self.keys():
- self.delete(key)
-
- def copy(self):
- return self._keys.copy()
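
The pid check in _check_safe() above is the classic fork-safety idiom the docstring credits to py-redis: after a fork(), the child sees a different os.getpid() and rebuilds its pool rather than sharing sockets with the parent, with a double-checked lock so only one thread performs the reset. A stripped-down sketch of the same pattern, assuming an arbitrary connect() factory:

    import collections
    import os
    import threading

    class ForkSafePool:
        """FIFO connection pool that resets itself after a fork (sketch)."""

        def __init__(self, connect, max_connections=1024):
            self._connect = connect
            self._max = max_connections
            self.reset()

        def reset(self):
            self.pid = os.getpid()
            self._free = collections.deque(maxlen=self._max)
            self._lock = threading.Lock()

        def _check_safe(self):
            if self.pid != os.getpid():          # we are in a forked child
                with self._lock:
                    if self.pid == os.getpid():  # another thread already reset
                        return
                    self.reset()                 # drop inherited connections

        def acquire(self):
            self._check_safe()
            try:
                return self._free.popleft()
            except IndexError:
                return self._connect()

        def release(self, conn):
            self._check_safe()
            self._free.append(conn)
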
diff --git a/v1/ansible/cache/memory.py b/v1/ansible/cache/memory.py
deleted file mode 100644
index 735ed32893..0000000000
--- a/v1/ansible/cache/memory.py
+++ /dev/null
@@ -1,44 +0,0 @@
-# (c) 2014, Brian Coca, Josh Drake, et al
-#
-# This file is part of Ansible
-#
-# Ansible is free software: you can redistribute it and/or modify
-# it under the terms of the GNU General Public License as published by
-# the Free Software Foundation, either version 3 of the License, or
-# (at your option) any later version.
-#
-# Ansible is distributed in the hope that it will be useful,
-# but WITHOUT ANY WARRANTY; without even the implied warranty of
-# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
-# GNU General Public License for more details.
-#
-# You should have received a copy of the GNU General Public License
-# along with Ansible. If not, see <http://www.gnu.org/licenses/>.
-
-from ansible.cache.base import BaseCacheModule
-
-class CacheModule(BaseCacheModule):
-
- def __init__(self, *args, **kwargs):
- self._cache = {}
-
- def get(self, key):
- return self._cache.get(key)
-
- def set(self, key, value):
- self._cache[key] = value
-
- def keys(self):
- return self._cache.keys()
-
- def contains(self, key):
- return key in self._cache
-
- def delete(self, key):
- del self._cache[key]
-
- def flush(self):
- self._cache = {}
-
- def copy(self):
- return self._cache.copy()
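
One portability nit in this trivial backend: on Python 3, dict.keys() returns a live view, so the delete-while-iterating style of flush() used by the memcached and redis backends would raise RuntimeError against it. A hedged one-line hardening:

    class CacheModule:
        def __init__(self):
            self._cache = {}

        def set(self, key, value):
            self._cache[key] = value

        def delete(self, key):
            del self._cache[key]

        def keys(self):
            # list(...) snapshots the keys, so callers may delete entries
            # while iterating without tripping RuntimeError on Python 3
            return list(self._cache)

        def flush(self):
            for key in self.keys():
                self.delete(key)
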
diff --git a/v1/ansible/cache/redis.py b/v1/ansible/cache/redis.py
deleted file mode 100644
index 7ae5ef74c1..0000000000
--- a/v1/ansible/cache/redis.py
+++ /dev/null
@@ -1,107 +0,0 @@
-# (c) 2014, Brian Coca, Josh Drake, et al
-#
-# This file is part of Ansible
-#
-# Ansible is free software: you can redistribute it and/or modify
-# it under the terms of the GNU General Public License as published by
-# the Free Software Foundation, either version 3 of the License, or
-# (at your option) any later version.
-#
-# Ansible is distributed in the hope that it will be useful,
-# but WITHOUT ANY WARRANTY; without even the implied warranty of
-# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
-# GNU General Public License for more details.
-#
-# You should have received a copy of the GNU General Public License
-# along with Ansible. If not, see <http://www.gnu.org/licenses/>.
-
-from __future__ import absolute_import
-import collections
-# FIXME: can we store these as something else before we ship it?
-import sys
-import time
-
-try:
- import simplejson as json
-except ImportError:
- import json
-
-from ansible import constants as C
-from ansible.utils import jsonify
-from ansible.cache.base import BaseCacheModule
-
-try:
- from redis import StrictRedis
-except ImportError:
- print "The 'redis' python module is required, 'pip install redis'"
- sys.exit(1)
-
-class CacheModule(BaseCacheModule):
- """
- A caching module backed by redis.
-
- Keys are maintained in a zset with their score being the timestamp
- when they are inserted. This allows for the usage of 'zremrangebyscore'
- to expire keys. This mechanism is used rather than a pattern-matched
- 'scan', for performance.
- """
- def __init__(self, *args, **kwargs):
- if C.CACHE_PLUGIN_CONNECTION:
- connection = C.CACHE_PLUGIN_CONNECTION.split(':')
- else:
- connection = []
-
- self._timeout = float(C.CACHE_PLUGIN_TIMEOUT)
- self._prefix = C.CACHE_PLUGIN_PREFIX
- self._cache = StrictRedis(*connection)
- self._keys_set = 'ansible_cache_keys'
-
- def _make_key(self, key):
- return self._prefix + key
-
- def get(self, key):
- value = self._cache.get(self._make_key(key))
- # guard against the key not being removed from the zset;
- # this could happen in cases where the timeout value is changed
- # between invocations
- if value is None:
- self.delete(key)
- raise KeyError
- return json.loads(value)
-
- def set(self, key, value):
- value2 = jsonify(value)
- if self._timeout > 0: # a timeout of 0 is handled as meaning 'never expire'
- self._cache.setex(self._make_key(key), int(self._timeout), value2)
- else:
- self._cache.set(self._make_key(key), value2)
-
- self._cache.zadd(self._keys_set, time.time(), key)
-
- def _expire_keys(self):
- if self._timeout > 0:
- expiry_age = time.time() - self._timeout
- self._cache.zremrangebyscore(self._keys_set, 0, expiry_age)
-
- def keys(self):
- self._expire_keys()
- return self._cache.zrange(self._keys_set, 0, -1)
-
- def contains(self, key):
- self._expire_keys()
- return (self._cache.zrank(self._keys_set, key) >= 0)
-
- def delete(self, key):
- self._cache.delete(self._make_key(key))
- self._cache.zrem(self._keys_set, key)
-
- def flush(self):
- for key in self.keys():
- self.delete(key)
-
- def copy(self):
- # FIXME: there is probably a better way to do this in redis
- ret = dict()
- for key in self.keys():
- ret[key] = self.get(key)
- return ret
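
The zadd() call in set() above uses the redis-py 2.x positional signature; redis-py 3.0 changed zadd() to take a mapping of members to scores, so that line breaks on modern clients. A hedged compatibility sketch, relying on the __version__ string the redis package exposes:

    import time

    import redis

    def track_key(cache, keys_set, key):
        """Record a cache key's insertion time in a redis sorted set."""
        if int(redis.__version__.split('.')[0]) >= 3:
            cache.zadd(keys_set, {key: time.time()})   # redis-py >= 3.0
        else:
            cache.zadd(keys_set, time.time(), key)     # redis-py 2.x
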
diff --git a/v1/ansible/callback_plugins/noop.py b/v1/ansible/callback_plugins/noop.py
deleted file mode 100644
index b5d5886874..0000000000
--- a/v1/ansible/callback_plugins/noop.py
+++ /dev/null
@@ -1,94 +0,0 @@
-# (C) 2012-2014, Michael DeHaan, <michael.dehaan@gmail.com>
-
-# This file is part of Ansible
-#
-# Ansible is free software: you can redistribute it and/or modify
-# it under the terms of the GNU General Public License as published by
-# the Free Software Foundation, either version 3 of the License, or
-# (at your option) any later version.
-#
-# Ansible is distributed in the hope that it will be useful,
-# but WITHOUT ANY WARRANTY; without even the implied warranty of
-# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
-# GNU General Public License for more details.
-#
-# You should have received a copy of the GNU General Public License
-# along with Ansible. If not, see <http://www.gnu.org/licenses/>.
-
-
-class CallbackModule(object):
-
- """
- This is an example Ansible callback file that does nothing. You can drop
- other classes in the same directory to define your own handlers. Methods
- you do not use can be omitted. If self.disabled is set to True, the plugin
- methods will not be called.
-
- Example uses include logging, emailing, storing info, etc.
- """
-
- def __init__(self):
- #if foo:
- # self.disabled = True
- pass
-
- def on_any(self, *args, **kwargs):
- pass
-
- def runner_on_failed(self, host, res, ignore_errors=False):
- pass
-
- def runner_on_ok(self, host, res):
- pass
-
- def runner_on_skipped(self, host, item=None):
- pass
-
- def runner_on_unreachable(self, host, res):
- pass
-
- def runner_on_no_hosts(self):
- pass
-
- def runner_on_async_poll(self, host, res, jid, clock):
- pass
-
- def runner_on_async_ok(self, host, res, jid):
- pass
-
- def runner_on_async_failed(self, host, res, jid):
- pass
-
- def playbook_on_start(self):
- pass
-
- def playbook_on_notify(self, host, handler):
- pass
-
- def playbook_on_no_hosts_matched(self):
- pass
-
- def playbook_on_no_hosts_remaining(self):
- pass
-
- def playbook_on_task_start(self, name, is_conditional):
- pass
-
- def playbook_on_vars_prompt(self, varname, private=True, prompt=None, encrypt=None, confirm=False, salt_size=None, salt=None, default=None):
- pass
-
- def playbook_on_setup(self):
- pass
-
- def playbook_on_import_for_host(self, host, imported_file):
- pass
-
- def playbook_on_not_import_for_host(self, host, missing_file):
- pass
-
- def playbook_on_play_start(self, name):
- pass
-
- def playbook_on_stats(self, stats):
- pass
-
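
As the docstring says, logging is the canonical use for these hooks, and any method not defined is simply skipped by the caller. A minimal sketch of a v1-style drop-in that timestamps task starts, writing to a hypothetical /tmp/ansible-tasks.log:

    import time

    class CallbackModule(object):
        """Append a timestamped line for every task start (sketch)."""

        def __init__(self):
            self.disabled = False  # set True to silence the plugin

        def playbook_on_task_start(self, name, is_conditional):
            with open('/tmp/ansible-tasks.log', 'a') as f:
                f.write('%s TASK %s\n' % (time.strftime('%H:%M:%S'), name))

        def playbook_on_stats(self, stats):
            with open('/tmp/ansible-tasks.log', 'a') as f:
                f.write('%s PLAYBOOK DONE\n' % time.strftime('%H:%M:%S'))
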
diff --git a/v1/ansible/callbacks.py b/v1/ansible/callbacks.py
deleted file mode 100644
index a7d2283cf0..0000000000
--- a/v1/ansible/callbacks.py
+++ /dev/null
@@ -1,729 +0,0 @@
-# (C) 2012-2014, Michael DeHaan, <michael.dehaan@gmail.com>
-
-# This file is part of Ansible
-#
-# Ansible is free software: you can redistribute it and/or modify
-# it under the terms of the GNU General Public License as published by
-# the Free Software Foundation, either version 3 of the License, or
-# (at your option) any later version.
-#
-# Ansible is distributed in the hope that it will be useful,
-# but WITHOUT ANY WARRANTY; without even the implied warranty of
-# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
-# GNU General Public License for more details.
-#
-# You should have received a copy of the GNU General Public License
-# along with Ansible. If not, see <http://www.gnu.org/licenses/>.
-
-import utils
-import sys
-import getpass
-import os
-import subprocess
-import random
-import fnmatch
-import tempfile
-import fcntl
-import constants
-import locale
-from ansible.color import stringc
-from ansible.module_utils import basic
-from ansible.utils.unicode import to_unicode, to_bytes
-
-import logging
-if constants.DEFAULT_LOG_PATH != '':
- path = constants.DEFAULT_LOG_PATH
-
- if (os.path.exists(path) and not os.access(path, os.W_OK)) and not os.access(os.path.dirname(path), os.W_OK):
- sys.stderr.write("log file at %s is not writeable, aborting\n" % path)
- sys.exit(1)
-
-
- logging.basicConfig(filename=path, level=logging.DEBUG, format='%(asctime)s %(name)s %(message)s')
- mypid = str(os.getpid())
- user = getpass.getuser()
- logger = logging.getLogger("p=%s u=%s | " % (mypid, user))
-
-callback_plugins = []
-
-def load_callback_plugins():
- global callback_plugins
- callback_plugins = [x for x in utils.plugins.callback_loader.all()]
-
-def get_cowsay_info():
- if constants.ANSIBLE_NOCOWS:
- return (None, None)
- cowsay = None
- if os.path.exists("/usr/bin/cowsay"):
- cowsay = "/usr/bin/cowsay"
- elif os.path.exists("/usr/games/cowsay"):
- cowsay = "/usr/games/cowsay"
- elif os.path.exists("/usr/local/bin/cowsay"):
- # BSD path for cowsay
- cowsay = "/usr/local/bin/cowsay"
- elif os.path.exists("/opt/local/bin/cowsay"):
- # MacPorts path for cowsay
- cowsay = "/opt/local/bin/cowsay"
-
- noncow = os.getenv("ANSIBLE_COW_SELECTION",None)
- if cowsay and noncow == 'random':
- cmd = subprocess.Popen([cowsay, "-l"], stdout=subprocess.PIPE, stderr=subprocess.PIPE)
- (out, err) = cmd.communicate()
- cows = out.split()
- cows.append(False)
- noncow = random.choice(cows)
- return (cowsay, noncow)
-
-cowsay, noncow = get_cowsay_info()
-
-def log_lockfile():
- # create the path for the lockfile and open it
- tempdir = tempfile.gettempdir()
- uid = os.getuid()
- path = os.path.join(tempdir, ".ansible-lock.%s" % uid)
- lockfile = open(path, 'w')
- # use fcntl to set FD_CLOEXEC on the file descriptor,
- # so that we don't leak the file descriptor later
- lockfile_fd = lockfile.fileno()
- old_flags = fcntl.fcntl(lockfile_fd, fcntl.F_GETFD)
- fcntl.fcntl(lockfile_fd, fcntl.F_SETFD, old_flags | fcntl.FD_CLOEXEC)
- return lockfile
-
-LOG_LOCK = log_lockfile()
-
-def log_flock(runner):
- if runner is not None:
- try:
- fcntl.lockf(runner.output_lockfile, fcntl.LOCK_EX)
- except OSError:
- # already got closed?
- pass
- else:
- try:
- fcntl.lockf(LOG_LOCK, fcntl.LOCK_EX)
- except OSError:
- pass
-
-
-def log_unflock(runner):
- if runner is not None:
- try:
- fcntl.lockf(runner.output_lockfile, fcntl.LOCK_UN)
- except OSError:
- # already got closed?
- pass
- else:
- try:
- fcntl.lockf(LOG_LOCK, fcntl.LOCK_UN)
- except OSError:
- pass
-
-def set_playbook(callback, playbook):
- ''' used to notify callback plugins of playbook context '''
- callback.playbook = playbook
- for callback_plugin in callback_plugins:
- callback_plugin.playbook = playbook
-
-def set_play(callback, play):
- ''' used to notify callback plugins of context '''
- callback.play = play
- for callback_plugin in callback_plugins:
- callback_plugin.play = play
-
-def set_task(callback, task):
- ''' used to notify callback plugins of context '''
- callback.task = task
- for callback_plugin in callback_plugins:
- callback_plugin.task = task
-
-def display(msg, color=None, stderr=False, screen_only=False, log_only=False, runner=None):
- # prevent a very rare case of interlaced multiprocess I/O
- log_flock(runner)
- msg2 = msg
- if color:
- msg2 = stringc(msg, color)
- if not log_only:
- if not stderr:
- try:
- print msg2
- except UnicodeEncodeError:
- print msg2.encode('utf-8')
- else:
- try:
- print >>sys.stderr, msg2
- except UnicodeEncodeError:
- print >>sys.stderr, msg2.encode('utf-8')
- if constants.DEFAULT_LOG_PATH != '':
- while msg.startswith("\n"):
- msg = msg.replace("\n","")
- if not screen_only:
- if color == 'red':
- logger.error(msg)
- else:
- logger.info(msg)
- log_unflock(runner)
-
-def call_callback_module(method_name, *args, **kwargs):
-
- for callback_plugin in callback_plugins:
- # a plugin that sets self.disabled to True will not be called
- # see osx_say.py example for such a plugin
- if getattr(callback_plugin, 'disabled', False):
- continue
- methods = [
- getattr(callback_plugin, method_name, None),
- getattr(callback_plugin, 'on_any', None)
- ]
- for method in methods:
- if method is not None:
- method(*args, **kwargs)
-
-def vv(msg, host=None):
- return verbose(msg, host=host, caplevel=1)
-
-def vvv(msg, host=None):
- return verbose(msg, host=host, caplevel=2)
-
-def vvvv(msg, host=None):
- return verbose(msg, host=host, caplevel=3)
-
-def verbose(msg, host=None, caplevel=2):
- msg = utils.sanitize_output(msg)
- if utils.VERBOSITY > caplevel:
- if host is None:
- display(msg, color='blue')
- else:
- display("<%s> %s" % (host, msg), color='blue')
-
-class AggregateStats(object):
- ''' holds stats about per-host activity during playbook runs '''
-
- def __init__(self):
-
- self.processed = {}
- self.failures = {}
- self.ok = {}
- self.dark = {}
- self.changed = {}
- self.skipped = {}
-
- def _increment(self, what, host):
- ''' helper function to bump a statistic '''
-
- self.processed[host] = 1
- prev = (getattr(self, what)).get(host, 0)
- getattr(self, what)[host] = prev+1
-
- def compute(self, runner_results, setup=False, poll=False, ignore_errors=False):
- ''' walk through all results and increment stats '''
-
- for (host, value) in runner_results.get('contacted', {}).iteritems():
- if not ignore_errors and (('failed' in value and bool(value['failed'])) or
- ('failed_when_result' in value and [value['failed_when_result']] or ['rc' in value and value['rc'] != 0])[0]):
- self._increment('failures', host)
- elif 'skipped' in value and bool(value['skipped']):
- self._increment('skipped', host)
- elif 'changed' in value and bool(value['changed']):
- if not setup and not poll:
- self._increment('changed', host)
- self._increment('ok', host)
- else:
- if not poll or ('finished' in value and bool(value['finished'])):
- self._increment('ok', host)
-
- for (host, value) in runner_results.get('dark', {}).iteritems():
- self._increment('dark', host)
-
-
- def summarize(self, host):
- ''' return information about a particular host '''
-
- return dict(
- ok = self.ok.get(host, 0),
- failures = self.failures.get(host, 0),
- unreachable = self.dark.get(host,0),
- changed = self.changed.get(host, 0),
- skipped = self.skipped.get(host, 0)
- )
-
-########################################################################
-
-def regular_generic_msg(hostname, result, oneline, caption):
- ''' output the result of a module run that is not the command module '''
-
- if not oneline:
- return "%s | %s >> %s\n" % (hostname, caption, utils.jsonify(result,format=True))
- else:
- return "%s | %s >> %s\n" % (hostname, caption, utils.jsonify(result))
-
-
-def banner_cowsay(msg):
-
- if ": [" in msg:
- msg = msg.replace("[","")
- if msg.endswith("]"):
- msg = msg[:-1]
- runcmd = [cowsay,"-W", "60"]
- if noncow:
- runcmd.append('-f')
- runcmd.append(noncow)
- runcmd.append(msg)
- cmd = subprocess.Popen(runcmd, stdout=subprocess.PIPE, stderr=subprocess.PIPE)
- (out, err) = cmd.communicate()
- return "%s\n" % out
-
-def banner_normal(msg):
-
- width = 78 - len(msg)
- if width < 3:
- width = 3
- filler = "*" * width
- return "\n%s %s " % (msg, filler)
-
-def banner(msg):
- if cowsay:
- try:
- return banner_cowsay(msg)
- except OSError:
- # somebody cleverly deleted cowsay or something during the PB run. heh.
- return banner_normal(msg)
- return banner_normal(msg)
-
-def command_generic_msg(hostname, result, oneline, caption):
- ''' output the result of a command run '''
-
- rc = result.get('rc', '0')
- stdout = result.get('stdout','')
- stderr = result.get('stderr', '')
- msg = result.get('msg', '')
-
- hostname = hostname.encode('utf-8')
- caption = caption.encode('utf-8')
-
- if not oneline:
- buf = "%s | %s | rc=%s >>\n" % (hostname, caption, result.get('rc',0))
- if stdout:
- buf += stdout
- if stderr:
- buf += stderr
- if msg:
- buf += msg
- return buf + "\n"
- else:
- if stderr:
- return "%s | %s | rc=%s | (stdout) %s (stderr) %s" % (hostname, caption, rc, stdout, stderr)
- else:
- return "%s | %s | rc=%s | (stdout) %s" % (hostname, caption, rc, stdout)
-
-def host_report_msg(hostname, module_name, result, oneline):
- ''' summarize the JSON results for a particular host '''
-
- failed = utils.is_failed(result)
- msg = ('', None)
- if module_name in [ 'command', 'shell', 'raw' ] and 'ansible_job_id' not in result and result.get('parsed',True) != False:
- if not failed:
- msg = (command_generic_msg(hostname, result, oneline, 'success'), 'green')
- else:
- msg = (command_generic_msg(hostname, result, oneline, 'FAILED'), 'red')
- else:
- if not failed:
- msg = (regular_generic_msg(hostname, result, oneline, 'success'), 'green')
- else:
- msg = (regular_generic_msg(hostname, result, oneline, 'FAILED'), 'red')
- return msg
-
-###############################################
-
-class DefaultRunnerCallbacks(object):
- ''' no-op callbacks for API usage of Runner() if no callbacks are specified '''
-
- def __init__(self):
- pass
-
- def on_failed(self, host, res, ignore_errors=False):
- call_callback_module('runner_on_failed', host, res, ignore_errors=ignore_errors)
-
- def on_ok(self, host, res):
- call_callback_module('runner_on_ok', host, res)
-
- def on_skipped(self, host, item=None):
- call_callback_module('runner_on_skipped', host, item=item)
-
- def on_unreachable(self, host, res):
- call_callback_module('runner_on_unreachable', host, res)
-
- def on_no_hosts(self):
- call_callback_module('runner_on_no_hosts')
-
- def on_async_poll(self, host, res, jid, clock):
- call_callback_module('runner_on_async_poll', host, res, jid, clock)
-
- def on_async_ok(self, host, res, jid):
- call_callback_module('runner_on_async_ok', host, res, jid)
-
- def on_async_failed(self, host, res, jid):
- call_callback_module('runner_on_async_failed', host, res, jid)
-
- def on_file_diff(self, host, diff):
- call_callback_module('runner_on_file_diff', host, diff)
-
-########################################################################
-
-class CliRunnerCallbacks(DefaultRunnerCallbacks):
- ''' callbacks for use by /usr/bin/ansible '''
-
- def __init__(self):
- # set by /usr/bin/ansible later
- self.options = None
- self._async_notified = {}
-
- def on_failed(self, host, res, ignore_errors=False):
- self._on_any(host,res)
- super(CliRunnerCallbacks, self).on_failed(host, res, ignore_errors=ignore_errors)
-
- def on_ok(self, host, res):
- # hide magic variables used for ansible-playbook
- res.pop('verbose_override', None)
- res.pop('verbose_always', None)
-
- self._on_any(host,res)
- super(CliRunnerCallbacks, self).on_ok(host, res)
-
- def on_unreachable(self, host, res):
- if type(res) == dict:
- res = res.get('msg','')
- display("%s | FAILED => %s" % (host, res), stderr=True, color='red', runner=self.runner)
- if self.options.tree:
- utils.write_tree_file(
- self.options.tree, host,
- utils.jsonify(dict(failed=True, msg=res),format=True)
- )
- super(CliRunnerCallbacks, self).on_unreachable(host, res)
-
- def on_skipped(self, host, item=None):
- display("%s | skipped" % (host), runner=self.runner)
- super(CliRunnerCallbacks, self).on_skipped(host, item)
-
- def on_no_hosts(self):
- display("no hosts matched\n", stderr=True, runner=self.runner)
- super(CliRunnerCallbacks, self).on_no_hosts()
-
- def on_async_poll(self, host, res, jid, clock):
- if jid not in self._async_notified:
- self._async_notified[jid] = clock + 1
- if self._async_notified[jid] > clock:
- self._async_notified[jid] = clock
- display("<job %s> polling on %s, %ss remaining" % (jid, host, clock), runner=self.runner)
- super(CliRunnerCallbacks, self).on_async_poll(host, res, jid, clock)
-
- def on_async_ok(self, host, res, jid):
- if jid:
- display("<job %s> finished on %s => %s"%(jid, host, utils.jsonify(res,format=True)), runner=self.runner)
- super(CliRunnerCallbacks, self).on_async_ok(host, res, jid)
-
- def on_async_failed(self, host, res, jid):
- display("<job %s> FAILED on %s => %s"%(jid, host, utils.jsonify(res,format=True)), color='red', stderr=True, runner=self.runner)
- super(CliRunnerCallbacks, self).on_async_failed(host,res,jid)
-
- def _on_any(self, host, result):
- result2 = result.copy()
- result2.pop('invocation', None)
- (msg, color) = host_report_msg(host, self.options.module_name, result2, self.options.one_line)
- display(msg, color=color, runner=self.runner)
- if self.options.tree:
- utils.write_tree_file(self.options.tree, host, utils.jsonify(result2,format=True))
-
- def on_file_diff(self, host, diff):
- display(utils.get_diff(diff), runner=self.runner)
- super(CliRunnerCallbacks, self).on_file_diff(host, diff)
-
-########################################################################
-
-class PlaybookRunnerCallbacks(DefaultRunnerCallbacks):
- ''' callbacks used for Runner() from /usr/bin/ansible-playbook '''
-
- def __init__(self, stats, verbose=None):
-
- if verbose is None:
- verbose = utils.VERBOSITY
-
- self.verbose = verbose
- self.stats = stats
- self._async_notified = {}
-
- def on_unreachable(self, host, results):
- if self.runner.delegate_to:
- host = '%s -> %s' % (host, self.runner.delegate_to)
-
- item = None
- if type(results) == dict:
- item = results.get('item', None)
- if isinstance(item, unicode):
- item = utils.unicode.to_bytes(item)
- results = basic.json_dict_unicode_to_bytes(results)
- else:
- results = utils.unicode.to_bytes(results)
- host = utils.unicode.to_bytes(host)
- if item:
- msg = "fatal: [%s] => (item=%s) => %s" % (host, item, results)
- else:
- msg = "fatal: [%s] => %s" % (host, results)
- display(msg, color='red', runner=self.runner)
- super(PlaybookRunnerCallbacks, self).on_unreachable(host, results)
-
- def on_failed(self, host, results, ignore_errors=False):
- if self.runner.delegate_to:
- host = '%s -> %s' % (host, self.runner.delegate_to)
-
- results2 = results.copy()
- results2.pop('invocation', None)
-
- item = results2.get('item', None)
- parsed = results2.get('parsed', True)
- module_msg = ''
- if not parsed:
- module_msg = results2.pop('msg', None)
- stderr = results2.pop('stderr', None)
- stdout = results2.pop('stdout', None)
- returned_msg = results2.pop('msg', None)
-
- results2['task'] = self.task.name
- results2['role'] = self.task.role_name
- results2['playbook'] = self.playbook.filename
-
- if item:
- msg = "failed: [%s] => (item=%s) => %s" % (host, item, utils.jsonify(results2))
- else:
- msg = "failed: [%s] => %s" % (host, utils.jsonify(results2))
- display(msg, color='red', runner=self.runner)
-
- if stderr:
- display("stderr: %s" % stderr, color='red', runner=self.runner)
- if stdout:
- display("stdout: %s" % stdout, color='red', runner=self.runner)
- if returned_msg:
- display("msg: %s" % returned_msg, color='red', runner=self.runner)
- if not parsed and module_msg:
- display(module_msg, color='red', runner=self.runner)
- if ignore_errors:
- display("...ignoring", color='cyan', runner=self.runner)
- super(PlaybookRunnerCallbacks, self).on_failed(host, results, ignore_errors=ignore_errors)
-
- def on_ok(self, host, host_result):
- if self.runner.delegate_to:
- host = '%s -> %s' % (host, self.runner.delegate_to)
-
- item = host_result.get('item', None)
-
- host_result2 = host_result.copy()
- host_result2.pop('invocation', None)
- verbose_always = host_result2.pop('verbose_always', False)
- changed = host_result.get('changed', False)
- ok_or_changed = 'ok'
- if changed:
- ok_or_changed = 'changed'
-
- # show verbose output for non-setup module results if --verbose is used
- msg = ''
- if (not self.verbose or host_result2.get("verbose_override",None) is not
- None) and not verbose_always:
- if item:
- msg = "%s: [%s] => (item=%s)" % (ok_or_changed, host, item)
- else:
- if 'ansible_job_id' not in host_result or 'finished' in host_result:
- msg = "%s: [%s]" % (ok_or_changed, host)
- else:
- # verbose ...
- if item:
- msg = "%s: [%s] => (item=%s) => %s" % (ok_or_changed, host, item, utils.jsonify(host_result2, format=verbose_always))
- else:
- if 'ansible_job_id' not in host_result or 'finished' in host_result2:
- msg = "%s: [%s] => %s" % (ok_or_changed, host, utils.jsonify(host_result2, format=verbose_always))
-
- if msg != '':
- if not changed:
- display(msg, color='green', runner=self.runner)
- else:
- display(msg, color='yellow', runner=self.runner)
- if constants.COMMAND_WARNINGS and 'warnings' in host_result2 and host_result2['warnings']:
- for warning in host_result2['warnings']:
- display("warning: %s" % warning, color='purple', runner=self.runner)
- super(PlaybookRunnerCallbacks, self).on_ok(host, host_result)
-
- def on_skipped(self, host, item=None):
- if self.runner.delegate_to:
- host = '%s -> %s' % (host, self.runner.delegate_to)
-
- if constants.DISPLAY_SKIPPED_HOSTS:
- msg = ''
- if item:
- msg = "skipping: [%s] => (item=%s)" % (host, item)
- else:
- msg = "skipping: [%s]" % host
- display(msg, color='cyan', runner=self.runner)
- super(PlaybookRunnerCallbacks, self).on_skipped(host, item)
-
- def on_no_hosts(self):
- display("FATAL: no hosts matched or all hosts have already failed -- aborting\n", color='red', runner=self.runner)
- super(PlaybookRunnerCallbacks, self).on_no_hosts()
-
- def on_async_poll(self, host, res, jid, clock):
- if jid not in self._async_notified:
- self._async_notified[jid] = clock + 1
- if self._async_notified[jid] > clock:
- self._async_notified[jid] = clock
- msg = "<job %s> polling, %ss remaining"%(jid, clock)
- display(msg, color='cyan', runner=self.runner)
- super(PlaybookRunnerCallbacks, self).on_async_poll(host,res,jid,clock)
-
- def on_async_ok(self, host, res, jid):
- if jid:
- msg = "<job %s> finished on %s"%(jid, host)
- display(msg, color='cyan', runner=self.runner)
- super(PlaybookRunnerCallbacks, self).on_async_ok(host, res, jid)
-
- def on_async_failed(self, host, res, jid):
- msg = "<job %s> FAILED on %s" % (jid, host)
- display(msg, color='red', stderr=True, runner=self.runner)
- super(PlaybookRunnerCallbacks, self).on_async_failed(host,res,jid)
-
- def on_file_diff(self, host, diff):
- display(utils.get_diff(diff), runner=self.runner)
- super(PlaybookRunnerCallbacks, self).on_file_diff(host, diff)
-
-########################################################################
-
-class PlaybookCallbacks(object):
- ''' playbook.py callbacks used by /usr/bin/ansible-playbook '''
-
- def __init__(self, verbose=False):
-
- self.verbose = verbose
-
- def on_start(self):
- call_callback_module('playbook_on_start')
-
- def on_notify(self, host, handler):
- call_callback_module('playbook_on_notify', host, handler)
-
- def on_no_hosts_matched(self):
- display("skipping: no hosts matched", color='cyan')
- call_callback_module('playbook_on_no_hosts_matched')
-
- def on_no_hosts_remaining(self):
- display("\nFATAL: all hosts have already failed -- aborting", color='red')
- call_callback_module('playbook_on_no_hosts_remaining')
-
- def on_task_start(self, name, is_conditional):
- name = utils.unicode.to_bytes(name)
- msg = "TASK: [%s]" % name
- if is_conditional:
- msg = "NOTIFIED: [%s]" % name
-
- if hasattr(self, 'start_at'):
- self.start_at = utils.unicode.to_bytes(self.start_at)
- if name == self.start_at or fnmatch.fnmatch(name, self.start_at):
- # we found our match, we can get rid of this now
- del self.start_at
- elif self.task.role_name:
- # handle tasks prefixed with rolenames
- actual_name = name.split('|', 1)[1].lstrip()
- if actual_name == self.start_at or fnmatch.fnmatch(actual_name, self.start_at):
- del self.start_at
-
- if hasattr(self, 'start_at'): # we still have start_at so skip the task
- self.skip_task = True
- elif hasattr(self, 'step') and self.step:
- if isinstance(name, str):
- name = utils.unicode.to_unicode(name)
- msg = u'Perform task: %s (y/n/c): ' % name
- if sys.stdout.encoding:
- msg = to_bytes(msg, sys.stdout.encoding)
- else:
- msg = to_bytes(msg)
- resp = raw_input(msg)
- if resp.lower() in ['y','yes']:
- self.skip_task = False
- display(banner(msg))
- elif resp.lower() in ['c', 'continue']:
- self.skip_task = False
- self.step = False
- display(banner(msg))
- else:
- self.skip_task = True
- else:
- self.skip_task = False
- display(banner(msg))
-
- call_callback_module('playbook_on_task_start', name, is_conditional)
-
- def on_vars_prompt(self, varname, private=True, prompt=None, encrypt=None, confirm=False, salt_size=None, salt=None, default=None):
-
- if prompt and default is not None:
- msg = "%s [%s]: " % (prompt, default)
- elif prompt:
- msg = "%s: " % prompt
- else:
- msg = 'input for %s: ' % varname
-
- def do_prompt(prompt, private):
- if sys.stdout.encoding:
- msg = prompt.encode(sys.stdout.encoding)
- else:
- # when piping the output, or at other times when stdout
- # may not be the standard file descriptor, the stdout
- # encoding may not be set, so default to something sane
- msg = prompt.encode(locale.getpreferredencoding())
- if private:
- return getpass.getpass(msg)
- return raw_input(msg)
-
-
- if confirm:
- while True:
- result = do_prompt(msg, private)
- second = do_prompt("confirm " + msg, private)
- if result == second:
- break
- display("***** VALUES ENTERED DO NOT MATCH ****")
- else:
- result = do_prompt(msg, private)
-
- # if result is false and default is not None
- if not result and default is not None:
- result = default
-
-
- if encrypt:
- result = utils.do_encrypt(result, encrypt, salt_size, salt)
-
- # handle utf-8 chars
- result = to_unicode(result, errors='strict')
- call_callback_module( 'playbook_on_vars_prompt', varname, private=private, prompt=prompt,
- encrypt=encrypt, confirm=confirm, salt_size=salt_size, salt=None, default=default
- )
-
- return result
-
- def on_setup(self):
- display(banner("GATHERING FACTS"))
- call_callback_module('playbook_on_setup')
-
- def on_import_for_host(self, host, imported_file):
- msg = "%s: importing %s" % (host, imported_file)
- display(msg, color='cyan')
- call_callback_module('playbook_on_import_for_host', host, imported_file)
-
- def on_not_import_for_host(self, host, missing_file):
- msg = "%s: not importing file: %s" % (host, missing_file)
- display(msg, color='cyan')
- call_callback_module('playbook_on_not_import_for_host', host, missing_file)
-
- def on_play_start(self, name):
- display(banner("PLAY [%s]" % name))
- call_callback_module('playbook_on_play_start', name)
-
- def on_stats(self, stats):
- call_callback_module('playbook_on_stats', stats)
-
-
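
The LOG_LOCK / log_flock machinery above deserves a note: it serializes display() output across forked workers with an advisory fcntl lock on a shared per-uid temp file, and sets FD_CLOEXEC so processes spawned by modules do not inherit the descriptor. A self-contained sketch of the same idiom:

    import fcntl
    import os
    import tempfile

    def open_print_lock():
        """Open a per-uid lock file with FD_CLOEXEC set (cf. log_lockfile)."""
        path = os.path.join(tempfile.gettempdir(), '.print-lock.%s' % os.getuid())
        lockfile = open(path, 'w')
        flags = fcntl.fcntl(lockfile.fileno(), fcntl.F_GETFD)
        fcntl.fcntl(lockfile.fileno(), fcntl.F_SETFD, flags | fcntl.FD_CLOEXEC)
        return lockfile

    def locked_print(lockfile, msg):
        """Block on the lock so concurrent forked writers never interleave."""
        fcntl.lockf(lockfile, fcntl.LOCK_EX)
        try:
            print(msg, flush=True)
        finally:
            fcntl.lockf(lockfile, fcntl.LOCK_UN)
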
diff --git a/v1/ansible/color.py b/v1/ansible/color.py
deleted file mode 100644
index b3127d85fe..0000000000
--- a/v1/ansible/color.py
+++ /dev/null
@@ -1,74 +0,0 @@
-# (c) 2012-2014, Michael DeHaan <michael.dehaan@gmail.com>
-#
-# This file is part of Ansible
-#
-# Ansible is free software: you can redistribute it and/or modify
-# it under the terms of the GNU General Public License as published by
-# the Free Software Foundation, either version 3 of the License, or
-# (at your option) any later version.
-#
-# Ansible is distributed in the hope that it will be useful,
-# but WITHOUT ANY WARRANTY; without even the implied warranty of
-# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
-# GNU General Public License for more details.
-#
-# You should have received a copy of the GNU General Public License
-# along with Ansible. If not, see <http://www.gnu.org/licenses/>.
-
-import sys
-import constants
-
-ANSIBLE_COLOR=True
-if constants.ANSIBLE_NOCOLOR:
- ANSIBLE_COLOR=False
-elif not hasattr(sys.stdout, 'isatty') or not sys.stdout.isatty():
- ANSIBLE_COLOR=False
-else:
- try:
- import curses
- curses.setupterm()
- if curses.tigetnum('colors') < 0:
- ANSIBLE_COLOR=False
- except ImportError:
- # curses library was not found
- pass
- except curses.error:
- # curses returns an error (e.g. could not find terminal)
- ANSIBLE_COLOR=False
-
-if constants.ANSIBLE_FORCE_COLOR:
- ANSIBLE_COLOR=True
-
-# --- begin "pretty"
-#
-# pretty - A miniature library that provides a Python print and stdout
-# wrapper that makes colored terminal text easier to use (e.g. without
-# having to mess around with ANSI escape sequences). This code is public
-# domain - there is no license except that you must leave this header.
-#
-# Copyright (C) 2008 Brian Nez <thedude at bri1 dot com>
-#
-# http://nezzen.net/2008/06/23/colored-text-in-python-using-ansi-escape-sequences/
-
-codeCodes = {
- 'black': '0;30', 'bright gray': '0;37',
- 'blue': '0;34', 'white': '1;37',
- 'green': '0;32', 'bright blue': '1;34',
- 'cyan': '0;36', 'bright green': '1;32',
- 'red': '0;31', 'bright cyan': '1;36',
- 'purple': '0;35', 'bright red': '1;31',
- 'yellow': '0;33', 'bright purple': '1;35',
- 'dark gray': '1;30', 'bright yellow': '1;33',
- 'normal': '0'
-}
-
-def stringc(text, color):
- """String in color."""
-
- if ANSIBLE_COLOR:
- return "\033["+codeCodes[color]+"m"+text+"\033[0m"
- else:
- return text
-
-# --- end "pretty"
-
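
The curses probe above is the portable way to ask the terminfo database whether the attached terminal supports color at all; tigetnum('colors') comes back negative when it does not. A small usage sketch of the resulting stringc() helper, with a trimmed color table:

    import sys

    # subset of the codeCodes ANSI SGR table above
    CODES = {'red': '0;31', 'green': '0;32', 'normal': '0'}

    def stringc(text, color):
        """Wrap text in ANSI escapes only when stdout is a tty."""
        if hasattr(sys.stdout, 'isatty') and sys.stdout.isatty():
            return '\033[%sm%s\033[0m' % (CODES[color], text)
        return text

    print(stringc('ok', 'green'))
    print(stringc('FAILED', 'red'))
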
diff --git a/v1/ansible/constants.py b/v1/ansible/constants.py
deleted file mode 100644
index 2cdc08d8ce..0000000000
--- a/v1/ansible/constants.py
+++ /dev/null
@@ -1,212 +0,0 @@
-# (c) 2012-2014, Michael DeHaan <michael.dehaan@gmail.com>
-#
-# This file is part of Ansible
-#
-# Ansible is free software: you can redistribute it and/or modify
-# it under the terms of the GNU General Public License as published by
-# the Free Software Foundation, either version 3 of the License, or
-# (at your option) any later version.
-#
-# Ansible is distributed in the hope that it will be useful,
-# but WITHOUT ANY WARRANTY; without even the implied warranty of
-# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
-# GNU General Public License for more details.
-#
-# You should have received a copy of the GNU General Public License
-# along with Ansible. If not, see <http://www.gnu.org/licenses/>.
-
-import os
-import pwd
-import sys
-import ConfigParser
-from string import ascii_letters, digits
-
-# copied from utils, avoid circular reference fun :)
-def mk_boolean(value):
- if value is None:
- return False
- val = str(value)
- if val.lower() in [ "true", "t", "y", "1", "yes" ]:
- return True
- else:
- return False
-
-def get_config(p, section, key, env_var, default, boolean=False, integer=False, floating=False, islist=False):
- ''' return a configuration variable with casting '''
- value = _get_config(p, section, key, env_var, default)
- if boolean:
- return mk_boolean(value)
- if value and integer:
- return int(value)
- if value and floating:
- return float(value)
- if value and islist:
- return [x.strip() for x in value.split(',')]
- return value
-
-def _get_config(p, section, key, env_var, default):
- ''' helper function for get_config '''
- if env_var is not None:
- value = os.environ.get(env_var, None)
- if value is not None:
- return value
- if p is not None:
- try:
- return p.get(section, key, raw=True)
- except:
- return default
- return default
-
-def load_config_file():
- ''' Load the config file; search order (first found wins): ENV, CWD, HOME, /etc/ansible '''
-
- p = ConfigParser.ConfigParser()
-
- path0 = os.getenv("ANSIBLE_CONFIG", None)
- if path0 is not None:
- path0 = os.path.expanduser(path0)
- path1 = os.getcwd() + "/ansible.cfg"
- path2 = os.path.expanduser("~/.ansible.cfg")
- path3 = "/etc/ansible/ansible.cfg"
-
- for path in [path0, path1, path2, path3]:
- if path is not None and os.path.exists(path):
- try:
- p.read(path)
- except ConfigParser.Error as e:
- print "Error reading config file: \n%s" % e
- sys.exit(1)
- return p
- return None
-
-def shell_expand_path(path):
- ''' shell_expand_path is needed as os.path.expanduser does not work
- when path is None, which is the default for ANSIBLE_PRIVATE_KEY_FILE '''
- if path:
- path = os.path.expanduser(os.path.expandvars(path))
- return path
-
-p = load_config_file()
-
-active_user = pwd.getpwuid(os.geteuid())[0]
-
-# check all of these extensions when looking for yaml files for things like
-# group variables -- really anything we can load
-YAML_FILENAME_EXTENSIONS = [ "", ".yml", ".yaml", ".json" ]
-
-# sections in config file
-DEFAULTS='defaults'
-
-# configurable things
-DEFAULT_HOST_LIST = shell_expand_path(get_config(p, DEFAULTS, 'inventory', 'ANSIBLE_INVENTORY', get_config(p, DEFAULTS,'hostfile','ANSIBLE_HOSTS', '/etc/ansible/hosts')))
-DEFAULT_MODULE_PATH = get_config(p, DEFAULTS, 'library', 'ANSIBLE_LIBRARY', None)
-DEFAULT_ROLES_PATH = shell_expand_path(get_config(p, DEFAULTS, 'roles_path', 'ANSIBLE_ROLES_PATH', '/etc/ansible/roles'))
-DEFAULT_REMOTE_TMP = get_config(p, DEFAULTS, 'remote_tmp', 'ANSIBLE_REMOTE_TEMP', '$HOME/.ansible/tmp')
-DEFAULT_MODULE_NAME = get_config(p, DEFAULTS, 'module_name', None, 'command')
-DEFAULT_PATTERN = get_config(p, DEFAULTS, 'pattern', None, '*')
-DEFAULT_FORKS = get_config(p, DEFAULTS, 'forks', 'ANSIBLE_FORKS', 5, integer=True)
-DEFAULT_MODULE_ARGS = get_config(p, DEFAULTS, 'module_args', 'ANSIBLE_MODULE_ARGS', '')
-DEFAULT_MODULE_LANG = get_config(p, DEFAULTS, 'module_lang', 'ANSIBLE_MODULE_LANG', 'en_US.UTF-8')
-DEFAULT_TIMEOUT = get_config(p, DEFAULTS, 'timeout', 'ANSIBLE_TIMEOUT', 10, integer=True)
-DEFAULT_POLL_INTERVAL = get_config(p, DEFAULTS, 'poll_interval', 'ANSIBLE_POLL_INTERVAL', 15, integer=True)
-DEFAULT_REMOTE_USER = get_config(p, DEFAULTS, 'remote_user', 'ANSIBLE_REMOTE_USER', active_user)
-DEFAULT_ASK_PASS = get_config(p, DEFAULTS, 'ask_pass', 'ANSIBLE_ASK_PASS', False, boolean=True)
-DEFAULT_PRIVATE_KEY_FILE = shell_expand_path(get_config(p, DEFAULTS, 'private_key_file', 'ANSIBLE_PRIVATE_KEY_FILE', None))
-DEFAULT_ASK_SUDO_PASS = get_config(p, DEFAULTS, 'ask_sudo_pass', 'ANSIBLE_ASK_SUDO_PASS', False, boolean=True)
-DEFAULT_REMOTE_PORT = get_config(p, DEFAULTS, 'remote_port', 'ANSIBLE_REMOTE_PORT', None, integer=True)
-DEFAULT_ASK_VAULT_PASS = get_config(p, DEFAULTS, 'ask_vault_pass', 'ANSIBLE_ASK_VAULT_PASS', False, boolean=True)
-DEFAULT_VAULT_PASSWORD_FILE = shell_expand_path(get_config(p, DEFAULTS, 'vault_password_file', 'ANSIBLE_VAULT_PASSWORD_FILE', None))
-DEFAULT_TRANSPORT = get_config(p, DEFAULTS, 'transport', 'ANSIBLE_TRANSPORT', 'smart')
-DEFAULT_SCP_IF_SSH = get_config(p, 'ssh_connection', 'scp_if_ssh', 'ANSIBLE_SCP_IF_SSH', False, boolean=True)
-DEFAULT_MANAGED_STR = get_config(p, DEFAULTS, 'ansible_managed', None, 'Ansible managed: {file} modified on %Y-%m-%d %H:%M:%S by {uid} on {host}')
-DEFAULT_SYSLOG_FACILITY = get_config(p, DEFAULTS, 'syslog_facility', 'ANSIBLE_SYSLOG_FACILITY', 'LOG_USER')
-DEFAULT_KEEP_REMOTE_FILES = get_config(p, DEFAULTS, 'keep_remote_files', 'ANSIBLE_KEEP_REMOTE_FILES', False, boolean=True)
-DEFAULT_SUDO = get_config(p, DEFAULTS, 'sudo', 'ANSIBLE_SUDO', False, boolean=True)
-DEFAULT_SUDO_USER = get_config(p, DEFAULTS, 'sudo_user', 'ANSIBLE_SUDO_USER', 'root')
-DEFAULT_SUDO_EXE = get_config(p, DEFAULTS, 'sudo_exe', 'ANSIBLE_SUDO_EXE', 'sudo')
-DEFAULT_SUDO_FLAGS = get_config(p, DEFAULTS, 'sudo_flags', 'ANSIBLE_SUDO_FLAGS', '-H')
-DEFAULT_HASH_BEHAVIOUR = get_config(p, DEFAULTS, 'hash_behaviour', 'ANSIBLE_HASH_BEHAVIOUR', 'replace')
-DEFAULT_JINJA2_EXTENSIONS = get_config(p, DEFAULTS, 'jinja2_extensions', 'ANSIBLE_JINJA2_EXTENSIONS', None)
-DEFAULT_EXECUTABLE = get_config(p, DEFAULTS, 'executable', 'ANSIBLE_EXECUTABLE', '/bin/sh')
-DEFAULT_SU_EXE = get_config(p, DEFAULTS, 'su_exe', 'ANSIBLE_SU_EXE', 'su')
-DEFAULT_SU = get_config(p, DEFAULTS, 'su', 'ANSIBLE_SU', False, boolean=True)
-DEFAULT_SU_FLAGS = get_config(p, DEFAULTS, 'su_flags', 'ANSIBLE_SU_FLAGS', '')
-DEFAULT_SU_USER = get_config(p, DEFAULTS, 'su_user', 'ANSIBLE_SU_USER', 'root')
-DEFAULT_ASK_SU_PASS = get_config(p, DEFAULTS, 'ask_su_pass', 'ANSIBLE_ASK_SU_PASS', False, boolean=True)
-DEFAULT_GATHERING = get_config(p, DEFAULTS, 'gathering', 'ANSIBLE_GATHERING', 'implicit').lower()
-DEFAULT_LOG_PATH = shell_expand_path(get_config(p, DEFAULTS, 'log_path', 'ANSIBLE_LOG_PATH', ''))
-
-# selinux
-DEFAULT_SELINUX_SPECIAL_FS = get_config(p, 'selinux', 'special_context_filesystems', None, 'fuse, nfs, vboxsf', islist=True)
-
-#TODO: get rid of ternary chain mess
-BECOME_METHODS = ['sudo','su','pbrun','pfexec','runas']
-BECOME_ERROR_STRINGS = {'sudo': 'Sorry, try again.', 'su': 'Authentication failure', 'pbrun': '', 'pfexec': '', 'runas': ''}
-DEFAULT_BECOME = get_config(p, 'privilege_escalation', 'become', 'ANSIBLE_BECOME',False, boolean=True)
-DEFAULT_BECOME_METHOD = get_config(p, 'privilege_escalation', 'become_method', 'ANSIBLE_BECOME_METHOD','sudo' if DEFAULT_SUDO else 'su' if DEFAULT_SU else 'sudo' ).lower()
-DEFAULT_BECOME_USER = get_config(p, 'privilege_escalation', 'become_user', 'ANSIBLE_BECOME_USER',default=None)
-DEFAULT_BECOME_ASK_PASS = get_config(p, 'privilege_escalation', 'become_ask_pass', 'ANSIBLE_BECOME_ASK_PASS', False, boolean=True)
- # need to rethink implementing these two
-DEFAULT_BECOME_EXE = None
-#DEFAULT_BECOME_EXE = get_config(p, DEFAULTS, 'become_exe', 'ANSIBLE_BECOME_EXE','sudo' if DEFAULT_SUDO else 'su' if DEFAULT_SU else 'sudo')
-#DEFAULT_BECOME_FLAGS = get_config(p, DEFAULTS, 'become_flags', 'ANSIBLE_BECOME_FLAGS',DEFAULT_SUDO_FLAGS if DEFAULT_SUDO else DEFAULT_SU_FLAGS if DEFAULT_SU else '-H')
-
-
-DEFAULT_ACTION_PLUGIN_PATH = get_config(p, DEFAULTS, 'action_plugins', 'ANSIBLE_ACTION_PLUGINS', '~/.ansible/plugins/action_plugins:/usr/share/ansible_plugins/action_plugins')
-DEFAULT_CACHE_PLUGIN_PATH = get_config(p, DEFAULTS, 'cache_plugins', 'ANSIBLE_CACHE_PLUGINS', '~/.ansible/plugins/cache_plugins:/usr/share/ansible_plugins/cache_plugins')
-DEFAULT_CALLBACK_PLUGIN_PATH = get_config(p, DEFAULTS, 'callback_plugins', 'ANSIBLE_CALLBACK_PLUGINS', '~/.ansible/plugins/callback_plugins:/usr/share/ansible_plugins/callback_plugins')
-DEFAULT_CONNECTION_PLUGIN_PATH = get_config(p, DEFAULTS, 'connection_plugins', 'ANSIBLE_CONNECTION_PLUGINS', '~/.ansible/plugins/connection_plugins:/usr/share/ansible_plugins/connection_plugins')
-DEFAULT_LOOKUP_PLUGIN_PATH = get_config(p, DEFAULTS, 'lookup_plugins', 'ANSIBLE_LOOKUP_PLUGINS', '~/.ansible/plugins/lookup_plugins:/usr/share/ansible_plugins/lookup_plugins')
-DEFAULT_VARS_PLUGIN_PATH = get_config(p, DEFAULTS, 'vars_plugins', 'ANSIBLE_VARS_PLUGINS', '~/.ansible/plugins/vars_plugins:/usr/share/ansible_plugins/vars_plugins')
-DEFAULT_FILTER_PLUGIN_PATH = get_config(p, DEFAULTS, 'filter_plugins', 'ANSIBLE_FILTER_PLUGINS', '~/.ansible/plugins/filter_plugins:/usr/share/ansible_plugins/filter_plugins')
-
-CACHE_PLUGIN = get_config(p, DEFAULTS, 'fact_caching', 'ANSIBLE_CACHE_PLUGIN', 'memory')
-CACHE_PLUGIN_CONNECTION = get_config(p, DEFAULTS, 'fact_caching_connection', 'ANSIBLE_CACHE_PLUGIN_CONNECTION', None)
-CACHE_PLUGIN_PREFIX = get_config(p, DEFAULTS, 'fact_caching_prefix', 'ANSIBLE_CACHE_PLUGIN_PREFIX', 'ansible_facts')
-CACHE_PLUGIN_TIMEOUT = get_config(p, DEFAULTS, 'fact_caching_timeout', 'ANSIBLE_CACHE_PLUGIN_TIMEOUT', 24 * 60 * 60, integer=True)
-
-ANSIBLE_FORCE_COLOR = get_config(p, DEFAULTS, 'force_color', 'ANSIBLE_FORCE_COLOR', None, boolean=True)
-ANSIBLE_NOCOLOR = get_config(p, DEFAULTS, 'nocolor', 'ANSIBLE_NOCOLOR', None, boolean=True)
-ANSIBLE_NOCOWS = get_config(p, DEFAULTS, 'nocows', 'ANSIBLE_NOCOWS', None, boolean=True)
-DISPLAY_SKIPPED_HOSTS = get_config(p, DEFAULTS, 'display_skipped_hosts', 'DISPLAY_SKIPPED_HOSTS', True, boolean=True)
-DEFAULT_UNDEFINED_VAR_BEHAVIOR = get_config(p, DEFAULTS, 'error_on_undefined_vars', 'ANSIBLE_ERROR_ON_UNDEFINED_VARS', True, boolean=True)
-HOST_KEY_CHECKING = get_config(p, DEFAULTS, 'host_key_checking', 'ANSIBLE_HOST_KEY_CHECKING', True, boolean=True)
-SYSTEM_WARNINGS = get_config(p, DEFAULTS, 'system_warnings', 'ANSIBLE_SYSTEM_WARNINGS', True, boolean=True)
-DEPRECATION_WARNINGS = get_config(p, DEFAULTS, 'deprecation_warnings', 'ANSIBLE_DEPRECATION_WARNINGS', True, boolean=True)
-DEFAULT_CALLABLE_WHITELIST = get_config(p, DEFAULTS, 'callable_whitelist', 'ANSIBLE_CALLABLE_WHITELIST', [], islist=True)
-COMMAND_WARNINGS = get_config(p, DEFAULTS, 'command_warnings', 'ANSIBLE_COMMAND_WARNINGS', False, boolean=True)
-DEFAULT_LOAD_CALLBACK_PLUGINS = get_config(p, DEFAULTS, 'bin_ansible_callbacks', 'ANSIBLE_LOAD_CALLBACK_PLUGINS', False, boolean=True)
-DEFAULT_FORCE_HANDLERS = get_config(p, DEFAULTS, 'force_handlers', 'ANSIBLE_FORCE_HANDLERS', False, boolean=True)
-
-
-RETRY_FILES_ENABLED = get_config(p, DEFAULTS, 'retry_files_enabled', 'ANSIBLE_RETRY_FILES_ENABLED', True, boolean=True)
-RETRY_FILES_SAVE_PATH = get_config(p, DEFAULTS, 'retry_files_save_path', 'ANSIBLE_RETRY_FILES_SAVE_PATH', '~/')
-
-# CONNECTION RELATED
-ANSIBLE_SSH_ARGS = get_config(p, 'ssh_connection', 'ssh_args', 'ANSIBLE_SSH_ARGS', None)
-ANSIBLE_SSH_CONTROL_PATH = get_config(p, 'ssh_connection', 'control_path', 'ANSIBLE_SSH_CONTROL_PATH', "%(directory)s/ansible-ssh-%%h-%%p-%%r")
-ANSIBLE_SSH_PIPELINING = get_config(p, 'ssh_connection', 'pipelining', 'ANSIBLE_SSH_PIPELINING', False, boolean=True)
-PARAMIKO_RECORD_HOST_KEYS = get_config(p, 'paramiko_connection', 'record_host_keys', 'ANSIBLE_PARAMIKO_RECORD_HOST_KEYS', True, boolean=True)
-# obsolete -- will be formally removed
-ZEROMQ_PORT = get_config(p, 'fireball_connection', 'zeromq_port', 'ANSIBLE_ZEROMQ_PORT', 5099, integer=True)
-ACCELERATE_PORT = get_config(p, 'accelerate', 'accelerate_port', 'ACCELERATE_PORT', 5099, integer=True)
-ACCELERATE_TIMEOUT = get_config(p, 'accelerate', 'accelerate_timeout', 'ACCELERATE_TIMEOUT', 30, integer=True)
-ACCELERATE_CONNECT_TIMEOUT = get_config(p, 'accelerate', 'accelerate_connect_timeout', 'ACCELERATE_CONNECT_TIMEOUT', 1.0, floating=True)
-ACCELERATE_DAEMON_TIMEOUT = get_config(p, 'accelerate', 'accelerate_daemon_timeout', 'ACCELERATE_DAEMON_TIMEOUT', 30, integer=True)
-ACCELERATE_KEYS_DIR = get_config(p, 'accelerate', 'accelerate_keys_dir', 'ACCELERATE_KEYS_DIR', '~/.fireball.keys')
-ACCELERATE_KEYS_DIR_PERMS = get_config(p, 'accelerate', 'accelerate_keys_dir_perms', 'ACCELERATE_KEYS_DIR_PERMS', '700')
-ACCELERATE_KEYS_FILE_PERMS = get_config(p, 'accelerate', 'accelerate_keys_file_perms', 'ACCELERATE_KEYS_FILE_PERMS', '600')
-ACCELERATE_MULTI_KEY = get_config(p, 'accelerate', 'accelerate_multi_key', 'ACCELERATE_MULTI_KEY', False, boolean=True)
-PARAMIKO_PTY = get_config(p, 'paramiko_connection', 'pty', 'ANSIBLE_PARAMIKO_PTY', True, boolean=True)
-
-# characters included in auto-generated passwords
-DEFAULT_PASSWORD_CHARS = ascii_letters + digits + ".,:-_"
-
-# non-configurable things
-DEFAULT_BECOME_PASS = None
-DEFAULT_SUDO_PASS = None
-DEFAULT_REMOTE_PASS = None
-DEFAULT_SUBSET = None
-DEFAULT_SU_PASS = None
-VAULT_VERSION_MIN = 1.0
-VAULT_VERSION_MAX = 1.0
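
The get_config()/_get_config() pair above encodes a fixed precedence: the environment variable wins, then the loaded ini file, then the hard-coded default. A condensed Python 3 sketch (configparser replaces the old ConfigParser module name):

    import configparser
    import os

    def get_config(parser, section, key, env_var, default):
        """Resolve one setting: environment beats ini file beats default."""
        if env_var is not None:
            value = os.environ.get(env_var)
            if value is not None:
                return value
        if parser is not None:
            try:
                return parser.get(section, key, raw=True)
            except (configparser.NoSectionError, configparser.NoOptionError):
                return default
        return default

    p = configparser.ConfigParser()
    p.read_string('[defaults]\nforks = 10\n')
    assert get_config(p, 'defaults', 'forks', 'ANSIBLE_FORKS', 5) == '10'

Values come back as strings; the boolean/integer/floating flags above layer casting on top of this lookup.
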
diff --git a/v1/ansible/errors.py b/v1/ansible/errors.py
deleted file mode 100644
index 65edbc294a..0000000000
--- a/v1/ansible/errors.py
+++ /dev/null
@@ -1,35 +0,0 @@
-# (c) 2012-2014, Michael DeHaan <michael.dehaan@gmail.com>
-#
-# This file is part of Ansible
-#
-# Ansible is free software: you can redistribute it and/or modify
-# it under the terms of the GNU General Public License as published by
-# the Free Software Foundation, either version 3 of the License, or
-# (at your option) any later version.
-#
-# Ansible is distributed in the hope that it will be useful,
-# but WITHOUT ANY WARRANTY; without even the implied warranty of
-# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
-# GNU General Public License for more details.
-#
-# You should have received a copy of the GNU General Public License
-# along with Ansible. If not, see <http://www.gnu.org/licenses/>.
-
-class AnsibleError(Exception):
- ''' The base Ansible exception from which all others should subclass '''
- pass
-
-class AnsibleFileNotFound(AnsibleError):
- pass
-
-class AnsibleConnectionFailed(AnsibleError):
- pass
-
-class AnsibleYAMLValidationFailed(AnsibleError):
- pass
-
-class AnsibleUndefinedVariable(AnsibleError):
- pass
-
-class AnsibleFilterError(AnsibleError):
- pass
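
Every class above subclasses AnsibleError, so callers can handle a specific failure first and still sweep up the rest of the family with one clause. A small illustrative usage (the load() helper is hypothetical):

    import os

    def load(path):
        # hypothetical helper that raises one of the subclasses above
        if not os.path.exists(path):
            raise AnsibleFileNotFound("no such inventory: %s" % path)
        return open(path).read()

    try:
        data = load('/etc/ansible/hosts')
    except AnsibleFileNotFound as e:
        print("missing file: %s" % e)
    except AnsibleError as e:
        # any other Ansible failure lands here
        print("ansible error: %s" % e)
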
diff --git a/v1/ansible/inventory/__init__.py b/v1/ansible/inventory/__init__.py
deleted file mode 100644
index f012246e22..0000000000
--- a/v1/ansible/inventory/__init__.py
+++ /dev/null
@@ -1,654 +0,0 @@
-# (c) 2012-2014, Michael DeHaan <michael.dehaan@gmail.com>
-#
-# This file is part of Ansible
-#
-# Ansible is free software: you can redistribute it and/or modify
-# it under the terms of the GNU General Public License as published by
-# the Free Software Foundation, either version 3 of the License, or
-# (at your option) any later version.
-#
-# Ansible is distributed in the hope that it will be useful,
-# but WITHOUT ANY WARRANTY; without even the implied warranty of
-# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
-# GNU General Public License for more details.
-#
-# You should have received a copy of the GNU General Public License
-# along with Ansible. If not, see <http://www.gnu.org/licenses/>.
-
-#############################################
-import fnmatch
-import os
-import sys
-import re
-import subprocess
-
-import ansible.constants as C
-from ansible.inventory.ini import InventoryParser
-from ansible.inventory.script import InventoryScript
-from ansible.inventory.dir import InventoryDirectory
-from ansible.inventory.group import Group
-from ansible.inventory.host import Host
-from ansible import errors
-from ansible import utils
-
-class Inventory(object):
- """
- Host inventory for ansible.
- """
-
- __slots__ = [ 'host_list', 'groups', '_restriction', '_also_restriction', '_subset',
- 'parser', '_vars_per_host', '_vars_per_group', '_hosts_cache', '_groups_list',
- '_pattern_cache', '_vault_password', '_vars_plugins', '_playbook_basedir']
-
- def __init__(self, host_list=C.DEFAULT_HOST_LIST, vault_password=None):
-
- # the host file, or script path, or list of hosts
- # if a list, inventory data will NOT be loaded
- self.host_list = host_list
- self._vault_password=vault_password
-
- # caching to avoid repeated calculations, particularly with
- # external inventory scripts.
-
- self._vars_per_host = {}
- self._vars_per_group = {}
- self._hosts_cache = {}
- self._groups_list = {}
- self._pattern_cache = {}
-
- # to be set by calling set_playbook_basedir by playbook code
- self._playbook_basedir = None
-
- # the inventory object holds a list of groups
- self.groups = []
-
- # a list of host(names) to restrict current inquiries to
- self._restriction = None
- self._also_restriction = None
- self._subset = None
-
- if isinstance(host_list, basestring):
- if "," in host_list:
- host_list = host_list.split(",")
- host_list = [ h for h in host_list if h and h.strip() ]
-
- if host_list is None:
- self.parser = None
- elif isinstance(host_list, list):
- self.parser = None
- all = Group('all')
- self.groups = [ all ]
- ipv6_re = re.compile('\[([a-f:A-F0-9]*[%[0-z]+]?)\](?::(\d+))?')
- for x in host_list:
- m = ipv6_re.match(x)
- if m:
- all.add_host(Host(m.groups()[0], m.groups()[1]))
- else:
- if ":" in x:
- tokens = x.rsplit(":", 1)
- # if there is ':' in the address, then this is an ipv6
- if ':' in tokens[0]:
- all.add_host(Host(x))
- else:
- all.add_host(Host(tokens[0], tokens[1]))
- else:
- all.add_host(Host(x))
- elif os.path.exists(host_list):
- if os.path.isdir(host_list):
- # Ensure basedir is inside the directory
- self.host_list = os.path.join(self.host_list, "")
- self.parser = InventoryDirectory(filename=host_list)
- self.groups = self.parser.groups.values()
- else:
- # check to see if the specified file starts with a
- # shebang (#!/), so if an error is raised by the parser
- # class we can show a more apropos error
- shebang_present = False
- try:
- inv_file = open(host_list)
- first_line = inv_file.readlines()[0]
- inv_file.close()
- if first_line.startswith('#!'):
- shebang_present = True
- except:
- pass
-
- if utils.is_executable(host_list):
- try:
- self.parser = InventoryScript(filename=host_list)
- self.groups = self.parser.groups.values()
- except:
- if not shebang_present:
- raise errors.AnsibleError("The file %s is marked as executable, but failed to execute correctly. " % host_list + \
- "If this is not supposed to be an executable script, correct this with `chmod -x %s`." % host_list)
- else:
- raise
- else:
- try:
- self.parser = InventoryParser(filename=host_list)
- self.groups = self.parser.groups.values()
- except:
- if shebang_present:
- raise errors.AnsibleError("The file %s looks like it should be an executable inventory script, but is not marked executable. " % host_list + \
- "Perhaps you want to correct this with `chmod +x %s`?" % host_list)
- else:
- raise
-
- utils.plugins.vars_loader.add_directory(self.basedir(), with_subdir=True)
- else:
- raise errors.AnsibleError("Unable to find an inventory file, specify one with -i ?")
-
- self._vars_plugins = [ x for x in utils.plugins.vars_loader.all(self) ]
-
- # get group vars from group_vars/ files and vars plugins
- for group in self.groups:
- group.vars = utils.combine_vars(group.vars, self.get_group_variables(group.name, vault_password=self._vault_password))
-
- # get host vars from host_vars/ files and vars plugins
- for host in self.get_hosts():
- host.vars = utils.combine_vars(host.vars, self.get_host_variables(host.name, vault_password=self._vault_password))
-
-
- def _match(self, str, pattern_str):
- try:
- if pattern_str.startswith('~'):
- return re.search(pattern_str[1:], str)
- else:
- return fnmatch.fnmatch(str, pattern_str)
- except Exception, e:
- raise errors.AnsibleError('invalid host pattern: %s' % pattern_str)
-
- def _match_list(self, items, item_attr, pattern_str):
- results = []
- try:
- if not pattern_str.startswith('~'):
- pattern = re.compile(fnmatch.translate(pattern_str))
- else:
- pattern = re.compile(pattern_str[1:])
- except Exception, e:
- raise errors.AnsibleError('invalid host pattern: %s' % pattern_str)
-
- for item in items:
- if pattern.match(getattr(item, item_attr)):
- results.append(item)
- return results
-
- def get_hosts(self, pattern="all"):
- """
- find all host names matching a pattern string, taking into account any inventory restrictions or
- applied subsets.
- """
-
- # process patterns
- if isinstance(pattern, list):
- pattern = ';'.join(pattern)
- patterns = pattern.replace(";",":").split(":")
- hosts = self._get_hosts(patterns)
-
- # exclude hosts not in a subset, if defined
- if self._subset:
- subset = self._get_hosts(self._subset)
- hosts = [ h for h in hosts if h in subset ]
-
- # exclude hosts mentioned in any restriction (ex: failed hosts)
- if self._restriction is not None:
- hosts = [ h for h in hosts if h.name in self._restriction ]
- if self._also_restriction is not None:
- hosts = [ h for h in hosts if h.name in self._also_restriction ]
-
- return hosts
-
- def _get_hosts(self, patterns):
- """
- finds hosts that match a list of patterns. Handles negative
- matches as well as intersection matches.
- """
-
- # Host specifiers should be sorted to ensure consistent behavior
- pattern_regular = []
- pattern_intersection = []
- pattern_exclude = []
- for p in patterns:
- if p.startswith("!"):
- pattern_exclude.append(p)
- elif p.startswith("&"):
- pattern_intersection.append(p)
- elif p:
- pattern_regular.append(p)
-
- # if no regular pattern was given (hence only exclude and/or intersection
- # patterns), make that magically work by starting from 'all'
- if pattern_regular == []:
- pattern_regular = ['all']
-
- # when applying the host selectors, run those without the "&" or "!"
- # first, then the &s, then the !s.
- patterns = pattern_regular + pattern_intersection + pattern_exclude
-
- hosts = []
-
- for p in patterns:
- # avoid resolving a pattern that is a plain host
- if p in self._hosts_cache:
- hosts.append(self.get_host(p))
- else:
- that = self.__get_hosts(p)
- if p.startswith("!"):
- hosts = [ h for h in hosts if h not in that ]
- elif p.startswith("&"):
- hosts = [ h for h in hosts if h in that ]
- else:
- to_append = [ h for h in that if h.name not in [ y.name for y in hosts ] ]
- hosts.extend(to_append)
- return hosts
-
- def __get_hosts(self, pattern):
- """
- finds hosts that positively match a particular pattern. Does not
- take into account negative matches.
- """
-
- if pattern in self._pattern_cache:
- return self._pattern_cache[pattern]
-
- (name, enumeration_details) = self._enumeration_info(pattern)
- hpat = self._hosts_in_unenumerated_pattern(name)
- result = self._apply_ranges(pattern, hpat)
- self._pattern_cache[pattern] = result
- return result
-
- def _enumeration_info(self, pattern):
- """
- returns (pattern, limits) taking a regular pattern and finding out
- which parts of it correspond to start/stop offsets. limits is
- a tuple of (start, stop) or None
- """
-
- # Do not parse regexes for enumeration info
- if pattern.startswith('~'):
- return (pattern, None)
-
- # The regex used to match on the range, which can be [x] or [x-y].
- pattern_re = re.compile("^(.*)\[([-]?[0-9]+)(?:(?:-)([0-9]+))?\](.*)$")
- m = pattern_re.match(pattern)
- if m:
- (target, first, last, rest) = m.groups()
- first = int(first)
- if last:
- if first < 0:
- raise errors.AnsibleError("invalid range: negative indices cannot be used as the first item in a range")
- last = int(last)
- else:
- last = first
- return (target, (first, last))
- else:
- return (pattern, None)
-
- def _apply_ranges(self, pat, hosts):
- """
- given a pattern like foo, that matches hosts, return all of hosts
- given a pattern like foo[0-5], where foo matches hosts, return the slice hosts[0:5]
- """
-
- # If there are no hosts to select from, just return the
- # empty set. This prevents trying to do selections on an empty set.
- # issue#6258
- if not hosts:
- return hosts
-
- (loose_pattern, limits) = self._enumeration_info(pat)
- if not limits:
- return hosts
-
- (left, right) = limits
-
- if left == '':
- left = 0
- if right == '':
- right = 0
- left=int(left)
- right=int(right)
- try:
- if left != right:
- return hosts[left:right]
- else:
- return [ hosts[left] ]
- except IndexError:
- raise errors.AnsibleError("no hosts matching the pattern '%s' were found" % pat)
-
- def _create_implicit_localhost(self, pattern):
- new_host = Host(pattern)
- new_host.set_variable("ansible_python_interpreter", sys.executable)
- new_host.set_variable("ansible_connection", "local")
- ungrouped = self.get_group("ungrouped")
- if ungrouped is None:
- self.add_group(Group('ungrouped'))
- ungrouped = self.get_group('ungrouped')
- self.get_group('all').add_child_group(ungrouped)
- ungrouped.add_host(new_host)
- return new_host
-
- def _hosts_in_unenumerated_pattern(self, pattern):
- """ Get all host names matching the pattern """
-
- results = []
- hosts = []
- hostnames = set()
-
- # ignore any negative checks here, this is handled elsewhere
- pattern = pattern.replace("!","").replace("&", "")
-
- def __append_host_to_results(host):
- if host not in results and host.name not in hostnames:
- hostnames.add(host.name)
- results.append(host)
-
- groups = self.get_groups()
- for group in groups:
- if pattern == 'all':
- for host in group.get_hosts():
- __append_host_to_results(host)
- else:
- if self._match(group.name, pattern):
- for host in group.get_hosts():
- __append_host_to_results(host)
- else:
- matching_hosts = self._match_list(group.get_hosts(), 'name', pattern)
- for host in matching_hosts:
- __append_host_to_results(host)
-
- if pattern in ["localhost", "127.0.0.1"] and len(results) == 0:
- new_host = self._create_implicit_localhost(pattern)
- results.append(new_host)
- return results
-
- def clear_pattern_cache(self):
- ''' called exclusively by the add_host plugin to allow patterns to be recalculated '''
- self._pattern_cache = {}
-
- def groups_for_host(self, host):
- if host in self._hosts_cache:
- return self._hosts_cache[host].get_groups()
- else:
- return []
-
- def groups_list(self):
- if not self._groups_list:
- groups = {}
- for g in self.groups:
- groups[g.name] = [h.name for h in g.get_hosts()]
- ancestors = g.get_ancestors()
- for a in ancestors:
- if a.name not in groups:
- groups[a.name] = [h.name for h in a.get_hosts()]
- self._groups_list = groups
- return self._groups_list
-
- def get_groups(self):
- return self.groups
-
- def get_host(self, hostname):
- if hostname not in self._hosts_cache:
- self._hosts_cache[hostname] = self._get_host(hostname)
- return self._hosts_cache[hostname]
-
- def _get_host(self, hostname):
- if hostname in ['localhost','127.0.0.1']:
- for host in self.get_group('all').get_hosts():
- if host.name in ['localhost', '127.0.0.1']:
- return host
- return self._create_implicit_localhost(hostname)
- else:
- for group in self.groups:
- for host in group.get_hosts():
- if hostname == host.name:
- return host
- return None
-
- def get_group(self, groupname):
- for group in self.groups:
- if group.name == groupname:
- return group
- return None
-
- def get_group_variables(self, groupname, update_cached=False, vault_password=None):
- if groupname not in self._vars_per_group or update_cached:
- self._vars_per_group[groupname] = self._get_group_variables(groupname, vault_password=vault_password)
- return self._vars_per_group[groupname]
-
- def _get_group_variables(self, groupname, vault_password=None):
-
- group = self.get_group(groupname)
- if group is None:
- raise errors.AnsibleError("group not found: %s" % groupname)
-
- vars = {}
-
- # plugin.get_group_vars retrieves just vars for specific group
- vars_results = [ plugin.get_group_vars(group, vault_password=vault_password) for plugin in self._vars_plugins if hasattr(plugin, 'get_group_vars')]
- for updated in vars_results:
- if updated is not None:
- vars = utils.combine_vars(vars, updated)
-
- # Read group_vars/ files
- vars = utils.combine_vars(vars, self.get_group_vars(group))
-
- return vars
-
- def get_variables(self, hostname, update_cached=False, vault_password=None):
-
- host = self.get_host(hostname)
- if not host:
- raise errors.AnsibleError("host not found: %s" % hostname)
- return host.get_variables()
-
- def get_host_variables(self, hostname, update_cached=False, vault_password=None):
-
- if hostname not in self._vars_per_host or update_cached:
- self._vars_per_host[hostname] = self._get_host_variables(hostname, vault_password=vault_password)
- return self._vars_per_host[hostname]
-
- def _get_host_variables(self, hostname, vault_password=None):
-
- host = self.get_host(hostname)
- if host is None:
- raise errors.AnsibleError("host not found: %s" % hostname)
-
- vars = {}
-
- # plugin.run retrieves all vars (also from groups) for host
- vars_results = [ plugin.run(host, vault_password=vault_password) for plugin in self._vars_plugins if hasattr(plugin, 'run')]
- for updated in vars_results:
- if updated is not None:
- vars = utils.combine_vars(vars, updated)
-
- # plugin.get_host_vars retrieves just vars for specific host
- vars_results = [ plugin.get_host_vars(host, vault_password=vault_password) for plugin in self._vars_plugins if hasattr(plugin, 'get_host_vars')]
- for updated in vars_results:
- if updated is not None:
- vars = utils.combine_vars(vars, updated)
-
- # still need to check InventoryParser per host vars
- # which actually means InventoryScript per host,
- # which is not performant
- if self.parser is not None:
- vars = utils.combine_vars(vars, self.parser.get_host_variables(host))
-
- # Read host_vars/ files
- vars = utils.combine_vars(vars, self.get_host_vars(host))
-
- return vars
-
- def add_group(self, group):
- if group.name not in self.groups_list():
- self.groups.append(group)
- self._groups_list = None # invalidate internal cache
- else:
- raise errors.AnsibleError("group already in inventory: %s" % group.name)
-
- def list_hosts(self, pattern="all"):
-
- """ return a list of hostnames for a pattern """
-
- result = [ h.name for h in self.get_hosts(pattern) ]
- if len(result) == 0 and pattern in ["localhost", "127.0.0.1"]:
- result = [pattern]
- return result
-
- def list_groups(self):
- return sorted([ g.name for g in self.groups ], key=lambda x: x)
-
- # TODO: remove this function
- def get_restriction(self):
- return self._restriction
-
- def restrict_to(self, restriction):
- """
- Restrict list operations to the hosts given in restriction. This is used
- to exclude failed hosts in main playbook code; don't use this for other
- reasons.
- """
- if not isinstance(restriction, list):
- restriction = [ restriction ]
- self._restriction = restriction
-
- def also_restrict_to(self, restriction):
- """
- Works like restrict_to but offers an additional restriction. Playbooks use this
- to implement serial behavior.
- """
- if not isinstance(restriction, list):
- restriction = [ restriction ]
- self._also_restriction = restriction
-
- def subset(self, subset_pattern):
- """
- Limits inventory results to a subset of inventory that matches a given
- pattern, such as selecting a geographic or numeric slice from within
- a previous 'hosts' selection that only selected roles, or vice versa.
- Corresponds to the --limit parameter of ansible-playbook
- """
- if subset_pattern is None:
- self._subset = None
- else:
- subset_pattern = subset_pattern.replace(',',':')
- subset_pattern = subset_pattern.replace(";",":").split(":")
- results = []
- # allow Unix style @filename data
- for x in subset_pattern:
- if x.startswith("@"):
- fd = open(x[1:])
- results.extend(fd.read().split("\n"))
- fd.close()
- else:
- results.append(x)
- self._subset = results
-
- def lift_restriction(self):
- """ Do not restrict list operations """
- self._restriction = None
-
- def lift_also_restriction(self):
- """ Clears the also restriction """
- self._also_restriction = None
-
- def is_file(self):
- """ did inventory come from a file? """
- if not isinstance(self.host_list, basestring):
- return False
- return os.path.exists(self.host_list)
-
- def basedir(self):
- """ if inventory came from a file, what's the directory? """
- if not self.is_file():
- return None
- dname = os.path.dirname(self.host_list)
- if dname is None or dname == '' or dname == '.':
- cwd = os.getcwd()
- return os.path.abspath(cwd)
- return os.path.abspath(dname)
-
- def src(self):
- """ if inventory came from a file, what's the directory and file name? """
- if not self.is_file():
- return None
- return self.host_list
-
- def playbook_basedir(self):
- """ returns the directory of the current playbook """
- return self._playbook_basedir
-
- def set_playbook_basedir(self, dir):
- """
- sets the base directory of the playbook so inventory can use it as a
- basedir for host_ and group_vars, and other things.
- """
- # Only update things if dir is a different playbook basedir
- if dir != self._playbook_basedir:
- self._playbook_basedir = dir
- # get group vars from group_vars/ files
- for group in self.groups:
- group.vars = utils.combine_vars(group.vars, self.get_group_vars(group, new_pb_basedir=True))
- # get host vars from host_vars/ files
- for host in self.get_hosts():
- host.vars = utils.combine_vars(host.vars, self.get_host_vars(host, new_pb_basedir=True))
- # invalidate cache
- self._vars_per_host = {}
- self._vars_per_group = {}
-
- def get_host_vars(self, host, new_pb_basedir=False):
- """ Read host_vars/ files """
- return self._get_hostgroup_vars(host=host, group=None, new_pb_basedir=new_pb_basedir)
-
- def get_group_vars(self, group, new_pb_basedir=False):
- """ Read group_vars/ files """
- return self._get_hostgroup_vars(host=None, group=group, new_pb_basedir=new_pb_basedir)
-
- def _get_hostgroup_vars(self, host=None, group=None, new_pb_basedir=False):
- """
- Loads variables from group_vars/<groupname> and host_vars/<hostname> in directories parallel
- to the inventory base directory or in the same directory as the playbook. Variables in the playbook
- dir will win over the inventory dir if files are in both.
- """
-
- results = {}
- scan_pass = 0
- _basedir = self.basedir()
-
- # look in both the inventory base directory and the playbook base directory
- # unless we do an update for a new playbook base dir
- if not new_pb_basedir:
- basedirs = [_basedir, self._playbook_basedir]
- else:
- basedirs = [self._playbook_basedir]
-
- for basedir in basedirs:
-
- # this can happen from particular API usages, particularly if not run
- # from /usr/bin/ansible-playbook
- if basedir is None:
- continue
-
- scan_pass = scan_pass + 1
-
- # it's not an error if the directory does not exist, keep moving
- if not os.path.exists(basedir):
- continue
-
- # save work of second scan if the directories are the same
- if _basedir == self._playbook_basedir and scan_pass != 1:
- continue
-
- if group and host is None:
- # load vars in dir/group_vars/name_of_group
- base_path = os.path.join(basedir, "group_vars/%s" % group.name)
- results = utils.load_vars(base_path, results, vault_password=self._vault_password)
-
- elif host and group is None:
- # same for hostvars in dir/host_vars/name_of_host
- base_path = os.path.join(basedir, "host_vars/%s" % host.name)
- results = utils.load_vars(base_path, results, vault_password=self._vault_password)
-
- # all done, results is a dictionary of variables for this particular host.
- return results
-
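
get_hosts() above resolves a pattern in three passes: plain patterns are unioned first, then '&' patterns intersect the result, then '!' patterns subtract from it, with a bare exclude/intersect falling back to 'all'. A standalone sketch of that set algebra over plain host-name strings (the real code works on Host objects and preserves ordering):

    def resolve_pattern(groups, pattern):
        # groups: dict mapping group name -> set of host names
        terms = pattern.replace(';', ':').split(':')
        selected = set()
        for t in [t for t in terms if t and t[0] not in '!&'] or ['all']:
            selected |= groups.get(t, set())          # union pass
        for t in [t[1:] for t in terms if t.startswith('&')]:
            selected &= groups.get(t, set())          # intersection pass
        for t in [t[1:] for t in terms if t.startswith('!')]:
            selected -= groups.get(t, set())          # exclusion pass
        return selected

    groups = {'all': {'web1', 'web2', 'db1'},
              'webservers': {'web1', 'web2'},
              'staging': {'web2', 'db1'}}
    print(resolve_pattern(groups, 'webservers:&staging:!db1'))  # {'web2'}
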
diff --git a/v1/ansible/inventory/dir.py b/v1/ansible/inventory/dir.py
deleted file mode 100644
index 9ac23fff89..0000000000
--- a/v1/ansible/inventory/dir.py
+++ /dev/null
@@ -1,229 +0,0 @@
-# (c) 2013, Daniel Hokka Zakrisson <daniel@hozac.com>
-# (c) 2014, Serge van Ginderachter <serge@vanginderachter.be>
-#
-# This file is part of Ansible
-#
-# Ansible is free software: you can redistribute it and/or modify
-# it under the terms of the GNU General Public License as published by
-# the Free Software Foundation, either version 3 of the License, or
-# (at your option) any later version.
-#
-# Ansible is distributed in the hope that it will be useful,
-# but WITHOUT ANY WARRANTY; without even the implied warranty of
-# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
-# GNU General Public License for more details.
-#
-# You should have received a copy of the GNU General Public License
-# along with Ansible. If not, see <http://www.gnu.org/licenses/>.
-
-#############################################
-
-import os
-import ansible.constants as C
-from ansible.inventory.host import Host
-from ansible.inventory.group import Group
-from ansible.inventory.ini import InventoryParser
-from ansible.inventory.script import InventoryScript
-from ansible import utils
-from ansible import errors
-
-class InventoryDirectory(object):
- ''' Host inventory parser for ansible using a directory of inventories. '''
-
- def __init__(self, filename=C.DEFAULT_HOST_LIST):
- self.names = os.listdir(filename)
- self.names.sort()
- self.directory = filename
- self.parsers = []
- self.hosts = {}
- self.groups = {}
-
- for i in self.names:
-
- # Skip files that end with certain extensions or characters
- if any(i.endswith(ext) for ext in ("~", ".orig", ".bak", ".ini", ".retry", ".pyc", ".pyo")):
- continue
- # Skip hidden files
- if i.startswith('.') and not i.startswith('./'):
- continue
- # These are things inside of an inventory basedir
- if i in ("host_vars", "group_vars", "vars_plugins"):
- continue
- fullpath = os.path.join(self.directory, i)
- if os.path.isdir(fullpath):
- parser = InventoryDirectory(filename=fullpath)
- elif utils.is_executable(fullpath):
- parser = InventoryScript(filename=fullpath)
- else:
- parser = InventoryParser(filename=fullpath)
- self.parsers.append(parser)
-
- # retrieve all groups and hosts from the parser and add them to
- # self; don't look at group lists yet, to avoid
- # recursion trouble, but just make sure all objects exist in self
- newgroups = parser.groups.values()
- for group in newgroups:
- for host in group.hosts:
- self._add_host(host)
- for group in newgroups:
- self._add_group(group)
-
- # now check the objects lists so they contain only objects from
- # self; membership data in groups is already fine (except all &
- # ungrouped, see later), but might still reference objects not in self
- for group in self.groups.values():
- # iterate on a copy of the lists, as those lists get changed in
- # the loop
- # list with group's child group objects:
- for child in group.child_groups[:]:
- if child != self.groups[child.name]:
- group.child_groups.remove(child)
- group.child_groups.append(self.groups[child.name])
- # list with group's parent group objects:
- for parent in group.parent_groups[:]:
- if parent != self.groups[parent.name]:
- group.parent_groups.remove(parent)
- group.parent_groups.append(self.groups[parent.name])
- # list with group's host objects:
- for host in group.hosts[:]:
- if host != self.hosts[host.name]:
- group.hosts.remove(host)
- group.hosts.append(self.hosts[host.name])
- # also check here that the group that contains the host is
- # also contained in the host's group list
- if group not in self.hosts[host.name].groups:
- self.hosts[host.name].groups.append(group)
-
- # extra checks on special groups all and ungrouped
- # remove hosts from 'ungrouped' if they became member of other groups
- if 'ungrouped' in self.groups:
- ungrouped = self.groups['ungrouped']
- # loop on a copy of ungrouped hosts, as we want to change that list
- for host in ungrouped.hosts[:]:
- if len(host.groups) > 1:
- host.groups.remove(ungrouped)
- ungrouped.hosts.remove(host)
-
- # remove hosts from 'all' if they became member of other groups
- # all should only contain direct children, not grandchildren
- # direct children should have depth == 1
- if 'all' in self.groups:
- allgroup = self.groups['all']
- # loop on a copy of all's child groups, as we want to change that list
- for group in allgroup.child_groups[:]:
- # groups might once have been added to all, and later be added
- # to another group: we then need to remove the link with all
- if len(group.parent_groups) > 1 and allgroup in group.parent_groups:
- # real children of all have just 1 parent, all
- # this one has more, so not a direct child of all anymore
- group.parent_groups.remove(allgroup)
- allgroup.child_groups.remove(group)
- elif allgroup not in group.parent_groups:
- # this group was once added to all, but doesn't list it as
- # a parent any more; the info in the group is the correct
- # info
- allgroup.child_groups.remove(group)
-
-
- def _add_group(self, group):
- """ Merge an existing group or add a new one;
- Track parent and child groups, and hosts of the new one """
-
- if group.name not in self.groups:
- # it's brand new, add him!
- self.groups[group.name] = group
- if self.groups[group.name] != group:
- # different object, merge
- self._merge_groups(self.groups[group.name], group)
-
- def _add_host(self, host):
- if host.name not in self.hosts:
- # Papa's got a brand new host
- self.hosts[host.name] = host
- if self.hosts[host.name] != host:
- # different object, merge
- self._merge_hosts(self.hosts[host.name], host)
-
- def _merge_groups(self, group, newgroup):
- """ Merge all of instance newgroup into group,
- update parent/child relationships
- group lists may still contain group objects that exist in self with
- the same name, but were instantiated as different objects in some other
- inventory parser; these are handled later """
-
- # name
- if group.name != newgroup.name:
- raise errors.AnsibleError("Cannot merge group %s with %s" % (group.name, newgroup.name))
-
- # depth
- group.depth = max([group.depth, newgroup.depth])
-
- # hosts list (host objects are by now already added to self.hosts)
- for host in newgroup.hosts:
- grouphosts = dict([(h.name, h) for h in group.hosts])
- if host.name in grouphosts:
- # same host name but different object, merge
- self._merge_hosts(grouphosts[host.name], host)
- else:
- # new membership, add host to group from self
- # group from self will also be added again to host.groups, but
- # as different object
- group.add_host(self.hosts[host.name])
- # now remove the old group object from host.groups
- for hostgroup in [g for g in host.groups]:
- if hostgroup.name == group.name and hostgroup != self.groups[group.name]:
- self.hosts[host.name].groups.remove(hostgroup)
-
-
- # group child membership relation
- for newchild in newgroup.child_groups:
- # dict with existing child groups:
- childgroups = dict([(g.name, g) for g in group.child_groups])
- # check if child of new group is already known as a child
- if newchild.name not in childgroups:
- self.groups[group.name].add_child_group(newchild)
-
- # group parent membership relation
- for newparent in newgroup.parent_groups:
- # dict with existing parent groups:
- parentgroups = dict([(g.name, g) for g in group.parent_groups])
- # check if parent of new group is already known as a parent
- if newparent.name not in parentgroups:
- if newparent.name not in self.groups:
- # group does not exist yet in self, import him
- self.groups[newparent.name] = newparent
- # group now exists but not yet as a parent here
- self.groups[newparent.name].add_child_group(group)
-
- # variables
- group.vars = utils.combine_vars(group.vars, newgroup.vars)
-
- def _merge_hosts(self, host, newhost):
- """ Merge all of instance newhost into host """
-
- # name
- if host.name != newhost.name:
- raise errors.AnsibleError("Cannot merge host %s with %s" % (host.name, newhost.name))
-
- # group membership relation
- for newgroup in newhost.groups:
- # dict with existing groups:
- hostgroups = dict([(g.name, g) for g in host.groups])
- # check if new group is already known as a group
- if newgroup.name not in hostgroups:
- if newgroup.name not in self.groups:
- # group does not exist yet in self, import him
- self.groups[newgroup.name] = newgroup
- # group now exists but doesn't have host yet
- self.groups[newgroup.name].add_host(host)
-
- # variables
- host.vars = utils.combine_vars(host.vars, newhost.vars)
-
- def get_host_variables(self, host):
- """ Gets additional host variables from all inventories """
- vars = {}
- for i in self.parsers:
- vars.update(i.get_host_variables(host))
- return vars
-
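
When two files in the directory define the same group, _merge_groups() above folds them into one object: the host lists are unioned and the variables are combined via utils.combine_vars, which with the default hash_behaviour lets the later definition win on key conflicts. A toy sketch of that merge rule on plain dicts (not the actual implementation):

    def merge_group(existing, new):
        # hosts: union, preserving first-seen order
        for host in new['hosts']:
            if host not in existing['hosts']:
                existing['hosts'].append(host)
        # vars: later definition wins on conflicts, like combine_vars
        existing['vars'].update(new['vars'])
        return existing

    a = {'hosts': ['web1'], 'vars': {'http_port': 80}}
    b = {'hosts': ['web1', 'web2'], 'vars': {'http_port': 8080}}
    print(merge_group(a, b))
    # {'hosts': ['web1', 'web2'], 'vars': {'http_port': 8080}}
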
diff --git a/v1/ansible/inventory/expand_hosts.py b/v1/ansible/inventory/expand_hosts.py
deleted file mode 100644
index f129740935..0000000000
--- a/v1/ansible/inventory/expand_hosts.py
+++ /dev/null
@@ -1,116 +0,0 @@
-# (c) 2012, Zettar Inc.
-# Written by Chin Fang <fangchin@zettar.com>
-#
-# This file is part of Ansible
-#
-# This module is free software: you can redistribute it and/or modify
-# it under the terms of the GNU General Public License as published by
-# the Free Software Foundation, either version 3 of the License, or
-# (at your option) any later version.
-#
-# This software is distributed in the hope that it will be useful,
-# but WITHOUT ANY WARRANTY; without even the implied warranty of
-# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
-# GNU General Public License for more details.
-#
-# You should have received a copy of the GNU General Public License
-# along with this software. If not, see <http://www.gnu.org/licenses/>.
-#
-
-'''
-This module is for enhancing ansible's inventory parsing capability such
-that it can deal with hostnames specified using a simple pattern in the
-form of [beg:end], example: [1:5], [a:c], [D:G]. If beg is not specified,
-it defaults to 0.
-
-If beg is given and is left-zero-padded, e.g. '001', it is taken as a
-formatting hint when the range is expanded. e.g. [001:010] is to be
-expanded into 001, 002 ...009, 010.
-
-Note that when beg is specified with left zero padding, then the length of
-end must be the same as that of beg, else an exception is raised.
-'''
-import string
-
-from ansible import errors
-
-def detect_range(line = None):
- '''
- A helper function that checks a given host line to see if it contains
- a range pattern described in the docstring above.
-
- Returns True if the given line contains a pattern, else False.
- '''
- if 0 <= line.find("[") < line.find(":") < line.find("]"):
- return True
- else:
- return False
-
-def expand_hostname_range(line = None):
- '''
- A helper function that expands a given line that contains a pattern
- specified in top docstring, and returns a list that consists of the
- expanded version.
-
- The '[' and ']' characters are used to maintain the pseudo-code
- appearance. They are replaced in this function with '|' to ease
- string splitting.
-
- References: http://ansible.github.com/patterns.html#hosts-and-groups
- '''
- all_hosts = []
- if line:
- # A hostname such as db[1:6]-node is considered to consist of
- # three parts:
- # head: 'db'
- # nrange: [1:6]; range() is a built-in. Can't use the name
- # tail: '-node'
-
- # Add support for multiple ranges in a host so:
- # db[01:10:3]node-[01:10]
- # - to do this we split off at the first [...] set, getting the list
- # of hosts and then repeat until none left.
- # - also add an optional third parameter which contains the step. (Default: 1)
- # so range can be [01:10:2] -> 01 03 05 07 09
- # FIXME: make this work for alphabetic sequences too.
-
- (head, nrange, tail) = line.replace('[','|',1).replace(']','|',1).split('|')
- bounds = nrange.split(":")
- if len(bounds) != 2 and len(bounds) != 3:
- raise errors.AnsibleError("host range incorrectly specified")
- beg = bounds[0]
- end = bounds[1]
- if len(bounds) == 2:
- step = 1
- else:
- step = bounds[2]
- if not beg:
- beg = "0"
- if not end:
- raise errors.AnsibleError("host range end value missing")
- if beg[0] == '0' and len(beg) > 1:
- rlen = len(beg) # range length formatting hint
- if rlen != len(end):
- raise errors.AnsibleError("host range format incorrectly specified!")
- fill = lambda _: str(_).zfill(rlen) # range sequence
- else:
- fill = str
-
- try:
- i_beg = string.ascii_letters.index(beg)
- i_end = string.ascii_letters.index(end)
- if i_beg > i_end:
- raise errors.AnsibleError("host range format incorrectly specified!")
- seq = string.ascii_letters[i_beg:i_end+1]
- except ValueError: # not an alpha range
- seq = range(int(beg), int(end)+1, int(step))
-
- for rseq in seq:
- hname = ''.join((head, fill(rseq), tail))
-
- if detect_range(hname):
- all_hosts.extend( expand_hostname_range( hname ) )
- else:
- all_hosts.append(hname)
-
- return all_hosts
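
In practice the expansion above behaves like this (illustrative calls against the function defined in this file; zero padding is preserved when beg is left-padded, and the optional third field is the step):

    print(expand_hostname_range('db[01:10:3]-node'))
    # ['db01-node', 'db04-node', 'db07-node', 'db10-node']

    print(expand_hostname_range('host-[a:c]'))
    # ['host-a', 'host-b', 'host-c']
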
diff --git a/v1/ansible/inventory/group.py b/v1/ansible/inventory/group.py
deleted file mode 100644
index 262558e69c..0000000000
--- a/v1/ansible/inventory/group.py
+++ /dev/null
@@ -1,117 +0,0 @@
-# (c) 2012-2014, Michael DeHaan <michael.dehaan@gmail.com>
-#
-# This file is part of Ansible
-#
-# Ansible is free software: you can redistribute it and/or modify
-# it under the terms of the GNU General Public License as published by
-# the Free Software Foundation, either version 3 of the License, or
-# (at your option) any later version.
-#
-# Ansible is distributed in the hope that it will be useful,
-# but WITHOUT ANY WARRANTY; without even the implied warranty of
-# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
-# GNU General Public License for more details.
-#
-# You should have received a copy of the GNU General Public License
-# along with Ansible. If not, see <http://www.gnu.org/licenses/>.
-
-class Group(object):
- ''' a group of ansible hosts '''
-
- __slots__ = [ 'name', 'hosts', 'vars', 'child_groups', 'parent_groups', 'depth', '_hosts_cache' ]
-
- def __init__(self, name=None):
-
- self.depth = 0
- self.name = name
- self.hosts = []
- self.vars = {}
- self.child_groups = []
- self.parent_groups = []
- self._hosts_cache = None
- #self.clear_hosts_cache()
- if self.name is None:
- raise Exception("group name is required")
-
- def add_child_group(self, group):
-
- if self == group:
- raise Exception("can't add group to itself")
-
- # don't add if it's already there
- if not group in self.child_groups:
- self.child_groups.append(group)
-
- # update the depth of the child
- group.depth = max([self.depth+1, group.depth])
-
- # update the depth of the grandchildren
- group._check_children_depth()
-
- # now add self to child's parent_groups list, but only if there
- # isn't already a group with the same name
- if not self.name in [g.name for g in group.parent_groups]:
- group.parent_groups.append(self)
-
- self.clear_hosts_cache()
-
- def _check_children_depth(self):
-
- for group in self.child_groups:
- group.depth = max([self.depth+1, group.depth])
- group._check_children_depth()
-
- def add_host(self, host):
-
- self.hosts.append(host)
- host.add_group(self)
- self.clear_hosts_cache()
-
- def set_variable(self, key, value):
-
- self.vars[key] = value
-
- def clear_hosts_cache(self):
-
- self._hosts_cache = None
- for g in self.parent_groups:
- g.clear_hosts_cache()
-
- def get_hosts(self):
-
- if self._hosts_cache is None:
- self._hosts_cache = self._get_hosts()
-
- return self._hosts_cache
-
- def _get_hosts(self):
-
- hosts = []
- seen = {}
- for kid in self.child_groups:
- kid_hosts = kid.get_hosts()
- for kk in kid_hosts:
- if kk not in seen:
- seen[kk] = 1
- hosts.append(kk)
- for mine in self.hosts:
- if mine not in seen:
- seen[mine] = 1
- hosts.append(mine)
- return hosts
-
- def get_variables(self):
- return self.vars.copy()
-
- def _get_ancestors(self):
-
- results = {}
- for g in self.parent_groups:
- results[g.name] = g
- results.update(g._get_ancestors())
- return results
-
- def get_ancestors(self):
-
- return self._get_ancestors().values()
-
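
add_child_group() above keeps depth consistent down the whole tree, which is what later lets host variables be applied from the most general group to the most specific. A quick usage example against the class above:

    top = Group('all')
    web = Group('webservers')
    atl = Group('atlanta')
    top.add_child_group(web)   # web.depth becomes 1
    web.add_child_group(atl)   # atl.depth becomes 2
    print(atl.depth)                                    # 2
    print(sorted(g.name for g in atl.get_ancestors()))  # ['all', 'webservers']
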
diff --git a/v1/ansible/inventory/host.py b/v1/ansible/inventory/host.py
deleted file mode 100644
index d4dc20fa46..0000000000
--- a/v1/ansible/inventory/host.py
+++ /dev/null
@@ -1,67 +0,0 @@
-# (c) 2012-2014, Michael DeHaan <michael.dehaan@gmail.com>
-#
-# This file is part of Ansible
-#
-# Ansible is free software: you can redistribute it and/or modify
-# it under the terms of the GNU General Public License as published by
-# the Free Software Foundation, either version 3 of the License, or
-# (at your option) any later version.
-#
-# Ansible is distributed in the hope that it will be useful,
-# but WITHOUT ANY WARRANTY; without even the implied warranty of
-# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
-# GNU General Public License for more details.
-#
-# You should have received a copy of the GNU General Public License
-# along with Ansible. If not, see <http://www.gnu.org/licenses/>.
-
-import ansible.constants as C
-from ansible import utils
-
-class Host(object):
- ''' a single ansible host '''
-
- __slots__ = [ 'name', 'vars', 'groups' ]
-
- def __init__(self, name=None, port=None):
-
- self.name = name
- self.vars = {}
- self.groups = []
- if port and port != C.DEFAULT_REMOTE_PORT:
- self.set_variable('ansible_ssh_port', int(port))
-
- if self.name is None:
- raise Exception("host name is required")
-
- def add_group(self, group):
-
- self.groups.append(group)
-
- def set_variable(self, key, value):
-
- self.vars[key]=value
-
- def get_groups(self):
-
- groups = {}
- for g in self.groups:
- groups[g.name] = g
- ancestors = g.get_ancestors()
- for a in ancestors:
- groups[a.name] = a
- return groups.values()
-
- def get_variables(self):
-
- results = {}
- groups = self.get_groups()
- for group in sorted(groups, key=lambda g: g.depth):
- results = utils.combine_vars(results, group.get_variables())
- results = utils.combine_vars(results, self.vars)
- results['inventory_hostname'] = self.name
- results['inventory_hostname_short'] = self.name.split('.')[0]
- results['group_names'] = sorted([ g.name for g in groups if g.name != 'all'])
- return results
-
-
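
get_variables() above merges group variables in ascending depth order, so a deeper (more specific) group overrides its ancestors and the host's own vars override every group. An illustrative run against the Group and Host classes above (utils.combine_vars behaves like dict.update under the default hash_behaviour):

    top = Group('all')
    web = Group('webservers')
    top.add_child_group(web)

    h = Host('web1')
    web.add_host(h)
    top.set_variable('http_port', 80)
    web.set_variable('http_port', 8080)   # deeper group wins
    h.set_variable('max_clients', 200)    # host vars beat all groups

    v = h.get_variables()
    print(v['http_port'], v['max_clients'])  # 8080 200
    print(v['group_names'])                  # ['webservers']
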
diff --git a/v1/ansible/inventory/ini.py b/v1/ansible/inventory/ini.py
deleted file mode 100644
index bd9a98e7f8..0000000000
--- a/v1/ansible/inventory/ini.py
+++ /dev/null
@@ -1,208 +0,0 @@
-# (c) 2012-2014, Michael DeHaan <michael.dehaan@gmail.com>
-#
-# This file is part of Ansible
-#
-# Ansible is free software: you can redistribute it and/or modify
-# it under the terms of the GNU General Public License as published by
-# the Free Software Foundation, either version 3 of the License, or
-# (at your option) any later version.
-#
-# Ansible is distributed in the hope that it will be useful,
-# but WITHOUT ANY WARRANTY; without even the implied warranty of
-# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
-# GNU General Public License for more details.
-#
-# You should have received a copy of the GNU General Public License
-# along with Ansible. If not, see <http://www.gnu.org/licenses/>.
-
-#############################################
-
-import ansible.constants as C
-from ansible.inventory.host import Host
-from ansible.inventory.group import Group
-from ansible.inventory.expand_hosts import detect_range
-from ansible.inventory.expand_hosts import expand_hostname_range
-from ansible import errors
-from ansible import utils
-import shlex
-import re
-import ast
-
-class InventoryParser(object):
- """
- Host inventory for ansible.
- """
-
- def __init__(self, filename=C.DEFAULT_HOST_LIST):
-
- with open(filename) as fh:
- self.filename = filename
- self.lines = fh.readlines()
- self.groups = {}
- self.hosts = {}
- self._parse()
-
- def _parse(self):
-
- self._parse_base_groups()
- self._parse_group_children()
- self._add_allgroup_children()
- self._parse_group_variables()
- return self.groups
-
- @staticmethod
- def _parse_value(v):
- if "#" not in v:
- try:
- ret = ast.literal_eval(v)
- if not isinstance(ret, float):
- # Do not trim floats. Eg: "1.20" to 1.2
- return ret
- # Using explicit exceptions.
- # Likely a string that literal_eval does not like. We will then just set it.
- except ValueError:
- # For some reason this was thought to be malformed.
- pass
- except SyntaxError:
- # Is this a hash with an equals at the end?
- pass
- return v
-
- # [webservers]
- # alpha
- # beta:2345
- # gamma sudo=True user=root
- # delta asdf=jkl favcolor=red
-
- def _add_allgroup_children(self):
-
- for group in self.groups.values():
- if group.depth == 0 and group.name != 'all':
- self.groups['all'].add_child_group(group)
-
-
- def _parse_base_groups(self):
- # FIXME: refactor
-
- ungrouped = Group(name='ungrouped')
- all = Group(name='all')
- all.add_child_group(ungrouped)
-
- self.groups = dict(all=all, ungrouped=ungrouped)
- active_group_name = 'ungrouped'
-
- for lineno in range(len(self.lines)):
- line = utils.before_comment(self.lines[lineno]).strip()
- if line.startswith("[") and line.endswith("]"):
- active_group_name = line.replace("[","").replace("]","")
- if ":vars" in line or ":children" in line:
- active_group_name = active_group_name.rsplit(":", 1)[0]
- if active_group_name not in self.groups:
- new_group = self.groups[active_group_name] = Group(name=active_group_name)
- active_group_name = None
- elif active_group_name not in self.groups:
- new_group = self.groups[active_group_name] = Group(name=active_group_name)
- elif line.startswith(";") or line == '':
- pass
- elif active_group_name:
- tokens = shlex.split(line)
- if len(tokens) == 0:
- continue
- hostname = tokens[0]
- port = C.DEFAULT_REMOTE_PORT
- # Two cases to check:
- # 0. A hostname that contains a range pseudo-code and a port
- # 1. A hostname that contains just a port
- if hostname.count(":") > 1:
- # Possibly an IPv6 address, or maybe a host line with multiple ranges
- # IPv6 with Port XXX:XXX::XXX.port
- # FQDN foo.example.com
- if hostname.count(".") == 1:
- (hostname, port) = hostname.rsplit(".", 1)
- elif ("[" in hostname and
- "]" in hostname and
- ":" in hostname and
- (hostname.rindex("]") < hostname.rindex(":")) or
- ("]" not in hostname and ":" in hostname)):
- (hostname, port) = hostname.rsplit(":", 1)
-
- hostnames = []
- if detect_range(hostname):
- hostnames = expand_hostname_range(hostname)
- else:
- hostnames = [hostname]
-
- for hn in hostnames:
- host = None
- if hn in self.hosts:
- host = self.hosts[hn]
- else:
- host = Host(name=hn, port=port)
- self.hosts[hn] = host
- if len(tokens) > 1:
- for t in tokens[1:]:
- if t.startswith('#'):
- break
- try:
- (k,v) = t.split("=", 1)
- except ValueError, e:
- raise errors.AnsibleError("%s:%s: Invalid ini entry: %s - %s" % (self.filename, lineno + 1, t, str(e)))
- host.set_variable(k, self._parse_value(v))
- self.groups[active_group_name].add_host(host)
-
- # [southeast:children]
- # atlanta
- # raleigh
-
- def _parse_group_children(self):
- group = None
-
- for lineno in range(len(self.lines)):
- line = self.lines[lineno].strip()
- if line is None or line == '':
- continue
- if line.startswith("[") and ":children]" in line:
- line = line.replace("[","").replace(":children]","")
- group = self.groups.get(line, None)
- if group is None:
- group = self.groups[line] = Group(name=line)
- elif line.startswith("#") or line.startswith(";"):
- pass
- elif line.startswith("["):
- group = None
- elif group:
- kid_group = self.groups.get(line, None)
- if kid_group is None:
- raise errors.AnsibleError("%s:%d: child group is not defined: (%s)" % (self.filename, lineno + 1, line))
- else:
- group.add_child_group(kid_group)
-
-
- # [webservers:vars]
- # http_port=1234
- # maxRequestsPerChild=200
-
- def _parse_group_variables(self):
- group = None
- for lineno in range(len(self.lines)):
- line = self.lines[lineno].strip()
- if line.startswith("[") and ":vars]" in line:
- line = line.replace("[","").replace(":vars]","")
- group = self.groups.get(line, None)
- if group is None:
- raise errors.AnsibleError("%s:%d: can't add vars to undefined group: %s" % (self.filename, lineno + 1, line))
- elif line.startswith("#") or line.startswith(";"):
- pass
- elif line.startswith("["):
- group = None
- elif line == '':
- pass
- elif group:
- if "=" not in line:
- raise errors.AnsibleError("%s:%d: variables assigned to group must be in key=value form" % (self.filename, lineno + 1))
- else:
- (k, v) = [e.strip() for e in line.split("=", 1)]
- group.set_variable(k, self._parse_value(v))
-
- def get_host_variables(self, host):
- return {}
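
Tying the three parsing passes together, the comment blocks above correspond to an inventory file like the following (sample data), which this parser loads as two base groups, a parent group with two children, and one group-level variable block:

    [webservers]
    alpha
    beta:2345                # host with an explicit ssh port
    web[01:03].example.com   # range, expands to web01..web03

    [dbservers]
    db1 ansible_ssh_user=root

    [southeast:children]
    webservers
    dbservers

    [southeast:vars]
    http_port=8080
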
diff --git a/v1/ansible/inventory/script.py b/v1/ansible/inventory/script.py
deleted file mode 100644
index b83cb9bcc7..0000000000
--- a/v1/ansible/inventory/script.py
+++ /dev/null
@@ -1,154 +0,0 @@
-# (c) 2012-2014, Michael DeHaan <michael.dehaan@gmail.com>
-#
-# This file is part of Ansible
-#
-# Ansible is free software: you can redistribute it and/or modify
-# it under the terms of the GNU General Public License as published by
-# the Free Software Foundation, either version 3 of the License, or
-# (at your option) any later version.
-#
-# Ansible is distributed in the hope that it will be useful,
-# but WITHOUT ANY WARRANTY; without even the implied warranty of
-# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
-# GNU General Public License for more details.
-#
-# You should have received a copy of the GNU General Public License
-# along with Ansible. If not, see <http://www.gnu.org/licenses/>.
-
-#############################################
-
-import os
-import subprocess
-import ansible.constants as C
-from ansible.inventory.host import Host
-from ansible.inventory.group import Group
-from ansible.module_utils.basic import json_dict_bytes_to_unicode
-from ansible import utils
-from ansible import errors
-import sys
-
-
-class InventoryScript(object):
- ''' Host inventory parser for ansible using external inventory scripts. '''
-
- def __init__(self, filename=C.DEFAULT_HOST_LIST):
-
- # Support inventory scripts that are not prefixed with some
- # path information but happen to be in the current working
- # directory when '.' is not in PATH.
- self.filename = os.path.abspath(filename)
- cmd = [ self.filename, "--list" ]
- try:
- sp = subprocess.Popen(cmd, stdout=subprocess.PIPE, stderr=subprocess.PIPE)
- except OSError, e:
- raise errors.AnsibleError("problem running %s (%s)" % (' '.join(cmd), e))
- (stdout, stderr) = sp.communicate()
-
- if sp.returncode != 0:
- raise errors.AnsibleError("Inventory script (%s) had an execution error: %s " % (filename,stderr))
-
- self.data = stdout
- # see comment about _meta below
- self.host_vars_from_top = None
- self.groups = self._parse(stderr)
-
-
- def _parse(self, err):
-
- all_hosts = {}
-
- # not passing from_remote because data from CMDB is trusted
- self.raw = utils.parse_json(self.data)
- self.raw = json_dict_bytes_to_unicode(self.raw)
-
- all = Group('all')
- groups = dict(all=all)
- group = None
-
-
- if 'failed' in self.raw:
- sys.stderr.write(err + "\n")
- raise errors.AnsibleError("failed to parse executable inventory script results: %s" % self.raw)
-
- for (group_name, data) in self.raw.items():
-
- # in Ansible 1.3 and later, a "_meta" subelement may contain
- # a variable "hostvars" which contains a hash for each host
- # if this "hostvars" exists at all then do not call --host for each
- # host. This is for efficiency and scripts should still return data
- # if called with --host for backwards compat with 1.2 and earlier.
-
- if group_name == '_meta':
- if 'hostvars' in data:
- self.host_vars_from_top = data['hostvars']
- continue
-
- if group_name != all.name:
- group = groups[group_name] = Group(group_name)
- else:
- group = all
- host = None
-
- if not isinstance(data, dict):
- data = {'hosts': data}
- # if none of those subkeys are present, it's the simplified syntax: a host with vars
- elif not any(k in data for k in ('hosts','vars','children')):
- data = {'hosts': [group_name], 'vars': data}
-
- if 'hosts' in data:
- if not isinstance(data['hosts'], list):
- raise errors.AnsibleError("You defined a group \"%s\" with bad "
- "data for the host list:\n %s" % (group_name, data))
-
- for hostname in data['hosts']:
- if not hostname in all_hosts:
- all_hosts[hostname] = Host(hostname)
- host = all_hosts[hostname]
- group.add_host(host)
-
- if 'vars' in data:
- if not isinstance(data['vars'], dict):
- raise errors.AnsibleError("You defined a group \"%s\" with bad "
- "data for variables:\n %s" % (group_name, data))
-
- for k, v in data['vars'].iteritems():
- if group.name == all.name:
- all.set_variable(k, v)
- else:
- group.set_variable(k, v)
-
- # Separate loop to ensure all groups are defined
- for (group_name, data) in self.raw.items():
- if group_name == '_meta':
- continue
- if isinstance(data, dict) and 'children' in data:
- for child_name in data['children']:
- if child_name in groups:
- groups[group_name].add_child_group(groups[child_name])
-
- for group in groups.values():
- if group.depth == 0 and group.name != 'all':
- all.add_child_group(group)
-
- return groups
-
- def get_host_variables(self, host):
- """ Runs <script> --host <hostname> to determine additional host variables """
- if self.host_vars_from_top is not None:
- got = self.host_vars_from_top.get(host.name, {})
- return got
-
-
- cmd = [self.filename, "--host", host.name]
- try:
- sp = subprocess.Popen(cmd, stdout=subprocess.PIPE, stderr=subprocess.PIPE)
- except OSError, e:
- raise errors.AnsibleError("problem running %s (%s)" % (' '.join(cmd), e))
- (out, err) = sp.communicate()
- if out.strip() == '':
- return dict()
- try:
- return json_dict_bytes_to_unicode(utils.parse_json(out))
- except ValueError:
- raise errors.AnsibleError("could not parse post variable response: %s, %s" % (cmd, out))
-
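
The parser above expects `script --list` to emit a JSON document mapping group names to hosts/vars/children, optionally with a top-level '_meta' hash whose 'hostvars' entry spares Ansible one '--host <name>' call per host. A minimal conforming script might look like this (illustrative):

    #!/usr/bin/env python
    # minimal dynamic inventory script (illustrative)
    import json
    import sys

    INVENTORY = {
        'webservers': {
            'hosts': ['web1', 'web2'],
            'vars': {'http_port': 8080},
        },
        # supplying hostvars under _meta avoids per-host --host calls
        '_meta': {
            'hostvars': {'web1': {'rack': 'a1'}, 'web2': {'rack': 'b2'}},
        },
    }

    if len(sys.argv) > 1 and sys.argv[1] == '--list':
        print(json.dumps(INVENTORY))
    elif len(sys.argv) > 2 and sys.argv[1] == '--host':
        # kept for pre-1.3 compatibility; _meta already covers it
        print(json.dumps(INVENTORY['_meta']['hostvars'].get(sys.argv[2], {})))
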
diff --git a/v1/ansible/inventory/vars_plugins/noop.py b/v1/ansible/inventory/vars_plugins/noop.py
deleted file mode 100644
index 5d4b4b6658..0000000000
--- a/v1/ansible/inventory/vars_plugins/noop.py
+++ /dev/null
@@ -1,48 +0,0 @@
-# (c) 2012-2014, Michael DeHaan <michael.dehaan@gmail.com>
-# (c) 2014, Serge van Ginderachter <serge@vanginderachter.be>
-#
-# This file is part of Ansible
-#
-# Ansible is free software: you can redistribute it and/or modify
-# it under the terms of the GNU General Public License as published by
-# the Free Software Foundation, either version 3 of the License, or
-# (at your option) any later version.
-#
-# Ansible is distributed in the hope that it will be useful,
-# but WITHOUT ANY WARRANTY; without even the implied warranty of
-# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
-# GNU General Public License for more details.
-#
-# You should have received a copy of the GNU General Public License
-# along with Ansible. If not, see <http://www.gnu.org/licenses/>.
-
-class VarsModule(object):
-
- """
- Loads variables for groups and/or hosts
- """
-
- def __init__(self, inventory):
-
- """ constructor """
-
- self.inventory = inventory
- self.inventory_basedir = inventory.basedir()
-
-
- def run(self, host, vault_password=None):
- """ For backwards compatibility, when only vars per host were retrieved.
- This method should return both host specific vars as well as vars
- calculated from the groups the host is a member of """
- return {}
-
-
- def get_host_vars(self, host, vault_password=None):
- """ Get host specific variables. """
- return {}
-
-
- def get_group_vars(self, group, vault_password=None):
- """ Get group specific variables. """
- return {}
-
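
Any drop-in vars plugin follows the same three-method shape as this noop class; returning real data instead of empty dicts is all it takes. For instance, a hypothetical plugin that hands every host the inventory's base directory:

    class VarsModule(object):
        ''' example plugin: expose the inventory basedir to every host '''

        def __init__(self, inventory):
            self.inventory = inventory
            self.inventory_basedir = inventory.basedir()

        def run(self, host, vault_password=None):
            return {}

        def get_host_vars(self, host, vault_password=None):
            # 'inventory_dir_hint' is a made-up variable name
            return {'inventory_dir_hint': self.inventory_basedir}

        def get_group_vars(self, group, vault_password=None):
            return {}
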
diff --git a/v1/ansible/module_common.py b/v1/ansible/module_common.py
deleted file mode 100644
index fba5b9137d..0000000000
--- a/v1/ansible/module_common.py
+++ /dev/null
@@ -1,196 +0,0 @@
-# (c) 2013-2014, Michael DeHaan <michael.dehaan@gmail.com>
-#
-# This file is part of Ansible
-#
-# Ansible is free software: you can redistribute it and/or modify
-# it under the terms of the GNU General Public License as published by
-# the Free Software Foundation, either version 3 of the License, or
-# (at your option) any later version.
-#
-# Ansible is distributed in the hope that it will be useful,
-# but WITHOUT ANY WARRANTY; without even the implied warranty of
-# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
-# GNU General Public License for more details.
-#
-# You should have received a copy of the GNU General Public License
-# along with Ansible. If not, see <http://www.gnu.org/licenses/>.
-
-# from python and deps
-from cStringIO import StringIO
-import inspect
-import os
-import shlex
-
-# from Ansible
-from ansible import errors
-from ansible import utils
-from ansible import constants as C
-from ansible import __version__
-from ansible.utils.unicode import to_bytes
-
-REPLACER = "#<<INCLUDE_ANSIBLE_MODULE_COMMON>>"
-REPLACER_ARGS = "\"<<INCLUDE_ANSIBLE_MODULE_ARGS>>\""
-REPLACER_COMPLEX = "\"<<INCLUDE_ANSIBLE_MODULE_COMPLEX_ARGS>>\""
-REPLACER_WINDOWS = "# POWERSHELL_COMMON"
-REPLACER_VERSION = "\"<<ANSIBLE_VERSION>>\""
-REPLACER_SELINUX = "<<SELINUX_SPECIAL_FILESYSTEMS>>"
-
-
-class ModuleReplacer(object):
-
- """
- The Replacer is used to insert chunks of code into modules before
- transfer. Rather than doing classical python imports, this allows for more
- efficient transfer in a no-bootstrapping scenario by not moving extra files
- over the wire, and also takes care of embedding arguments in the transferred
- modules.
-
- This version is done in such a way that local imports can still be
- used in the module code, so IDEs don't have to be aware of what is going on.
-
- Example:
-
- from ansible.module_utils.basic import *
-
- ... will result in the insertion of basic.py into the module
-
- from the module_utils/ directory in the source tree.
-
- All modules are required to import at least basic, though there will also
- be other snippets.
-
- # POWERSHELL_COMMON
-
- Also results in the inclusion of the common code in powershell.ps1
-
- """
-
- # ******************************************************************************
-
- def __init__(self, strip_comments=False):
- this_file = inspect.getfile(inspect.currentframe())
- self.snippet_path = os.path.join(os.path.dirname(this_file), 'module_utils')
- self.strip_comments = strip_comments # TODO: implement
-
- # ******************************************************************************
-
-
- def slurp(self, path):
- if not os.path.exists(path):
- raise errors.AnsibleError("imported module support code does not exist at %s" % path)
- fd = open(path)
- data = fd.read()
- fd.close()
- return data
-
- def _find_snippet_imports(self, module_data, module_path):
- """
- Given the source of the module, splice in the shared module_utils code
- and return whether it's a new or old style module.
- """
-
- module_style = 'old'
- if REPLACER in module_data:
- module_style = 'new'
- elif 'from ansible.module_utils.' in module_data:
- module_style = 'new'
- elif 'WANT_JSON' in module_data:
- module_style = 'non_native_want_json'
-
- output = StringIO()
- lines = module_data.split('\n')
- snippet_names = []
-
- for line in lines:
-
- if REPLACER in line:
- output.write(self.slurp(os.path.join(self.snippet_path, "basic.py")))
- snippet_names.append('basic')
- if REPLACER_WINDOWS in line:
- ps_data = self.slurp(os.path.join(self.snippet_path, "powershell.ps1"))
- output.write(ps_data)
- snippet_names.append('powershell')
- elif line.startswith('from ansible.module_utils.'):
- tokens=line.split(".")
- import_error = False
- if len(tokens) != 3:
- import_error = True
- if " import *" not in line:
- import_error = True
- if import_error:
- raise errors.AnsibleError("error importing module in %s, expecting format like 'from ansible.module_utils.basic import *'" % module_path)
- snippet_name = tokens[2].split()[0]
- snippet_names.append(snippet_name)
- output.write(self.slurp(os.path.join(self.snippet_path, snippet_name + ".py")))
-
- else:
-                if self.strip_comments and (line.startswith("#") or line == ''):
-                    continue
-                output.write(line)
- output.write("\n")
-
- if not module_path.endswith(".ps1"):
- # Unixy modules
- if len(snippet_names) > 0 and not 'basic' in snippet_names:
- raise errors.AnsibleError("missing required import in %s: from ansible.module_utils.basic import *" % module_path)
- else:
- # Windows modules
- if len(snippet_names) > 0 and not 'powershell' in snippet_names:
- raise errors.AnsibleError("missing required import in %s: # POWERSHELL_COMMON" % module_path)
-
- return (output.getvalue(), module_style)
-
- # ******************************************************************************
-
- def modify_module(self, module_path, complex_args, module_args, inject):
-
- with open(module_path) as f:
-
- # read in the module source
- module_data = f.read()
-
- (module_data, module_style) = self._find_snippet_imports(module_data, module_path)
-
- complex_args_json = utils.jsonify(complex_args)
- # We force conversion of module_args to str because module_common calls shlex.split,
- # a standard library function that incorrectly handles Unicode input before Python 2.7.3.
- # Note: it would be better to do all this conversion at the border
- # (when the data is originally parsed into data structures) but
- # it's currently coming from too many sources to make that
- # effective.
- try:
- encoded_args = repr(module_args.encode('utf-8'))
- except UnicodeDecodeError:
- encoded_args = repr(module_args)
-        try:
-            encoded_complex = repr(complex_args_json.encode('utf-8'))
-        except UnicodeDecodeError:
-            encoded_complex = repr(complex_args_json)
-
- # these strings should be part of the 'basic' snippet which is required to be included
- module_data = module_data.replace(REPLACER_VERSION, repr(__version__))
- module_data = module_data.replace(REPLACER_SELINUX, ','.join(C.DEFAULT_SELINUX_SPECIAL_FS))
- module_data = module_data.replace(REPLACER_ARGS, encoded_args)
- module_data = module_data.replace(REPLACER_COMPLEX, encoded_complex)
-
- if module_style == 'new':
- facility = C.DEFAULT_SYSLOG_FACILITY
- if 'ansible_syslog_facility' in inject:
- facility = inject['ansible_syslog_facility']
- module_data = module_data.replace('syslog.LOG_USER', "syslog.%s" % facility)
-
- lines = module_data.split("\n")
- shebang = None
- if lines[0].startswith("#!"):
- shebang = lines[0].strip()
- args = shlex.split(str(shebang[2:]))
- interpreter = args[0]
- interpreter_config = 'ansible_%s_interpreter' % os.path.basename(interpreter)
-
- if interpreter_config in inject:
- interpreter = to_bytes(inject[interpreter_config], errors='strict')
- lines[0] = shebang = "#!%s %s" % (interpreter, " ".join(args[1:]))
- module_data = "\n".join(lines)
-
- return (module_data, module_style, shebang)
-
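For reference, a minimal sketch (annotation, not part of this patch) of how the
v1 replacer was driven; the path and argument values here are hypothetical:

    from ansible.module_common import ModuleReplacer

    replacer = ModuleReplacer(strip_comments=False)
    # modify_module() returns the expanded source, the detected module style
    # ('old', 'new', or 'non_native_want_json') and the shebang line, with the
    # REPLACER_* placeholders already substituted.
    (module_data, module_style, shebang) = replacer.modify_module(
        '/path/to/module.py',       # hypothetical module path
        complex_args={},            # structured arguments (YAML/JSON)
        module_args='data=pong',    # key=value argument string
        inject={},                  # host vars, e.g. ansible_python_interpreter
    )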
diff --git a/v1/ansible/module_utils/a10.py b/v1/ansible/module_utils/a10.py
deleted file mode 100644
index cfc217ee61..0000000000
--- a/v1/ansible/module_utils/a10.py
+++ /dev/null
@@ -1,103 +0,0 @@
-# This code is part of Ansible, but is an independent component.
-# This particular file snippet, and this file snippet only, is BSD licensed.
-# Modules you write using this snippet, which is embedded dynamically by Ansible
-# still belong to the author of the module, and may assign their own license
-# to the complete work.
-#
-# Copyright (c), Michael DeHaan <michael.dehaan@gmail.com>, 2012-2013
-# All rights reserved.
-#
-# Redistribution and use in source and binary forms, with or without modification,
-# are permitted provided that the following conditions are met:
-#
-# * Redistributions of source code must retain the above copyright
-# notice, this list of conditions and the following disclaimer.
-# * Redistributions in binary form must reproduce the above copyright notice,
-# this list of conditions and the following disclaimer in the documentation
-# and/or other materials provided with the distribution.
-#
-# THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS" AND
-# ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED
-# WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE DISCLAIMED.
-# IN NO EVENT SHALL THE COPYRIGHT HOLDER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT,
-# INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO,
-# PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS
-# INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
-# LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE
-# USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
-
-AXAPI_PORT_PROTOCOLS = {
- 'tcp': 2,
- 'udp': 3,
-}
-
-AXAPI_VPORT_PROTOCOLS = {
- 'tcp': 2,
- 'udp': 3,
- 'fast-http': 9,
- 'http': 11,
- 'https': 12,
-}
-
-def a10_argument_spec():
- return dict(
- host=dict(type='str', required=True),
- username=dict(type='str', aliases=['user', 'admin'], required=True),
- password=dict(type='str', aliases=['pass', 'pwd'], required=True, no_log=True),
- write_config=dict(type='bool', default=False)
- )
-
-def axapi_failure(result):
- if 'response' in result and result['response'].get('status') == 'fail':
- return True
- return False
-
-def axapi_call(module, url, post=None):
- '''
-    Returns a data structure based on the result of the API call
- '''
- rsp, info = fetch_url(module, url, data=post)
- if not rsp or info['status'] >= 400:
- module.fail_json(msg="failed to connect (status code %s), error was %s" % (info['status'], info.get('msg', 'no error given')))
- try:
- raw_data = rsp.read()
- data = json.loads(raw_data)
- except ValueError:
- # at least one API call (system.action.write_config) returns
- # XML even when JSON is requested, so do some minimal handling
- # here to prevent failing even when the call succeeded
- if 'status="ok"' in raw_data.lower():
- data = {"response": {"status": "OK"}}
- else:
- data = {"response": {"status": "fail", "err": {"msg": raw_data}}}
- except:
- module.fail_json(msg="could not read the result from the host")
- finally:
- rsp.close()
- return data
-
-def axapi_authenticate(module, base_url, username, password):
- url = '%s&method=authenticate&username=%s&password=%s' % (base_url, username, password)
- result = axapi_call(module, url)
- if axapi_failure(result):
- return module.fail_json(msg=result['response']['err']['msg'])
- sessid = result['session_id']
- return base_url + '&session_id=' + sessid
-
-def axapi_enabled_disabled(flag):
- '''
- The axapi uses 0/1 integer values for flags, rather than strings
- or booleans, so convert the given flag to a 0 or 1. For now, params
-    are specified as strings only so that's what we check.
- '''
- if flag == 'enabled':
- return 1
- else:
- return 0
-
-def axapi_get_port_protocol(protocol):
- return AXAPI_PORT_PROTOCOLS.get(protocol.lower(), None)
-
-def axapi_get_vport_protocol(protocol):
- return AXAPI_VPORT_PROTOCOLS.get(protocol.lower(), None)
-
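For orientation, a hedged sketch (annotation, not part of this patch) of how a
module consumed these helpers; the endpoint format and module layout are
assumptions, and fetch_url/json come from other snippets:

    from ansible.module_utils.basic import *
    from ansible.module_utils.urls import *
    from ansible.module_utils.a10 import *

    module = AnsibleModule(argument_spec=a10_argument_spec())
    base_url = 'https://%s/services/rest/V2/?format=json' % module.params['host']
    session_url = axapi_authenticate(module, base_url,
                                     module.params['username'],
                                     module.params['password'])
    result = axapi_call(module, session_url + '&method=slb.server.getAll')
    if axapi_failure(result):
        module.fail_json(msg=result['response']['err']['msg'])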
diff --git a/v1/ansible/module_utils/basic.py b/v1/ansible/module_utils/basic.py
deleted file mode 100644
index e772a12efc..0000000000
--- a/v1/ansible/module_utils/basic.py
+++ /dev/null
@@ -1,1631 +0,0 @@
-# This code is part of Ansible, but is an independent component.
-# This particular file snippet, and this file snippet only, is BSD licensed.
-# Modules you write using this snippet, which is embedded dynamically by Ansible
-# still belong to the author of the module, and may assign their own license
-# to the complete work.
-#
-# Copyright (c), Michael DeHaan <michael.dehaan@gmail.com>, 2012-2013
-# All rights reserved.
-#
-# Redistribution and use in source and binary forms, with or without modification,
-# are permitted provided that the following conditions are met:
-#
-# * Redistributions of source code must retain the above copyright
-# notice, this list of conditions and the following disclaimer.
-# * Redistributions in binary form must reproduce the above copyright notice,
-# this list of conditions and the following disclaimer in the documentation
-# and/or other materials provided with the distribution.
-#
-# THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS" AND
-# ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED
-# WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE DISCLAIMED.
-# IN NO EVENT SHALL THE COPYRIGHT HOLDER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT,
-# INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO,
-# PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS
-# INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
-# LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE
-# USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
-#
-
-# == BEGIN DYNAMICALLY INSERTED CODE ==
-
-ANSIBLE_VERSION = "<<ANSIBLE_VERSION>>"
-
-MODULE_ARGS = "<<INCLUDE_ANSIBLE_MODULE_ARGS>>"
-MODULE_COMPLEX_ARGS = "<<INCLUDE_ANSIBLE_MODULE_COMPLEX_ARGS>>"
-
-BOOLEANS_TRUE = ['yes', 'on', '1', 'true', 1]
-BOOLEANS_FALSE = ['no', 'off', '0', 'false', 0]
-BOOLEANS = BOOLEANS_TRUE + BOOLEANS_FALSE
-
-SELINUX_SPECIAL_FS="<<SELINUX_SPECIAL_FILESYSTEMS>>"
-
-# ansible modules can be written in any language. To simplify
-# development of Python modules, the functions available here
-# can be inserted in any module source automatically by including
-# #<<INCLUDE_ANSIBLE_MODULE_COMMON>> on a blank line by itself inside
-# of an ansible module. The source of this common code lives
-# in lib/ansible/module_common.py
-
-import locale
-import os
-import re
-import pipes
-import shlex
-import subprocess
-import sys
-import syslog
-import types
-import time
-import select
-import shutil
-import stat
-import tempfile
-import traceback
-import grp
-import pwd
-import platform
-import errno
-
-try:
- import json
-except ImportError:
- try:
- import simplejson as json
- except ImportError:
- sys.stderr.write('Error: ansible requires a json module, none found!')
- sys.exit(1)
- except SyntaxError:
-        sys.stderr.write('SyntaxError: probably due to the json module being built for a different Python version')
- sys.exit(1)
-
-HAVE_SELINUX=False
-try:
- import selinux
- HAVE_SELINUX=True
-except ImportError:
- pass
-
-HAVE_HASHLIB=False
-try:
- from hashlib import sha1 as _sha1
- HAVE_HASHLIB=True
-except ImportError:
- from sha import sha as _sha1
-
-try:
- from hashlib import md5 as _md5
-except ImportError:
- try:
- from md5 import md5 as _md5
- except ImportError:
- # MD5 unavailable. Possibly FIPS mode
- _md5 = None
-
-try:
- from hashlib import sha256 as _sha256
-except ImportError:
- pass
-
-try:
- from systemd import journal
- has_journal = True
-except ImportError:
- import syslog
- has_journal = False
-
-try:
- from ast import literal_eval as _literal_eval
-except ImportError:
- # a replacement for literal_eval that works with python 2.4. from:
- # https://mail.python.org/pipermail/python-list/2009-September/551880.html
-    # which is essentially a cut/paste from an earlier (2.6) version of python's
- # ast.py
- from compiler import parse
- from compiler.ast import *
- def _literal_eval(node_or_string):
- """
- Safely evaluate an expression node or a string containing a Python
- expression. The string or node provided may only consist of the following
- Python literal structures: strings, numbers, tuples, lists, dicts, booleans,
- and None.
- """
- _safe_names = {'None': None, 'True': True, 'False': False}
- if isinstance(node_or_string, basestring):
- node_or_string = parse(node_or_string, mode='eval')
- if isinstance(node_or_string, Expression):
- node_or_string = node_or_string.node
- def _convert(node):
- if isinstance(node, Const) and isinstance(node.value, (basestring, int, float, long, complex)):
- return node.value
- elif isinstance(node, Tuple):
- return tuple(map(_convert, node.nodes))
- elif isinstance(node, List):
- return list(map(_convert, node.nodes))
- elif isinstance(node, Dict):
- return dict((_convert(k), _convert(v)) for k, v in node.items)
- elif isinstance(node, Name):
- if node.name in _safe_names:
- return _safe_names[node.name]
- elif isinstance(node, UnarySub):
- return -_convert(node.expr)
- raise ValueError('malformed string')
- return _convert(node_or_string)
-
-FILE_COMMON_ARGUMENTS=dict(
- src = dict(),
- mode = dict(),
- owner = dict(),
- group = dict(),
- seuser = dict(),
- serole = dict(),
- selevel = dict(),
- setype = dict(),
- follow = dict(type='bool', default=False),
- # not taken by the file module, but other modules call file so it must ignore them.
- content = dict(no_log=True),
- backup = dict(),
- force = dict(),
- remote_src = dict(), # used by assemble
- regexp = dict(), # used by assemble
- delimiter = dict(), # used by assemble
- directory_mode = dict(), # used by copy
-)
-
-PASSWD_ARG_RE = re.compile(r'^[-]{0,2}pass[-]?(word|wd)?')
-
-def get_platform():
- ''' what's the platform? example: Linux is a platform. '''
- return platform.system()
-
-def get_distribution():
- ''' return the distribution name '''
- if platform.system() == 'Linux':
- try:
- supported_dists = platform._supported_dists + ('arch',)
- distribution = platform.linux_distribution(supported_dists=supported_dists)[0].capitalize()
- if not distribution and os.path.isfile('/etc/system-release'):
- distribution = platform.linux_distribution(supported_dists=['system'])[0].capitalize()
- if 'Amazon' in distribution:
- distribution = 'Amazon'
- else:
- distribution = 'OtherLinux'
- except:
- # FIXME: MethodMissing, I assume?
- distribution = platform.dist()[0].capitalize()
- else:
- distribution = None
- return distribution
-
-def get_distribution_version():
- ''' return the distribution version '''
- if platform.system() == 'Linux':
- try:
- distribution_version = platform.linux_distribution()[1]
- if not distribution_version and os.path.isfile('/etc/system-release'):
- distribution_version = platform.linux_distribution(supported_dists=['system'])[1]
- except:
- # FIXME: MethodMissing, I assume?
- distribution_version = platform.dist()[1]
- else:
- distribution_version = None
- return distribution_version
-
-def load_platform_subclass(cls, *args, **kwargs):
- '''
- used by modules like User to have different implementations based on detected platform. See User
- module for an example.
- '''
-
- this_platform = get_platform()
- distribution = get_distribution()
- subclass = None
-
- # get the most specific superclass for this platform
- if distribution is not None:
- for sc in cls.__subclasses__():
- if sc.distribution is not None and sc.distribution == distribution and sc.platform == this_platform:
- subclass = sc
- if subclass is None:
- for sc in cls.__subclasses__():
- if sc.platform == this_platform and sc.distribution is None:
- subclass = sc
- if subclass is None:
- subclass = cls
-
- return super(cls, subclass).__new__(subclass)
-
-
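# Annotation (not part of the deleted file): modules use this by giving the
# base class a __new__ that defers to load_platform_subclass(); the class
# names below are hypothetical.
#
#     class User(object):
#         platform = 'Generic'
#         distribution = None
#         def __new__(cls, *args, **kwargs):
#             return load_platform_subclass(User, *args, **kwargs)
#
#     class FreeBSDUser(User):
#         platform = 'FreeBSD'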
-def json_dict_unicode_to_bytes(d):
- ''' Recursively convert dict keys and values to byte str
-
-    Specialized for json return because this only handles lists, tuples,
-    and dict container types (the containers that the json module returns)
- '''
-
- if isinstance(d, unicode):
- return d.encode('utf-8')
- elif isinstance(d, dict):
- return dict(map(json_dict_unicode_to_bytes, d.iteritems()))
- elif isinstance(d, list):
- return list(map(json_dict_unicode_to_bytes, d))
- elif isinstance(d, tuple):
- return tuple(map(json_dict_unicode_to_bytes, d))
- else:
- return d
-
-def json_dict_bytes_to_unicode(d):
-    ''' Recursively convert dict keys and values to unicode str
-
-    Specialized for json return because this only handles lists, tuples,
-    and dict container types (the containers that the json module returns)
- '''
-
- if isinstance(d, str):
- return unicode(d, 'utf-8')
- elif isinstance(d, dict):
- return dict(map(json_dict_bytes_to_unicode, d.iteritems()))
- elif isinstance(d, list):
- return list(map(json_dict_bytes_to_unicode, d))
- elif isinstance(d, tuple):
- return tuple(map(json_dict_bytes_to_unicode, d))
- else:
- return d
-
-def heuristic_log_sanitize(data):
- ''' Remove strings that look like passwords from log messages '''
- # Currently filters:
- # user:pass@foo/whatever and http://username:pass@wherever/foo
- # This code has false positives and consumes parts of logs that are
- # not passwds
-
- # begin: start of a passwd containing string
- # end: end of a passwd containing string
- # sep: char between user and passwd
- # prev_begin: where in the overall string to start a search for
- # a passwd
- # sep_search_end: where in the string to end a search for the sep
- output = []
- begin = len(data)
- prev_begin = begin
- sep = 1
- while sep:
- # Find the potential end of a passwd
- try:
- end = data.rindex('@', 0, begin)
- except ValueError:
- # No passwd in the rest of the data
- output.insert(0, data[0:begin])
- break
-
- # Search for the beginning of a passwd
- sep = None
- sep_search_end = end
- while not sep:
- # URL-style username+password
- try:
- begin = data.rindex('://', 0, sep_search_end)
- except ValueError:
- # No url style in the data, check for ssh style in the
- # rest of the string
- begin = 0
- # Search for separator
- try:
- sep = data.index(':', begin + 3, end)
- except ValueError:
- # No separator; choices:
- if begin == 0:
- # Searched the whole string so there's no password
- # here. Return the remaining data
- output.insert(0, data[0:begin])
- break
- # Search for a different beginning of the password field.
- sep_search_end = begin
- continue
- if sep:
- # Password was found; remove it.
- output.insert(0, data[end:prev_begin])
- output.insert(0, '********')
- output.insert(0, data[begin:sep + 1])
- prev_begin = begin
-
- return ''.join(output)
-
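# Annotation: the heuristic masks the password portion of URL-style
# credentials while leaving the rest intact, e.g.
#     heuristic_log_sanitize('http://user:secret@example.com/x')
#     -> 'http://user:********@example.com/x'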
-
-class AnsibleModule(object):
-
- def __init__(self, argument_spec, bypass_checks=False, no_log=False,
- check_invalid_arguments=True, mutually_exclusive=None, required_together=None,
- required_one_of=None, add_file_common_args=False, supports_check_mode=False,
- required_if=None):
-
- '''
- common code for quickly building an ansible module in Python
- (although you can write modules in anything that can return JSON)
- see library/* for examples
- '''
-
- self.argument_spec = argument_spec
- self.supports_check_mode = supports_check_mode
- self.check_mode = False
- self.no_log = no_log
- self.cleanup_files = []
-
- self.aliases = {}
-
- if add_file_common_args:
- for k, v in FILE_COMMON_ARGUMENTS.iteritems():
- if k not in self.argument_spec:
- self.argument_spec[k] = v
-
- # check the locale as set by the current environment, and
- # reset to LANG=C if it's an invalid/unavailable locale
- self._check_locale()
-
- (self.params, self.args) = self._load_params()
-
- self._legal_inputs = ['CHECKMODE', 'NO_LOG']
-
- self.aliases = self._handle_aliases()
-
- if check_invalid_arguments:
- self._check_invalid_arguments()
- self._check_for_check_mode()
- self._check_for_no_log()
-
- # check exclusive early
- if not bypass_checks:
- self._check_mutually_exclusive(mutually_exclusive)
-
- self._set_defaults(pre=True)
-
- if not bypass_checks:
- self._check_required_arguments()
- self._check_argument_values()
- self._check_argument_types()
- self._check_required_together(required_together)
- self._check_required_one_of(required_one_of)
- self._check_required_if(required_if)
-
- self._set_defaults(pre=False)
- if not self.no_log:
- self._log_invocation()
-
- # finally, make sure we're in a sane working dir
- self._set_cwd()
-
- def load_file_common_arguments(self, params):
- '''
- many modules deal with files, this encapsulates common
- options that the file module accepts such that it is directly
- available to all modules and they can share code.
- '''
-
- path = params.get('path', params.get('dest', None))
- if path is None:
- return {}
- else:
- path = os.path.expanduser(path)
-
- # if the path is a symlink, and we're following links, get
- # the target of the link instead for testing
- if params.get('follow', False) and os.path.islink(path):
- path = os.path.realpath(path)
-
- mode = params.get('mode', None)
- owner = params.get('owner', None)
- group = params.get('group', None)
-
- # selinux related options
- seuser = params.get('seuser', None)
- serole = params.get('serole', None)
- setype = params.get('setype', None)
- selevel = params.get('selevel', None)
- secontext = [seuser, serole, setype]
-
- if self.selinux_mls_enabled():
- secontext.append(selevel)
-
- default_secontext = self.selinux_default_context(path)
- for i in range(len(default_secontext)):
-            if secontext[i] == '_default':
- secontext[i] = default_secontext[i]
-
- return dict(
- path=path, mode=mode, owner=owner, group=group,
- seuser=seuser, serole=serole, setype=setype,
- selevel=selevel, secontext=secontext,
- )
-
-
- # Detect whether using selinux that is MLS-aware.
- # While this means you can set the level/range with
- # selinux.lsetfilecon(), it may or may not mean that you
- # will get the selevel as part of the context returned
- # by selinux.lgetfilecon().
-
- def selinux_mls_enabled(self):
- if not HAVE_SELINUX:
- return False
- if selinux.is_selinux_mls_enabled() == 1:
- return True
- else:
- return False
-
- def selinux_enabled(self):
- if not HAVE_SELINUX:
- seenabled = self.get_bin_path('selinuxenabled')
- if seenabled is not None:
- (rc,out,err) = self.run_command(seenabled)
- if rc == 0:
- self.fail_json(msg="Aborting, target uses selinux but python bindings (libselinux-python) aren't installed!")
- return False
- if selinux.is_selinux_enabled() == 1:
- return True
- else:
- return False
-
- # Determine whether we need a placeholder for selevel/mls
- def selinux_initial_context(self):
- context = [None, None, None]
- if self.selinux_mls_enabled():
- context.append(None)
- return context
-
- def _to_filesystem_str(self, path):
- '''Returns filesystem path as a str, if it wasn't already.
-
- Used in selinux interactions because it cannot accept unicode
- instances, and specifying complex args in a playbook leaves
- you with unicode instances. This method currently assumes
- that your filesystem encoding is UTF-8.
-
- '''
- if isinstance(path, unicode):
- path = path.encode("utf-8")
- return path
-
- # If selinux fails to find a default, return an array of None
- def selinux_default_context(self, path, mode=0):
- context = self.selinux_initial_context()
- if not HAVE_SELINUX or not self.selinux_enabled():
- return context
- try:
- ret = selinux.matchpathcon(self._to_filesystem_str(path), mode)
- except OSError:
- return context
- if ret[0] == -1:
- return context
- # Limit split to 4 because the selevel, the last in the list,
- # may contain ':' characters
- context = ret[1].split(':', 3)
- return context
-
- def selinux_context(self, path):
- context = self.selinux_initial_context()
- if not HAVE_SELINUX or not self.selinux_enabled():
- return context
- try:
- ret = selinux.lgetfilecon_raw(self._to_filesystem_str(path))
- except OSError, e:
- if e.errno == errno.ENOENT:
- self.fail_json(path=path, msg='path %s does not exist' % path)
- else:
- self.fail_json(path=path, msg='failed to retrieve selinux context')
- if ret[0] == -1:
- return context
- # Limit split to 4 because the selevel, the last in the list,
- # may contain ':' characters
- context = ret[1].split(':', 3)
- return context
-
- def user_and_group(self, filename):
- filename = os.path.expanduser(filename)
- st = os.lstat(filename)
- uid = st.st_uid
- gid = st.st_gid
- return (uid, gid)
-
- def find_mount_point(self, path):
- path = os.path.abspath(os.path.expanduser(os.path.expandvars(path)))
- while not os.path.ismount(path):
- path = os.path.dirname(path)
- return path
-
- def is_special_selinux_path(self, path):
- """
-        Returns a tuple containing (True, selinux_context) if the given path is on an
-        NFS or other 'special' fs mount point, otherwise the return will be (False, None).
- """
- try:
- f = open('/proc/mounts', 'r')
- mount_data = f.readlines()
- f.close()
- except:
- return (False, None)
- path_mount_point = self.find_mount_point(path)
- for line in mount_data:
- (device, mount_point, fstype, options, rest) = line.split(' ', 4)
-
- if path_mount_point == mount_point:
- for fs in SELINUX_SPECIAL_FS.split(','):
- if fs in fstype:
- special_context = self.selinux_context(path_mount_point)
- return (True, special_context)
-
- return (False, None)
-
- def set_default_selinux_context(self, path, changed):
- if not HAVE_SELINUX or not self.selinux_enabled():
- return changed
- context = self.selinux_default_context(path)
- return self.set_context_if_different(path, context, False)
-
- def set_context_if_different(self, path, context, changed):
-
- if not HAVE_SELINUX or not self.selinux_enabled():
- return changed
- cur_context = self.selinux_context(path)
- new_context = list(cur_context)
- # Iterate over the current context instead of the
- # argument context, which may have selevel.
-
- (is_special_se, sp_context) = self.is_special_selinux_path(path)
- if is_special_se:
- new_context = sp_context
- else:
- for i in range(len(cur_context)):
- if len(context) > i:
- if context[i] is not None and context[i] != cur_context[i]:
- new_context[i] = context[i]
- if context[i] is None:
- new_context[i] = cur_context[i]
-
- if cur_context != new_context:
- try:
- if self.check_mode:
- return True
- rc = selinux.lsetfilecon(self._to_filesystem_str(path),
- str(':'.join(new_context)))
- except OSError:
- self.fail_json(path=path, msg='invalid selinux context', new_context=new_context, cur_context=cur_context, input_was=context)
- if rc != 0:
- self.fail_json(path=path, msg='set selinux context failed')
- changed = True
- return changed
-
- def set_owner_if_different(self, path, owner, changed):
- path = os.path.expanduser(path)
- if owner is None:
- return changed
- orig_uid, orig_gid = self.user_and_group(path)
- try:
- uid = int(owner)
- except ValueError:
- try:
- uid = pwd.getpwnam(owner).pw_uid
- except KeyError:
- self.fail_json(path=path, msg='chown failed: failed to look up user %s' % owner)
- if orig_uid != uid:
- if self.check_mode:
- return True
- try:
- os.lchown(path, uid, -1)
- except OSError:
- self.fail_json(path=path, msg='chown failed')
- changed = True
- return changed
-
- def set_group_if_different(self, path, group, changed):
- path = os.path.expanduser(path)
- if group is None:
- return changed
- orig_uid, orig_gid = self.user_and_group(path)
- try:
- gid = int(group)
- except ValueError:
- try:
- gid = grp.getgrnam(group).gr_gid
- except KeyError:
- self.fail_json(path=path, msg='chgrp failed: failed to look up group %s' % group)
- if orig_gid != gid:
- if self.check_mode:
- return True
- try:
- os.lchown(path, -1, gid)
- except OSError:
- self.fail_json(path=path, msg='chgrp failed')
- changed = True
- return changed
-
- def set_mode_if_different(self, path, mode, changed):
- path = os.path.expanduser(path)
- path_stat = os.lstat(path)
-
- if mode is None:
- return changed
-
- if not isinstance(mode, int):
- try:
- mode = int(mode, 8)
- except Exception:
- try:
- mode = self._symbolic_mode_to_octal(path_stat, mode)
- except Exception, e:
- self.fail_json(path=path,
- msg="mode must be in octal or symbolic form",
- details=str(e))
-
- prev_mode = stat.S_IMODE(path_stat.st_mode)
-
- if prev_mode != mode:
- if self.check_mode:
- return True
- # FIXME: comparison against string above will cause this to be executed
- # every time
- try:
- if hasattr(os, 'lchmod'):
- os.lchmod(path, mode)
- else:
- if not os.path.islink(path):
- os.chmod(path, mode)
- else:
- # Attempt to set the perms of the symlink but be
- # careful not to change the perms of the underlying
- # file while trying
- underlying_stat = os.stat(path)
- os.chmod(path, mode)
- new_underlying_stat = os.stat(path)
- if underlying_stat.st_mode != new_underlying_stat.st_mode:
- os.chmod(path, stat.S_IMODE(underlying_stat.st_mode))
- q_stat = os.stat(path)
- except OSError, e:
- if os.path.islink(path) and e.errno == errno.EPERM: # Can't set mode on symbolic links
- pass
- elif e.errno in (errno.ENOENT, errno.ELOOP): # Can't set mode on broken symbolic links
- pass
- else:
- raise e
- except Exception, e:
- self.fail_json(path=path, msg='chmod failed', details=str(e))
-
- path_stat = os.lstat(path)
- new_mode = stat.S_IMODE(path_stat.st_mode)
-
- if new_mode != prev_mode:
- changed = True
- return changed
-
- def _symbolic_mode_to_octal(self, path_stat, symbolic_mode):
- new_mode = stat.S_IMODE(path_stat.st_mode)
-
- mode_re = re.compile(r'^(?P<users>[ugoa]+)(?P<operator>[-+=])(?P<perms>[rwxXst]*|[ugo])$')
- for mode in symbolic_mode.split(','):
- match = mode_re.match(mode)
- if match:
- users = match.group('users')
- operator = match.group('operator')
- perms = match.group('perms')
-
- if users == 'a': users = 'ugo'
-
- for user in users:
- mode_to_apply = self._get_octal_mode_from_symbolic_perms(path_stat, user, perms)
- new_mode = self._apply_operation_to_mode(user, operator, mode_to_apply, new_mode)
- else:
- raise ValueError("bad symbolic permission for mode: %s" % mode)
- return new_mode
-
- def _apply_operation_to_mode(self, user, operator, mode_to_apply, current_mode):
- if operator == '=':
- if user == 'u': mask = stat.S_IRWXU | stat.S_ISUID
- elif user == 'g': mask = stat.S_IRWXG | stat.S_ISGID
- elif user == 'o': mask = stat.S_IRWXO | stat.S_ISVTX
-
- # mask out u, g, or o permissions from current_mode and apply new permissions
- inverse_mask = mask ^ 07777
- new_mode = (current_mode & inverse_mask) | mode_to_apply
- elif operator == '+':
- new_mode = current_mode | mode_to_apply
- elif operator == '-':
- new_mode = current_mode - (current_mode & mode_to_apply)
- return new_mode
-
- def _get_octal_mode_from_symbolic_perms(self, path_stat, user, perms):
- prev_mode = stat.S_IMODE(path_stat.st_mode)
-
- is_directory = stat.S_ISDIR(path_stat.st_mode)
- has_x_permissions = (prev_mode & 00111) > 0
- apply_X_permission = is_directory or has_x_permissions
-
- # Permission bits constants documented at:
- # http://docs.python.org/2/library/stat.html#stat.S_ISUID
- if apply_X_permission:
- X_perms = {
- 'u': {'X': stat.S_IXUSR},
- 'g': {'X': stat.S_IXGRP},
- 'o': {'X': stat.S_IXOTH}
- }
- else:
- X_perms = {
- 'u': {'X': 0},
- 'g': {'X': 0},
- 'o': {'X': 0}
- }
-
- user_perms_to_modes = {
- 'u': {
- 'r': stat.S_IRUSR,
- 'w': stat.S_IWUSR,
- 'x': stat.S_IXUSR,
- 's': stat.S_ISUID,
- 't': 0,
- 'u': prev_mode & stat.S_IRWXU,
- 'g': (prev_mode & stat.S_IRWXG) << 3,
- 'o': (prev_mode & stat.S_IRWXO) << 6 },
- 'g': {
- 'r': stat.S_IRGRP,
- 'w': stat.S_IWGRP,
- 'x': stat.S_IXGRP,
- 's': stat.S_ISGID,
- 't': 0,
- 'u': (prev_mode & stat.S_IRWXU) >> 3,
- 'g': prev_mode & stat.S_IRWXG,
- 'o': (prev_mode & stat.S_IRWXO) << 3 },
- 'o': {
- 'r': stat.S_IROTH,
- 'w': stat.S_IWOTH,
- 'x': stat.S_IXOTH,
- 's': 0,
- 't': stat.S_ISVTX,
- 'u': (prev_mode & stat.S_IRWXU) >> 6,
- 'g': (prev_mode & stat.S_IRWXG) >> 3,
- 'o': prev_mode & stat.S_IRWXO }
- }
-
- # Insert X_perms into user_perms_to_modes
- for key, value in X_perms.items():
- user_perms_to_modes[key].update(value)
-
- or_reduce = lambda mode, perm: mode | user_perms_to_modes[user][perm]
- return reduce(or_reduce, perms, 0)
-
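# Annotation, a worked example: starting from mode 0644, 'a+x' applies
# u:0100, g:0010, o:0001 in turn, so _symbolic_mode_to_octal() returns
# 0644 | 0111 = 0755; 'u=rwx,go=rx' on the same file also yields 0755 via
# the '=' mask-and-replace branch above.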
- def set_fs_attributes_if_different(self, file_args, changed):
- # set modes owners and context as needed
- changed = self.set_context_if_different(
- file_args['path'], file_args['secontext'], changed
- )
- changed = self.set_owner_if_different(
- file_args['path'], file_args['owner'], changed
- )
- changed = self.set_group_if_different(
- file_args['path'], file_args['group'], changed
- )
- changed = self.set_mode_if_different(
- file_args['path'], file_args['mode'], changed
- )
- return changed
-
- def set_directory_attributes_if_different(self, file_args, changed):
- return self.set_fs_attributes_if_different(file_args, changed)
-
- def set_file_attributes_if_different(self, file_args, changed):
- return self.set_fs_attributes_if_different(file_args, changed)
-
- def add_path_info(self, kwargs):
- '''
- for results that are files, supplement the info about the file
- in the return path with stats about the file path.
- '''
-
- path = kwargs.get('path', kwargs.get('dest', None))
- if path is None:
- return kwargs
- if os.path.exists(path):
- (uid, gid) = self.user_and_group(path)
- kwargs['uid'] = uid
- kwargs['gid'] = gid
- try:
- user = pwd.getpwuid(uid)[0]
- except KeyError:
- user = str(uid)
- try:
- group = grp.getgrgid(gid)[0]
- except KeyError:
- group = str(gid)
- kwargs['owner'] = user
- kwargs['group'] = group
- st = os.lstat(path)
- kwargs['mode'] = oct(stat.S_IMODE(st[stat.ST_MODE]))
- # secontext not yet supported
- if os.path.islink(path):
- kwargs['state'] = 'link'
- elif os.path.isdir(path):
- kwargs['state'] = 'directory'
- elif os.stat(path).st_nlink > 1:
- kwargs['state'] = 'hard'
- else:
- kwargs['state'] = 'file'
- if HAVE_SELINUX and self.selinux_enabled():
- kwargs['secontext'] = ':'.join(self.selinux_context(path))
- kwargs['size'] = st[stat.ST_SIZE]
- else:
- kwargs['state'] = 'absent'
- return kwargs
-
- def _check_locale(self):
- '''
- Uses the locale module to test the currently set locale
- (per the LANG and LC_CTYPE environment settings)
- '''
- try:
- # setting the locale to '' uses the default locale
- # as it would be returned by locale.getdefaultlocale()
- locale.setlocale(locale.LC_ALL, '')
- except locale.Error, e:
- # fallback to the 'C' locale, which may cause unicode
- # issues but is preferable to simply failing because
- # of an unknown locale
- locale.setlocale(locale.LC_ALL, 'C')
- os.environ['LANG'] = 'C'
- os.environ['LC_CTYPE'] = 'C'
- os.environ['LC_MESSAGES'] = 'C'
- except Exception, e:
- self.fail_json(msg="An unknown error was encountered while attempting to validate the locale: %s" % e)
-
- def _handle_aliases(self):
- aliases_results = {} #alias:canon
- for (k,v) in self.argument_spec.iteritems():
- self._legal_inputs.append(k)
- aliases = v.get('aliases', None)
- default = v.get('default', None)
- required = v.get('required', False)
- if default is not None and required:
- # not alias specific but this is a good place to check this
- self.fail_json(msg="internal error: required and default are mutually exclusive for %s" % k)
- if aliases is None:
- continue
- if type(aliases) != list:
- self.fail_json(msg='internal error: aliases must be a list')
- for alias in aliases:
- self._legal_inputs.append(alias)
- aliases_results[alias] = k
- if alias in self.params:
- self.params[k] = self.params[alias]
-
- return aliases_results
-
- def _check_for_check_mode(self):
- for (k,v) in self.params.iteritems():
- if k == 'CHECKMODE':
- if not self.supports_check_mode:
- self.exit_json(skipped=True, msg="remote module does not support check mode")
- if self.supports_check_mode:
- self.check_mode = True
-
- def _check_for_no_log(self):
- for (k,v) in self.params.iteritems():
- if k == 'NO_LOG':
- self.no_log = self.boolean(v)
-
- def _check_invalid_arguments(self):
- for (k,v) in self.params.iteritems():
- # these should be in legal inputs already
- #if k in ('CHECKMODE', 'NO_LOG'):
- # continue
- if k not in self._legal_inputs:
- self.fail_json(msg="unsupported parameter for module: %s" % k)
-
- def _count_terms(self, check):
- count = 0
- for term in check:
- if term in self.params:
- count += 1
- return count
-
- def _check_mutually_exclusive(self, spec):
- if spec is None:
- return
- for check in spec:
- count = self._count_terms(check)
- if count > 1:
- self.fail_json(msg="parameters are mutually exclusive: %s" % check)
-
- def _check_required_one_of(self, spec):
- if spec is None:
- return
- for check in spec:
- count = self._count_terms(check)
- if count == 0:
- self.fail_json(msg="one of the following is required: %s" % ','.join(check))
-
- def _check_required_together(self, spec):
- if spec is None:
- return
- for check in spec:
- counts = [ self._count_terms([field]) for field in check ]
- non_zero = [ c for c in counts if c > 0 ]
- if len(non_zero) > 0:
- if 0 in counts:
- self.fail_json(msg="parameters are required together: %s" % check)
-
- def _check_required_arguments(self):
- ''' ensure all required arguments are present '''
- missing = []
- for (k,v) in self.argument_spec.iteritems():
- required = v.get('required', False)
- if required and k not in self.params:
- missing.append(k)
- if len(missing) > 0:
- self.fail_json(msg="missing required arguments: %s" % ",".join(missing))
-
- def _check_required_if(self, spec):
-        ''' ensure that parameters which are conditionally required are present '''
- if spec is None:
- return
- for (key, val, requirements) in spec:
- missing = []
- if key in self.params and self.params[key] == val:
- for check in requirements:
- count = self._count_terms(check)
- if count == 0:
- missing.append(check)
- if len(missing) > 0:
- self.fail_json(msg="%s is %s but the following are missing: %s" % (key, val, ','.join(missing)))
-
- def _check_argument_values(self):
- ''' ensure all arguments have the requested values, and there are no stray arguments '''
- for (k,v) in self.argument_spec.iteritems():
- choices = v.get('choices',None)
- if choices is None:
- continue
- if type(choices) == list:
- if k in self.params:
- if self.params[k] not in choices:
- choices_str=",".join([str(c) for c in choices])
- msg="value of %s must be one of: %s, got: %s" % (k, choices_str, self.params[k])
- self.fail_json(msg=msg)
- else:
- self.fail_json(msg="internal error: do not know how to interpret argument_spec")
-
- def safe_eval(self, str, locals=None, include_exceptions=False):
-
- # do not allow method calls to modules
- if not isinstance(str, basestring):
- # already templated to a datastructure, perhaps?
- if include_exceptions:
- return (str, None)
- return str
- if re.search(r'\w\.\w+\(', str):
- if include_exceptions:
- return (str, None)
- return str
- # do not allow imports
- if re.search(r'import \w+', str):
- if include_exceptions:
- return (str, None)
- return str
- try:
-            # NOTE: literal_eval accepts only a single positional argument,
-            # so the locals parameter cannot be passed through to it
-            result = _literal_eval(str)
- if include_exceptions:
- return (result, None)
- else:
- return result
- except Exception, e:
- if include_exceptions:
- return (str, e)
- return str
-
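# Annotation: literals evaluate, while anything resembling a method call or
# an import is returned unchanged.
#     safe_eval("{'a': 1}")        -> {'a': 1}
#     safe_eval("os.system('ls')") -> "os.system('ls')"  (unchanged)
#     safe_eval("import os")       -> "import os"        (unchanged)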
- def _check_argument_types(self):
- ''' ensure all arguments have the requested type '''
- for (k, v) in self.argument_spec.iteritems():
- wanted = v.get('type', None)
- if wanted is None:
- continue
- if k not in self.params:
- continue
-
- value = self.params[k]
- is_invalid = False
-
- try:
- if wanted == 'str':
- if not isinstance(value, basestring):
- self.params[k] = str(value)
- elif wanted == 'list':
- if not isinstance(value, list):
- if isinstance(value, basestring):
- self.params[k] = value.split(",")
- elif isinstance(value, int) or isinstance(value, float):
- self.params[k] = [ str(value) ]
- else:
- is_invalid = True
- elif wanted == 'dict':
- if not isinstance(value, dict):
- if isinstance(value, basestring):
- if value.startswith("{"):
- try:
- self.params[k] = json.loads(value)
- except:
- (result, exc) = self.safe_eval(value, dict(), include_exceptions=True)
- if exc is not None:
- self.fail_json(msg="unable to evaluate dictionary for %s" % k)
- self.params[k] = result
- elif '=' in value:
- self.params[k] = dict([x.strip().split("=", 1) for x in value.split(",")])
- else:
- self.fail_json(msg="dictionary requested, could not parse JSON or key=value")
- else:
- is_invalid = True
- elif wanted == 'bool':
- if not isinstance(value, bool):
- if isinstance(value, basestring):
- self.params[k] = self.boolean(value)
- else:
- is_invalid = True
- elif wanted == 'int':
- if not isinstance(value, int):
- if isinstance(value, basestring):
- self.params[k] = int(value)
- else:
- is_invalid = True
- elif wanted == 'float':
- if not isinstance(value, float):
- if isinstance(value, basestring):
- self.params[k] = float(value)
- else:
- is_invalid = True
- else:
- self.fail_json(msg="implementation error: unknown type %s requested for %s" % (wanted, k))
-
- if is_invalid:
- self.fail_json(msg="argument %s is of invalid type: %s, required: %s" % (k, type(value), wanted))
- except ValueError, e:
- self.fail_json(msg="value of argument %s is not of type %s and we were unable to automatically convert" % (k, wanted))
-
- def _set_defaults(self, pre=True):
- for (k,v) in self.argument_spec.iteritems():
- default = v.get('default', None)
- if pre == True:
- # this prevents setting defaults on required items
- if default is not None and k not in self.params:
- self.params[k] = default
- else:
- # make sure things without a default still get set None
- if k not in self.params:
- self.params[k] = default
-
- def _load_params(self):
- ''' read the input and return a dictionary and the arguments string '''
- args = MODULE_ARGS
- items = shlex.split(args)
- params = {}
- for x in items:
- try:
- (k, v) = x.split("=",1)
- except Exception, e:
- self.fail_json(msg="this module requires key=value arguments (%s)" % (items))
- if k in params:
- self.fail_json(msg="duplicate parameter: %s (value=%s)" % (k, v))
- params[k] = v
- params2 = json_dict_unicode_to_bytes(json.loads(MODULE_COMPLEX_ARGS))
- params2.update(params)
- return (params2, args)
-
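# Annotation: given MODULE_ARGS = "state=present name=web" and
# MODULE_COMPLEX_ARGS = '{"name": "db", "port": 5432}', the key=value pairs
# win on conflict, so _load_params() yields
#     {'name': 'web', 'state': 'present', 'port': 5432}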
- def _log_invocation(self):
- ''' log that ansible ran the module '''
- # TODO: generalize a separate log function and make log_invocation use it
- # Sanitize possible password argument when logging.
- log_args = dict()
- passwd_keys = ['password', 'login_password']
-
- for param in self.params:
- canon = self.aliases.get(param, param)
- arg_opts = self.argument_spec.get(canon, {})
- no_log = arg_opts.get('no_log', False)
-
- if self.boolean(no_log):
- log_args[param] = 'NOT_LOGGING_PARAMETER'
- elif param in passwd_keys:
- log_args[param] = 'NOT_LOGGING_PASSWORD'
- else:
- param_val = self.params[param]
- if not isinstance(param_val, basestring):
- param_val = str(param_val)
- elif isinstance(param_val, unicode):
- param_val = param_val.encode('utf-8')
- log_args[param] = heuristic_log_sanitize(param_val)
-
- module = 'ansible-%s' % os.path.basename(__file__)
- msg = []
- for arg in log_args:
- arg_val = log_args[arg]
- if not isinstance(arg_val, basestring):
- arg_val = str(arg_val)
- elif isinstance(arg_val, unicode):
- arg_val = arg_val.encode('utf-8')
- msg.append('%s=%s ' % (arg, arg_val))
- if msg:
- msg = 'Invoked with %s' % ''.join(msg)
- else:
- msg = 'Invoked'
-
- # 6655 - allow for accented characters
- if isinstance(msg, unicode):
- # We should never get here as msg should be type str, not unicode
- msg = msg.encode('utf-8')
-
- if (has_journal):
- journal_args = [("MODULE", os.path.basename(__file__))]
- for arg in log_args:
- journal_args.append((arg.upper(), str(log_args[arg])))
- try:
- journal.send("%s %s" % (module, msg), **dict(journal_args))
- except IOError, e:
- # fall back to syslog since logging to journal failed
- syslog.openlog(str(module), 0, syslog.LOG_USER)
- syslog.syslog(syslog.LOG_NOTICE, msg) #1
- else:
- syslog.openlog(str(module), 0, syslog.LOG_USER)
- syslog.syslog(syslog.LOG_NOTICE, msg) #2
-
- def _set_cwd(self):
- try:
- cwd = os.getcwd()
- if not os.access(cwd, os.F_OK|os.R_OK):
-                raise Exception('current working directory is not accessible')
- return cwd
- except:
- # we don't have access to the cwd, probably because of sudo.
- # Try and move to a neutral location to prevent errors
- for cwd in [os.path.expandvars('$HOME'), tempfile.gettempdir()]:
- try:
- if os.access(cwd, os.F_OK|os.R_OK):
- os.chdir(cwd)
- return cwd
- except:
- pass
- # we won't error here, as it may *not* be a problem,
- # and we don't want to break modules unnecessarily
- return None
-
- def get_bin_path(self, arg, required=False, opt_dirs=[]):
- '''
- find system executable in PATH.
- Optional arguments:
- - required: if executable is not found and required is true, fail_json
- - opt_dirs: optional list of directories to search in addition to PATH
- if found return full path; otherwise return None
- '''
- sbin_paths = ['/sbin', '/usr/sbin', '/usr/local/sbin']
- paths = []
- for d in opt_dirs:
- if d is not None and os.path.exists(d):
- paths.append(d)
- paths += os.environ.get('PATH', '').split(os.pathsep)
- bin_path = None
- # mangle PATH to include /sbin dirs
- for p in sbin_paths:
- if p not in paths and os.path.exists(p):
- paths.append(p)
- for d in paths:
- path = os.path.join(d, arg)
- if os.path.exists(path) and self.is_executable(path):
- bin_path = path
- break
- if required and bin_path is None:
- self.fail_json(msg='Failed to find required executable %s' % arg)
- return bin_path
-
- def boolean(self, arg):
- ''' return a bool for the arg '''
- if arg is None or type(arg) == bool:
- return arg
- if type(arg) in types.StringTypes:
- arg = arg.lower()
- if arg in BOOLEANS_TRUE:
- return True
- elif arg in BOOLEANS_FALSE:
- return False
- else:
- self.fail_json(msg='Boolean %s not in either boolean list' % arg)
-
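# Annotation: boolean() normalizes the accepted spellings, e.g.
#     boolean('yes') -> True, boolean('0') -> False, boolean(None) -> None;
# any other value is a hard failure via fail_json.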
- def jsonify(self, data):
- for encoding in ("utf-8", "latin-1", "unicode_escape"):
- try:
- return json.dumps(data, encoding=encoding)
-            # Old systems using the simplejson module do not support the encoding keyword.
- except TypeError, e:
- return json.dumps(data)
- except UnicodeDecodeError, e:
- continue
- self.fail_json(msg='Invalid unicode encoding encountered')
-
- def from_json(self, data):
- return json.loads(data)
-
- def add_cleanup_file(self, path):
- if path not in self.cleanup_files:
- self.cleanup_files.append(path)
-
- def do_cleanup_files(self):
- for path in self.cleanup_files:
- self.cleanup(path)
-
- def exit_json(self, **kwargs):
- ''' return from the module, without error '''
- self.add_path_info(kwargs)
- if not 'changed' in kwargs:
- kwargs['changed'] = False
- self.do_cleanup_files()
- print self.jsonify(kwargs)
- sys.exit(0)
-
- def fail_json(self, **kwargs):
- ''' return from the module, with an error message '''
- self.add_path_info(kwargs)
- assert 'msg' in kwargs, "implementation error -- msg to explain the error is required"
- kwargs['failed'] = True
- self.do_cleanup_files()
- print self.jsonify(kwargs)
- sys.exit(1)
-
- def is_executable(self, path):
- '''is the given path executable?'''
- return (stat.S_IXUSR & os.stat(path)[stat.ST_MODE]
- or stat.S_IXGRP & os.stat(path)[stat.ST_MODE]
- or stat.S_IXOTH & os.stat(path)[stat.ST_MODE])
-
- def digest_from_file(self, filename, digest_method):
- ''' Return hex digest of local file for a given digest_method, or None if file is not present. '''
- if not os.path.exists(filename):
- return None
- if os.path.isdir(filename):
- self.fail_json(msg="attempted to take checksum of directory: %s" % filename)
- digest = digest_method
- blocksize = 64 * 1024
- infile = open(filename, 'rb')
- block = infile.read(blocksize)
- while block:
- digest.update(block)
- block = infile.read(blocksize)
- infile.close()
- return digest.hexdigest()
-
- def md5(self, filename):
- ''' Return MD5 hex digest of local file using digest_from_file().
-
- Do not use this function unless you have no other choice for:
- 1) Optional backwards compatibility
- 2) Compatibility with a third party protocol
-
- This function will not work on systems complying with FIPS-140-2.
-
- Most uses of this function can use the module.sha1 function instead.
- '''
- if not _md5:
- raise ValueError('MD5 not available. Possibly running in FIPS mode')
- return self.digest_from_file(filename, _md5())
-
- def sha1(self, filename):
- ''' Return SHA1 hex digest of local file using digest_from_file(). '''
- return self.digest_from_file(filename, _sha1())
-
- def sha256(self, filename):
- ''' Return SHA-256 hex digest of local file using digest_from_file(). '''
- if not HAVE_HASHLIB:
- self.fail_json(msg="SHA-256 checksums require hashlib, which is available in Python 2.5 and higher")
- return self.digest_from_file(filename, _sha256())
-
- def backup_local(self, fn):
- '''make a date-marked backup of the specified file, return True or False on success or failure'''
-
- backupdest = ''
- if os.path.exists(fn):
- # backups named basename-YYYY-MM-DD@HH:MM:SS~
- ext = time.strftime("%Y-%m-%d@%H:%M:%S~", time.localtime(time.time()))
- backupdest = '%s.%s' % (fn, ext)
-
- try:
- shutil.copy2(fn, backupdest)
- except (shutil.Error, IOError), e:
- self.fail_json(msg='Could not make backup of %s to %s: %s' % (fn, backupdest, e))
-
- return backupdest
-
- def cleanup(self, tmpfile):
- if os.path.exists(tmpfile):
- try:
- os.unlink(tmpfile)
- except OSError, e:
- sys.stderr.write("could not cleanup %s: %s" % (tmpfile, e))
-
- def atomic_move(self, src, dest):
-        '''Atomically move src to dest, copying attributes from dest; returns True on success.
-        os.rename is used since it is an atomic operation; the rest of the function works
-        around its limitations and corner cases, and preserves the selinux context if possible.'''
- context = None
- dest_stat = None
- if os.path.exists(dest):
- try:
- dest_stat = os.stat(dest)
- os.chmod(src, dest_stat.st_mode & 07777)
- os.chown(src, dest_stat.st_uid, dest_stat.st_gid)
- except OSError, e:
- if e.errno != errno.EPERM:
- raise
- if self.selinux_enabled():
- context = self.selinux_context(dest)
- else:
- if self.selinux_enabled():
- context = self.selinux_default_context(dest)
-
- creating = not os.path.exists(dest)
-
- try:
- login_name = os.getlogin()
- except OSError:
- # not having a tty can cause the above to fail, so
- # just get the LOGNAME environment variable instead
- login_name = os.environ.get('LOGNAME', None)
-
- # if the original login_name doesn't match the currently
- # logged-in user, or if the SUDO_USER environment variable
- # is set, then this user has switched their credentials
- switched_user = login_name and login_name != pwd.getpwuid(os.getuid())[0] or os.environ.get('SUDO_USER')
-
- try:
- # Optimistically try a rename, solves some corner cases and can avoid useless work, throws exception if not atomic.
- os.rename(src, dest)
- except (IOError,OSError), e:
- # only try workarounds for errno 18 (cross device), 1 (not permitted) and 13 (permission denied)
- if e.errno != errno.EPERM and e.errno != errno.EXDEV and e.errno != errno.EACCES:
- self.fail_json(msg='Could not replace file: %s to %s: %s' % (src, dest, e))
-
- dest_dir = os.path.dirname(dest)
- dest_file = os.path.basename(dest)
- try:
- tmp_dest = tempfile.NamedTemporaryFile(
- prefix=".ansible_tmp", dir=dest_dir, suffix=dest_file)
- except (OSError, IOError), e:
- self.fail_json(msg='The destination directory (%s) is not writable by the current user.' % dest_dir)
-
- try: # leaves tmp file behind when sudo and not root
- if switched_user and os.getuid() != 0:
- # cleanup will happen by 'rm' of tempdir
- # copy2 will preserve some metadata
- shutil.copy2(src, tmp_dest.name)
- else:
- shutil.move(src, tmp_dest.name)
- if self.selinux_enabled():
- self.set_context_if_different(
- tmp_dest.name, context, False)
- try:
- tmp_stat = os.stat(tmp_dest.name)
- if dest_stat and (tmp_stat.st_uid != dest_stat.st_uid or tmp_stat.st_gid != dest_stat.st_gid):
- os.chown(tmp_dest.name, dest_stat.st_uid, dest_stat.st_gid)
- except OSError, e:
- if e.errno != errno.EPERM:
- raise
- os.rename(tmp_dest.name, dest)
- except (shutil.Error, OSError, IOError), e:
- self.cleanup(tmp_dest.name)
- self.fail_json(msg='Could not replace file: %s to %s: %s' % (src, dest, e))
-
- if creating:
- # make sure the file has the correct permissions
- # based on the current value of umask
- umask = os.umask(0)
- os.umask(umask)
- os.chmod(dest, 0666 & ~umask)
- if switched_user:
- os.chown(dest, os.getuid(), os.getgid())
-
- if self.selinux_enabled():
- # rename might not preserve context
- self.set_context_if_different(dest, context, False)
-
- def run_command(self, args, check_rc=False, close_fds=True, executable=None, data=None, binary_data=False, path_prefix=None, cwd=None, use_unsafe_shell=False, prompt_regex=None):
- '''
- Execute a command, returns rc, stdout, and stderr.
- args is the command to run
- If args is a list, the command will be run with shell=False.
- If args is a string and use_unsafe_shell=False it will split args to a list and run with shell=False
- If args is a string and use_unsafe_shell=True it run with shell=True.
- Other arguments:
- - check_rc (boolean) Whether to call fail_json in case of
- non zero RC. Default is False.
- - close_fds (boolean) See documentation for subprocess.Popen().
- Default is True.
- - executable (string) See documentation for subprocess.Popen().
- Default is None.
- - prompt_regex (string) A regex string (not a compiled regex) which
- can be used to detect prompts in the stdout
- which would otherwise cause the execution
- to hang (especially if no input data is
- specified)
- '''
-
- shell = False
- if isinstance(args, list):
- if use_unsafe_shell:
- args = " ".join([pipes.quote(x) for x in args])
- shell = True
- elif isinstance(args, basestring) and use_unsafe_shell:
- shell = True
- elif isinstance(args, basestring):
- args = shlex.split(args.encode('utf-8'))
- else:
- msg = "Argument 'args' to run_command must be list or string"
- self.fail_json(rc=257, cmd=args, msg=msg)
-
- prompt_re = None
- if prompt_regex:
- try:
- prompt_re = re.compile(prompt_regex, re.MULTILINE)
- except re.error:
- self.fail_json(msg="invalid prompt regular expression given to run_command")
-
- # expand things like $HOME and ~
- if not shell:
- args = [ os.path.expandvars(os.path.expanduser(x)) for x in args ]
-
- rc = 0
- msg = None
- st_in = None
-
-        # Set a temporary env path if a prefix is passed
- env=os.environ
- if path_prefix:
- env['PATH']="%s:%s" % (path_prefix, env['PATH'])
-
- # create a printable version of the command for use
- # in reporting later, which strips out things like
- # passwords from the args list
- if isinstance(args, basestring):
- if isinstance(args, unicode):
- b_args = args.encode('utf-8')
- else:
- b_args = args
- to_clean_args = shlex.split(b_args)
- del b_args
- else:
- to_clean_args = args
-
- clean_args = []
- is_passwd = False
- for arg in to_clean_args:
- if is_passwd:
- is_passwd = False
- clean_args.append('********')
- continue
- if PASSWD_ARG_RE.match(arg):
- sep_idx = arg.find('=')
- if sep_idx > -1:
- clean_args.append('%s=********' % arg[:sep_idx])
- continue
- else:
- is_passwd = True
- clean_args.append(heuristic_log_sanitize(arg))
- clean_args = ' '.join(pipes.quote(arg) for arg in clean_args)
-
- if data:
- st_in = subprocess.PIPE
-
- kwargs = dict(
- executable=executable,
- shell=shell,
- close_fds=close_fds,
- stdin=st_in,
- stdout=subprocess.PIPE,
- stderr=subprocess.PIPE
- )
-
- if path_prefix:
- kwargs['env'] = env
- if cwd and os.path.isdir(cwd):
- kwargs['cwd'] = cwd
-
- # store the pwd
- prev_dir = os.getcwd()
-
- # make sure we're in the right working directory
- if cwd and os.path.isdir(cwd):
- try:
- os.chdir(cwd)
- except (OSError, IOError), e:
- self.fail_json(rc=e.errno, msg="Could not open %s, %s" % (cwd, str(e)))
-
- try:
- cmd = subprocess.Popen(args, **kwargs)
-
- # the communication logic here is essentially taken from that
- # of the _communicate() function in ssh.py
-
- stdout = ''
- stderr = ''
- rpipes = [cmd.stdout, cmd.stderr]
-
- if data:
- if not binary_data:
- data += '\n'
- cmd.stdin.write(data)
- cmd.stdin.close()
-
- while True:
- rfd, wfd, efd = select.select(rpipes, [], rpipes, 1)
- if cmd.stdout in rfd:
- dat = os.read(cmd.stdout.fileno(), 9000)
- stdout += dat
- if dat == '':
- rpipes.remove(cmd.stdout)
- if cmd.stderr in rfd:
- dat = os.read(cmd.stderr.fileno(), 9000)
- stderr += dat
- if dat == '':
- rpipes.remove(cmd.stderr)
- # if we're checking for prompts, do it now
- if prompt_re:
- if prompt_re.search(stdout) and not data:
- return (257, stdout, "A prompt was encountered while running a command, but no input data was specified")
- # only break out if no pipes are left to read or
- # the pipes are completely read and
- # the process is terminated
- if (not rpipes or not rfd) and cmd.poll() is not None:
- break
- # No pipes are left to read but process is not yet terminated
- # Only then it is safe to wait for the process to be finished
- # NOTE: Actually cmd.poll() is always None here if rpipes is empty
- elif not rpipes and cmd.poll() is None:
- cmd.wait()
- # The process is terminated. Since no pipes to read from are
- # left, there is no need to call select() again.
- break
-
- cmd.stdout.close()
- cmd.stderr.close()
-
- rc = cmd.returncode
- except (OSError, IOError), e:
- self.fail_json(rc=e.errno, msg=str(e), cmd=clean_args)
- except:
- self.fail_json(rc=257, msg=traceback.format_exc(), cmd=clean_args)
-
- if rc != 0 and check_rc:
- msg = heuristic_log_sanitize(stderr.rstrip())
- self.fail_json(cmd=clean_args, rc=rc, stdout=stdout, stderr=stderr, msg=msg)
-
- # reset the pwd
- os.chdir(prev_dir)
-
- return (rc, stdout, stderr)
-
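For context, a minimal sketch of how a module author drives this run_command API (the command and module instance here are hypothetical): a list argument runs with shell=False, and prompt_regex aborts instead of hanging when the child asks for input.

    # inside a module; 'module' is an AnsibleModule instance
    rc, out, err = module.run_command(
        ['/usr/bin/passwd', '--status', 'deploy'],  # list form: shell=False
        prompt_regex=r'[Pp]assword:',               # detect an interactive prompt
    )
    if rc != 0:
        module.fail_json(msg="command failed", rc=rc, stderr=err)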
- def append_to_file(self, filename, text):
- filename = os.path.expandvars(os.path.expanduser(filename))
- fh = open(filename, 'a')
- fh.write(text)
- fh.close()
-
- def pretty_bytes(self, size):
- ranges = (
- (1<<70L, 'ZB'),
- (1<<60L, 'EB'),
- (1<<50L, 'PB'),
- (1<<40L, 'TB'),
- (1<<30L, 'GB'),
- (1<<20L, 'MB'),
- (1<<10L, 'KB'),
- (1, 'Bytes')
- )
- for limit, suffix in ranges:
- if size >= limit:
- break
- return '%.2f %s' % (float(size) / limit, suffix)
-
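As a quick illustration of the scaling above, each threshold is a power of 1024 and the value is divided by the first limit it meets or exceeds (assuming m is an AnsibleModule instance):

    m.pretty_bytes(1536)           # -> '1.50 KB'
    m.pretty_bytes(3 * (1 << 30))  # -> '3.00 GB'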
-def get_module_path():
- return os.path.dirname(os.path.realpath(__file__))
diff --git a/v1/ansible/module_utils/cloudstack.py b/v1/ansible/module_utils/cloudstack.py
deleted file mode 100644
index 752defec2b..0000000000
--- a/v1/ansible/module_utils/cloudstack.py
+++ /dev/null
@@ -1,368 +0,0 @@
-# -*- coding: utf-8 -*-
-#
-# (c) 2015, René Moser <mail@renemoser.net>
-#
-# This code is part of Ansible, but is an independent component.
-# This particular file snippet, and this file snippet only, is BSD licensed.
-# Modules you write using this snippet, which is embedded dynamically by Ansible
-# still belong to the author of the module, and may assign their own license
-# to the complete work.
-#
-# Redistribution and use in source and binary forms, with or without modification,
-# are permitted provided that the following conditions are met:
-#
-# * Redistributions of source code must retain the above copyright
-# notice, this list of conditions and the following disclaimer.
-# * Redistributions in binary form must reproduce the above copyright notice,
-# this list of conditions and the following disclaimer in the documentation
-# and/or other materials provided with the distribution.
-#
-# THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS" AND
-# ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED
-# WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE DISCLAIMED.
-# IN NO EVENT SHALL THE COPYRIGHT HOLDER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT,
-# INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO,
-# PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS
-# INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
-# LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE
-# USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
-
-
-try:
- from cs import CloudStack, CloudStackException, read_config
- has_lib_cs = True
-except ImportError:
- has_lib_cs = False
-
-
-class AnsibleCloudStack:
-
- def __init__(self, module):
- if not has_lib_cs:
- module.fail_json(msg="python library cs required: pip install cs")
-
- self.result = {
- 'changed': False,
- }
-
- self.module = module
- self._connect()
-
- self.domain = None
- self.account = None
- self.project = None
- self.ip_address = None
- self.zone = None
- self.vm = None
- self.os_type = None
- self.hypervisor = None
- self.capabilities = None
-
-
- def _connect(self):
- api_key = self.module.params.get('api_key')
- api_secret = self.module.params.get('secret_key')
- api_url = self.module.params.get('api_url')
- api_http_method = self.module.params.get('api_http_method')
- api_timeout = self.module.params.get('api_timeout')
-
- if api_key and api_secret and api_url:
- self.cs = CloudStack(
- endpoint=api_url,
- key=api_key,
- secret=api_secret,
- timeout=api_timeout,
- method=api_http_method
- )
- else:
- self.cs = CloudStack(**read_config())
-
-
- def get_or_fallback(self, key=None, fallback_key=None):
- value = self.module.params.get(key)
- if not value:
- value = self.module.params.get(fallback_key)
- return value
-
-
- # TODO: for backward compatibility only, remove if not used anymore
- def _has_changed(self, want_dict, current_dict, only_keys=None):
- return self.has_changed(want_dict=want_dict, current_dict=current_dict, only_keys=only_keys)
-
-
- def has_changed(self, want_dict, current_dict, only_keys=None):
- for key, value in want_dict.iteritems():
-
- # Optionally limit by a list of keys
- if only_keys and key not in only_keys:
- continue
-
- # Skip None values
- if value is None:
- continue
-
- if key in current_dict:
-
- # The API sometimes returns strings for ints; normalize types before comparing
- if isinstance(value, int):
- current_dict[key] = int(current_dict[key])
- elif isinstance(value, str):
- current_dict[key] = str(current_dict[key])
-
- # Only need to detect a single change, not every item
- if value != current_dict[key]:
- return True
- return False
-
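A short sketch of how this diffing behaves, assuming acs is an AnsibleCloudStack instance comparing desired module arguments against an API response; the int/str coercion above is what lets a wanted 8080 match an API-returned '8080':

    wanted  = {'displaytext': 'web tier', 'port': 8080, 'account': None}
    current = {'displaytext': 'web tier', 'port': '8080'}
    acs.has_changed(wanted, current)                      # -> False (None skipped, '8080' coerced)
    acs.has_changed({'displaytext': 'db tier'}, current)  # -> True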
-
- def _get_by_key(self, key=None, my_dict={}):
- if key:
- if key in my_dict:
- return my_dict[key]
- self.module.fail_json(msg="Something went wrong: %s not found" % key)
- return my_dict
-
-
- def get_project(self, key=None):
- if self.project:
- return self._get_by_key(key, self.project)
-
- project = self.module.params.get('project')
- if not project:
- return None
- args = {}
- args['account'] = self.get_account(key='name')
- args['domainid'] = self.get_domain(key='id')
- projects = self.cs.listProjects(**args)
- if projects:
- for p in projects['project']:
- if project.lower() in [ p['name'].lower(), p['id'] ]:
- self.project = p
- return self._get_by_key(key, self.project)
- self.module.fail_json(msg="project '%s' not found" % project)
-
-
- def get_ip_address(self, key=None):
- if self.ip_address:
- return self._get_by_key(key, self.ip_address)
-
- ip_address = self.module.params.get('ip_address')
- if not ip_address:
- self.module.fail_json(msg="IP address param 'ip_address' is required")
-
- args = {}
- args['ipaddress'] = ip_address
- args['account'] = self.get_account(key='name')
- args['domainid'] = self.get_domain(key='id')
- args['projectid'] = self.get_project(key='id')
- ip_addresses = self.cs.listPublicIpAddresses(**args)
-
- if not ip_addresses:
- self.module.fail_json(msg="IP address '%s' not found" % args['ipaddress'])
-
- self.ip_address = ip_addresses['publicipaddress'][0]
- return self._get_by_key(key, self.ip_address)
-
-
- def get_vm(self, key=None):
- if self.vm:
- return self._get_by_key(key, self.vm)
-
- vm = self.module.params.get('vm')
- if not vm:
- self.module.fail_json(msg="Virtual machine param 'vm' is required")
-
- args = {}
- args['account'] = self.get_account(key='name')
- args['domainid'] = self.get_domain(key='id')
- args['projectid'] = self.get_project(key='id')
- args['zoneid'] = self.get_zone(key='id')
- vms = self.cs.listVirtualMachines(**args)
- if vms:
- for v in vms['virtualmachine']:
- if vm in [ v['name'], v['displayname'], v['id'] ]:
- self.vm = v
- return self._get_by_key(key, self.vm)
- self.module.fail_json(msg="Virtual machine '%s' not found" % vm)
-
-
- def get_zone(self, key=None):
- if self.zone:
- return self._get_by_key(key, self.zone)
-
- zone = self.module.params.get('zone')
- zones = self.cs.listZones()
-
- # use the first zone if no zone param given
- if not zone:
- self.zone = zones['zone'][0]
- return self._get_by_key(key, self.zone)
-
- if zones:
- for z in zones['zone']:
- if zone in [ z['name'], z['id'] ]:
- self.zone = z
- return self._get_by_key(key, self.zone)
- self.module.fail_json(msg="zone '%s' not found" % zone)
-
-
- def get_os_type(self, key=None):
- if self.os_type:
- return self._get_by_key(key, self.os_type)
-
- os_type = self.module.params.get('os_type')
- if not os_type:
- return None
-
- os_types = self.cs.listOsTypes()
- if os_types:
- for o in os_types['ostype']:
- if os_type in [ o['description'], o['id'] ]:
- self.os_type = o
- return self._get_by_key(key, self.os_type)
- self.module.fail_json(msg="OS type '%s' not found" % os_type)
-
-
- def get_hypervisor(self):
- if self.hypervisor:
- return self.hypervisor
-
- hypervisor = self.module.params.get('hypervisor')
- hypervisors = self.cs.listHypervisors()
-
- # use the first hypervisor if no hypervisor param given
- if not hypervisor:
- self.hypervisor = hypervisors['hypervisor'][0]['name']
- return self.hypervisor
-
- for h in hypervisors['hypervisor']:
- if hypervisor.lower() == h['name'].lower():
- self.hypervisor = h['name']
- return self.hypervisor
- self.module.fail_json(msg="Hypervisor '%s' not found" % hypervisor)
-
-
- def get_account(self, key=None):
- if self.account:
- return self._get_by_key(key, self.account)
-
- account = self.module.params.get('account')
- if not account:
- return None
-
- domain = self.module.params.get('domain')
- if not domain:
- self.module.fail_json(msg="Account must be specified with Domain")
-
- args = {}
- args['name'] = account
- args['domainid'] = self.get_domain(key='id')
- args['listall'] = True
- accounts = self.cs.listAccounts(**args)
- if accounts:
- self.account = accounts['account'][0]
- return self._get_by_key(key, self.account)
- self.module.fail_json(msg="Account '%s' not found" % account)
-
-
- def get_domain(self, key=None):
- if self.domain:
- return self._get_by_key(key, self.domain)
-
- domain = self.module.params.get('domain')
- if not domain:
- return None
-
- args = {}
- args['listall'] = True
- domains = self.cs.listDomains(**args)
- if domains:
- for d in domains['domain']:
- if d['path'].lower() in [ domain.lower(), "root/" + domain.lower(), "root" + domain.lower() ]:
- self.domain = d
- return self._get_by_key(key, self.domain)
- self.module.fail_json(msg="Domain '%s' not found" % domain)
-
-
- def get_tags(self, resource=None):
- existing_tags = self.cs.listTags(resourceid=resource['id'])
- if existing_tags:
- return existing_tags['tag']
- return []
-
-
- def _delete_tags(self, resource, resource_type, tags):
- existing_tags = resource['tags']
- # 'tags' is a list of key/value dicts; index it by key for the comparison
- wanted = dict((t['key'], t['value']) for t in tags)
- tags_to_delete = []
- for existing_tag in existing_tags:
- if existing_tag['key'] in wanted:
- if existing_tag['value'] != wanted[existing_tag['key']]:
- tags_to_delete.append(existing_tag)
- else:
- tags_to_delete.append(existing_tag)
- if tags_to_delete:
- self.result['changed'] = True
- if not self.module.check_mode:
- args = {}
- args['resourceids'] = resource['id']
- args['resourcetype'] = resource_type
- args['tags'] = tags_to_delete
- self.cs.deleteTags(**args)
-
-
- def _create_tags(self, resource, resource_type, tags):
- tags_to_create = []
- for tag_entry in tags:
- tag = {
- 'key': tag_entry['key'],
- 'value': tag_entry['value'],
- }
- tags_to_create.append(tag)
- if tags_to_create:
- self.result['changed'] = True
- if not self.module.check_mode:
- args = {}
- args['resourceids'] = resource['id']
- args['resourcetype'] = resource_type
- args['tags'] = tags_to_create
- self.cs.createTags(**args)
-
-
- def ensure_tags(self, resource, resource_type=None):
- if not resource_type or not resource:
- self.module.fail_json(msg="Error: Missing resource or resource_type for tags.")
-
- if 'tags' in resource:
- tags = self.module.params.get('tags')
- if tags is not None:
- self._delete_tags(resource, resource_type, tags)
- self._create_tags(resource, resource_type, tags)
- resource['tags'] = self.get_tags(resource)
- return resource
-
-
- def get_capabilities(self, key=None):
- if self.capabilities:
- return self._get_by_key(key, self.capabilities)
- capabilities = self.cs.listCapabilities()
- self.capabilities = capabilities['capability']
- return self._get_by_key(key, self.capabilities)
-
-
- # TODO: for backward compatibility only, remove if not used anymore
- def _poll_job(self, job=None, key=None):
- return self.poll_job(job=job, key=key)
-
-
- def poll_job(self, job=None, key=None):
- if 'jobid' in job:
- while True:
- res = self.cs.queryAsyncJobResult(jobid=job['jobid'])
- if res['jobstatus'] != 0 and 'jobresult' in res:
- if 'errortext' in res['jobresult']:
- self.module.fail_json(msg="Failed: '%s'" % res['jobresult']['errortext'])
- if key and key in res['jobresult']:
- job = res['jobresult'][key]
- break
- time.sleep(2)
- return job
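Typical use from a module, sketched under the assumption that an async CloudStack call such as deployVirtualMachine returns a dict with a 'jobid'; poll_job then blocks in two-second intervals until jobstatus is non-zero and unpacks the named result key:

    job = self.cs.deployVirtualMachine(**args)  # -> {'jobid': ...}
    if self.module.params.get('poll_async'):
        # returns res['jobresult']['virtualmachine'] once the job finishes
        vm = self.poll_job(job, 'virtualmachine')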
diff --git a/v1/ansible/module_utils/database.py b/v1/ansible/module_utils/database.py
deleted file mode 100644
index 6170614e90..0000000000
--- a/v1/ansible/module_utils/database.py
+++ /dev/null
@@ -1,128 +0,0 @@
-# This code is part of Ansible, but is an independent component.
-# This particular file snippet, and this file snippet only, is BSD licensed.
-# Modules you write using this snippet, which is embedded dynamically by Ansible
-# still belong to the author of the module, and may assign their own license
-# to the complete work.
-#
-# Copyright (c) 2014, Toshio Kuratomi <tkuratomi@ansible.com>
-# All rights reserved.
-#
-# Redistribution and use in source and binary forms, with or without modification,
-# are permitted provided that the following conditions are met:
-#
-# * Redistributions of source code must retain the above copyright
-# notice, this list of conditions and the following disclaimer.
-# * Redistributions in binary form must reproduce the above copyright notice,
-# this list of conditions and the following disclaimer in the documentation
-# and/or other materials provided with the distribution.
-#
-# THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS" AND
-# ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED
-# WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE DISCLAIMED.
-# IN NO EVENT SHALL THE COPYRIGHT HOLDER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT,
-# INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO,
-# PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS
-# INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
-# LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE
-# USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
-
-class SQLParseError(Exception):
- pass
-
-class UnclosedQuoteError(SQLParseError):
- pass
-
-# maps a type of identifier to the maximum number of dot levels that are
-# allowed to specify that identifier. For example, a database column can be
-# specified by up to 4 levels: database.schema.table.column
-_PG_IDENTIFIER_TO_DOT_LEVEL = dict(database=1, schema=2, table=3, column=4, role=1)
-_MYSQL_IDENTIFIER_TO_DOT_LEVEL = dict(database=1, table=2, column=3, role=1, vars=1)
-
-def _find_end_quote(identifier, quote_char):
- accumulate = 0
- while True:
- try:
- quote = identifier.index(quote_char)
- except ValueError:
- raise UnclosedQuoteError
- accumulate = accumulate + quote
- try:
- next_char = identifier[quote+1]
- except IndexError:
- return accumulate
- if next_char == quote_char:
- try:
- identifier = identifier[quote+2:]
- accumulate = accumulate + 2
- except IndexError:
- raise UnclosedQuoteError
- else:
- return accumulate
-
-
-def _identifier_parse(identifier, quote_char):
- if not identifier:
- raise SQLParseError('Identifier name unspecified or unquoted trailing dot')
-
- already_quoted = False
- if identifier.startswith(quote_char):
- already_quoted = True
- try:
- end_quote = _find_end_quote(identifier[1:], quote_char=quote_char) + 1
- except UnclosedQuoteError:
- already_quoted = False
- else:
- if end_quote < len(identifier) - 1:
- if identifier[end_quote+1] == '.':
- dot = end_quote + 1
- first_identifier = identifier[:dot]
- next_identifier = identifier[dot+1:]
- further_identifiers = _identifier_parse(next_identifier, quote_char)
- further_identifiers.insert(0, first_identifier)
- else:
- raise SQLParseError('User escaped identifiers must escape extra quotes')
- else:
- further_identifiers = [identifier]
-
- if not already_quoted:
- try:
- dot = identifier.index('.')
- except ValueError:
- identifier = identifier.replace(quote_char, quote_char*2)
- identifier = ''.join((quote_char, identifier, quote_char))
- further_identifiers = [identifier]
- else:
- if dot == 0 or dot >= len(identifier) - 1:
- identifier = identifier.replace(quote_char, quote_char*2)
- identifier = ''.join((quote_char, identifier, quote_char))
- further_identifiers = [identifier]
- else:
- first_identifier = identifier[:dot]
- next_identifier = identifier[dot+1:]
- further_identifiers = _identifier_parse(next_identifier, quote_char)
- first_identifier = first_identifier.replace(quote_char, quote_char*2)
- first_identifier = ''.join((quote_char, first_identifier, quote_char))
- further_identifiers.insert(0, first_identifier)
-
- return further_identifiers
-
-
-def pg_quote_identifier(identifier, id_type):
- identifier_fragments = _identifier_parse(identifier, quote_char='"')
- if len(identifier_fragments) > _PG_IDENTIFIER_TO_DOT_LEVEL[id_type]:
- raise SQLParseError('PostgreSQL does not support %s with more than %i dots' % (id_type, _PG_IDENTIFIER_TO_DOT_LEVEL[id_type]))
- return '.'.join(identifier_fragments)
-
-def mysql_quote_identifier(identifier, id_type):
- identifier_fragments = _identifier_parse(identifier, quote_char='`')
- if len(identifier_fragments) > _MYSQL_IDENTIFIER_TO_DOT_LEVEL[id_type]:
- raise SQLParseError('MySQL does not support %s with more than %i dots' % (id_type, _MYSQL_IDENTIFIER_TO_DOT_LEVEL[id_type]))
-
- special_cased_fragments = []
- for fragment in identifier_fragments:
- if fragment == '`*`':
- special_cased_fragments.append('*')
- else:
- special_cased_fragments.append(fragment)
-
- return '.'.join(special_cased_fragments)
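A few examples of what the quoting routines above produce: embedded quote characters are doubled, each dotted fragment is wrapped, and exceeding the dot level for the identifier type raises SQLParseError:

    pg_quote_identifier('public.order', 'table')   # -> '"public"."order"'
    pg_quote_identifier('bad"name', 'role')        # -> '"bad""name"'
    mysql_quote_identifier('db.orders', 'table')   # -> '`db`.`orders`'
    pg_quote_identifier('a.b.c.d.e', 'column')     # raises SQLParseError (column allows at most 4 levels)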
diff --git a/v1/ansible/module_utils/ec2.py b/v1/ansible/module_utils/ec2.py
deleted file mode 100644
index d02c3476f2..0000000000
--- a/v1/ansible/module_utils/ec2.py
+++ /dev/null
@@ -1,188 +0,0 @@
-# This code is part of Ansible, but is an independent component.
-# This particular file snippet, and this file snippet only, is BSD licensed.
-# Modules you write using this snippet, which is embedded dynamically by Ansible
-# still belong to the author of the module, and may assign their own license
-# to the complete work.
-#
-# Copyright (c), Michael DeHaan <michael.dehaan@gmail.com>, 2012-2013
-# All rights reserved.
-#
-# Redistribution and use in source and binary forms, with or without modification,
-# are permitted provided that the following conditions are met:
-#
-# * Redistributions of source code must retain the above copyright
-# notice, this list of conditions and the following disclaimer.
-# * Redistributions in binary form must reproduce the above copyright notice,
-# this list of conditions and the following disclaimer in the documentation
-# and/or other materials provided with the distribution.
-#
-# THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS" AND
-# ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED
-# WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE DISCLAIMED.
-# IN NO EVENT SHALL THE COPYRIGHT HOLDER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT,
-# INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO,
-# PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS
-# INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
-# LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE
-# USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
-
-try:
- from distutils.version import LooseVersion
- HAS_LOOSE_VERSION = True
-except:
- HAS_LOOSE_VERSION = False
-
-
-
-def aws_common_argument_spec():
- return dict(
- ec2_url=dict(),
- aws_secret_key=dict(aliases=['ec2_secret_key', 'secret_key'], no_log=True),
- aws_access_key=dict(aliases=['ec2_access_key', 'access_key']),
- validate_certs=dict(default=True, type='bool'),
- security_token=dict(aliases=['access_token'], no_log=True),
- profile=dict(),
- )
-
-
-def ec2_argument_spec():
- spec = aws_common_argument_spec()
- spec.update(
- dict(
- region=dict(aliases=['aws_region', 'ec2_region']),
- )
- )
- return spec
-
-
-def boto_supports_profile_name():
- return hasattr(boto.ec2.EC2Connection, 'profile_name')
-
-
-def get_aws_connection_info(module):
-
- # Check module args for credentials, then check environment vars
- # access_key
-
- ec2_url = module.params.get('ec2_url')
- access_key = module.params.get('aws_access_key')
- secret_key = module.params.get('aws_secret_key')
- security_token = module.params.get('security_token')
- region = module.params.get('region')
- profile_name = module.params.get('profile')
- validate_certs = module.params.get('validate_certs')
-
- if not ec2_url:
- if 'AWS_URL' in os.environ:
- ec2_url = os.environ['AWS_URL']
- elif 'EC2_URL' in os.environ:
- ec2_url = os.environ['EC2_URL']
-
- if not access_key:
- if 'AWS_ACCESS_KEY_ID' in os.environ:
- access_key = os.environ['AWS_ACCESS_KEY_ID']
- elif 'AWS_ACCESS_KEY' in os.environ:
- access_key = os.environ['AWS_ACCESS_KEY']
- elif 'EC2_ACCESS_KEY' in os.environ:
- access_key = os.environ['EC2_ACCESS_KEY']
- else:
- # in case access_key came in as empty string
- access_key = None
-
- if not secret_key:
- if 'AWS_SECRET_ACCESS_KEY' in os.environ:
- secret_key = os.environ['AWS_SECRET_ACCESS_KEY']
- elif 'AWS_SECRET_KEY' in os.environ:
- secret_key = os.environ['AWS_SECRET_KEY']
- elif 'EC2_SECRET_KEY' in os.environ:
- secret_key = os.environ['EC2_SECRET_KEY']
- else:
- # in case secret_key came in as empty string
- secret_key = None
-
- if not region:
- if 'AWS_REGION' in os.environ:
- region = os.environ['AWS_REGION']
- elif 'EC2_REGION' in os.environ:
- region = os.environ['EC2_REGION']
- else:
- # boto.config.get returns None if config not found
- region = boto.config.get('Boto', 'aws_region')
- if not region:
- region = boto.config.get('Boto', 'ec2_region')
-
- if not security_token:
- if 'AWS_SECURITY_TOKEN' in os.environ:
- security_token = os.environ['AWS_SECURITY_TOKEN']
- elif 'EC2_SECURITY_TOKEN' in os.environ:
- security_token = os.environ['EC2_SECURITY_TOKEN']
- else:
- # in case security_token came in as empty string
- security_token = None
-
- boto_params = dict(aws_access_key_id=access_key,
- aws_secret_access_key=secret_key,
- security_token=security_token)
-
- # profile_name only works as a key in boto >= 2.24
- # so only set profile_name if passed as an argument
- if profile_name:
- if not boto_supports_profile_name():
- module.fail_json("boto does not support profile_name before 2.24")
- boto_params['profile_name'] = profile_name
-
- if validate_certs and HAS_LOOSE_VERSION and LooseVersion(boto.Version) >= LooseVersion("2.6.0"):
- boto_params['validate_certs'] = validate_certs
-
- return region, ec2_url, boto_params
-
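The resolution order above is: explicit module parameter first, then environment variables (the AWS_* names take precedence over the legacy EC2_* ones), then boto's own config for the region. A rough sketch, assuming module was built with ec2_argument_spec() and no credential parameters were set:

    import os
    os.environ['AWS_ACCESS_KEY_ID'] = 'AKIA...'      # placeholder
    os.environ['AWS_SECRET_ACCESS_KEY'] = 'secret'   # placeholder
    os.environ['EC2_REGION'] = 'us-west-2'           # legacy name, still honored

    region, ec2_url, boto_params = get_aws_connection_info(module)
    # region == 'us-west-2'; boto_params carries the key pair for boto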
-
-def get_ec2_creds(module):
- ''' for compatibility mode with old modules that don't/can't yet
- use ec2_connect method '''
- region, ec2_url, boto_params = get_aws_connection_info(module)
- return ec2_url, boto_params['aws_access_key_id'], boto_params['aws_secret_access_key'], region
-
-
-def boto_fix_security_token_in_profile(conn, profile_name):
- ''' monkey patch for boto issue boto/boto#2100 '''
- profile = 'profile ' + profile_name
- if boto.config.has_option(profile, 'aws_security_token'):
- conn.provider.set_security_token(boto.config.get(profile, 'aws_security_token'))
- return conn
-
-
-def connect_to_aws(aws_module, region, **params):
- conn = aws_module.connect_to_region(region, **params)
- if not conn:
- if region not in [aws_module_region.name for aws_module_region in aws_module.regions()]:
- raise StandardError("Region %s does not seem to be available for aws module %s. If the region definitely exists, you may need to upgrade boto or extend with endpoints_path" % (region, aws_module.__name__))
- else:
- raise StandardError("Unknown problem connecting to region %s for aws module %s." % (region, aws_module.__name__))
- if params.get('profile_name'):
- conn = boto_fix_security_token_in_profile(conn, params['profile_name'])
- return conn
-
-
-def ec2_connect(module):
-
- """ Return an ec2 connection"""
-
- region, ec2_url, boto_params = get_aws_connection_info(module)
-
- # If we have a region specified, connect to its endpoint.
- if region:
- try:
- ec2 = connect_to_aws(boto.ec2, region, **boto_params)
- except (boto.exception.NoAuthHandlerFound, StandardError), e:
- module.fail_json(msg=str(e))
- # Otherwise, no region so we fallback to the old connection method
- elif ec2_url:
- try:
- ec2 = boto.connect_ec2_endpoint(ec2_url, **boto_params)
- except (boto.exception.NoAuthHandlerFound, StandardError), e:
- module.fail_json(msg=str(e))
- else:
- module.fail_json(msg="Either region or ec2_url must be specified")
-
- return ec2
diff --git a/v1/ansible/module_utils/facts.py b/v1/ansible/module_utils/facts.py
deleted file mode 100644
index 1162e05b9c..0000000000
--- a/v1/ansible/module_utils/facts.py
+++ /dev/null
@@ -1,2786 +0,0 @@
-# (c) 2012, Michael DeHaan <michael.dehaan@gmail.com>
-#
-# This file is part of Ansible
-#
-# Ansible is free software: you can redistribute it and/or modify
-# it under the terms of the GNU General Public License as published by
-# the Free Software Foundation, either version 3 of the License, or
-# (at your option) any later version.
-#
-# Ansible is distributed in the hope that it will be useful,
-# but WITHOUT ANY WARRANTY; without even the implied warranty of
-# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
-# GNU General Public License for more details.
-#
-# You should have received a copy of the GNU General Public License
-# along with Ansible. If not, see <http://www.gnu.org/licenses/>.
-
-import os
-import stat
-import array
-import errno
-import fcntl
-import fnmatch
-import glob
-import platform
-import re
-import signal
-import socket
-import struct
-import datetime
-import getpass
-import pwd
-import ConfigParser
-import StringIO
-
-from string import maketrans
-
-try:
- import selinux
- HAVE_SELINUX=True
-except ImportError:
- HAVE_SELINUX=False
-
-try:
- import json
-except ImportError:
- import simplejson as json
-
-# --------------------------------------------------------------
-# timeout function to make sure some fact gathering
-# steps do not exceed a time limit
-
-class TimeoutError(Exception):
- pass
-
-def timeout(seconds=10, error_message="Timer expired"):
- def decorator(func):
- def _handle_timeout(signum, frame):
- raise TimeoutError(error_message)
-
- def wrapper(*args, **kwargs):
- signal.signal(signal.SIGALRM, _handle_timeout)
- signal.alarm(seconds)
- try:
- result = func(*args, **kwargs)
- finally:
- signal.alarm(0)
- return result
-
- return wrapper
-
- return decorator
-
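Usage of the decorator is straightforward: SIGALRM fires after the given number of seconds and the handler turns it into a TimeoutError. Note this relies on POSIX signals, so it only works in the main thread. A sketch with a hypothetical gather function:

    @timeout(5, error_message="mount scan timed out")
    def slow_fact_gather():
        pass  # anything that may block, e.g. statvfs on a hung NFS mount

    try:
        slow_fact_gather()
    except TimeoutError:
        pass  # skip the fact rather than hang the whole run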
-# --------------------------------------------------------------
-
-class Facts(object):
- """
- This class should only attempt to populate those facts that
- are mostly generic to all systems. This includes platform facts,
- service facts (e.g. ssh keys or selinux), and distribution facts.
- Anything that requires extensive code or may have more than one
- possible implementation to establish facts for a given topic should
- subclass Facts.
- """
-
- # i86pc is a Solaris and derivatives-ism
- _I386RE = re.compile(r'i([3456]86|86pc)')
- # For the most part, we assume that platform.dist() will tell the truth.
- # This is the fallback to handle unknowns or exceptions
- OSDIST_LIST = ( ('/etc/oracle-release', 'OracleLinux'),
- ('/etc/redhat-release', 'RedHat'),
- ('/etc/vmware-release', 'VMwareESX'),
- ('/etc/openwrt_release', 'OpenWrt'),
- ('/etc/system-release', 'OtherLinux'),
- ('/etc/alpine-release', 'Alpine'),
- ('/etc/release', 'Solaris'),
- ('/etc/arch-release', 'Archlinux'),
- ('/etc/SuSE-release', 'SuSE'),
- ('/etc/os-release', 'SuSE'),
- ('/etc/gentoo-release', 'Gentoo'),
- ('/etc/os-release', 'Debian'),
- ('/etc/lsb-release', 'Mandriva'),
- ('/etc/os-release', 'NA'),
- )
- SELINUX_MODE_DICT = { 1: 'enforcing', 0: 'permissive', -1: 'disabled' }
-
- # A list of dicts. If there is a platform with more than one
- # package manager, put the preferred one last. If there is an
- # ansible module, use that as the value for the 'name' key.
- PKG_MGRS = [ { 'path' : '/usr/bin/yum', 'name' : 'yum' },
- { 'path' : '/usr/bin/dnf', 'name' : 'dnf' },
- { 'path' : '/usr/bin/apt-get', 'name' : 'apt' },
- { 'path' : '/usr/bin/zypper', 'name' : 'zypper' },
- { 'path' : '/usr/sbin/urpmi', 'name' : 'urpmi' },
- { 'path' : '/usr/bin/pacman', 'name' : 'pacman' },
- { 'path' : '/bin/opkg', 'name' : 'opkg' },
- { 'path' : '/opt/local/bin/pkgin', 'name' : 'pkgin' },
- { 'path' : '/opt/local/bin/port', 'name' : 'macports' },
- { 'path' : '/sbin/apk', 'name' : 'apk' },
- { 'path' : '/usr/sbin/pkg', 'name' : 'pkgng' },
- { 'path' : '/usr/sbin/swlist', 'name' : 'SD-UX' },
- { 'path' : '/usr/bin/emerge', 'name' : 'portage' },
- { 'path' : '/usr/sbin/pkgadd', 'name' : 'svr4pkg' },
- { 'path' : '/usr/bin/pkg', 'name' : 'pkg' },
- ]
-
- def __init__(self, load_on_init=True):
-
- self.facts = {}
-
- if load_on_init:
- self.get_platform_facts()
- self.get_distribution_facts()
- self.get_cmdline()
- self.get_public_ssh_host_keys()
- self.get_selinux_facts()
- self.get_fips_facts()
- self.get_pkg_mgr_facts()
- self.get_lsb_facts()
- self.get_date_time_facts()
- self.get_user_facts()
- self.get_local_facts()
- self.get_env_facts()
-
- def populate(self):
- return self.facts
-
- # Platform
- # platform.system() can be Linux, Darwin, Java, or Windows
- def get_platform_facts(self):
- self.facts['system'] = platform.system()
- self.facts['kernel'] = platform.release()
- self.facts['machine'] = platform.machine()
- self.facts['python_version'] = platform.python_version()
- self.facts['fqdn'] = socket.getfqdn()
- self.facts['hostname'] = platform.node().split('.')[0]
- self.facts['nodename'] = platform.node()
- self.facts['domain'] = '.'.join(self.facts['fqdn'].split('.')[1:])
- arch_bits = platform.architecture()[0]
- self.facts['userspace_bits'] = arch_bits.replace('bit', '')
- if self.facts['machine'] == 'x86_64':
- self.facts['architecture'] = self.facts['machine']
- if self.facts['userspace_bits'] == '64':
- self.facts['userspace_architecture'] = 'x86_64'
- elif self.facts['userspace_bits'] == '32':
- self.facts['userspace_architecture'] = 'i386'
- elif Facts._I386RE.search(self.facts['machine']):
- self.facts['architecture'] = 'i386'
- if self.facts['userspace_bits'] == '64':
- self.facts['userspace_architecture'] = 'x86_64'
- elif self.facts['userspace_bits'] == '32':
- self.facts['userspace_architecture'] = 'i386'
- else:
- self.facts['architecture'] = self.facts['machine']
- if self.facts['system'] == 'Linux':
- self.get_distribution_facts()
- elif self.facts['system'] == 'AIX':
- try:
- rc, out, err = module.run_command("/usr/sbin/bootinfo -p")
- data = out.split('\n')
- self.facts['architecture'] = data[0]
- except:
- self.facts['architecture'] = 'Not Available'
- elif self.facts['system'] == 'OpenBSD':
- self.facts['architecture'] = platform.uname()[5]
-
-
- def get_local_facts(self):
-
- fact_path = module.params.get('fact_path', None)
- if not fact_path or not os.path.exists(fact_path):
- return
-
- local = {}
- for fn in sorted(glob.glob(fact_path + '/*.fact')):
- # where it will sit under local facts
- fact_base = os.path.basename(fn).replace('.fact','')
- if stat.S_IXUSR & os.stat(fn)[stat.ST_MODE]:
- # run it
- # try to read it as json first
- # if that fails read it with ConfigParser
- # if that fails, skip it
- rc, out, err = module.run_command(fn)
- else:
- out = get_file_content(fn, default='')
-
- # load raw json
- fact = 'loading %s' % fact_base
- try:
- fact = json.loads(out)
- except ValueError, e:
- # load raw ini
- cp = ConfigParser.ConfigParser()
- try:
- cp.readfp(StringIO.StringIO(out))
- except ConfigParser.Error, e:
- fact="error loading fact - please check content"
- else:
- fact = {}
- for sect in cp.sections():
- if sect not in fact:
- fact[sect] = {}
- for opt in cp.options(sect):
- val = cp.get(sect, opt)
- fact[sect][opt]=val
-
- local[fact_base] = fact
- if not local:
- return
- self.facts['local'] = local
-
- # platform.dist() is deprecated in 2.6
- # in 2.6 and newer, you should use platform.linux_distribution()
- def get_distribution_facts(self):
-
- # A list with OS Family members
- OS_FAMILY = dict(
- RedHat = 'RedHat', Fedora = 'RedHat', CentOS = 'RedHat', Scientific = 'RedHat',
- SLC = 'RedHat', Ascendos = 'RedHat', CloudLinux = 'RedHat', PSBM = 'RedHat',
- OracleLinux = 'RedHat', OVS = 'RedHat', OEL = 'RedHat', Amazon = 'RedHat',
- XenServer = 'RedHat', Ubuntu = 'Debian', Debian = 'Debian', Raspbian = 'Debian', SLES = 'Suse',
- SLED = 'Suse', openSUSE = 'Suse', SuSE = 'Suse', Gentoo = 'Gentoo', Funtoo = 'Gentoo',
- Archlinux = 'Archlinux', Mandriva = 'Mandrake', Mandrake = 'Mandrake',
- Solaris = 'Solaris', Nexenta = 'Solaris', OmniOS = 'Solaris', OpenIndiana = 'Solaris',
- SmartOS = 'Solaris', AIX = 'AIX', Alpine = 'Alpine', MacOSX = 'Darwin',
- FreeBSD = 'FreeBSD', HPUX = 'HP-UX'
- )
-
- # TODO: Rewrite this to use the function references in a dict pattern
- # as it's much cleaner than this massive if-else
- if self.facts['system'] == 'AIX':
- self.facts['distribution'] = 'AIX'
- rc, out, err = module.run_command("/usr/bin/oslevel")
- data = out.split('.')
- self.facts['distribution_version'] = data[0]
- self.facts['distribution_release'] = data[1]
- elif self.facts['system'] == 'HP-UX':
- self.facts['distribution'] = 'HP-UX'
- rc, out, err = module.run_command("/usr/sbin/swlist |egrep 'HPUX.*OE.*[AB].[0-9]+\.[0-9]+'", use_unsafe_shell=True)
- data = re.search('HPUX.*OE.*([AB].[0-9]+\.[0-9]+)\.([0-9]+).*', out)
- if data:
- self.facts['distribution_version'] = data.groups()[0]
- self.facts['distribution_release'] = data.groups()[1]
- elif self.facts['system'] == 'Darwin':
- self.facts['distribution'] = 'MacOSX'
- rc, out, err = module.run_command("/usr/bin/sw_vers -productVersion")
- data = out.split()[-1]
- self.facts['distribution_version'] = data
- elif self.facts['system'] == 'FreeBSD':
- self.facts['distribution'] = 'FreeBSD'
- self.facts['distribution_release'] = platform.release()
- self.facts['distribution_version'] = platform.version()
- elif self.facts['system'] == 'NetBSD':
- self.facts['distribution'] = 'NetBSD'
- self.facts['distribution_release'] = platform.release()
- self.facts['distribution_version'] = platform.version()
- elif self.facts['system'] == 'OpenBSD':
- self.facts['distribution'] = 'OpenBSD'
- self.facts['distribution_release'] = platform.release()
- rc, out, err = module.run_command("/sbin/sysctl -n kern.version")
- match = re.match('OpenBSD\s[0-9]+.[0-9]+-(\S+)\s.*', out)
- if match:
- self.facts['distribution_version'] = match.groups()[0]
- else:
- self.facts['distribution_version'] = 'release'
- else:
- dist = platform.dist()
- self.facts['distribution'] = dist[0].capitalize() or 'NA'
- self.facts['distribution_version'] = dist[1] or 'NA'
- self.facts['distribution_major_version'] = dist[1].split('.')[0] or 'NA'
- self.facts['distribution_release'] = dist[2] or 'NA'
- # Try to handle the exceptions now ...
- for (path, name) in Facts.OSDIST_LIST:
- if os.path.exists(path):
- if os.path.getsize(path) > 0:
- if self.facts['distribution'] in ('Fedora', ):
- # Once we determine the value is one of these distros
- # we trust the values are always correct
- break
- elif name == 'OracleLinux':
- data = get_file_content(path)
- if 'Oracle Linux' in data:
- self.facts['distribution'] = name
- else:
- self.facts['distribution'] = data.split()[0]
- break
- elif name == 'RedHat':
- data = get_file_content(path)
- if 'Red Hat' in data:
- self.facts['distribution'] = name
- else:
- self.facts['distribution'] = data.split()[0]
- break
- elif name == 'OtherLinux':
- data = get_file_content(path)
- if 'Amazon' in data:
- self.facts['distribution'] = 'Amazon'
- self.facts['distribution_version'] = data.split()[-1]
- break
- elif name == 'OpenWrt':
- data = get_file_content(path)
- if 'OpenWrt' in data:
- self.facts['distribution'] = name
- version = re.search('DISTRIB_RELEASE="(.*)"', data)
- if version:
- self.facts['distribution_version'] = version.groups()[0]
- release = re.search('DISTRIB_CODENAME="(.*)"', data)
- if release:
- self.facts['distribution_release'] = release.groups()[0]
- break
- elif name == 'Alpine':
- data = get_file_content(path)
- self.facts['distribution'] = name
- self.facts['distribution_version'] = data
- break
- elif name == 'Solaris':
- data = get_file_content(path).split('\n')[0]
- if 'Solaris' in data:
- ora_prefix = ''
- if 'Oracle Solaris' in data:
- data = data.replace('Oracle ','')
- ora_prefix = 'Oracle '
- self.facts['distribution'] = data.split()[0]
- self.facts['distribution_version'] = data.split()[1]
- self.facts['distribution_release'] = ora_prefix + data
- break
-
- uname_rc, uname_out, uname_err = module.run_command(['uname', '-v'])
- distribution_version = None
- if 'SmartOS' in data:
- self.facts['distribution'] = 'SmartOS'
- if os.path.exists('/etc/product'):
- product_data = dict([l.split(': ', 1) for l in get_file_content('/etc/product').split('\n') if ': ' in l])
- if 'Image' in product_data:
- distribution_version = product_data.get('Image').split()[-1]
- elif 'OpenIndiana' in data:
- self.facts['distribution'] = 'OpenIndiana'
- elif 'OmniOS' in data:
- self.facts['distribution'] = 'OmniOS'
- distribution_version = data.split()[-1]
- elif uname_rc == 0 and 'NexentaOS_' in uname_out:
- self.facts['distribution'] = 'Nexenta'
- distribution_version = data.split()[-1].lstrip('v')
-
- if self.facts['distribution'] in ('SmartOS', 'OpenIndiana', 'OmniOS', 'Nexenta'):
- self.facts['distribution_release'] = data.strip()
- if distribution_version is not None:
- self.facts['distribution_version'] = distribution_version
- elif uname_rc == 0:
- self.facts['distribution_version'] = uname_out.split('\n')[0].strip()
- break
-
- elif name == 'SuSE':
- data = get_file_content(path)
- if 'suse' in data.lower():
- if path == '/etc/os-release':
- for line in data.splitlines():
- distribution = re.search("^NAME=(.*)", line)
- if distribution:
- self.facts['distribution'] = distribution.group(1).strip('"')
- distribution_version = re.search('^VERSION_ID="?([0-9]+\.?[0-9]*)"?', line) # example patterns: 13.04, 13.0, 13
- if distribution_version:
- self.facts['distribution_version'] = distribution_version.group(1)
- if 'open' in data.lower():
- release = re.search("^PRETTY_NAME=[^(]+ \(?([^)]+?)\)", line)
- if release:
- self.facts['distribution_release'] = release.groups()[0]
- elif 'enterprise' in data.lower():
- release = re.search('^VERSION_ID="?[0-9]+\.?([0-9]*)"?', line) # SLES doesn't have funny release names
- if release:
- release = release.group(1)
- else:
- release = "0" # no minor number, so it is the first release
- self.facts['distribution_release'] = release
- break
- elif path == '/etc/SuSE-release':
- if 'open' in data.lower():
- data = data.splitlines()
- distdata = get_file_content(path).split('\n')[0]
- self.facts['distribution'] = distdata.split()[0]
- for line in data:
- release = re.search('CODENAME *= *([^\n]+)', line)
- if release:
- self.facts['distribution_release'] = release.groups()[0].strip()
- elif 'enterprise' in data.lower():
- lines = data.splitlines()
- distribution = lines[0].split()[0]
- if "Server" in data:
- self.facts['distribution'] = "SLES"
- elif "Desktop" in data:
- self.facts['distribution'] = "SLED"
- for line in lines:
- release = re.search('PATCHLEVEL = ([0-9]+)', line) # SLES doesn't have funny release names
- if release:
- self.facts['distribution_release'] = release.group(1)
- self.facts['distribution_version'] = self.facts['distribution_version'] + '.' + release.group(1)
- elif name == 'Debian':
- data = get_file_content(path)
- if 'Ubuntu' in data:
- break # Ubuntu gets correct info from python functions
- elif 'Debian' in data or 'Raspbian' in data:
- release = re.search("PRETTY_NAME=[^(]+ \(?([^)]+?)\)", data)
- if release:
- self.facts['distribution_release'] = release.groups()[0]
- break
- elif name == 'Mandriva':
- data = get_file_content(path)
- if 'Mandriva' in data:
- version = re.search('DISTRIB_RELEASE="(.*)"', data)
- if version:
- self.facts['distribution_version'] = version.groups()[0]
- release = re.search('DISTRIB_CODENAME="(.*)"', data)
- if release:
- self.facts['distribution_release'] = release.groups()[0]
- self.facts['distribution'] = name
- break
- elif name == 'NA':
- data = get_file_content(path)
- for line in data.splitlines():
- distribution = re.search("^NAME=(.*)", line)
- if distribution:
- self.facts['distribution'] = distribution.group(1).strip('"')
- version = re.search("^VERSION=(.*)", line)
- if version:
- self.facts['distribution_version'] = version.group(1).strip('"')
- if self.facts['distribution'].lower() == 'coreos':
- data = get_file_content('/etc/coreos/update.conf')
- release = re.search("^GROUP=(.*)", data)
- if release:
- self.facts['distribution_release'] = release.group(1).strip('"')
- else:
- self.facts['distribution'] = name
- machine_id = get_file_content("/var/lib/dbus/machine-id") or get_file_content("/etc/machine-id")
- if machine_id:
- machine_id = machine_id.split('\n')[0]
- self.facts["machine_id"] = machine_id
- self.facts['os_family'] = self.facts['distribution']
- if self.facts['distribution'] in OS_FAMILY:
- self.facts['os_family'] = OS_FAMILY[self.facts['distribution']]
-
- def get_cmdline(self):
- data = get_file_content('/proc/cmdline')
- if data:
- self.facts['cmdline'] = {}
- try:
- for piece in shlex.split(data):
- item = piece.split('=', 1)
- if len(item) == 1:
- self.facts['cmdline'][item[0]] = True
- else:
- self.facts['cmdline'][item[0]] = item[1]
- except ValueError, e:
- pass
-
- def get_public_ssh_host_keys(self):
- dsa_filename = '/etc/ssh/ssh_host_dsa_key.pub'
- rsa_filename = '/etc/ssh/ssh_host_rsa_key.pub'
- ecdsa_filename = '/etc/ssh/ssh_host_ecdsa_key.pub'
-
- if self.facts['system'] == 'Darwin':
- dsa_filename = '/etc/ssh_host_dsa_key.pub'
- rsa_filename = '/etc/ssh_host_rsa_key.pub'
- ecdsa_filename = '/etc/ssh_host_ecdsa_key.pub'
- dsa = get_file_content(dsa_filename)
- rsa = get_file_content(rsa_filename)
- ecdsa = get_file_content(ecdsa_filename)
- if dsa is None:
- dsa = 'NA'
- else:
- self.facts['ssh_host_key_dsa_public'] = dsa.split()[1]
- if rsa is None:
- rsa = 'NA'
- else:
- self.facts['ssh_host_key_rsa_public'] = rsa.split()[1]
- if ecdsa is None:
- ecdsa = 'NA'
- else:
- self.facts['ssh_host_key_ecdsa_public'] = ecdsa.split()[1]
-
- def get_pkg_mgr_facts(self):
- self.facts['pkg_mgr'] = 'unknown'
- for pkg in Facts.PKG_MGRS:
- if os.path.exists(pkg['path']):
- self.facts['pkg_mgr'] = pkg['name']
- if self.facts['system'] == 'OpenBSD':
- self.facts['pkg_mgr'] = 'openbsd_pkg'
-
- def get_lsb_facts(self):
- lsb_path = module.get_bin_path('lsb_release')
- if lsb_path:
- rc, out, err = module.run_command([lsb_path, "-a"])
- if rc == 0:
- self.facts['lsb'] = {}
- for line in out.split('\n'):
- if len(line) < 1 or ':' not in line:
- continue
- value = line.split(':', 1)[1].strip()
- if 'LSB Version:' in line:
- self.facts['lsb']['release'] = value
- elif 'Distributor ID:' in line:
- self.facts['lsb']['id'] = value
- elif 'Description:' in line:
- self.facts['lsb']['description'] = value
- elif 'Release:' in line:
- self.facts['lsb']['release'] = value
- elif 'Codename:' in line:
- self.facts['lsb']['codename'] = value
- if 'lsb' in self.facts and 'release' in self.facts['lsb']:
- self.facts['lsb']['major_release'] = self.facts['lsb']['release'].split('.')[0]
- elif lsb_path is None and os.path.exists('/etc/lsb-release'):
- self.facts['lsb'] = {}
- for line in get_file_lines('/etc/lsb-release'):
- value = line.split('=',1)[1].strip()
- if 'DISTRIB_ID' in line:
- self.facts['lsb']['id'] = value
- elif 'DISTRIB_RELEASE' in line:
- self.facts['lsb']['release'] = value
- elif 'DISTRIB_DESCRIPTION' in line:
- self.facts['lsb']['description'] = value
- elif 'DISTRIB_CODENAME' in line:
- self.facts['lsb']['codename'] = value
- else:
- return self.facts
-
- if 'lsb' in self.facts and 'release' in self.facts['lsb']:
- self.facts['lsb']['major_release'] = self.facts['lsb']['release'].split('.')[0]
-
-
- def get_selinux_facts(self):
- if not HAVE_SELINUX:
- self.facts['selinux'] = False
- return
- self.facts['selinux'] = {}
- if not selinux.is_selinux_enabled():
- self.facts['selinux']['status'] = 'disabled'
- else:
- self.facts['selinux']['status'] = 'enabled'
- try:
- self.facts['selinux']['policyvers'] = selinux.security_policyvers()
- except OSError, e:
- self.facts['selinux']['policyvers'] = 'unknown'
- try:
- (rc, configmode) = selinux.selinux_getenforcemode()
- if rc == 0:
- self.facts['selinux']['config_mode'] = Facts.SELINUX_MODE_DICT.get(configmode, 'unknown')
- else:
- self.facts['selinux']['config_mode'] = 'unknown'
- except OSError, e:
- self.facts['selinux']['config_mode'] = 'unknown'
- try:
- mode = selinux.security_getenforce()
- self.facts['selinux']['mode'] = Facts.SELINUX_MODE_DICT.get(mode, 'unknown')
- except OSError, e:
- self.facts['selinux']['mode'] = 'unknown'
- try:
- (rc, policytype) = selinux.selinux_getpolicytype()
- if rc == 0:
- self.facts['selinux']['type'] = policytype
- else:
- self.facts['selinux']['type'] = 'unknown'
- except OSError, e:
- self.facts['selinux']['type'] = 'unknown'
-
-
- def get_fips_facts(self):
- self.facts['fips'] = False
- data = get_file_content('/proc/sys/crypto/fips_enabled')
- if data and data == '1':
- self.facts['fips'] = True
-
-
- def get_date_time_facts(self):
- self.facts['date_time'] = {}
-
- now = datetime.datetime.now()
- self.facts['date_time']['year'] = now.strftime('%Y')
- self.facts['date_time']['month'] = now.strftime('%m')
- self.facts['date_time']['weekday'] = now.strftime('%A')
- self.facts['date_time']['day'] = now.strftime('%d')
- self.facts['date_time']['hour'] = now.strftime('%H')
- self.facts['date_time']['minute'] = now.strftime('%M')
- self.facts['date_time']['second'] = now.strftime('%S')
- self.facts['date_time']['epoch'] = now.strftime('%s')
- if self.facts['date_time']['epoch'] == '' or self.facts['date_time']['epoch'][0] == '%':
- self.facts['date_time']['epoch'] = str(int(time.time()))
- self.facts['date_time']['date'] = now.strftime('%Y-%m-%d')
- self.facts['date_time']['time'] = now.strftime('%H:%M:%S')
- self.facts['date_time']['iso8601_micro'] = now.utcnow().strftime("%Y-%m-%dT%H:%M:%S.%fZ")
- self.facts['date_time']['iso8601'] = now.utcnow().strftime("%Y-%m-%dT%H:%M:%SZ")
- self.facts['date_time']['tz'] = time.strftime("%Z")
- self.facts['date_time']['tz_offset'] = time.strftime("%z")
-
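One subtlety above: '%s' is a glibc extension to strftime, so on platforms that lack it the result comes back empty or as a literal '%s'; the guard detects both cases and falls back to time.time(). A quick illustration of the same check:

    import datetime, time
    epoch = datetime.datetime.now().strftime('%s')
    if epoch == '' or epoch[0] == '%':
        epoch = str(int(time.time()))  # portable fallback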
-
- # User
- def get_user_facts(self):
- self.facts['user_id'] = getpass.getuser()
- pwent = pwd.getpwnam(getpass.getuser())
- self.facts['user_uid'] = pwent.pw_uid
- self.facts['user_gid'] = pwent.pw_gid
- self.facts['user_gecos'] = pwent.pw_gecos
- self.facts['user_dir'] = pwent.pw_dir
- self.facts['user_shell'] = pwent.pw_shell
-
- def get_env_facts(self):
- self.facts['env'] = {}
- for k,v in os.environ.iteritems():
- self.facts['env'][k] = v
-
-class Hardware(Facts):
- """
- This is a generic Hardware subclass of Facts. This should be further
- subclassed to implement per platform. If you subclass this, it
- should define:
- - memfree_mb
- - memtotal_mb
- - swapfree_mb
- - swaptotal_mb
- - processor (a list)
- - processor_cores
- - processor_count
-
- All subclasses MUST define platform.
- """
- platform = 'Generic'
-
- def __new__(cls, *arguments, **keyword):
- subclass = cls
- for sc in Hardware.__subclasses__():
- if sc.platform == platform.system():
- subclass = sc
- return super(cls, subclass).__new__(subclass, *arguments, **keyword)
-
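The __new__ above dispatches on platform: it matches each subclass's platform attribute against platform.system(), so instantiating Hardware() on a Linux host transparently yields a LinuxHardware. A minimal standalone sketch of the same pattern:

    import platform

    class Base(object):
        platform = 'Generic'
        def __new__(cls, *args, **kwargs):
            subclass = cls
            for sc in Base.__subclasses__():
                if sc.platform == platform.system():
                    subclass = sc
            return super(cls, subclass).__new__(subclass)

    class LinuxImpl(Base):
        platform = 'Linux'

    print(type(Base()).__name__)  # 'LinuxImpl' on a Linux machine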
- def __init__(self):
- Facts.__init__(self)
-
- def populate(self):
- return self.facts
-
-class LinuxHardware(Hardware):
- """
- Linux-specific subclass of Hardware. Defines memory and CPU facts:
- - memfree_mb
- - memtotal_mb
- - swapfree_mb
- - swaptotal_mb
- - processor (a list)
- - processor_cores
- - processor_count
-
- In addition, it also defines number of DMI facts and device facts.
- """
-
- platform = 'Linux'
-
- # Originally only had these four as top-level facts
- ORIGINAL_MEMORY_FACTS = frozenset(('MemTotal', 'SwapTotal', 'MemFree', 'SwapFree'))
- # Now we have all of these in a dict structure
- MEMORY_FACTS = ORIGINAL_MEMORY_FACTS.union(('Buffers', 'Cached', 'SwapCached'))
-
- def __init__(self):
- Hardware.__init__(self)
-
- def populate(self):
- self.get_cpu_facts()
- self.get_memory_facts()
- self.get_dmi_facts()
- self.get_device_facts()
- self.get_uptime_facts()
- try:
- self.get_mount_facts()
- except TimeoutError:
- pass
- return self.facts
-
- def get_memory_facts(self):
- if not os.access("/proc/meminfo", os.R_OK):
- return
-
- memstats = {}
- for line in get_file_lines("/proc/meminfo"):
- data = line.split(":", 1)
- key = data[0]
- if key in self.ORIGINAL_MEMORY_FACTS:
- val = data[1].strip().split(' ')[0]
- self.facts["%s_mb" % key.lower()] = long(val) / 1024
-
- if key in self.MEMORY_FACTS:
- val = data[1].strip().split(' ')[0]
- memstats[key.lower()] = long(val) / 1024
-
- if None not in (memstats.get('memtotal'), memstats.get('memfree')):
- memstats['real:used'] = memstats['memtotal'] - memstats['memfree']
- if None not in (memstats.get('cached'), memstats.get('memfree'), memstats.get('buffers')):
- memstats['nocache:free'] = memstats['cached'] + memstats['memfree'] + memstats['buffers']
- if None not in (memstats.get('memtotal'), memstats.get('nocache:free')):
- memstats['nocache:used'] = memstats['memtotal'] - memstats['nocache:free']
- if None not in (memstats.get('swaptotal'), memstats.get('swapfree')):
- memstats['swap:used'] = memstats['swaptotal'] - memstats['swapfree']
-
- self.facts['memory_mb'] = {
- 'real' : {
- 'total': memstats.get('memtotal'),
- 'used': memstats.get('real:used'),
- 'free': memstats.get('memfree'),
- },
- 'nocache' : {
- 'free': memstats.get('nocache:free'),
- 'used': memstats.get('nocache:used'),
- },
- 'swap' : {
- 'total': memstats.get('swaptotal'),
- 'free': memstats.get('swapfree'),
- 'used': memstats.get('swap:used'),
- 'cached': memstats.get('swapcached'),
- },
- }
-
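The derived fields above follow the usual free(1) arithmetic, where 'nocache' treats page cache and buffers as reclaimable. For instance, with MemTotal=2048, MemFree=256, Buffers=128 and Cached=512 (all in MB):

    real:used    = 2048 - 256       = 1792
    nocache:free = 512 + 256 + 128  = 896
    nocache:used = 2048 - 896       = 1152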
- def get_cpu_facts(self):
- i = 0
- vendor_id_occurrence = 0
- model_name_occurrence = 0
- physid = 0
- coreid = 0
- sockets = {}
- cores = {}
-
- xen = False
- xen_paravirt = False
- try:
- if os.path.exists('/proc/xen'):
- xen = True
- else:
- for line in get_file_lines('/sys/hypervisor/type'):
- if line.strip() == 'xen':
- xen = True
- # Only interested in the first line
- break
- except IOError:
- pass
-
- if not os.access("/proc/cpuinfo", os.R_OK):
- return
- self.facts['processor'] = []
- for line in get_file_lines('/proc/cpuinfo'):
- data = line.split(":", 1)
- key = data[0].strip()
-
- if xen:
- if key == 'flags':
- # Check for vme cpu flag, Xen paravirt does not expose this.
- # Need to detect Xen paravirt because it exposes cpuinfo
- # differently than Xen HVM or KVM and causes reporting of
- # only a single cpu core.
- if 'vme' not in data:
- xen_paravirt = True
-
- # model name is for Intel arch, Processor (mind the uppercase P)
- # works for some ARM devices, like the Sheevaplug.
- if key == 'model name' or key == 'Processor' or key == 'vendor_id':
- if 'processor' not in self.facts:
- self.facts['processor'] = []
- self.facts['processor'].append(data[1].strip())
- if key == 'vendor_id':
- vendor_id_occurrence += 1
- if key == 'model name':
- model_name_occurrence += 1
- i += 1
- elif key == 'physical id':
- physid = data[1].strip()
- if physid not in sockets:
- sockets[physid] = 1
- elif key == 'core id':
- coreid = data[1].strip()
- if coreid not in sockets:
- cores[coreid] = 1
- elif key == 'cpu cores':
- sockets[physid] = int(data[1].strip())
- elif key == 'siblings':
- cores[coreid] = int(data[1].strip())
- elif key == '# processors':
- self.facts['processor_cores'] = int(data[1].strip())
-
- if vendor_id_occurrence == model_name_occurrence:
- i = vendor_id_occurrence
-
- if self.facts['architecture'] != 's390x':
- if xen_paravirt:
- self.facts['processor_count'] = i
- self.facts['processor_cores'] = i
- self.facts['processor_threads_per_core'] = 1
- self.facts['processor_vcpus'] = i
- else:
- self.facts['processor_count'] = sockets and len(sockets) or i
- self.facts['processor_cores'] = sockets.values() and sockets.values()[0] or 1
- self.facts['processor_threads_per_core'] = ((cores.values() and
- cores.values()[0] or 1) / self.facts['processor_cores'])
- self.facts['processor_vcpus'] = (self.facts['processor_threads_per_core'] *
- self.facts['processor_count'] * self.facts['processor_cores'])
-
- def get_dmi_facts(self):
- ''' learn dmi facts from system
-
- Try /sys first for dmi related facts.
- If that is not available, fall back to dmidecode executable '''
-
- if os.path.exists('/sys/devices/virtual/dmi/id/product_name'):
- # Use kernel DMI info, if available
-
- # DMI SPEC -- http://www.dmtf.org/sites/default/files/standards/documents/DSP0134_2.7.0.pdf
- FORM_FACTOR = [ "Unknown", "Other", "Unknown", "Desktop",
- "Low Profile Desktop", "Pizza Box", "Mini Tower", "Tower",
- "Portable", "Laptop", "Notebook", "Hand Held", "Docking Station",
- "All In One", "Sub Notebook", "Space-saving", "Lunch Box",
- "Main Server Chassis", "Expansion Chassis", "Sub Chassis",
- "Bus Expansion Chassis", "Peripheral Chassis", "RAID Chassis",
- "Rack Mount Chassis", "Sealed-case PC", "Multi-system",
- "CompactPCI", "AdvancedTCA", "Blade" ]
-
- DMI_DICT = {
- 'bios_date': '/sys/devices/virtual/dmi/id/bios_date',
- 'bios_version': '/sys/devices/virtual/dmi/id/bios_version',
- 'form_factor': '/sys/devices/virtual/dmi/id/chassis_type',
- 'product_name': '/sys/devices/virtual/dmi/id/product_name',
- 'product_serial': '/sys/devices/virtual/dmi/id/product_serial',
- 'product_uuid': '/sys/devices/virtual/dmi/id/product_uuid',
- 'product_version': '/sys/devices/virtual/dmi/id/product_version',
- 'system_vendor': '/sys/devices/virtual/dmi/id/sys_vendor'
- }
-
- for (key,path) in DMI_DICT.items():
- data = get_file_content(path)
- if data is not None:
- if key == 'form_factor':
- try:
- self.facts['form_factor'] = FORM_FACTOR[int(data)]
- except IndexError, e:
- self.facts['form_factor'] = 'unknown (%s)' % data
- else:
- self.facts[key] = data
- else:
- self.facts[key] = 'NA'
-
- else:
- # Fall back to using dmidecode, if available
- dmi_bin = module.get_bin_path('dmidecode')
- DMI_DICT = {
- 'bios_date': 'bios-release-date',
- 'bios_version': 'bios-version',
- 'form_factor': 'chassis-type',
- 'product_name': 'system-product-name',
- 'product_serial': 'system-serial-number',
- 'product_uuid': 'system-uuid',
- 'product_version': 'system-version',
- 'system_vendor': 'system-manufacturer'
- }
- for (k, v) in DMI_DICT.items():
- if dmi_bin is not None:
- (rc, out, err) = module.run_command('%s -s %s' % (dmi_bin, v))
- if rc == 0:
- # Strip out commented lines (specific dmidecode output)
- thisvalue = ''.join([ line for line in out.split('\n') if not line.startswith('#') ])
- try:
- json.dumps(thisvalue)
- except UnicodeDecodeError:
- thisvalue = "NA"
-
- self.facts[k] = thisvalue
- else:
- self.facts[k] = 'NA'
- else:
- self.facts[k] = 'NA'
-
- @timeout(10)
- def get_mount_facts(self):
- self.facts['mounts'] = []
- mtab = get_file_content('/etc/mtab', '')
- for line in mtab.split('\n'):
- if line.startswith('/'):
- fields = line.rstrip('\n').split()
- if fields[2] != 'none':
- size_total = None
- size_available = None
- try:
- statvfs_result = os.statvfs(fields[1])
- size_total = statvfs_result.f_bsize * statvfs_result.f_blocks
- size_available = statvfs_result.f_bsize * (statvfs_result.f_bavail)
- except OSError, e:
- continue
-
- uuid = 'NA'
- lsblkPath = module.get_bin_path("lsblk")
- if lsblkPath:
- rc, out, err = module.run_command("%s -ln --output UUID %s" % (lsblkPath, fields[0]), use_unsafe_shell=True)
-
- if rc == 0:
- uuid = out.strip()
-
- self.facts['mounts'].append(
- {'mount': fields[1],
- 'device':fields[0],
- 'fstype': fields[2],
- 'options': fields[3],
- # statvfs data
- 'size_total': size_total,
- 'size_available': size_available,
- 'uuid': uuid,
- })
-
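# [Illustrative sketch] The statvfs arithmetic used above, standalone. Note
# the code multiplies by f_bsize; strictly, POSIX defines the block counts in
# f_frsize units, but the two values match on Linux in practice.
import os

def mount_sizes(mountpoint):
    st = os.statvfs(mountpoint)
    size_total = st.f_bsize * st.f_blocks        # capacity in bytes
    size_available = st.f_bsize * st.f_bavail    # free for unprivileged users
    return size_total, size_available

total, avail = mount_sizes('/')
print('%.1f GiB total, %.1f GiB available' % (total / 2.0 ** 30, avail / 2.0 ** 30))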
- def get_device_facts(self):
- self.facts['devices'] = {}
- lspci = module.get_bin_path('lspci')
- if lspci:
- rc, pcidata, err = module.run_command([lspci, '-D'])
- else:
- pcidata = None
-
- try:
- block_devs = os.listdir("/sys/block")
- except OSError:
- return
-
- for block in block_devs:
- virtual = 1
- sysfs_no_links = 0
- try:
- path = os.readlink(os.path.join("/sys/block/", block))
- except OSError, e:
- if e.errno == errno.EINVAL:
- path = block
- sysfs_no_links = 1
- else:
- continue
- if "virtual" in path:
- continue
- sysdir = os.path.join("/sys/block", path)
- if sysfs_no_links == 1:
- for folder in os.listdir(sysdir):
- if "device" in folder:
- virtual = 0
- break
- if virtual:
- continue
- d = {}
- diskname = os.path.basename(sysdir)
- for key in ['vendor', 'model']:
- d[key] = get_file_content(sysdir + "/device/" + key)
-
-            for key,test in [ ('removable','/removable'),
- ('support_discard','/queue/discard_granularity'),
- ]:
- d[key] = get_file_content(sysdir + test)
-
- d['partitions'] = {}
- for folder in os.listdir(sysdir):
- m = re.search("(" + diskname + "\d+)", folder)
- if m:
- part = {}
- partname = m.group(1)
- part_sysdir = sysdir + "/" + partname
-
- part['start'] = get_file_content(part_sysdir + "/start",0)
- part['sectors'] = get_file_content(part_sysdir + "/size",0)
- part['sectorsize'] = get_file_content(part_sysdir + "/queue/physical_block_size")
- if not part['sectorsize']:
- part['sectorsize'] = get_file_content(part_sysdir + "/queue/hw_sector_size",512)
- part['size'] = module.pretty_bytes((float(part['sectors']) * float(part['sectorsize'])))
- d['partitions'][partname] = part
-
- d['rotational'] = get_file_content(sysdir + "/queue/rotational")
- d['scheduler_mode'] = ""
- scheduler = get_file_content(sysdir + "/queue/scheduler")
- if scheduler is not None:
- m = re.match(".*?(\[(.*)\])", scheduler)
- if m:
- d['scheduler_mode'] = m.group(2)
-
- d['sectors'] = get_file_content(sysdir + "/size")
- if not d['sectors']:
- d['sectors'] = 0
- d['sectorsize'] = get_file_content(sysdir + "/queue/physical_block_size")
- if not d['sectorsize']:
- d['sectorsize'] = get_file_content(sysdir + "/queue/hw_sector_size",512)
- d['size'] = module.pretty_bytes(float(d['sectors']) * float(d['sectorsize']))
-
- d['host'] = ""
-
- # domains are numbered (0 to ffff), bus (0 to ff), slot (0 to 1f), and function (0 to 7).
- m = re.match(".+/([a-f0-9]{4}:[a-f0-9]{2}:[0|1][a-f0-9]\.[0-7])/", sysdir)
- if m and pcidata:
- pciid = m.group(1)
- did = re.escape(pciid)
- m = re.search("^" + did + "\s(.*)$", pcidata, re.MULTILINE)
- d['host'] = m.group(1)
-
- d['holders'] = []
- if os.path.isdir(sysdir + "/holders"):
- for folder in os.listdir(sysdir + "/holders"):
- if not folder.startswith("dm-"):
- continue
- name = get_file_content(sysdir + "/holders/" + folder + "/dm/name")
- if name:
- d['holders'].append(name)
- else:
- d['holders'].append(folder)
-
- self.facts['devices'][diskname] = d
-
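# [Illustrative sketch] The sysfs size arithmetic used above, standalone.
# /sys/block/<dev>/size is documented as a count of 512-byte sectors; the
# code above multiplies by the reported physical/hw block size instead.
def read_int(path, default=0):
    try:
        with open(path) as f:
            return int(f.read().strip())
    except (IOError, OSError, ValueError):
        return default

def disk_size_bytes(dev):
    sysdir = '/sys/block/%s' % dev
    sectors = read_int(sysdir + '/size')
    sectorsize = (read_int(sysdir + '/queue/physical_block_size')
                  or read_int(sysdir + '/queue/hw_sector_size', 512))
    return sectors * sectorsize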
- def get_uptime_facts(self):
- uptime_seconds_string = get_file_content('/proc/uptime').split(' ')[0]
- self.facts['uptime_seconds'] = int(float(uptime_seconds_string))
-
-class SunOSHardware(Hardware):
- """
- In addition to the generic memory and cpu facts, this also sets
- swap_reserved_mb and swap_allocated_mb that is available from *swap -s*.
- """
- platform = 'SunOS'
-
- def __init__(self):
- Hardware.__init__(self)
-
- def populate(self):
- self.get_cpu_facts()
- self.get_memory_facts()
- try:
- self.get_mount_facts()
- except TimeoutError:
- pass
- return self.facts
-
- def get_cpu_facts(self):
- physid = 0
- sockets = {}
- rc, out, err = module.run_command("/usr/bin/kstat cpu_info")
- self.facts['processor'] = []
- for line in out.split('\n'):
- if len(line) < 1:
- continue
- data = line.split(None, 1)
- key = data[0].strip()
- # "brand" works on Solaris 10 & 11. "implementation" for Solaris 9.
- if key == 'module:':
- brand = ''
- elif key == 'brand':
- brand = data[1].strip()
- elif key == 'clock_MHz':
- clock_mhz = data[1].strip()
- elif key == 'implementation':
- processor = brand or data[1].strip()
- # Add clock speed to description for SPARC CPU
- if self.facts['machine'] != 'i86pc':
- processor += " @ " + clock_mhz + "MHz"
- if 'processor' not in self.facts:
- self.facts['processor'] = []
- self.facts['processor'].append(processor)
- elif key == 'chip_id':
- physid = data[1].strip()
- if physid not in sockets:
- sockets[physid] = 1
- else:
- sockets[physid] += 1
- # Counting cores on Solaris can be complicated.
- # https://blogs.oracle.com/mandalika/entry/solaris_show_me_the_cpu
- # Treat 'processor_count' as physical sockets and 'processor_cores' as
-        # virtual CPUs visible to Solaris. Not a true count of cores for modern SPARC, as
- # these processors have: sockets -> cores -> threads/virtual CPU.
- if len(sockets) > 0:
- self.facts['processor_count'] = len(sockets)
- self.facts['processor_cores'] = reduce(lambda x, y: x + y, sockets.values())
- else:
- self.facts['processor_cores'] = 'NA'
- self.facts['processor_count'] = len(self.facts['processor'])
-
- def get_memory_facts(self):
- rc, out, err = module.run_command(["/usr/sbin/prtconf"])
- for line in out.split('\n'):
- if 'Memory size' in line:
- self.facts['memtotal_mb'] = line.split()[2]
- rc, out, err = module.run_command("/usr/sbin/swap -s")
- allocated = long(out.split()[1][:-1])
- reserved = long(out.split()[5][:-1])
- used = long(out.split()[8][:-1])
- free = long(out.split()[10][:-1])
- self.facts['swapfree_mb'] = free / 1024
- self.facts['swaptotal_mb'] = (free + used) / 1024
- self.facts['swap_allocated_mb'] = allocated / 1024
- self.facts['swap_reserved_mb'] = reserved / 1024
-
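# [Illustrative sketch] The field positions parsed out of `swap -s` above,
# shown against a made-up sample line:
line = 'total: 4731392k bytes allocated + 1236480k reserved = 5967872k used, 10483712k available'
fields = line.split()
allocated_kb = int(fields[1][:-1])    # '4731392k' -> 4731392
reserved_kb = int(fields[5][:-1])
used_kb = int(fields[8][:-1])
free_kb = int(fields[10][:-1])
print(free_kb // 1024, 'MB free;', (free_kb + used_kb) // 1024, 'MB total')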
- @timeout(10)
- def get_mount_facts(self):
- self.facts['mounts'] = []
- # For a detailed format description see mnttab(4)
- # special mount_point fstype options time
- fstab = get_file_content('/etc/mnttab')
- if fstab:
- for line in fstab.split('\n'):
- fields = line.rstrip('\n').split('\t')
- self.facts['mounts'].append({'mount': fields[1], 'device': fields[0], 'fstype' : fields[2], 'options': fields[3], 'time': fields[4]})
-
-class OpenBSDHardware(Hardware):
- """
- OpenBSD-specific subclass of Hardware. Defines memory, CPU and device facts:
- - memfree_mb
- - memtotal_mb
- - swapfree_mb
- - swaptotal_mb
- - processor (a list)
- - processor_cores
- - processor_count
- - processor_speed
- - devices
- """
- platform = 'OpenBSD'
- DMESG_BOOT = '/var/run/dmesg.boot'
-
- def __init__(self):
- Hardware.__init__(self)
-
- def populate(self):
- self.sysctl = self.get_sysctl()
- self.get_memory_facts()
- self.get_processor_facts()
- self.get_device_facts()
- self.get_mount_facts()
- return self.facts
-
- def get_sysctl(self):
- rc, out, err = module.run_command(["/sbin/sysctl", "hw"])
- if rc != 0:
- return dict()
- sysctl = dict()
- for line in out.splitlines():
- (key, value) = line.split('=')
- sysctl[key] = value.strip()
- return sysctl
-
- @timeout(10)
- def get_mount_facts(self):
- self.facts['mounts'] = []
- fstab = get_file_content('/etc/fstab')
- if fstab:
- for line in fstab.split('\n'):
- if line.startswith('#') or line.strip() == '':
- continue
- fields = re.sub(r'\s+',' ',line.rstrip('\n')).split()
- if fields[1] == 'none' or fields[3] == 'xx':
- continue
- self.facts['mounts'].append({'mount': fields[1], 'device': fields[0], 'fstype' : fields[2], 'options': fields[3]})
-
- def get_memory_facts(self):
- # Get free memory. vmstat output looks like:
- # procs memory page disks traps cpu
- # r b w avm fre flt re pi po fr sr wd0 fd0 int sys cs us sy id
- # 0 0 0 47512 28160 51 0 0 0 0 0 1 0 116 89 17 0 1 99
- rc, out, err = module.run_command("/usr/bin/vmstat")
- if rc == 0:
- self.facts['memfree_mb'] = long(out.splitlines()[-1].split()[4]) / 1024
- self.facts['memtotal_mb'] = long(self.sysctl['hw.usermem']) / 1024 / 1024
-
- # Get swapctl info. swapctl output looks like:
- # total: 69268 1K-blocks allocated, 0 used, 69268 available
- # And for older OpenBSD:
- # total: 69268k bytes allocated = 0k used, 69268k available
- rc, out, err = module.run_command("/sbin/swapctl -sk")
- if rc == 0:
- swaptrans = maketrans(' ', ' ')
- data = out.split()
- self.facts['swapfree_mb'] = long(data[-2].translate(swaptrans, "kmg")) / 1024
- self.facts['swaptotal_mb'] = long(data[1].translate(swaptrans, "kmg")) / 1024
-
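# [Illustrative sketch] Why the translate/strip of 'kmg' above is needed:
# newer OpenBSD prints bare kilobyte counts, older releases append a unit
# suffix. Both sample lines come from the comments above.
def swap_field_kb(field):
    return int(field.rstrip('kmg'))

old = 'total: 69268k bytes allocated = 0k used, 69268k available'.split()
new = 'total: 69268 1K-blocks allocated, 0 used, 69268 available'.split()
for fields in (old, new):
    print('%d MB free' % (swap_field_kb(fields[-2]) // 1024))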
- def get_processor_facts(self):
- processor = []
- dmesg_boot = get_file_content(OpenBSDHardware.DMESG_BOOT)
- if not dmesg_boot:
- rc, dmesg_boot, err = module.run_command("/sbin/dmesg")
- i = 0
- for line in dmesg_boot.splitlines():
- if line.split(' ', 1)[0] == 'cpu%i:' % i:
- processor.append(line.split(' ', 1)[1])
- i = i + 1
- processor_count = i
- self.facts['processor'] = processor
- self.facts['processor_count'] = processor_count
-        # There is no obvious way to determine the number of cores per CPU on OpenBSD.
- self.facts['processor_cores'] = 'NA'
-
- def get_device_facts(self):
- devices = []
- devices.extend(self.sysctl['hw.disknames'].split(','))
- self.facts['devices'] = devices
-
-class FreeBSDHardware(Hardware):
- """
- FreeBSD-specific subclass of Hardware. Defines memory and CPU facts:
- - memfree_mb
- - memtotal_mb
- - swapfree_mb
- - swaptotal_mb
- - processor (a list)
- - processor_cores
- - processor_count
- - devices
- """
- platform = 'FreeBSD'
- DMESG_BOOT = '/var/run/dmesg.boot'
-
- def __init__(self):
- Hardware.__init__(self)
-
- def populate(self):
- self.get_cpu_facts()
- self.get_memory_facts()
- self.get_dmi_facts()
- self.get_device_facts()
- try:
- self.get_mount_facts()
- except TimeoutError:
- pass
- return self.facts
-
- def get_cpu_facts(self):
- self.facts['processor'] = []
- rc, out, err = module.run_command("/sbin/sysctl -n hw.ncpu")
- self.facts['processor_count'] = out.strip()
-
- dmesg_boot = get_file_content(FreeBSDHardware.DMESG_BOOT)
- if not dmesg_boot:
- rc, dmesg_boot, err = module.run_command("/sbin/dmesg")
- for line in dmesg_boot.split('\n'):
- if 'CPU:' in line:
- cpu = re.sub(r'CPU:\s+', r"", line)
- self.facts['processor'].append(cpu.strip())
- if 'Logical CPUs per core' in line:
- self.facts['processor_cores'] = line.split()[4]
-
-
- def get_memory_facts(self):
- rc, out, err = module.run_command("/sbin/sysctl vm.stats")
- for line in out.split('\n'):
- data = line.split()
- if 'vm.stats.vm.v_page_size' in line:
- pagesize = long(data[1])
- if 'vm.stats.vm.v_page_count' in line:
- pagecount = long(data[1])
- if 'vm.stats.vm.v_free_count' in line:
- freecount = long(data[1])
- self.facts['memtotal_mb'] = pagesize * pagecount / 1024 / 1024
- self.facts['memfree_mb'] = pagesize * freecount / 1024 / 1024
- # Get swapinfo. swapinfo output looks like:
- # Device 1M-blocks Used Avail Capacity
- # /dev/ada0p3 314368 0 314368 0%
- #
- rc, out, err = module.run_command("/usr/sbin/swapinfo -m")
- lines = out.split('\n')
- if len(lines[-1]) == 0:
- lines.pop()
- data = lines[-1].split()
- self.facts['swaptotal_mb'] = data[1]
- self.facts['swapfree_mb'] = data[3]
-
- @timeout(10)
- def get_mount_facts(self):
- self.facts['mounts'] = []
- fstab = get_file_content('/etc/fstab')
- if fstab:
- for line in fstab.split('\n'):
- if line.startswith('#') or line.strip() == '':
- continue
- fields = re.sub(r'\s+',' ',line.rstrip('\n')).split()
- self.facts['mounts'].append({'mount': fields[1], 'device': fields[0], 'fstype' : fields[2], 'options': fields[3]})
-
- def get_device_facts(self):
- sysdir = '/dev'
- self.facts['devices'] = {}
- drives = re.compile('(ada?\d+|da\d+|a?cd\d+)') #TODO: rc, disks, err = module.run_command("/sbin/sysctl kern.disks")
- slices = re.compile('(ada?\d+s\d+\w*|da\d+s\d+\w*)')
- if os.path.isdir(sysdir):
- dirlist = sorted(os.listdir(sysdir))
- for device in dirlist:
- d = drives.match(device)
- if d:
- self.facts['devices'][d.group(1)] = []
- s = slices.match(device)
- if s:
- self.facts['devices'][d.group(1)].append(s.group(1))
-
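# [Illustrative note] Because re.match anchors at the start of the string, a
# slice name such as 'ada0s1a' also matches the drive pattern's 'ada0'
# prefix, which is what lets the loop above append each slice to its parent
# disk's list:
import re

drives = re.compile(r'(ada?\d+|da\d+|a?cd\d+)')
slices = re.compile(r'(ada?\d+s\d+\w*|da\d+s\d+\w*)')
for name in ('ada0', 'ada0s1a', 'cd0', 'ttyv0'):
    d, s = drives.match(name), slices.match(name)
    print(name, '-> drive:', d and d.group(1), 'slice:', s and s.group(1))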
- def get_dmi_facts(self):
- ''' learn dmi facts from system
-
- Use dmidecode executable if available'''
-
- # Fall back to using dmidecode, if available
- dmi_bin = module.get_bin_path('dmidecode')
- DMI_DICT = dict(
- bios_date='bios-release-date',
- bios_version='bios-version',
- form_factor='chassis-type',
- product_name='system-product-name',
- product_serial='system-serial-number',
- product_uuid='system-uuid',
- product_version='system-version',
- system_vendor='system-manufacturer'
- )
- for (k, v) in DMI_DICT.items():
- if dmi_bin is not None:
- (rc, out, err) = module.run_command('%s -s %s' % (dmi_bin, v))
- if rc == 0:
- # Strip out commented lines (specific dmidecode output)
- self.facts[k] = ''.join([ line for line in out.split('\n') if not line.startswith('#') ])
- try:
- json.dumps(self.facts[k])
- except UnicodeDecodeError:
- self.facts[k] = 'NA'
- else:
- self.facts[k] = 'NA'
- else:
- self.facts[k] = 'NA'
-
-
-class NetBSDHardware(Hardware):
- """
- NetBSD-specific subclass of Hardware. Defines memory and CPU facts:
- - memfree_mb
- - memtotal_mb
- - swapfree_mb
- - swaptotal_mb
- - processor (a list)
- - processor_cores
- - processor_count
- - devices
- """
- platform = 'NetBSD'
- MEMORY_FACTS = ['MemTotal', 'SwapTotal', 'MemFree', 'SwapFree']
-
- def __init__(self):
- Hardware.__init__(self)
-
- def populate(self):
- self.get_cpu_facts()
- self.get_memory_facts()
- try:
- self.get_mount_facts()
- except TimeoutError:
- pass
- return self.facts
-
- def get_cpu_facts(self):
-
- i = 0
- physid = 0
- sockets = {}
- if not os.access("/proc/cpuinfo", os.R_OK):
- return
- self.facts['processor'] = []
- for line in get_file_lines("/proc/cpuinfo"):
- data = line.split(":", 1)
- key = data[0].strip()
-            # 'model name' is used on Intel arch; 'Processor' (mind the uppercase P)
-            # works for some ARM devices, like the Sheevaplug.
- if key == 'model name' or key == 'Processor':
- if 'processor' not in self.facts:
- self.facts['processor'] = []
- self.facts['processor'].append(data[1].strip())
- i += 1
- elif key == 'physical id':
- physid = data[1].strip()
- if physid not in sockets:
- sockets[physid] = 1
- elif key == 'cpu cores':
- sockets[physid] = int(data[1].strip())
- if len(sockets) > 0:
- self.facts['processor_count'] = len(sockets)
- self.facts['processor_cores'] = reduce(lambda x, y: x + y, sockets.values())
- else:
- self.facts['processor_count'] = i
- self.facts['processor_cores'] = 'NA'
-
- def get_memory_facts(self):
- if not os.access("/proc/meminfo", os.R_OK):
- return
- for line in get_file_lines("/proc/meminfo"):
- data = line.split(":", 1)
- key = data[0]
- if key in NetBSDHardware.MEMORY_FACTS:
- val = data[1].strip().split(' ')[0]
- self.facts["%s_mb" % key.lower()] = long(val) / 1024
-
- @timeout(10)
- def get_mount_facts(self):
- self.facts['mounts'] = []
- fstab = get_file_content('/etc/fstab')
- if fstab:
- for line in fstab.split('\n'):
- if line.startswith('#') or line.strip() == '':
- continue
- fields = re.sub(r'\s+',' ',line.rstrip('\n')).split()
- self.facts['mounts'].append({'mount': fields[1], 'device': fields[0], 'fstype' : fields[2], 'options': fields[3]})
-
-class AIX(Hardware):
- """
- AIX-specific subclass of Hardware. Defines memory and CPU facts:
- - memfree_mb
- - memtotal_mb
- - swapfree_mb
- - swaptotal_mb
- - processor (a list)
- - processor_cores
- - processor_count
- """
- platform = 'AIX'
-
- def __init__(self):
- Hardware.__init__(self)
-
- def populate(self):
- self.get_cpu_facts()
- self.get_memory_facts()
- self.get_dmi_facts()
- return self.facts
-
- def get_cpu_facts(self):
- self.facts['processor'] = []
-
-
- rc, out, err = module.run_command("/usr/sbin/lsdev -Cc processor")
- if out:
- i = 0
- for line in out.split('\n'):
-
- if 'Available' in line:
- if i == 0:
- data = line.split(' ')
- cpudev = data[0]
-
- i += 1
- self.facts['processor_count'] = int(i)
-
- rc, out, err = module.run_command("/usr/sbin/lsattr -El " + cpudev + " -a type")
-
- data = out.split(' ')
- self.facts['processor'] = data[1]
-
- rc, out, err = module.run_command("/usr/sbin/lsattr -El " + cpudev + " -a smt_threads")
-
- data = out.split(' ')
- self.facts['processor_cores'] = int(data[1])
-
- def get_memory_facts(self):
- pagesize = 4096
- rc, out, err = module.run_command("/usr/bin/vmstat -v")
- for line in out.split('\n'):
- data = line.split()
- if 'memory pages' in line:
- pagecount = long(data[0])
- if 'free pages' in line:
- freecount = long(data[0])
- self.facts['memtotal_mb'] = pagesize * pagecount / 1024 / 1024
- self.facts['memfree_mb'] = pagesize * freecount / 1024 / 1024
- # Get swapinfo. swapinfo output looks like:
- # Device 1M-blocks Used Avail Capacity
- # /dev/ada0p3 314368 0 314368 0%
- #
- rc, out, err = module.run_command("/usr/sbin/lsps -s")
- if out:
- lines = out.split('\n')
- data = lines[1].split()
- swaptotal_mb = long(data[0].rstrip('MB'))
- percused = int(data[1].rstrip('%'))
- self.facts['swaptotal_mb'] = swaptotal_mb
- self.facts['swapfree_mb'] = long(swaptotal_mb * ( 100 - percused ) / 100)
-
- def get_dmi_facts(self):
- rc, out, err = module.run_command("/usr/sbin/lsattr -El sys0 -a fwversion")
- data = out.split()
- self.facts['firmware_version'] = data[1].strip('IBM,')
-
-class HPUX(Hardware):
- """
- HP-UX-specific subclass of Hardware. Defines memory and CPU facts:
- - memfree_mb
- - memtotal_mb
- - swapfree_mb
- - swaptotal_mb
- - processor
- - processor_cores
- - processor_count
- - model
- - firmware
- """
-
- platform = 'HP-UX'
-
- def __init__(self):
- Hardware.__init__(self)
-
- def populate(self):
- self.get_cpu_facts()
- self.get_memory_facts()
- self.get_hw_facts()
- return self.facts
-
- def get_cpu_facts(self):
- if self.facts['architecture'] == '9000/800':
- rc, out, err = module.run_command("ioscan -FkCprocessor | wc -l", use_unsafe_shell=True)
- self.facts['processor_count'] = int(out.strip())
- #Working with machinfo mess
- elif self.facts['architecture'] == 'ia64':
- if self.facts['distribution_version'] == "B.11.23":
- rc, out, err = module.run_command("/usr/contrib/bin/machinfo | grep 'Number of CPUs'", use_unsafe_shell=True)
- self.facts['processor_count'] = int(out.strip().split('=')[1])
- rc, out, err = module.run_command("/usr/contrib/bin/machinfo | grep 'processor family'", use_unsafe_shell=True)
- self.facts['processor'] = re.search('.*(Intel.*)', out).groups()[0].strip()
- rc, out, err = module.run_command("ioscan -FkCprocessor | wc -l", use_unsafe_shell=True)
- self.facts['processor_cores'] = int(out.strip())
- if self.facts['distribution_version'] == "B.11.31":
-                #if machinfo returns core strings (release B.11.31 > 1204)
- rc, out, err = module.run_command("/usr/contrib/bin/machinfo | grep core | wc -l", use_unsafe_shell=True)
- if out.strip()== '0':
- rc, out, err = module.run_command("/usr/contrib/bin/machinfo | grep Intel", use_unsafe_shell=True)
- self.facts['processor_count'] = int(out.strip().split(" ")[0])
-                #If hyperthreading is active, divide cores by 2
- rc, out, err = module.run_command("/usr/sbin/psrset | grep LCPU", use_unsafe_shell=True)
- data = re.sub(' +',' ',out).strip().split(' ')
- if len(data) == 1:
- hyperthreading = 'OFF'
- else:
- hyperthreading = data[1]
- rc, out, err = module.run_command("/usr/contrib/bin/machinfo | grep logical", use_unsafe_shell=True)
- data = out.strip().split(" ")
- if hyperthreading == 'ON':
- self.facts['processor_cores'] = int(data[0])/2
- else:
- if len(data) == 1:
- self.facts['processor_cores'] = self.facts['processor_count']
- else:
- self.facts['processor_cores'] = int(data[0])
- rc, out, err = module.run_command("/usr/contrib/bin/machinfo | grep Intel |cut -d' ' -f4-", use_unsafe_shell=True)
- self.facts['processor'] = out.strip()
- else:
- rc, out, err = module.run_command("/usr/contrib/bin/machinfo | egrep 'socket[s]?$' | tail -1", use_unsafe_shell=True)
- self.facts['processor_count'] = int(out.strip().split(" ")[0])
- rc, out, err = module.run_command("/usr/contrib/bin/machinfo | grep -e '[0-9] core' | tail -1", use_unsafe_shell=True)
- self.facts['processor_cores'] = int(out.strip().split(" ")[0])
- rc, out, err = module.run_command("/usr/contrib/bin/machinfo | grep Intel", use_unsafe_shell=True)
- self.facts['processor'] = out.strip()
-
- def get_memory_facts(self):
- pagesize = 4096
- rc, out, err = module.run_command("/usr/bin/vmstat | tail -1", use_unsafe_shell=True)
- data = int(re.sub(' +',' ',out).split(' ')[5].strip())
- self.facts['memfree_mb'] = pagesize * data / 1024 / 1024
- if self.facts['architecture'] == '9000/800':
- try:
- rc, out, err = module.run_command("grep Physical /var/adm/syslog/syslog.log")
- data = re.search('.*Physical: ([0-9]*) Kbytes.*',out).groups()[0].strip()
- self.facts['memtotal_mb'] = int(data) / 1024
- except AttributeError:
- #For systems where memory details aren't sent to syslog or the log has rotated, use parsed
- #adb output. Unfortunately /dev/kmem doesn't have world-read, so this only works as root.
- if os.access("/dev/kmem", os.R_OK):
- rc, out, err = module.run_command("echo 'phys_mem_pages/D' | adb -k /stand/vmunix /dev/kmem | tail -1 | awk '{print $2}'", use_unsafe_shell=True)
- if not err:
- data = out
- self.facts['memtotal_mb'] = int(data) / 256
- else:
- rc, out, err = module.run_command("/usr/contrib/bin/machinfo | grep Memory", use_unsafe_shell=True)
- data = re.search('Memory[\ :=]*([0-9]*).*MB.*',out).groups()[0].strip()
- self.facts['memtotal_mb'] = int(data)
- rc, out, err = module.run_command("/usr/sbin/swapinfo -m -d -f -q")
- self.facts['swaptotal_mb'] = int(out.strip())
- rc, out, err = module.run_command("/usr/sbin/swapinfo -m -d -f | egrep '^dev|^fs'", use_unsafe_shell=True)
- swap = 0
- for line in out.strip().split('\n'):
- swap += int(re.sub(' +',' ',line).split(' ')[3].strip())
- self.facts['swapfree_mb'] = swap
-
- def get_hw_facts(self):
- rc, out, err = module.run_command("model")
- self.facts['model'] = out.strip()
- if self.facts['architecture'] == 'ia64':
- separator = ':'
- if self.facts['distribution_version'] == "B.11.23":
- separator = '='
- rc, out, err = module.run_command("/usr/contrib/bin/machinfo |grep -i 'Firmware revision' | grep -v BMC", use_unsafe_shell=True)
- self.facts['firmware_version'] = out.split(separator)[1].strip()
-
-
-class Darwin(Hardware):
- """
- Darwin-specific subclass of Hardware. Defines memory and CPU facts:
- - processor
- - processor_cores
- - memtotal_mb
- - memfree_mb
- - model
- - osversion
- - osrevision
- """
- platform = 'Darwin'
-
- def __init__(self):
- Hardware.__init__(self)
-
- def populate(self):
- self.sysctl = self.get_sysctl()
- self.get_mac_facts()
- self.get_cpu_facts()
- self.get_memory_facts()
- return self.facts
-
- def get_sysctl(self):
- rc, out, err = module.run_command(["/usr/sbin/sysctl", "hw", "machdep", "kern"])
- if rc != 0:
- return dict()
- sysctl = dict()
- for line in out.splitlines():
- if line.rstrip("\n"):
- (key, value) = re.split(' = |: ', line, maxsplit=1)
- sysctl[key] = value.strip()
- return sysctl
-
- def get_system_profile(self):
- rc, out, err = module.run_command(["/usr/sbin/system_profiler", "SPHardwareDataType"])
- if rc != 0:
- return dict()
- system_profile = dict()
- for line in out.splitlines():
- if ': ' in line:
- (key, value) = line.split(': ', 1)
- system_profile[key.strip()] = ' '.join(value.strip().split())
- return system_profile
-
- def get_mac_facts(self):
- rc, out, err = module.run_command("sysctl hw.model")
- if rc == 0:
- self.facts['model'] = out.splitlines()[-1].split()[1]
- self.facts['osversion'] = self.sysctl['kern.osversion']
- self.facts['osrevision'] = self.sysctl['kern.osrevision']
-
- def get_cpu_facts(self):
- if 'machdep.cpu.brand_string' in self.sysctl: # Intel
- self.facts['processor'] = self.sysctl['machdep.cpu.brand_string']
- self.facts['processor_cores'] = self.sysctl['machdep.cpu.core_count']
- else: # PowerPC
- system_profile = self.get_system_profile()
- self.facts['processor'] = '%s @ %s' % (system_profile['Processor Name'], system_profile['Processor Speed'])
- self.facts['processor_cores'] = self.sysctl['hw.physicalcpu']
-
- def get_memory_facts(self):
- self.facts['memtotal_mb'] = long(self.sysctl['hw.memsize']) / 1024 / 1024
-
- rc, out, err = module.run_command("sysctl hw.usermem")
- if rc == 0:
- self.facts['memfree_mb'] = long(out.splitlines()[-1].split()[1]) / 1024 / 1024
-
-
-class Network(Facts):
- """
- This is a generic Network subclass of Facts. This should be further
- subclassed to implement per platform. If you subclass this,
- you must define:
- - interfaces (a list of interface names)
- - interface_<name> dictionary of ipv4, ipv6, and mac address information.
-
- All subclasses MUST define platform.
- """
- platform = 'Generic'
-
- IPV6_SCOPE = { '0' : 'global',
- '10' : 'host',
- '20' : 'link',
- '40' : 'admin',
- '50' : 'site',
- '80' : 'organization' }
-
- def __new__(cls, *arguments, **keyword):
- subclass = cls
- for sc in Network.__subclasses__():
- if sc.platform == platform.system():
- subclass = sc
- return super(cls, subclass).__new__(subclass, *arguments, **keyword)
-
- def __init__(self, module):
- self.module = module
- Facts.__init__(self)
-
- def populate(self):
- return self.facts
-
-class LinuxNetwork(Network):
- """
- This is a Linux-specific subclass of Network. It defines
- - interfaces (a list of interface names)
- - interface_<name> dictionary of ipv4, ipv6, and mac address information.
- - all_ipv4_addresses and all_ipv6_addresses: lists of all configured addresses.
- - ipv4_address and ipv6_address: the first non-local address for each family.
- """
- platform = 'Linux'
-
- def __init__(self, module):
- Network.__init__(self, module)
-
- def populate(self):
- ip_path = self.module.get_bin_path('ip')
- if ip_path is None:
- return self.facts
- default_ipv4, default_ipv6 = self.get_default_interfaces(ip_path)
- interfaces, ips = self.get_interfaces_info(ip_path, default_ipv4, default_ipv6)
- self.facts['interfaces'] = interfaces.keys()
- for iface in interfaces:
- self.facts[iface] = interfaces[iface]
- self.facts['default_ipv4'] = default_ipv4
- self.facts['default_ipv6'] = default_ipv6
- self.facts['all_ipv4_addresses'] = ips['all_ipv4_addresses']
- self.facts['all_ipv6_addresses'] = ips['all_ipv6_addresses']
- return self.facts
-
- def get_default_interfaces(self, ip_path):
- # Use the commands:
- # ip -4 route get 8.8.8.8 -> Google public DNS
- # ip -6 route get 2404:6800:400a:800::1012 -> ipv6.google.com
- # to find out the default outgoing interface, address, and gateway
- command = dict(
- v4 = [ip_path, '-4', 'route', 'get', '8.8.8.8'],
- v6 = [ip_path, '-6', 'route', 'get', '2404:6800:400a:800::1012']
- )
- interface = dict(v4 = {}, v6 = {})
- for v in 'v4', 'v6':
- if v == 'v6' and self.facts['os_family'] == 'RedHat' \
- and self.facts['distribution_version'].startswith('4.'):
- continue
- if v == 'v6' and not socket.has_ipv6:
- continue
- rc, out, err = module.run_command(command[v])
- if not out:
- # v6 routing may result in
- # RTNETLINK answers: Invalid argument
- continue
- words = out.split('\n')[0].split()
- # A valid output starts with the queried address on the first line
- if len(words) > 0 and words[0] == command[v][-1]:
- for i in range(len(words) - 1):
- if words[i] == 'dev':
- interface[v]['interface'] = words[i+1]
- elif words[i] == 'src':
- interface[v]['address'] = words[i+1]
- elif words[i] == 'via' and words[i+1] != command[v][-1]:
- interface[v]['gateway'] = words[i+1]
- return interface['v4'], interface['v6']
-
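# [Illustrative sketch] What the word-pair scan above extracts from a typical
# `ip -4 route get 8.8.8.8` reply (the sample line is illustrative):
def parse_route_get(first_line):
    words = first_line.split()
    route = {}
    for i in range(len(words) - 1):
        if words[i] == 'dev':
            route['interface'] = words[i + 1]
        elif words[i] == 'src':
            route['address'] = words[i + 1]
        elif words[i] == 'via':
            route['gateway'] = words[i + 1]
    return route

print(parse_route_get('8.8.8.8 via 192.168.1.1 dev eth0 src 192.168.1.10'))
# -> {'gateway': '192.168.1.1', 'interface': 'eth0', 'address': '192.168.1.10'}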
- def get_interfaces_info(self, ip_path, default_ipv4, default_ipv6):
- interfaces = {}
- ips = dict(
- all_ipv4_addresses = [],
- all_ipv6_addresses = [],
- )
-
- for path in glob.glob('/sys/class/net/*'):
- if not os.path.isdir(path):
- continue
- device = os.path.basename(path)
- interfaces[device] = { 'device': device }
- if os.path.exists(os.path.join(path, 'address')):
- macaddress = get_file_content(os.path.join(path, 'address'), default='')
- if macaddress and macaddress != '00:00:00:00:00:00':
- interfaces[device]['macaddress'] = macaddress
- if os.path.exists(os.path.join(path, 'mtu')):
- interfaces[device]['mtu'] = int(get_file_content(os.path.join(path, 'mtu')))
- if os.path.exists(os.path.join(path, 'operstate')):
- interfaces[device]['active'] = get_file_content(os.path.join(path, 'operstate')) != 'down'
-# if os.path.exists(os.path.join(path, 'carrier')):
-# interfaces[device]['link'] = get_file_content(os.path.join(path, 'carrier')) == '1'
- if os.path.exists(os.path.join(path, 'device','driver', 'module')):
- interfaces[device]['module'] = os.path.basename(os.path.realpath(os.path.join(path, 'device', 'driver', 'module')))
- if os.path.exists(os.path.join(path, 'type')):
- _type = get_file_content(os.path.join(path, 'type'))
- if _type == '1':
- interfaces[device]['type'] = 'ether'
- elif _type == '512':
- interfaces[device]['type'] = 'ppp'
- elif _type == '772':
- interfaces[device]['type'] = 'loopback'
- if os.path.exists(os.path.join(path, 'bridge')):
- interfaces[device]['type'] = 'bridge'
- interfaces[device]['interfaces'] = [ os.path.basename(b) for b in glob.glob(os.path.join(path, 'brif', '*')) ]
- if os.path.exists(os.path.join(path, 'bridge', 'bridge_id')):
- interfaces[device]['id'] = get_file_content(os.path.join(path, 'bridge', 'bridge_id'), default='')
- if os.path.exists(os.path.join(path, 'bridge', 'stp_state')):
- interfaces[device]['stp'] = get_file_content(os.path.join(path, 'bridge', 'stp_state')) == '1'
- if os.path.exists(os.path.join(path, 'bonding')):
- interfaces[device]['type'] = 'bonding'
- interfaces[device]['slaves'] = get_file_content(os.path.join(path, 'bonding', 'slaves'), default='').split()
- interfaces[device]['mode'] = get_file_content(os.path.join(path, 'bonding', 'mode'), default='').split()[0]
- interfaces[device]['miimon'] = get_file_content(os.path.join(path, 'bonding', 'miimon'), default='').split()[0]
- interfaces[device]['lacp_rate'] = get_file_content(os.path.join(path, 'bonding', 'lacp_rate'), default='').split()[0]
- primary = get_file_content(os.path.join(path, 'bonding', 'primary'))
- if primary:
- interfaces[device]['primary'] = primary
- path = os.path.join(path, 'bonding', 'all_slaves_active')
- if os.path.exists(path):
- interfaces[device]['all_slaves_active'] = get_file_content(path) == '1'
-
- # Check whether an interface is in promiscuous mode
- if os.path.exists(os.path.join(path,'flags')):
- promisc_mode = False
- # The second byte indicates whether the interface is in promiscuous mode.
- # 1 = promisc
- # 0 = no promisc
- data = int(get_file_content(os.path.join(path, 'flags')),16)
- promisc_mode = (data & 0x0100 > 0)
- interfaces[device]['promisc'] = promisc_mode
-
- def parse_ip_output(output, secondary=False):
- for line in output.split('\n'):
- if not line:
- continue
- words = line.split()
- if words[0] == 'inet':
- if '/' in words[1]:
- address, netmask_length = words[1].split('/')
- else:
- # pointopoint interfaces do not have a prefix
- address = words[1]
- netmask_length = "32"
- address_bin = struct.unpack('!L', socket.inet_aton(address))[0]
- netmask_bin = (1<<32) - (1<<32>>int(netmask_length))
- netmask = socket.inet_ntoa(struct.pack('!L', netmask_bin))
- network = socket.inet_ntoa(struct.pack('!L', address_bin & netmask_bin))
- iface = words[-1]
- if iface != device:
- interfaces[iface] = {}
- if not secondary and "ipv4" not in interfaces[iface]:
- interfaces[iface]['ipv4'] = {'address': address,
- 'netmask': netmask,
- 'network': network}
- else:
- if "ipv4_secondaries" not in interfaces[iface]:
- interfaces[iface]["ipv4_secondaries"] = []
- interfaces[iface]["ipv4_secondaries"].append({
- 'address': address,
- 'netmask': netmask,
- 'network': network,
- })
-
- # add this secondary IP to the main device
- if secondary:
- if "ipv4_secondaries" not in interfaces[device]:
- interfaces[device]["ipv4_secondaries"] = []
- interfaces[device]["ipv4_secondaries"].append({
- 'address': address,
- 'netmask': netmask,
- 'network': network,
- })
-
- # If this is the default address, update default_ipv4
- if 'address' in default_ipv4 and default_ipv4['address'] == address:
- default_ipv4['netmask'] = netmask
- default_ipv4['network'] = network
- default_ipv4['macaddress'] = macaddress
- default_ipv4['mtu'] = interfaces[device]['mtu']
- default_ipv4['type'] = interfaces[device].get("type", "unknown")
- default_ipv4['alias'] = words[-1]
- if not address.startswith('127.'):
- ips['all_ipv4_addresses'].append(address)
- elif words[0] == 'inet6':
- address, prefix = words[1].split('/')
- scope = words[3]
- if 'ipv6' not in interfaces[device]:
- interfaces[device]['ipv6'] = []
- interfaces[device]['ipv6'].append({
- 'address' : address,
- 'prefix' : prefix,
- 'scope' : scope
- })
- # If this is the default address, update default_ipv6
- if 'address' in default_ipv6 and default_ipv6['address'] == address:
- default_ipv6['prefix'] = prefix
- default_ipv6['scope'] = scope
- default_ipv6['macaddress'] = macaddress
- default_ipv6['mtu'] = interfaces[device]['mtu']
- default_ipv6['type'] = interfaces[device].get("type", "unknown")
- if not address == '::1':
- ips['all_ipv6_addresses'].append(address)
-
- ip_path = module.get_bin_path("ip")
-
- args = [ip_path, 'addr', 'show', 'primary', device]
- rc, stdout, stderr = self.module.run_command(args)
- primary_data = stdout
-
- args = [ip_path, 'addr', 'show', 'secondary', device]
- rc, stdout, stderr = self.module.run_command(args)
- secondary_data = stdout
-
- parse_ip_output(primary_data)
- parse_ip_output(secondary_data, secondary=True)
-
-        # replace ':' with '_' in interface names, since colons are hard to use in templates
- new_interfaces = {}
- for i in interfaces:
- if ':' in i:
- new_interfaces[i.replace(':','_')] = interfaces[i]
- else:
- new_interfaces[i] = interfaces[i]
- return new_interfaces, ips
-
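# [Illustrative sketch] The prefix-length bit math used in parse_ip_output
# above: a /N netmask has the top N bits set, which is exactly
# (1 << 32) - (1 << 32 >> N).
import socket
import struct

def ipv4_net(address, prefix_len):
    addr_bin = struct.unpack('!L', socket.inet_aton(address))[0]
    mask_bin = (1 << 32) - (1 << 32 >> prefix_len)
    netmask = socket.inet_ntoa(struct.pack('!L', mask_bin))
    network = socket.inet_ntoa(struct.pack('!L', addr_bin & mask_bin))
    return netmask, network

print(ipv4_net('192.168.1.10', 24))   # -> ('255.255.255.0', '192.168.1.0')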
-class GenericBsdIfconfigNetwork(Network):
- """
- This is a generic BSD subclass of Network using the ifconfig command.
- It defines
- - interfaces (a list of interface names)
- - interface_<name> dictionary of ipv4, ipv6, and mac address information.
- - all_ipv4_addresses and all_ipv6_addresses: lists of all configured addresses.
- It currently does not define
- - default_ipv4 and default_ipv6
- - type, mtu and network on interfaces
- """
- platform = 'Generic_BSD_Ifconfig'
-
- def __init__(self, module):
- Network.__init__(self, module)
-
- def populate(self):
-
- ifconfig_path = module.get_bin_path('ifconfig')
-
- if ifconfig_path is None:
- return self.facts
- route_path = module.get_bin_path('route')
-
- if route_path is None:
- return self.facts
-
- default_ipv4, default_ipv6 = self.get_default_interfaces(route_path)
- interfaces, ips = self.get_interfaces_info(ifconfig_path)
- self.merge_default_interface(default_ipv4, interfaces, 'ipv4')
- self.merge_default_interface(default_ipv6, interfaces, 'ipv6')
- self.facts['interfaces'] = interfaces.keys()
-
- for iface in interfaces:
- self.facts[iface] = interfaces[iface]
-
- self.facts['default_ipv4'] = default_ipv4
- self.facts['default_ipv6'] = default_ipv6
- self.facts['all_ipv4_addresses'] = ips['all_ipv4_addresses']
- self.facts['all_ipv6_addresses'] = ips['all_ipv6_addresses']
-
- return self.facts
-
- def get_default_interfaces(self, route_path):
-
- # Use the commands:
- # route -n get 8.8.8.8 -> Google public DNS
- # route -n get -inet6 2404:6800:400a:800::1012 -> ipv6.google.com
- # to find out the default outgoing interface, address, and gateway
-
- command = dict(
- v4 = [route_path, '-n', 'get', '8.8.8.8'],
- v6 = [route_path, '-n', 'get', '-inet6', '2404:6800:400a:800::1012']
- )
-
- interface = dict(v4 = {}, v6 = {})
-
- for v in 'v4', 'v6':
-
- if v == 'v6' and not socket.has_ipv6:
- continue
- rc, out, err = module.run_command(command[v])
- if not out:
- # v6 routing may result in
- # RTNETLINK answers: Invalid argument
- continue
- lines = out.split('\n')
- for line in lines:
- words = line.split()
- # Collect output from route command
- if len(words) > 1:
- if words[0] == 'interface:':
- interface[v]['interface'] = words[1]
- if words[0] == 'gateway:':
- interface[v]['gateway'] = words[1]
-
- return interface['v4'], interface['v6']
-
- def get_interfaces_info(self, ifconfig_path):
- interfaces = {}
- current_if = {}
- ips = dict(
- all_ipv4_addresses = [],
- all_ipv6_addresses = [],
- )
- # FreeBSD, DragonflyBSD, NetBSD, OpenBSD and OS X all implicitly add '-a'
- # when running the command 'ifconfig'.
- # Solaris must explicitly run the command 'ifconfig -a'.
- rc, out, err = module.run_command([ifconfig_path, '-a'])
-
- for line in out.split('\n'):
-
- if line:
- words = line.split()
-
- if words[0] == 'pass':
- continue
- elif re.match('^\S', line) and len(words) > 3:
- current_if = self.parse_interface_line(words)
- interfaces[ current_if['device'] ] = current_if
- elif words[0].startswith('options='):
- self.parse_options_line(words, current_if, ips)
- elif words[0] == 'nd6':
- self.parse_nd6_line(words, current_if, ips)
- elif words[0] == 'ether':
- self.parse_ether_line(words, current_if, ips)
- elif words[0] == 'media:':
- self.parse_media_line(words, current_if, ips)
- elif words[0] == 'status:':
- self.parse_status_line(words, current_if, ips)
- elif words[0] == 'lladdr':
- self.parse_lladdr_line(words, current_if, ips)
- elif words[0] == 'inet':
- self.parse_inet_line(words, current_if, ips)
- elif words[0] == 'inet6':
- self.parse_inet6_line(words, current_if, ips)
- else:
- self.parse_unknown_line(words, current_if, ips)
-
- return interfaces, ips
-
- def parse_interface_line(self, words):
- device = words[0][0:-1]
- current_if = {'device': device, 'ipv4': [], 'ipv6': [], 'type': 'unknown'}
- current_if['flags'] = self.get_options(words[1])
- current_if['macaddress'] = 'unknown' # will be overwritten later
-
- if len(words) >= 5 : # Newer FreeBSD versions
- current_if['metric'] = words[3]
- current_if['mtu'] = words[5]
- else:
- current_if['mtu'] = words[3]
-
- return current_if
-
- def parse_options_line(self, words, current_if, ips):
- # Mac has options like this...
- current_if['options'] = self.get_options(words[0])
-
- def parse_nd6_line(self, words, current_if, ips):
- # FreeBSD has options like this...
- current_if['options'] = self.get_options(words[1])
-
- def parse_ether_line(self, words, current_if, ips):
- current_if['macaddress'] = words[1]
-
- def parse_media_line(self, words, current_if, ips):
- # not sure if this is useful - we also drop information
- current_if['media'] = words[1]
- if len(words) > 2:
- current_if['media_select'] = words[2]
- if len(words) > 3:
- current_if['media_type'] = words[3][1:]
- if len(words) > 4:
- current_if['media_options'] = self.get_options(words[4])
-
- def parse_status_line(self, words, current_if, ips):
- current_if['status'] = words[1]
-
- def parse_lladdr_line(self, words, current_if, ips):
- current_if['lladdr'] = words[1]
-
- def parse_inet_line(self, words, current_if, ips):
- address = {'address': words[1]}
- # deal with hex netmask
- if re.match('([0-9a-f]){8}', words[3]) and len(words[3]) == 8:
- words[3] = '0x' + words[3]
- if words[3].startswith('0x'):
- address['netmask'] = socket.inet_ntoa(struct.pack('!L', int(words[3], base=16)))
- else:
- # otherwise assume this is a dotted quad
- address['netmask'] = words[3]
- # calculate the network
- address_bin = struct.unpack('!L', socket.inet_aton(address['address']))[0]
- netmask_bin = struct.unpack('!L', socket.inet_aton(address['netmask']))[0]
- address['network'] = socket.inet_ntoa(struct.pack('!L', address_bin & netmask_bin))
- # broadcast may be given or we need to calculate
- if len(words) > 5:
- address['broadcast'] = words[5]
- else:
- address['broadcast'] = socket.inet_ntoa(struct.pack('!L', address_bin | (~netmask_bin & 0xffffffff)))
- # add to our list of addresses
- if not words[1].startswith('127.'):
- ips['all_ipv4_addresses'].append(address['address'])
- current_if['ipv4'].append(address)
-
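# [Illustrative sketch] The hex-netmask and broadcast handling above: BSD
# ifconfig may print 'netmask 0xffffff00', and when no broadcast is shown it
# is the address OR'd with the complement of the mask.
import socket
import struct

def bsd_inet(address, netmask_field):
    if netmask_field.startswith('0x'):
        netmask = socket.inet_ntoa(struct.pack('!L', int(netmask_field, 16)))
    else:
        netmask = netmask_field   # assume dotted quad
    addr_bin = struct.unpack('!L', socket.inet_aton(address))[0]
    mask_bin = struct.unpack('!L', socket.inet_aton(netmask))[0]
    broadcast = socket.inet_ntoa(struct.pack('!L', addr_bin | (~mask_bin & 0xffffffff)))
    return netmask, broadcast

print(bsd_inet('10.0.0.5', '0xffffff00'))   # -> ('255.255.255.0', '10.0.0.255')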
- def parse_inet6_line(self, words, current_if, ips):
- address = {'address': words[1]}
- if (len(words) >= 4) and (words[2] == 'prefixlen'):
- address['prefix'] = words[3]
- if (len(words) >= 6) and (words[4] == 'scopeid'):
- address['scope'] = words[5]
- localhost6 = ['::1', '::1/128', 'fe80::1%lo0']
- if address['address'] not in localhost6:
- ips['all_ipv6_addresses'].append(address['address'])
- current_if['ipv6'].append(address)
-
- def parse_unknown_line(self, words, current_if, ips):
- # we are going to ignore unknown lines here - this may be
- # a bad idea - but you can override it in your subclass
- pass
-
- def get_options(self, option_string):
- start = option_string.find('<') + 1
- end = option_string.rfind('>')
- if (start > 0) and (end > 0) and (end > start + 1):
- option_csv = option_string[start:end]
- return option_csv.split(',')
- else:
- return []
-
- def merge_default_interface(self, defaults, interfaces, ip_type):
- if not 'interface' in defaults.keys():
- return
- if not defaults['interface'] in interfaces:
- return
- ifinfo = interfaces[defaults['interface']]
- # copy all the interface values across except addresses
- for item in ifinfo.keys():
- if item != 'ipv4' and item != 'ipv6':
- defaults[item] = ifinfo[item]
- if len(ifinfo[ip_type]) > 0:
- for item in ifinfo[ip_type][0].keys():
- defaults[item] = ifinfo[ip_type][0][item]
-
-class DarwinNetwork(GenericBsdIfconfigNetwork, Network):
- """
- This is the Mac OS X/Darwin Network Class.
- It uses the GenericBsdIfconfigNetwork unchanged
- """
- platform = 'Darwin'
-
- # media line is different to the default FreeBSD one
- def parse_media_line(self, words, current_if, ips):
- # not sure if this is useful - we also drop information
- current_if['media'] = 'Unknown' # Mac does not give us this
- current_if['media_select'] = words[1]
- if len(words) > 2:
- current_if['media_type'] = words[2][1:-1]
- if len(words) > 3:
- current_if['media_options'] = self.get_options(words[3])
-
-
-class FreeBSDNetwork(GenericBsdIfconfigNetwork, Network):
- """
- This is the FreeBSD Network Class.
- It uses the GenericBsdIfconfigNetwork unchanged.
- """
- platform = 'FreeBSD'
-
-class AIXNetwork(GenericBsdIfconfigNetwork, Network):
- """
- This is the AIX Network Class.
- It uses the GenericBsdIfconfigNetwork unchanged.
- """
- platform = 'AIX'
-
- # AIX 'ifconfig -a' does not have three words in the interface line
- def get_interfaces_info(self, ifconfig_path):
- interfaces = {}
- current_if = {}
- ips = dict(
- all_ipv4_addresses = [],
- all_ipv6_addresses = [],
- )
- rc, out, err = module.run_command([ifconfig_path, '-a'])
-
- for line in out.split('\n'):
-
- if line:
- words = line.split()
-
- # only this condition differs from GenericBsdIfconfigNetwork
- if re.match('^\w*\d*:', line):
- current_if = self.parse_interface_line(words)
- interfaces[ current_if['device'] ] = current_if
- elif words[0].startswith('options='):
- self.parse_options_line(words, current_if, ips)
- elif words[0] == 'nd6':
- self.parse_nd6_line(words, current_if, ips)
- elif words[0] == 'ether':
- self.parse_ether_line(words, current_if, ips)
- elif words[0] == 'media:':
- self.parse_media_line(words, current_if, ips)
- elif words[0] == 'status:':
- self.parse_status_line(words, current_if, ips)
- elif words[0] == 'lladdr':
- self.parse_lladdr_line(words, current_if, ips)
- elif words[0] == 'inet':
- self.parse_inet_line(words, current_if, ips)
- elif words[0] == 'inet6':
- self.parse_inet6_line(words, current_if, ips)
- else:
- self.parse_unknown_line(words, current_if, ips)
- uname_path = module.get_bin_path('uname')
- if uname_path:
- rc, out, err = module.run_command([uname_path, '-W'])
-                # don't bother with wpars; it does not work
- # zero means not in wpar
- if out.split()[0] == '0':
- if current_if['macaddress'] == 'unknown' and re.match('^en', current_if['device']):
- entstat_path = module.get_bin_path('entstat')
- if entstat_path:
- rc, out, err = module.run_command([entstat_path, current_if['device'] ])
- if rc != 0:
- break
- for line in out.split('\n'):
- if not line:
- pass
- buff = re.match('^Hardware Address: (.*)', line)
- if buff:
- current_if['macaddress'] = buff.group(1)
-
- buff = re.match('^Device Type:', line)
- if buff and re.match('.*Ethernet', line):
- current_if['type'] = 'ether'
- # device must have mtu attribute in ODM
- if 'mtu' not in current_if:
- lsattr_path = module.get_bin_path('lsattr')
- if lsattr_path:
- rc, out, err = module.run_command([lsattr_path,'-El', current_if['device'] ])
- if rc != 0:
- break
- for line in out.split('\n'):
- if line:
- words = line.split()
- if words[0] == 'mtu':
- current_if['mtu'] = words[1]
- return interfaces, ips
-
-    # AIX 'ifconfig -a' does not report the MTU, so current_if['mtu'] is omitted here
- def parse_interface_line(self, words):
- device = words[0][0:-1]
- current_if = {'device': device, 'ipv4': [], 'ipv6': [], 'type': 'unknown'}
- current_if['flags'] = self.get_options(words[1])
- current_if['macaddress'] = 'unknown' # will be overwritten later
- return current_if
-
-class OpenBSDNetwork(GenericBsdIfconfigNetwork, Network):
- """
- This is the OpenBSD Network Class.
- It uses the GenericBsdIfconfigNetwork.
- """
- platform = 'OpenBSD'
-
- # Return macaddress instead of lladdr
- def parse_lladdr_line(self, words, current_if, ips):
- current_if['macaddress'] = words[1]
-
-class SunOSNetwork(GenericBsdIfconfigNetwork, Network):
- """
- This is the SunOS Network Class.
- It uses the GenericBsdIfconfigNetwork.
-
- Solaris can have different FLAGS and MTU for IPv4 and IPv6 on the same interface
- so these facts have been moved inside the 'ipv4' and 'ipv6' lists.
- """
- platform = 'SunOS'
-
- # Solaris 'ifconfig -a' will print interfaces twice, once for IPv4 and again for IPv6.
- # MTU and FLAGS also may differ between IPv4 and IPv6 on the same interface.
- # 'parse_interface_line()' checks for previously seen interfaces before defining
- # 'current_if' so that IPv6 facts don't clobber IPv4 facts (or vice versa).
- def get_interfaces_info(self, ifconfig_path):
- interfaces = {}
- current_if = {}
- ips = dict(
- all_ipv4_addresses = [],
- all_ipv6_addresses = [],
- )
- rc, out, err = module.run_command([ifconfig_path, '-a'])
-
- for line in out.split('\n'):
-
- if line:
- words = line.split()
-
- if re.match('^\S', line) and len(words) > 3:
- current_if = self.parse_interface_line(words, current_if, interfaces)
- interfaces[ current_if['device'] ] = current_if
- elif words[0].startswith('options='):
- self.parse_options_line(words, current_if, ips)
- elif words[0] == 'nd6':
- self.parse_nd6_line(words, current_if, ips)
- elif words[0] == 'ether':
- self.parse_ether_line(words, current_if, ips)
- elif words[0] == 'media:':
- self.parse_media_line(words, current_if, ips)
- elif words[0] == 'status:':
- self.parse_status_line(words, current_if, ips)
- elif words[0] == 'lladdr':
- self.parse_lladdr_line(words, current_if, ips)
- elif words[0] == 'inet':
- self.parse_inet_line(words, current_if, ips)
- elif words[0] == 'inet6':
- self.parse_inet6_line(words, current_if, ips)
- else:
- self.parse_unknown_line(words, current_if, ips)
-
- # 'parse_interface_line' and 'parse_inet*_line' leave two dicts in the
- # ipv4/ipv6 lists which is ugly and hard to read.
- # This quick hack merges the dictionaries. Purely cosmetic.
- for iface in interfaces:
- for v in 'ipv4', 'ipv6':
- combined_facts = {}
- for facts in interfaces[iface][v]:
- combined_facts.update(facts)
- if len(combined_facts.keys()) > 0:
- interfaces[iface][v] = [combined_facts]
-
- return interfaces, ips
-
- def parse_interface_line(self, words, current_if, interfaces):
- device = words[0][0:-1]
- if device not in interfaces.keys():
- current_if = {'device': device, 'ipv4': [], 'ipv6': [], 'type': 'unknown'}
- else:
- current_if = interfaces[device]
- flags = self.get_options(words[1])
- v = 'ipv4'
- if 'IPv6' in flags:
- v = 'ipv6'
- current_if[v].append({'flags': flags, 'mtu': words[3]})
- current_if['macaddress'] = 'unknown' # will be overwritten later
- return current_if
-
- # Solaris displays single digit octets in MAC addresses e.g. 0:1:2:d:e:f
- # Add leading zero to each octet where needed.
- def parse_ether_line(self, words, current_if, ips):
- macaddress = ''
- for octet in words[1].split(':'):
- octet = ('0' + octet)[-2:None]
- macaddress += (octet + ':')
- current_if['macaddress'] = macaddress[0:-1]
-
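# [Illustrative sketch] The octet zero-padding done in parse_ether_line above:
def normalize_mac(mac):
    return ':'.join(('0' + octet)[-2:] for octet in mac.split(':'))

print(normalize_mac('0:3:ba:2:fe:10'))   # -> 00:03:ba:02:fe:10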
-class Virtual(Facts):
- """
- This is a generic Virtual subclass of Facts. This should be further
- subclassed to implement per platform. If you subclass this,
- you should define:
- - virtualization_type
- - virtualization_role
- - container (e.g. solaris zones, freebsd jails, linux containers)
-
- All subclasses MUST define platform.
- """
-
- def __new__(cls, *arguments, **keyword):
- subclass = cls
- for sc in Virtual.__subclasses__():
- if sc.platform == platform.system():
- subclass = sc
- return super(cls, subclass).__new__(subclass, *arguments, **keyword)
-
- def __init__(self):
- Facts.__init__(self)
-
- def populate(self):
- return self.facts
-
-class LinuxVirtual(Virtual):
- """
- This is a Linux-specific subclass of Virtual. It defines
- - virtualization_type
- - virtualization_role
- """
- platform = 'Linux'
-
- def __init__(self):
- Virtual.__init__(self)
-
- def populate(self):
- self.get_virtual_facts()
- return self.facts
-
- # For more information, check: http://people.redhat.com/~rjones/virt-what/
- def get_virtual_facts(self):
- if os.path.exists("/proc/xen"):
- self.facts['virtualization_type'] = 'xen'
- self.facts['virtualization_role'] = 'guest'
- try:
- for line in get_file_lines('/proc/xen/capabilities'):
- if "control_d" in line:
- self.facts['virtualization_role'] = 'host'
- except IOError:
- pass
- return
-
- if os.path.exists('/proc/vz'):
- self.facts['virtualization_type'] = 'openvz'
- if os.path.exists('/proc/bc'):
- self.facts['virtualization_role'] = 'host'
- else:
- self.facts['virtualization_role'] = 'guest'
- return
-
- if os.path.exists('/proc/1/cgroup'):
- for line in get_file_lines('/proc/1/cgroup'):
- if re.search(r'/docker(/|-[0-9a-f]+\.scope)', line):
- self.facts['virtualization_type'] = 'docker'
- self.facts['virtualization_role'] = 'guest'
- return
- if re.search('/lxc/', line):
- self.facts['virtualization_type'] = 'lxc'
- self.facts['virtualization_role'] = 'guest'
- return
-
- product_name = get_file_content('/sys/devices/virtual/dmi/id/product_name')
-
- if product_name in ['KVM', 'Bochs']:
- self.facts['virtualization_type'] = 'kvm'
- self.facts['virtualization_role'] = 'guest'
- return
-
- if product_name == 'RHEV Hypervisor':
- self.facts['virtualization_type'] = 'RHEV'
- self.facts['virtualization_role'] = 'guest'
- return
-
- if product_name == 'VMware Virtual Platform':
- self.facts['virtualization_type'] = 'VMware'
- self.facts['virtualization_role'] = 'guest'
- return
-
- bios_vendor = get_file_content('/sys/devices/virtual/dmi/id/bios_vendor')
-
- if bios_vendor == 'Xen':
- self.facts['virtualization_type'] = 'xen'
- self.facts['virtualization_role'] = 'guest'
- return
-
- if bios_vendor == 'innotek GmbH':
- self.facts['virtualization_type'] = 'virtualbox'
- self.facts['virtualization_role'] = 'guest'
- return
-
- sys_vendor = get_file_content('/sys/devices/virtual/dmi/id/sys_vendor')
-
- # FIXME: This does also match hyperv
- if sys_vendor == 'Microsoft Corporation':
- self.facts['virtualization_type'] = 'VirtualPC'
- self.facts['virtualization_role'] = 'guest'
- return
-
- if sys_vendor == 'Parallels Software International Inc.':
- self.facts['virtualization_type'] = 'parallels'
- self.facts['virtualization_role'] = 'guest'
- return
-
- if sys_vendor == 'QEMU':
- self.facts['virtualization_type'] = 'kvm'
- self.facts['virtualization_role'] = 'guest'
- return
-
- if sys_vendor == 'oVirt':
- self.facts['virtualization_type'] = 'kvm'
- self.facts['virtualization_role'] = 'guest'
- return
-
- if os.path.exists('/proc/self/status'):
- for line in get_file_lines('/proc/self/status'):
- if re.match('^VxID: \d+', line):
- self.facts['virtualization_type'] = 'linux_vserver'
- if re.match('^VxID: 0', line):
- self.facts['virtualization_role'] = 'host'
- else:
- self.facts['virtualization_role'] = 'guest'
- return
-
- if os.path.exists('/proc/cpuinfo'):
- for line in get_file_lines('/proc/cpuinfo'):
- if re.match('^model name.*QEMU Virtual CPU', line):
- self.facts['virtualization_type'] = 'kvm'
- elif re.match('^vendor_id.*User Mode Linux', line):
- self.facts['virtualization_type'] = 'uml'
- elif re.match('^model name.*UML', line):
- self.facts['virtualization_type'] = 'uml'
- elif re.match('^vendor_id.*PowerVM Lx86', line):
- self.facts['virtualization_type'] = 'powervm_lx86'
- elif re.match('^vendor_id.*IBM/S390', line):
- self.facts['virtualization_type'] = 'PR/SM'
- lscpu = module.get_bin_path('lscpu')
- if lscpu:
- rc, out, err = module.run_command(["lscpu"])
- if rc == 0:
- for line in out.split("\n"):
- data = line.split(":", 1)
- key = data[0].strip()
- if key == 'Hypervisor':
- self.facts['virtualization_type'] = data[1].strip()
- else:
- self.facts['virtualization_type'] = 'ibm_systemz'
- else:
- continue
- if self.facts['virtualization_type'] == 'PR/SM':
- self.facts['virtualization_role'] = 'LPAR'
- else:
- self.facts['virtualization_role'] = 'guest'
- return
-
- # Beware that we can have both kvm and virtualbox running on a single system
- if os.path.exists("/proc/modules") and os.access('/proc/modules', os.R_OK):
- modules = []
- for line in get_file_lines("/proc/modules"):
- data = line.split(" ", 1)
- modules.append(data[0])
-
- if 'kvm' in modules:
- self.facts['virtualization_type'] = 'kvm'
- self.facts['virtualization_role'] = 'host'
- return
-
- if 'vboxdrv' in modules:
- self.facts['virtualization_type'] = 'virtualbox'
- self.facts['virtualization_role'] = 'host'
- return
-
- # If none of the above matches, return 'NA' for virtualization_type
- # and virtualization_role. This allows for proper grouping.
- self.facts['virtualization_type'] = 'NA'
- self.facts['virtualization_role'] = 'NA'
- return
-
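# [Illustrative sketch] The DMI string checks above, condensed into one
# lookup table. Mappings are taken from the code above; coverage is partial,
# and every match here implies the 'guest' role.
def read_dmi(name):
    try:
        with open('/sys/devices/virtual/dmi/id/' + name) as f:
            return f.read().strip()
    except (IOError, OSError):
        return ''

DMI_GUESSES = {
    'product_name': {'KVM': 'kvm', 'Bochs': 'kvm', 'RHEV Hypervisor': 'RHEV',
                     'VMware Virtual Platform': 'VMware'},
    'bios_vendor': {'Xen': 'xen', 'innotek GmbH': 'virtualbox'},
    'sys_vendor': {'QEMU': 'kvm', 'oVirt': 'kvm',
                   'Parallels Software International Inc.': 'parallels'},
}

def guess_virtualization_type():
    for key, table in DMI_GUESSES.items():
        vtype = table.get(read_dmi(key))
        if vtype:
            return vtype
    return 'NA'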
-class FreeBSDVirtual(Virtual):
- """
- This is a FreeBSD-specific subclass of Virtual. It defines
- - virtualization_type
- - virtualization_role
- """
- platform = 'FreeBSD'
-
- def __init__(self):
- Virtual.__init__(self)
-
- def populate(self):
- self.get_virtual_facts()
- return self.facts
-
- def get_virtual_facts(self):
- self.facts['virtualization_type'] = ''
- self.facts['virtualization_role'] = ''
-
-class OpenBSDVirtual(Virtual):
- """
- This is a OpenBSD-specific subclass of Virtual. It defines
- - virtualization_type
- - virtualization_role
- """
- platform = 'OpenBSD'
-
- def __init__(self):
- Virtual.__init__(self)
-
- def populate(self):
- self.get_virtual_facts()
- return self.facts
-
- def get_virtual_facts(self):
- self.facts['virtualization_type'] = ''
- self.facts['virtualization_role'] = ''
-
-class HPUXVirtual(Virtual):
- """
- This is a HP-UX specific subclass of Virtual. It defines
- - virtualization_type
- - virtualization_role
- """
- platform = 'HP-UX'
-
- def __init__(self):
- Virtual.__init__(self)
-
- def populate(self):
- self.get_virtual_facts()
- return self.facts
-
- def get_virtual_facts(self):
- if os.path.exists('/usr/sbin/vecheck'):
- rc, out, err = module.run_command("/usr/sbin/vecheck")
- if rc == 0:
- self.facts['virtualization_type'] = 'guest'
- self.facts['virtualization_role'] = 'HP vPar'
- if os.path.exists('/opt/hpvm/bin/hpvminfo'):
- rc, out, err = module.run_command("/opt/hpvm/bin/hpvminfo")
- if rc == 0 and re.match('.*Running.*HPVM vPar.*', out):
- self.facts['virtualization_type'] = 'guest'
- self.facts['virtualization_role'] = 'HPVM vPar'
- elif rc == 0 and re.match('.*Running.*HPVM guest.*', out):
- self.facts['virtualization_type'] = 'guest'
- self.facts['virtualization_role'] = 'HPVM IVM'
- elif rc == 0 and re.match('.*Running.*HPVM host.*', out):
- self.facts['virtualization_type'] = 'host'
- self.facts['virtualization_role'] = 'HPVM'
- if os.path.exists('/usr/sbin/parstatus'):
- rc, out, err = module.run_command("/usr/sbin/parstatus")
- if rc == 0:
- self.facts['virtualization_type'] = 'guest'
- self.facts['virtualization_role'] = 'HP nPar'
-
-
-class SunOSVirtual(Virtual):
- """
- This is a SunOS-specific subclass of Virtual. It defines
- - virtualization_type
- - virtualization_role
- - container
- """
- platform = 'SunOS'
-
- def __init__(self):
- Virtual.__init__(self)
-
- def populate(self):
- self.get_virtual_facts()
- return self.facts
-
- def get_virtual_facts(self):
- rc, out, err = module.run_command("/usr/sbin/prtdiag")
- for line in out.split('\n'):
- if 'VMware' in line:
- self.facts['virtualization_type'] = 'vmware'
- self.facts['virtualization_role'] = 'guest'
- if 'Parallels' in line:
- self.facts['virtualization_type'] = 'parallels'
- self.facts['virtualization_role'] = 'guest'
- if 'VirtualBox' in line:
- self.facts['virtualization_type'] = 'virtualbox'
- self.facts['virtualization_role'] = 'guest'
- if 'HVM domU' in line:
- self.facts['virtualization_type'] = 'xen'
- self.facts['virtualization_role'] = 'guest'
- # Check if it's a zone
- if os.path.exists("/usr/bin/zonename"):
- rc, out, err = module.run_command("/usr/bin/zonename")
- if out.rstrip() != "global":
- self.facts['container'] = 'zone'
- # Check if it's a branded zone (i.e. Solaris 8/9 zone)
- if os.path.isdir('/.SUNWnative'):
- self.facts['container'] = 'zone'
- # If it's a zone check if we can detect if our global zone is itself virtualized.
- # Relies on the "guest tools" (e.g. vmware tools) to be installed
- if 'container' in self.facts and self.facts['container'] == 'zone':
- rc, out, err = module.run_command("/usr/sbin/modinfo")
- for line in out.split('\n'):
- if 'VMware' in line:
- self.facts['virtualization_type'] = 'vmware'
- self.facts['virtualization_role'] = 'guest'
- if 'VirtualBox' in line:
- self.facts['virtualization_type'] = 'virtualbox'
- self.facts['virtualization_role'] = 'guest'
- # Detect domaining on Sparc hardware
- if os.path.exists("/usr/sbin/virtinfo"):
- # The output of virtinfo differs depending on whether we are on a machine with logical
- # domains ('LDoms') on a T-series or domains ('Domains') on an M-series. Try LDoms first.
- rc, out, err = module.run_command("/usr/sbin/virtinfo -p")
- # The output contains multiple lines with different keys like this:
- # DOMAINROLE|impl=LDoms|control=false|io=false|service=false|root=false
- # The output may also be unformatted, and the return code is 0 regardless of the error condition:
- # virtinfo can only be run from the global zone
- try:
- for line in out.split('\n'):
- fields = line.split('|')
- if fields[0] == 'DOMAINROLE' and fields[1] == 'impl=LDoms':
- self.facts['virtualization_type'] = 'ldom'
- self.facts['virtualization_role'] = 'guest'
- hostfeatures = []
- for field in fields[2:]:
- arg = field.split('=')
- if arg[1] == 'true':
- hostfeatures.append(arg[0])
- if len(hostfeatures) > 0:
- self.facts['virtualization_role'] = 'host (' + ','.join(hostfeatures) + ')'
- # short or malformed lines raise IndexError rather than ValueError, so catch both
- except (ValueError, IndexError):
- pass
-
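For reference, a minimal standalone sketch of the DOMAINROLE parsing above (the function name is hypothetical; the sample line is the one quoted in the comment):

def parse_domainrole(line):
    # Mirrors SunOSVirtual.get_virtual_facts: a DOMAINROLE line from
    # 'virtinfo -p' yields ('ldom', 'guest'), upgraded to a host role
    # listing whichever features are flagged true.
    fields = line.split('|')
    if fields[0] != 'DOMAINROLE' or fields[1] != 'impl=LDoms':
        return None
    hostfeatures = [f.split('=')[0] for f in fields[2:] if f.split('=')[1] == 'true']
    if hostfeatures:
        return ('ldom', 'host (' + ','.join(hostfeatures) + ')')
    return ('ldom', 'guest')

# parse_domainrole('DOMAINROLE|impl=LDoms|control=true|io=true|service=false|root=false')
# -> ('ldom', 'host (control,io)')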
-def get_file_content(path, default=None, strip=True):
- data = default
- if os.path.exists(path) and os.access(path, os.R_OK):
- try:
- datafile = open(path)
- data = datafile.read()
- if strip:
- data = data.strip()
- if len(data) == 0:
- data = default
- finally:
- datafile.close()
- return data
-
-def get_file_lines(path):
- '''file.readlines() that closes the file'''
- datafile = open(path)
- try:
- return datafile.readlines()
- finally:
- datafile.close()
-
-def ansible_facts(module):
- facts = {}
- facts.update(Facts().populate())
- facts.update(Hardware().populate())
- facts.update(Network(module).populate())
- facts.update(Virtual().populate())
- return facts
-
-# ===========================================
-
-def get_all_facts(module):
-
- setup_options = dict(module_setup=True)
- facts = ansible_facts(module)
-
- for (k, v) in facts.items():
- setup_options["ansible_%s" % k.replace('-', '_')] = v
-
- # Look for the paths to the facter and ohai binaries and set
- # the variables to those paths.
-
- facter_path = module.get_bin_path('facter')
- ohai_path = module.get_bin_path('ohai')
-
- # if facter is installed, and we can use --json because
- # ruby-json is ALSO installed, include facter data in the JSON
-
- if facter_path is not None:
- rc, out, err = module.run_command(facter_path + " --json")
- facter = True
- try:
- facter_ds = json.loads(out)
- except:
- facter = False
- if facter:
- for (k,v) in facter_ds.items():
- setup_options["facter_%s" % k] = v
-
- # ditto for ohai
-
- if ohai_path is not None:
- rc, out, err = module.run_command(ohai_path)
- ohai = True
- try:
- ohai_ds = json.loads(out)
- except:
- ohai = False
- if ohai:
- for (k,v) in ohai_ds.items():
- k2 = "ohai_%s" % k.replace('-', '_')
- setup_options[k2] = v
-
- setup_result = { 'ansible_facts': {} }
-
- for (k,v) in setup_options.items():
- if module.params['filter'] == '*' or fnmatch.fnmatch(k, module.params['filter']):
- setup_result['ansible_facts'][k] = v
-
- # hack to keep --verbose from showing all the setup module results
- setup_result['verbose_override'] = True
-
- return setup_result
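The filter handling at the end of get_all_facts is plain fnmatch globbing over fact names. A minimal sketch of the same selection rule (the sample facts are made up):

import fnmatch

def filter_facts(facts, pattern):
    # '*' passes everything through; any other pattern is matched
    # against each key with shell-style globbing, as above.
    if pattern == '*':
        return dict(facts)
    return dict((k, v) for (k, v) in facts.items() if fnmatch.fnmatch(k, pattern))

facts = {'ansible_hostname': 'web01', 'ansible_eth0': {'ipv4': '203.0.113.10'}}
print(filter_facts(facts, 'ansible_eth*'))  # {'ansible_eth0': {'ipv4': '203.0.113.10'}}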
diff --git a/v1/ansible/module_utils/gce.py b/v1/ansible/module_utils/gce.py
deleted file mode 100644
index 37a4bf1dea..0000000000
--- a/v1/ansible/module_utils/gce.py
+++ /dev/null
@@ -1,93 +0,0 @@
-# This code is part of Ansible, but is an independent component.
-# This particular file snippet, and this file snippet only, is BSD licensed.
-# Modules you write using this snippet, which is embedded dynamically by Ansible
-# still belong to the author of the module, and may assign their own license
-# to the complete work.
-#
-# Copyright (c), Franck Cuny <franck.cuny@gmail.com>, 2014
-# All rights reserved.
-#
-# Redistribution and use in source and binary forms, with or without modification,
-# are permitted provided that the following conditions are met:
-#
-# * Redistributions of source code must retain the above copyright
-# notice, this list of conditions and the following disclaimer.
-# * Redistributions in binary form must reproduce the above copyright notice,
-# this list of conditions and the following disclaimer in the documentation
-# and/or other materials provided with the distribution.
-#
-# THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS" AND
-# ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED
-# WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE DISCLAIMED.
-# IN NO EVENT SHALL THE COPYRIGHT HOLDER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT,
-# INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO,
-# PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS
-# INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
-# LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE
-# USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
-#
-
-import os
-import pprint
-
-# Note: Provider and get_driver are expected to be imported from libcloud
-# (libcloud.compute.types / libcloud.compute.providers) by the module that
-# embeds this snippet.
-
-USER_AGENT_PRODUCT="Ansible-gce"
-USER_AGENT_VERSION="v1"
-
-def gce_connect(module, provider=None):
- """Return a Google Cloud Engine connection."""
- service_account_email = module.params.get('service_account_email', None)
- pem_file = module.params.get('pem_file', None)
- project_id = module.params.get('project_id', None)
-
- # If any of the values are not given as parameters, check the appropriate
- # environment variables.
- if not service_account_email:
- service_account_email = os.environ.get('GCE_EMAIL', None)
- if not project_id:
- project_id = os.environ.get('GCE_PROJECT', None)
- if not pem_file:
- pem_file = os.environ.get('GCE_PEM_FILE_PATH', None)
-
- # If we still don't have one or more of our credentials, attempt to
- # get the remaining values from the libcloud secrets file.
- if service_account_email is None or pem_file is None:
- try:
- import secrets
- except ImportError:
- secrets = None
-
- if hasattr(secrets, 'GCE_PARAMS'):
- if not service_account_email:
- service_account_email = secrets.GCE_PARAMS[0]
- if not pem_file:
- pem_file = secrets.GCE_PARAMS[1]
- keyword_params = getattr(secrets, 'GCE_KEYWORD_PARAMS', {})
- if not project_id:
- project_id = keyword_params.get('project', None)
-
- # If we *still* don't have the credentials we need, then it's time to
- # just fail out.
- if service_account_email is None or pem_file is None or project_id is None:
- module.fail_json(msg='Missing GCE connection parameters in libcloud '
- 'secrets file.')
- return None
-
- # Allow the caller to pass in a different libcloud provider, e.g. Provider.GOOGLE for Google DNS
- if provider is None:
- provider = Provider.GCE
-
- try:
- gce = get_driver(provider)(service_account_email, pem_file,
- datacenter=module.params.get('zone', None),
- project=project_id)
- gce.connection.user_agent_append("%s/%s" % (
- USER_AGENT_PRODUCT, USER_AGENT_VERSION))
- except (RuntimeError, ValueError), e:
- module.fail_json(msg=str(e), changed=False)
- except Exception, e:
- module.fail_json(msg=unexpected_error_msg(e), changed=False)
-
- return gce
-
-def unexpected_error_msg(error):
- """Create an error string based on passed in error."""
- return 'Unexpected response: ' + pprint.pformat(vars(error))
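gce_connect resolves credentials through a three-level fallback: explicit module parameters, then the GCE_* environment variables, then the libcloud secrets file. A hedged sketch of just that precedence chain (the helper name and the ValueError are illustrative; the real code calls module.fail_json instead):

import os

def resolve_gce_credentials(params, secrets=None):
    # Params win, then environment variables, then a libcloud-style
    # secrets object (GCE_PARAMS tuple plus GCE_KEYWORD_PARAMS dict).
    email = params.get('service_account_email') or os.environ.get('GCE_EMAIL')
    pem = params.get('pem_file') or os.environ.get('GCE_PEM_FILE_PATH')
    project = params.get('project_id') or os.environ.get('GCE_PROJECT')
    if secrets is not None and (not email or not pem):
        gce_params = getattr(secrets, 'GCE_PARAMS', (None, None))
        email = email or gce_params[0]
        pem = pem or gce_params[1]
        project = project or getattr(secrets, 'GCE_KEYWORD_PARAMS', {}).get('project')
    if not (email and pem and project):
        raise ValueError('Missing GCE connection parameters')
    return email, pem, project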
diff --git a/v1/ansible/module_utils/known_hosts.py b/v1/ansible/module_utils/known_hosts.py
deleted file mode 100644
index 99dbf2c03a..0000000000
--- a/v1/ansible/module_utils/known_hosts.py
+++ /dev/null
@@ -1,176 +0,0 @@
-# This code is part of Ansible, but is an independent component.
-# This particular file snippet, and this file snippet only, is BSD licensed.
-# Modules you write using this snippet, which is embedded dynamically by Ansible
-# still belong to the author of the module, and may assign their own license
-# to the complete work.
-#
-# Copyright (c), Michael DeHaan <michael.dehaan@gmail.com>, 2012-2013
-# All rights reserved.
-#
-# Redistribution and use in source and binary forms, with or without modification,
-# are permitted provided that the following conditions are met:
-#
-# * Redistributions of source code must retain the above copyright
-# notice, this list of conditions and the following disclaimer.
-# * Redistributions in binary form must reproduce the above copyright notice,
-# this list of conditions and the following disclaimer in the documentation
-# and/or other materials provided with the distribution.
-#
-# THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS" AND
-# ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED
-# WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE DISCLAIMED.
-# IN NO EVENT SHALL THE COPYRIGHT HOLDER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT,
-# INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO,
-# PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS
-# INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
-# LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE
-# USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
-
-import hmac
-import os
-import urlparse
-
-try:
- from hashlib import sha1
-except ImportError:
- import sha as sha1
-
-HASHED_KEY_MAGIC = "|1|"
-
-def add_git_host_key(module, url, accept_hostkey=True, create_dir=True):
-
- """ idempotently add a git url hostkey """
-
- fqdn = get_fqdn(url)
-
- if fqdn:
- known_host = check_hostkey(module, fqdn)
- if not known_host:
- if accept_hostkey:
- rc, out, err = add_host_key(module, fqdn, create_dir=create_dir)
- if rc != 0:
- module.fail_json(msg="failed to add %s hostkey: %s" % (fqdn, out + err))
- else:
- module.fail_json(msg="%s has an unknown hostkey. Set accept_hostkey to True or manually add the hostkey prior to running the git module" % fqdn)
-
-def get_fqdn(repo_url):
-
- """ chop the hostname out of a giturl """
-
- result = None
- if "@" in repo_url and "://" not in repo_url:
- # most likely a git@ or ssh+git@ type URL
- repo_url = repo_url.split("@", 1)[1]
- if ":" in repo_url:
- repo_url = repo_url.split(":")[0]
- result = repo_url
- elif "/" in repo_url:
- repo_url = repo_url.split("/")[0]
- result = repo_url
- elif "://" in repo_url:
- # this should be something we can parse with urlparse
- parts = urlparse.urlparse(repo_url)
- if 'ssh' not in parts[0] and 'git' not in parts[0]:
- # don't try and scan a hostname that's not ssh
- return None
- # parts[1] will be empty on python2.4 on ssh:// or git:// urls, so
- # ensure we actually have a parts[1] before continuing.
- if parts[1] != '':
- result = parts[1]
- if ":" in result:
- result = result.split(":")[0]
- if "@" in result:
- result = result.split("@", 1)[1]
-
- return result
-
-def check_hostkey(module, fqdn):
- return not not_in_host_file(module, fqdn)
-
-# this is a variant of code found in connection_plugins/paramiko.py and we should modify
-# the paramiko code to import and use this.
-
-def not_in_host_file(module, host):
- # the first argument is the AnsibleModule instance; it is currently unused
-
- if 'USER' in os.environ:
- user_host_file = os.path.expandvars("~${USER}/.ssh/known_hosts")
- else:
- user_host_file = "~/.ssh/known_hosts"
- user_host_file = os.path.expanduser(user_host_file)
-
- host_file_list = []
- host_file_list.append(user_host_file)
- host_file_list.append("/etc/ssh/ssh_known_hosts")
- host_file_list.append("/etc/ssh/ssh_known_hosts2")
-
- hfiles_not_found = 0
- for hf in host_file_list:
- if not os.path.exists(hf):
- hfiles_not_found += 1
- continue
-
- try:
- host_fh = open(hf)
- except IOError, e:
- hfiles_not_found += 1
- continue
- else:
- data = host_fh.read()
- host_fh.close()
-
- for line in data.split("\n"):
- if line is None or " " not in line:
- continue
- tokens = line.split()
- if tokens[0].find(HASHED_KEY_MAGIC) == 0:
- # this is a hashed known host entry
- try:
- (kn_salt,kn_host) = tokens[0][len(HASHED_KEY_MAGIC):].split("|",2)
- hash = hmac.new(kn_salt.decode('base64'), digestmod=sha1)
- hash.update(host)
- if hash.digest() == kn_host.decode('base64'):
- return False
- except:
- # invalid hashed host key, skip it
- continue
- else:
- # standard host file entry
- if host in tokens[0]:
- return False
-
- return True
-
-
-def add_host_key(module, fqdn, key_type="rsa", create_dir=False):
-
- """ use ssh-keyscan to add the hostkey """
-
- result = False
- keyscan_cmd = module.get_bin_path('ssh-keyscan', True)
-
- if 'USER' in os.environ:
- user_ssh_dir = os.path.expandvars("~${USER}/.ssh/")
- user_host_file = os.path.expandvars("~${USER}/.ssh/known_hosts")
- else:
- user_ssh_dir = "~/.ssh/"
- user_host_file = "~/.ssh/known_hosts"
- user_ssh_dir = os.path.expanduser(user_ssh_dir)
-
- if not os.path.exists(user_ssh_dir):
- if create_dir:
- try:
- os.makedirs(user_ssh_dir, 0700)
- except:
- module.fail_json(msg="failed to create host key directory: %s" % user_ssh_dir)
- else:
- module.fail_json(msg="%s does not exist" % user_ssh_dir)
- elif not os.path.isdir(user_ssh_dir):
- module.fail_json(msg="%s is not a directory" % user_ssh_dir)
-
- this_cmd = "%s -t %s %s" % (keyscan_cmd, key_type, fqdn)
-
- rc, out, err = module.run_command(this_cmd)
- module.append_to_file(user_host_file, out)
-
- return rc, out, err
-
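The hashed branch of not_in_host_file implements OpenSSH's hashed known_hosts format: an entry of the form |1|base64(salt)|base64(HMAC-SHA1(salt, hostname)). A self-contained sketch of that comparison (Python 2, matching the snippet; the function name is hypothetical):

import hmac
from hashlib import sha1

HASHED_KEY_MAGIC = "|1|"

def host_matches_hashed_entry(host, entry):
    # entry is the first token of a known_hosts line, e.g. "|1|<b64 salt>|<b64 digest>".
    # The hostname matches when HMAC-SHA1, keyed with the decoded salt and run over
    # the hostname, equals the decoded digest.
    salt_b64, digest_b64 = entry[len(HASHED_KEY_MAGIC):].split("|", 2)
    mac = hmac.new(salt_b64.decode('base64'), host, sha1)
    return mac.digest() == digest_b64.decode('base64')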
diff --git a/v1/ansible/module_utils/openstack.py b/v1/ansible/module_utils/openstack.py
deleted file mode 100644
index 4069449144..0000000000
--- a/v1/ansible/module_utils/openstack.py
+++ /dev/null
@@ -1,104 +0,0 @@
-# This code is part of Ansible, but is an independent component.
-# This particular file snippet, and this file snippet only, is BSD licensed.
-# Modules you write using this snippet, which is embedded dynamically by Ansible
-# still belong to the author of the module, and may assign their own license
-# to the complete work.
-#
-# Copyright (c) 2014 Hewlett-Packard Development Company, L.P.
-# All rights reserved.
-#
-# Redistribution and use in source and binary forms, with or without modification,
-# are permitted provided that the following conditions are met:
-#
-# * Redistributions of source code must retain the above copyright
-# notice, this list of conditions and the following disclaimer.
-# * Redistributions in binary form must reproduce the above copyright notice,
-# this list of conditions and the following disclaimer in the documentation
-# and/or other materials provided with the distribution.
-#
-# THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS" AND
-# ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED
-# WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE DISCLAIMED.
-# IN NO EVENT SHALL THE COPYRIGHT HOLDER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT,
-# INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO,
-# PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS
-# INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
-# LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE
-# USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
-
-import os
-
-
-def openstack_argument_spec():
- # DEPRECATED: This argument spec is only used for the deprecated old
- # OpenStack modules. It turns out that modern OpenStack auth is WAY
- # more complex than this.
- # Consume standard OpenStack environment variables.
- # This is mainly useful for ad-hoc command-line operation; in playbooks,
- # one would assume variables are used appropriately.
- OS_AUTH_URL=os.environ.get('OS_AUTH_URL', 'http://127.0.0.1:35357/v2.0/')
- OS_PASSWORD=os.environ.get('OS_PASSWORD', None)
- OS_REGION_NAME=os.environ.get('OS_REGION_NAME', None)
- OS_USERNAME=os.environ.get('OS_USERNAME', 'admin')
- OS_TENANT_NAME=os.environ.get('OS_TENANT_NAME', OS_USERNAME)
-
- spec = dict(
- login_username = dict(default=OS_USERNAME),
- auth_url = dict(default=OS_AUTH_URL),
- region_name = dict(default=OS_REGION_NAME),
- availability_zone = dict(default=None),
- )
- if OS_PASSWORD:
- spec['login_password'] = dict(default=OS_PASSWORD)
- else:
- spec['login_password'] = dict(required=True)
- if OS_TENANT_NAME:
- spec['login_tenant_name'] = dict(default=OS_TENANT_NAME)
- else:
- spec['login_tenant_name'] = dict(required=True)
- return spec
-
-def openstack_find_nova_addresses(addresses, ext_tag, key_name=None):
-
- ret = []
- for (k, v) in addresses.iteritems():
- if key_name and k == key_name:
- ret.extend([addrs['addr'] for addrs in v])
- else:
- for interface_spec in v:
- if 'OS-EXT-IPS:type' in interface_spec and interface_spec['OS-EXT-IPS:type'] == ext_tag:
- ret.append(interface_spec['addr'])
- return ret
-
-def openstack_full_argument_spec(**kwargs):
- spec = dict(
- cloud=dict(default=None),
- auth_type=dict(default=None),
- auth=dict(default=None),
- region_name=dict(default=None),
- availability_zone=dict(default=None),
- verify=dict(default=True, aliases=['validate_certs']),
- cacert=dict(default=None),
- cert=dict(default=None),
- key=dict(default=None),
- wait=dict(default=True, type='bool'),
- timeout=dict(default=180, type='int'),
- api_timeout=dict(default=None, type='int'),
- endpoint_type=dict(
- default='public', choices=['public', 'internal', 'admin']
- )
- )
- spec.update(kwargs)
- return spec
-
-
-def openstack_module_kwargs(**kwargs):
- ret = {}
- for key in ('mutually_exclusive', 'required_together', 'required_one_of'):
- if key in kwargs:
- if key in ret:
- ret[key].extend(kwargs[key])
- else:
- ret[key] = kwargs[key]
-
- return ret
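As a usage sketch, a module built on these helpers combines the full argument spec with its own options and passes the kwargs straight to AnsibleModule (the name/state options and the mutually_exclusive pair are illustrative, not taken from any particular module):

from ansible.module_utils.basic import AnsibleModule

argument_spec = openstack_full_argument_spec(
    name=dict(required=True),
    state=dict(default='present', choices=['present', 'absent']),
)
module = AnsibleModule(
    argument_spec=argument_spec,
    **openstack_module_kwargs(mutually_exclusive=[['auth', 'cloud']]))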
diff --git a/v1/ansible/module_utils/powershell.ps1 b/v1/ansible/module_utils/powershell.ps1
deleted file mode 100644
index a11e316989..0000000000
--- a/v1/ansible/module_utils/powershell.ps1
+++ /dev/null
@@ -1,166 +0,0 @@
-# This particular file snippet, and this file snippet only, is BSD licensed.
-# Modules you write using this snippet, which is embedded dynamically by Ansible
-# still belong to the author of the module, and may assign their own license
-# to the complete work.
-#
-# Copyright (c), Michael DeHaan <michael.dehaan@gmail.com>, 2014, and others
-# All rights reserved.
-#
-# Redistribution and use in source and binary forms, with or without modification,
-# are permitted provided that the following conditions are met:
-#
-# * Redistributions of source code must retain the above copyright
-# notice, this list of conditions and the following disclaimer.
-# * Redistributions in binary form must reproduce the above copyright notice,
-# this list of conditions and the following disclaimer in the documentation
-# and/or other materials provided with the distribution.
-#
-# THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS" AND
-# ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED
-# WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE DISCLAIMED.
-# IN NO EVENT SHALL THE COPYRIGHT HOLDER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT,
-# INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO,
-# PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS
-# INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
-# LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE
-# USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
-#
-
-# Helper function to parse Ansible JSON arguments from a file passed as
-# the single argument to the module
-# Example: $params = Parse-Args $args
-Function Parse-Args($arguments)
-{
- $parameters = New-Object psobject;
- If ($arguments.Length -gt 0)
- {
- $parameters = Get-Content $arguments[0] | ConvertFrom-Json;
- }
- $parameters;
-}
-
-# Helper function to set an "attribute" on a psobject instance in powershell.
-# This is a convenience to make adding Members to the object easier and
-# slightly more pythonic
-# Example: Set-Attr $result "changed" $true
-Function Set-Attr($obj, $name, $value)
-{
- # If the provided $obj is undefined, define one to be nice
- If (-not $obj.GetType)
- {
- $obj = New-Object psobject
- }
-
- $obj | Add-Member -Force -MemberType NoteProperty -Name $name -Value $value
-}
-
-# Helper function to convert a powershell object to JSON, echo it, and exit
-# the script
-# Example: Exit-Json $result
-Function Exit-Json($obj)
-{
- # If the provided $obj is undefined, define one to be nice
- If (-not $obj.GetType)
- {
- $obj = New-Object psobject
- }
-
- echo $obj | ConvertTo-Json -Compress -Depth 99
- Exit
-}
-
-# Helper function to add the "msg" property and "failed" property, convert the
-# powershell object to JSON and echo it, exiting the script
-# Example: Fail-Json $result "This is the failure message"
-Function Fail-Json($obj, $message = $null)
-{
- # If we weren't given 2 args, and the only arg was a string, create a new
- # psobject and use the arg as the failure message
- If ($message -eq $null -and $obj.GetType().Name -eq "String")
- {
- $message = $obj
- $obj = New-Object psobject
- }
- # If the first arg is undefined or not an object, make it an object
- ElseIf (-not $obj.GetType -or $obj.GetType().Name -ne "PSCustomObject")
- {
- $obj = New-Object psobject
- }
-
- Set-Attr $obj "msg" $message
- Set-Attr $obj "failed" $true
- echo $obj | ConvertTo-Json -Compress -Depth 99
- Exit 1
-}
-
-# Helper function to get an "attribute" from a psobject instance in powershell.
-# This is a convenience to make getting Members from an object easier and
-# slightly more pythonic
-# Example: $attr = Get-Attr $response "code" -default "1"
-# Note that if you use the failifempty option, you need to specify resultobject as well.
-Function Get-Attr($obj, $name, $default = $null,$resultobj, $failifempty=$false, $emptyattributefailmessage)
-{
- # Check if the provided Member $name exists in $obj and return it or the
- # default
- If ($obj.$name.GetType)
- {
- $obj.$name
- }
- Elseif($failifempty -eq $false)
- {
- $default
- }
- else
- {
- if (!$emptyattributefailmessage) {$emptyattributefailmessage = "Missing required argument: $name"}
- Fail-Json -obj $resultobj -message $emptyattributefailmessage
- }
- return
-}
-
-# Helper filter/pipeline function to convert a value to boolean following current
-# Ansible practices
-# Example: $is_true = "true" | ConvertTo-Bool
-Function ConvertTo-Bool
-{
- param(
- [parameter(valuefrompipeline=$true)]
- $obj
- )
-
- $boolean_strings = "yes", "on", "1", "true", 1
- $obj_string = [string]$obj
-
- if (($obj.GetType().Name -eq "Boolean" -and $obj) -or $boolean_strings -contains $obj_string.ToLower())
- {
- $true
- }
- Else
- {
- $false
- }
- return
-}
-
-# Helper function to calculate a hash of a file in a way which powershell 3
-# and above can handle:
-Function Get-FileChecksum($path)
-{
- $hash = ""
- If (Test-Path -PathType Leaf $path)
- {
- $sp = new-object -TypeName System.Security.Cryptography.SHA1CryptoServiceProvider;
- $fp = [System.IO.File]::Open($path, [System.IO.Filemode]::Open, [System.IO.FileAccess]::Read);
- $hash = [System.BitConverter]::ToString($sp.ComputeHash($fp)).Replace("-", "").ToLower();
- $fp.Dispose();
- }
- ElseIf (Test-Path -PathType Container $path)
- {
- $hash= "3";
- }
- Else
- {
- $hash = "1";
- }
- return $hash
-}
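These helpers are the PowerShell half of a file-based JSON protocol: the controller writes the module parameters to a temporary JSON file, passes its path as the single script argument (read by Parse-Args), and parses one JSON object back from stdout (written by Exit-Json or Fail-Json). A rough Python sketch of the controller side of that round trip, with a hypothetical script name:

import json
import subprocess
import tempfile

params = {'name': 'example', 'state': 'present'}

# Write the arguments file that Parse-Args will load via ConvertFrom-Json.
with tempfile.NamedTemporaryFile(suffix='.json', delete=False) as argsfile:
    argsfile.write(json.dumps(params).encode('utf-8'))

# Run the module and parse the single JSON object printed by Exit-Json/Fail-Json.
proc = subprocess.Popen(['powershell', '-File', 'win_example.ps1', argsfile.name],
                        stdout=subprocess.PIPE)
out, _ = proc.communicate()
result = json.loads(out)
if result.get('failed'):
    raise RuntimeError(result.get('msg'))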
diff --git a/v1/ansible/module_utils/rax.py b/v1/ansible/module_utils/rax.py
deleted file mode 100644
index 73b48cc780..0000000000
--- a/v1/ansible/module_utils/rax.py
+++ /dev/null
@@ -1,328 +0,0 @@
-# This code is part of Ansible, but is an independent component.
-# This particular file snippet, and this file snippet only, is BSD licensed.
-# Modules you write using this snippet, which is embedded dynamically by
-# Ansible still belong to the author of the module, and may assign their own
-# license to the complete work.
-#
-# Copyright (c), Michael DeHaan <michael.dehaan@gmail.com>, 2012-2013
-# All rights reserved.
-#
-# Redistribution and use in source and binary forms, with or without
-# modification, are permitted provided that the following conditions are met:
-#
-# * Redistributions of source code must retain the above copyright
-# notice, this list of conditions and the following disclaimer.
-# * Redistributions in binary form must reproduce the above copyright
-# notice, this list of conditions and the following disclaimer in the
-# documentation and/or other materials provided with the distribution.
-#
-# THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS"
-# AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
-# IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
-# ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT HOLDER OR CONTRIBUTORS BE
-# LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
-# CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
-# SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS
-# INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN
-# CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
-# ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
-# POSSIBILITY OF SUCH DAMAGE.
-
-from uuid import UUID
-
-
-FINAL_STATUSES = ('ACTIVE', 'ERROR')
-VOLUME_STATUS = ('available', 'attaching', 'creating', 'deleting', 'in-use',
- 'error', 'error_deleting')
-
-CLB_ALGORITHMS = ['RANDOM', 'LEAST_CONNECTIONS', 'ROUND_ROBIN',
- 'WEIGHTED_LEAST_CONNECTIONS', 'WEIGHTED_ROUND_ROBIN']
-CLB_PROTOCOLS = ['DNS_TCP', 'DNS_UDP', 'FTP', 'HTTP', 'HTTPS', 'IMAPS',
- 'IMAPv4', 'LDAP', 'LDAPS', 'MYSQL', 'POP3', 'POP3S', 'SMTP',
- 'TCP', 'TCP_CLIENT_FIRST', 'UDP', 'UDP_STREAM', 'SFTP']
-
-NON_CALLABLES = (basestring, bool, dict, int, list, type(None))
-PUBLIC_NET_ID = "00000000-0000-0000-0000-000000000000"
-SERVICE_NET_ID = "11111111-1111-1111-1111-111111111111"
-
-
-def rax_slugify(value):
- """Prepend a key with rax_ and normalize the key name"""
- return 'rax_%s' % (re.sub('[^\w-]', '_', value).lower().lstrip('_'))
-
-
-def rax_clb_node_to_dict(obj):
- """Function to convert a CLB Node object to a dict"""
- if not obj:
- return {}
- node = obj.to_dict()
- node['id'] = obj.id
- node['weight'] = obj.weight
- return node
-
-
-def rax_to_dict(obj, obj_type='standard'):
- """Generic function to convert a pyrax object to a dict
-
- obj_type values:
- standard
- clb
- server
-
- """
- instance = {}
- for key in dir(obj):
- value = getattr(obj, key)
- if obj_type == 'clb' and key == 'nodes':
- instance[key] = []
- for node in value:
- instance[key].append(rax_clb_node_to_dict(node))
- elif (isinstance(value, list) and len(value) > 0 and
- not isinstance(value[0], NON_CALLABLES)):
- instance[key] = []
- for item in value:
- instance[key].append(rax_to_dict(item))
- elif (isinstance(value, NON_CALLABLES) and not key.startswith('_')):
- if obj_type == 'server':
- if key == 'image':
- if not value:
- instance['rax_boot_source'] = 'volume'
- else:
- instance['rax_boot_source'] = 'local'
- key = rax_slugify(key)
- instance[key] = value
-
- if obj_type == 'server':
- for attr in ['id', 'accessIPv4', 'name', 'status']:
- instance[attr] = instance.get(rax_slugify(attr))
-
- return instance
-
-
-def rax_find_bootable_volume(module, rax_module, server, exit=True):
- """Find a servers bootable volume"""
- cs = rax_module.cloudservers
- cbs = rax_module.cloud_blockstorage
- server_id = rax_module.utils.get_id(server)
- volumes = cs.volumes.get_server_volumes(server_id)
- bootable_volumes = []
- for volume in volumes:
- vol = cbs.get(volume)
- if module.boolean(vol.bootable):
- bootable_volumes.append(vol)
- if not bootable_volumes:
- if exit:
- module.fail_json(msg='No bootable volumes could be found for '
- 'server %s' % server_id)
- else:
- return False
- elif len(bootable_volumes) > 1:
- if exit:
- module.fail_json(msg='Multiple bootable volumes found for server '
- '%s' % server_id)
- else:
- return False
-
- return bootable_volumes[0]
-
-
-def rax_find_image(module, rax_module, image, exit=True):
- """Find a server image by ID or Name"""
- cs = rax_module.cloudservers
- try:
- UUID(image)
- except ValueError:
- try:
- image = cs.images.find(human_id=image)
- except(cs.exceptions.NotFound,
- cs.exceptions.NoUniqueMatch):
- try:
- image = cs.images.find(name=image)
- except (cs.exceptions.NotFound,
- cs.exceptions.NoUniqueMatch):
- if exit:
- module.fail_json(msg='No matching image found (%s)' %
- image)
- else:
- return False
-
- return rax_module.utils.get_id(image)
-
-
-def rax_find_volume(module, rax_module, name):
- """Find a Block storage volume by ID or name"""
- cbs = rax_module.cloud_blockstorage
- try:
- UUID(name)
- volume = cbs.get(name)
- except ValueError:
- try:
- volume = cbs.find(name=name)
- except rax_module.exc.NotFound:
- volume = None
- except Exception, e:
- module.fail_json(msg='%s' % e)
- return volume
-
-
-def rax_find_network(module, rax_module, network):
- """Find a cloud network by ID or name"""
- cnw = rax_module.cloud_networks
- try:
- UUID(network)
- except ValueError:
- if network.lower() == 'public':
- return cnw.get_server_networks(PUBLIC_NET_ID)
- elif network.lower() == 'private':
- return cnw.get_server_networks(SERVICE_NET_ID)
- else:
- try:
- network_obj = cnw.find_network_by_label(network)
- except (rax_module.exceptions.NetworkNotFound,
- rax_module.exceptions.NetworkLabelNotUnique):
- module.fail_json(msg='No matching network found (%s)' %
- network)
- else:
- return cnw.get_server_networks(network_obj)
- else:
- return cnw.get_server_networks(network)
-
-
-def rax_find_server(module, rax_module, server):
- """Find a Cloud Server by ID or name"""
- cs = rax_module.cloudservers
- try:
- UUID(server)
- server = cs.servers.get(server)
- except ValueError:
- servers = cs.servers.list(search_opts=dict(name='^%s$' % server))
- if not servers:
- module.fail_json(msg='No Server was matched by name, '
- 'try using the Server ID instead')
- if len(servers) > 1:
- module.fail_json(msg='Multiple servers matched by name, '
- 'try using the Server ID instead')
-
- # We made it this far, grab the first and hopefully only server
- # in the list
- server = servers[0]
- return server
-
-
-def rax_find_loadbalancer(module, rax_module, loadbalancer):
- """Find a Cloud Load Balancer by ID or name"""
- clb = rax_module.cloud_loadbalancers
- try:
- found = clb.get(loadbalancer)
- except:
- found = []
- for lb in clb.list():
- if loadbalancer == lb.name:
- found.append(lb)
-
- if not found:
- module.fail_json(msg='No loadbalancer was matched')
-
- if len(found) > 1:
- module.fail_json(msg='Multiple loadbalancers matched')
-
- # We made it this far, grab the first and hopefully only item
- # in the list
- found = found[0]
-
- return found
-
-
-def rax_argument_spec():
- """Return standard base dictionary used for the argument_spec
- argument in AnsibleModule
-
- """
- return dict(
- api_key=dict(type='str', aliases=['password'], no_log=True),
- auth_endpoint=dict(type='str'),
- credentials=dict(type='str', aliases=['creds_file']),
- env=dict(type='str'),
- identity_type=dict(type='str', default='rackspace'),
- region=dict(type='str'),
- tenant_id=dict(type='str'),
- tenant_name=dict(type='str'),
- username=dict(type='str'),
- verify_ssl=dict(choices=BOOLEANS, type='bool'),
- )
-
-
-def rax_required_together():
- """Return the default list used for the required_together argument to
- AnsibleModule"""
- return [['api_key', 'username']]
-
-
-def setup_rax_module(module, rax_module, region_required=True):
- """Set up pyrax in a standard way for all modules"""
- rax_module.USER_AGENT = 'ansible/%s %s' % (ANSIBLE_VERSION,
- rax_module.USER_AGENT)
-
- api_key = module.params.get('api_key')
- auth_endpoint = module.params.get('auth_endpoint')
- credentials = module.params.get('credentials')
- env = module.params.get('env')
- identity_type = module.params.get('identity_type')
- region = module.params.get('region')
- tenant_id = module.params.get('tenant_id')
- tenant_name = module.params.get('tenant_name')
- username = module.params.get('username')
- verify_ssl = module.params.get('verify_ssl')
-
- if env is not None:
- rax_module.set_environment(env)
-
- rax_module.set_setting('identity_type', identity_type)
- if verify_ssl is not None:
- rax_module.set_setting('verify_ssl', verify_ssl)
- if auth_endpoint is not None:
- rax_module.set_setting('auth_endpoint', auth_endpoint)
- if tenant_id is not None:
- rax_module.set_setting('tenant_id', tenant_id)
- if tenant_name is not None:
- rax_module.set_setting('tenant_name', tenant_name)
-
- try:
- username = username or os.environ.get('RAX_USERNAME')
- if not username:
- username = rax_module.get_setting('keyring_username')
- if username:
- api_key = 'USE_KEYRING'
- if not api_key:
- api_key = os.environ.get('RAX_API_KEY')
- credentials = (credentials or os.environ.get('RAX_CREDENTIALS') or
- os.environ.get('RAX_CREDS_FILE'))
- region = (region or os.environ.get('RAX_REGION') or
- rax_module.get_setting('region'))
- except KeyError, e:
- module.fail_json(msg='Unable to load %s' % e.message)
-
- try:
- if api_key and username:
- if api_key == 'USE_KEYRING':
- rax_module.keyring_auth(username, region=region)
- else:
- rax_module.set_credentials(username, api_key=api_key,
- region=region)
- elif credentials:
- credentials = os.path.expanduser(credentials)
- rax_module.set_credential_file(credentials, region=region)
- else:
- raise Exception('No credentials supplied!')
- except Exception, e:
- if e.message:
- msg = str(e.message)
- else:
- msg = repr(e)
- module.fail_json(msg=msg)
-
- if region_required and region not in rax_module.regions:
- module.fail_json(msg='%s is not a valid region, must be one of: %s' %
- (region, ','.join(rax_module.regions)))
-
- return rax_module
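A typical consumer wires these helpers together like so (a hedged sketch: pyrax is the real Rackspace SDK, but the extra 'name' option is illustrative):

import pyrax
from ansible.module_utils.basic import AnsibleModule

argument_spec = rax_argument_spec()
argument_spec.update(dict(name=dict(type='str')))
module = AnsibleModule(argument_spec=argument_spec,
                       required_together=rax_required_together())

# Authenticates pyrax from module params / RAX_* environment variables
# and validates the region, calling module.fail_json on error.
pyrax = setup_rax_module(module, pyrax)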
diff --git a/v1/ansible/module_utils/redhat.py b/v1/ansible/module_utils/redhat.py
deleted file mode 100644
index bf19ccf390..0000000000
--- a/v1/ansible/module_utils/redhat.py
+++ /dev/null
@@ -1,280 +0,0 @@
-# This code is part of Ansible, but is an independent component.
-# This particular file snippet, and this file snippet only, is BSD licensed.
-# Modules you write using this snippet, which is embedded dynamically by Ansible
-# still belong to the author of the module, and may assign their own license
-# to the complete work.
-#
-# Copyright (c), James Laska
-# All rights reserved.
-#
-# Redistribution and use in source and binary forms, with or without modification,
-# are permitted provided that the following conditions are met:
-#
-# * Redistributions of source code must retain the above copyright
-# notice, this list of conditions and the following disclaimer.
-# * Redistributions in binary form must reproduce the above copyright notice,
-# this list of conditions and the following disclaimer in the documentation
-# and/or other materials provided with the distribution.
-#
-# THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS" AND
-# ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED
-# WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE DISCLAIMED.
-# IN NO EVENT SHALL THE COPYRIGHT HOLDER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT,
-# INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO,
-# PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS
-# INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
-# LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE
-# USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
-
-import os
-import re
-import types
-import ConfigParser
-import shlex
-
-
-class RegistrationBase(object):
- def __init__(self, module, username=None, password=None):
- self.module = module
- self.username = username
- self.password = password
-
- def configure(self):
- raise NotImplementedError("Must be implemented by a sub-class")
-
- def enable(self):
- # Remove any existing redhat.repo
- redhat_repo = '/etc/yum.repos.d/redhat.repo'
- if os.path.isfile(redhat_repo):
- os.unlink(redhat_repo)
-
- def register(self):
- raise NotImplementedError("Must be implemented by a sub-class")
-
- def unregister(self):
- raise NotImplementedError("Must be implemented by a sub-class")
-
- def unsubscribe(self):
- raise NotImplementedError("Must be implemented by a sub-class")
-
- def update_plugin_conf(self, plugin, enabled=True):
- plugin_conf = '/etc/yum/pluginconf.d/%s.conf' % plugin
- if os.path.isfile(plugin_conf):
- cfg = ConfigParser.ConfigParser()
- cfg.read([plugin_conf])
- if enabled:
- cfg.set('main', 'enabled', 1)
- else:
- cfg.set('main', 'enabled', 0)
- fd = open(plugin_conf, 'w')  # 'rwa+' is not a valid mode; rewrite the file
- cfg.write(fd)
- fd.close()
-
- def subscribe(self, **kwargs):
- raise NotImplementedError("Must be implemented by a sub-class")
-
-
-class Rhsm(RegistrationBase):
- def __init__(self, module, username=None, password=None):
- RegistrationBase.__init__(self, module, username, password)
- self.config = self._read_config()
- self.module = module
-
- def _read_config(self, rhsm_conf='/etc/rhsm/rhsm.conf'):
- '''
- Load RHSM configuration from /etc/rhsm/rhsm.conf.
- Returns:
- * ConfigParser object
- '''
-
- # Read RHSM defaults ...
- cp = ConfigParser.ConfigParser()
- cp.read(rhsm_conf)
-
- # Add support for specifying a default value w/o having to stand up some configuration
- # Yeah, I know this should be subclassed ... but, oh well
- def get_option_default(self, key, default=''):
- sect, opt = key.split('.', 1)
- if self.has_section(sect) and self.has_option(sect, opt):
- return self.get(sect, opt)
- else:
- return default
-
- cp.get_option = types.MethodType(get_option_default, cp, ConfigParser.ConfigParser)
-
- return cp
-
- def enable(self):
- '''
- Enable the system to receive updates from subscription-manager.
- This involves updating affected yum plugins and removing any
- conflicting yum repositories.
- '''
- RegistrationBase.enable(self)
- self.update_plugin_conf('rhnplugin', False)
- self.update_plugin_conf('subscription-manager', True)
-
- def configure(self, **kwargs):
- '''
- Configure the system as directed for registration with RHN
- Raises:
- * Exception - if error occurs while running command
- '''
- args = ['subscription-manager', 'config']
-
- # Pass supplied **kwargs as parameters to subscription-manager. Ignore
- # non-configuration parameters and replace '_' with '.'. For example,
- # 'server_hostname' becomes '--server.hostname'.
- for k,v in kwargs.items():
- if re.search(r'^(system|rhsm)_', k):
- args.append('--%s=%s' % (k.replace('_','.'), v))
-
- self.module.run_command(args, check_rc=True)
-
- @property
- def is_registered(self):
- '''
- Determine whether the current system is registered to RHN.
- Returns:
- * Boolean - whether the current system is currently registered to
- RHN.
- '''
- # Quick version...
- if False:
- return os.path.isfile('/etc/pki/consumer/cert.pem') and \
- os.path.isfile('/etc/pki/consumer/key.pem')
-
- args = ['subscription-manager', 'identity']
- rc, stdout, stderr = self.module.run_command(args, check_rc=False)
- if rc == 0:
- return True
- else:
- return False
-
- def register(self, username, password, autosubscribe, activationkey):
- '''
- Register the current system to the provided RHN server
- Raises:
- * Exception - if error occurs while running command
- '''
- args = ['subscription-manager', 'register']
-
- # Generate command arguments
- if activationkey:
- # args is passed to run_command as a list, so keep the key and value as
- # separate elements; embedded quotes would become part of the argument
- args.extend(['--activationkey', activationkey])
- else:
- if autosubscribe:
- args.append('--autosubscribe')
- if username:
- args.extend(['--username', username])
- if password:
- args.extend(['--password', password])
-
- # Do the needful...
- rc, stdout, stderr = self.module.run_command(args, check_rc=True)
-
- def unsubscribe(self):
- '''
- Unsubscribe a system from all subscribed channels
- Raises:
- * Exception - if error occurs while running command
- '''
- args = ['subscription-manager', 'unsubscribe', '--all']
- rc, stdout, stderr = self.module.run_command(args, check_rc=True)
-
- def unregister(self):
- '''
- Unregister a currently registered system
- Raises:
- * Exception - if error occurs while running command
- '''
- args = ['subscription-manager', 'unregister']
- rc, stdout, stderr = self.module.run_command(args, check_rc=True)
-
- def subscribe(self, regexp):
- '''
- Subscribe current system to available pools matching the specified
- regular expression
- Raises:
- * Exception - if error occurs while running command
- '''
-
- # Available pools ready for subscription
- available_pools = RhsmPools(self.module)
-
- for pool in available_pools.filter(regexp):
- pool.subscribe()
-
-
-class RhsmPool(object):
- '''
- Convenience class for housing subscription information
- '''
-
- def __init__(self, module, **kwargs):
- self.module = module
- for k,v in kwargs.items():
- setattr(self, k, v)
-
- def __str__(self):
- return str(self.__getattribute__('_name'))
-
- def subscribe(self):
- args = "subscription-manager subscribe --pool %s" % self.PoolId
- rc, stdout, stderr = self.module.run_command(args, check_rc=True)
- if rc == 0:
- return True
- else:
- return False
-
-
-class RhsmPools(object):
- """
- This class is used for manipulating pool subscriptions with RHSM
- """
- def __init__(self, module):
- self.module = module
- self.products = self._load_product_list()
-
- def __iter__(self):
- return self.products.__iter__()
-
- def _load_product_list(self):
- """
- Load the list of all pools available to the system into a data structure
- """
- args = "subscription-manager list --available"
- rc, stdout, stderr = self.module.run_command(args, check_rc=True)
-
- products = []
- for line in stdout.split('\n'):
- # Remove leading+trailing whitespace
- line = line.strip()
- # An empty line implies the end of an output group
- if len(line) == 0:
- continue
- # If a colon ':' is found, parse
- elif ':' in line:
- (key, value) = line.split(':',1)
- key = key.strip().replace(" ", "") # To unify
- value = value.strip()
- if key in ['ProductName', 'SubscriptionName']:
- # Remember the name for later processing
- products.append(RhsmPool(self.module, _name=value, key=value))
- elif products:
- # Associate value with most recently recorded product
- products[-1].__setattr__(key, value)
- # FIXME - log some warning?
- #else:
- # warnings.warn("Unhandled subscription key/value: %s/%s" % (key,value))
- return products
-
- def filter(self, regexp='^$'):
- '''
- Yield the RhsmPools whose name matches the provided regular expression
- '''
- r = re.compile(regexp)
- for product in self.products:
- if r.search(product._name):
- yield product
-
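The parsing in _load_product_list is a small state machine over subscription-manager's blank-line-separated blocks: each 'Key: value' line attaches to the most recent pool, and a ProductName/SubscriptionName line starts a new one. A condensed standalone sketch run on illustrative sample output:

def parse_pools(stdout):
    # Dict-based equivalent of _load_product_list / RhsmPool.
    pools = []
    for line in stdout.split('\n'):
        line = line.strip()
        if not line or ':' not in line:
            continue
        key, value = line.split(':', 1)
        key = key.strip().replace(' ', '')
        value = value.strip()
        if key in ('ProductName', 'SubscriptionName'):
            pools.append({'_name': value})
        elif pools:
            pools[-1][key] = value
    return pools

print(parse_pools('Subscription Name: Example OS\nPool ID: 1234\n'))
# -> [{'_name': 'Example OS', 'PoolID': '1234'}]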
diff --git a/v1/ansible/module_utils/splitter.py b/v1/ansible/module_utils/splitter.py
deleted file mode 100644
index 899fa8cd92..0000000000
--- a/v1/ansible/module_utils/splitter.py
+++ /dev/null
@@ -1,201 +0,0 @@
-# (c) 2014 James Cammarata, <jcammarata@ansible.com>
-#
-# This file is part of Ansible
-#
-# Ansible is free software: you can redistribute it and/or modify
-# it under the terms of the GNU General Public License as published by
-# the Free Software Foundation, either version 3 of the License, or
-# (at your option) any later version.
-#
-# Ansible is distributed in the hope that it will be useful,
-# but WITHOUT ANY WARRANTY; without even the implied warranty of
-# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
-# GNU General Public License for more details.
-#
-# You should have received a copy of the GNU General Public License
-# along with Ansible. If not, see <http://www.gnu.org/licenses/>.
-
-def _get_quote_state(token, quote_char):
- '''
- the goal of this block is to determine if the quoted string
- is unterminated in which case it needs to be put back together
- '''
- # the char before the current one, used to see if
- # the current character is escaped
- prev_char = None
- for idx, cur_char in enumerate(token):
- if idx > 0:
- prev_char = token[idx-1]
- if cur_char in '"\'' and prev_char != '\\':
- if quote_char:
- if cur_char == quote_char:
- quote_char = None
- else:
- quote_char = cur_char
- return quote_char
-
-def _count_jinja2_blocks(token, cur_depth, open_token, close_token):
- '''
- this function counts the number of opening/closing blocks for a
- given opening/closing type and adjusts the current depth for that
- block based on the difference
- '''
- num_open = token.count(open_token)
- num_close = token.count(close_token)
- if num_open != num_close:
- cur_depth += (num_open - num_close)
- if cur_depth < 0:
- cur_depth = 0
- return cur_depth
-
-def split_args(args):
- '''
- Splits args on whitespace, but intelligently reassembles
- those that may have been split over a jinja2 block or quotes.
-
- When used in a remote module, we won't ever have to be concerned about
- jinja2 blocks; however, this function is (or will be) used in the core
- portions as well, before the args are templated.
-
- example input: a=b c="foo bar"
- example output: ['a=b', 'c="foo bar"']
-
- Basically this is a variation of shlex that has some more intelligence for
- how Ansible needs to use it.
- '''
-
- # the list of params parsed out of the arg string
- # this is going to be the result value when we are done
- params = []
-
- # here we encode the args, so we have a uniform charset to
- # work with; we split on newlines here and on spaces per item below
- args = args.strip()
- try:
- args = args.encode('utf-8')
- do_decode = True
- except UnicodeDecodeError:
- do_decode = False
- items = args.split('\n')
-
- # iterate over the tokens, and reassemble any that may have been
- # split on a space inside a jinja2 block.
- # ex if tokens are "{{", "foo", "}}" these go together
-
- # These variables are used
- # to keep track of the state of the parsing, since blocks and quotes
- # may be nested within each other.
-
- quote_char = None
- inside_quotes = False
- print_depth = 0 # used to count nested jinja2 {{ }} blocks
- block_depth = 0 # used to count nested jinja2 {% %} blocks
- comment_depth = 0 # used to count nested jinja2 {# #} blocks
-
- # now we loop over each split chunk, coalescing tokens if the white space
- # split occurred within quotes or a jinja2 block of some kind
- for itemidx,item in enumerate(items):
-
- # we split on spaces and newlines separately, so that we
- # can tell which character we split on for reassembly
- # inside quotation characters
- tokens = item.strip().split(' ')
-
- line_continuation = False
- for idx,token in enumerate(tokens):
-
- # if we hit a line continuation character, but
- # we're not inside quotes, ignore it and continue
- # on to the next token while setting a flag
- if token == '\\' and not inside_quotes:
- line_continuation = True
- continue
-
- # store the previous quoting state for checking later
- was_inside_quotes = inside_quotes
- quote_char = _get_quote_state(token, quote_char)
- inside_quotes = quote_char is not None
-
- # multiple conditions may append a token to the list of params,
- # so we keep track with this flag to make sure it only happens once
- # append means add to the end of the list, don't append means concatenate
- # it to the end of the last token
- appended = False
-
- # if we're inside quotes now, but weren't before, append the token
- # to the end of the list, since we'll tack on more to it later
- # otherwise, if we're inside any jinja2 block, inside quotes, or we were
- # inside quotes (but aren't now) concat this token to the last param
- if inside_quotes and not was_inside_quotes:
- params.append(token)
- appended = True
- elif print_depth or block_depth or comment_depth or inside_quotes or was_inside_quotes:
- if idx == 0 and not inside_quotes and was_inside_quotes:
- params[-1] = "%s%s" % (params[-1], token)
- elif len(tokens) > 1:
- spacer = ''
- if idx > 0:
- spacer = ' '
- params[-1] = "%s%s%s" % (params[-1], spacer, token)
- else:
- spacer = ''
- if not params[-1].endswith('\n') and idx == 0:
- spacer = '\n'
- params[-1] = "%s%s%s" % (params[-1], spacer, token)
- appended = True
-
- # if the number of paired block tags is not the same, the depth has changed, so we calculate that here
- # and may append the current token to the params (if we haven't previously done so)
- prev_print_depth = print_depth
- print_depth = _count_jinja2_blocks(token, print_depth, "{{", "}}")
- if print_depth != prev_print_depth and not appended:
- params.append(token)
- appended = True
-
- prev_block_depth = block_depth
- block_depth = _count_jinja2_blocks(token, block_depth, "{%", "%}")
- if block_depth != prev_block_depth and not appended:
- params.append(token)
- appended = True
-
- prev_comment_depth = comment_depth
- comment_depth = _count_jinja2_blocks(token, comment_depth, "{#", "#}")
- if comment_depth != prev_comment_depth and not appended:
- params.append(token)
- appended = True
-
- # finally, if we're at zero depth for all blocks and not inside quotes, and have not
- # yet appended anything to the list of params, we do so now
- if not (print_depth or block_depth or comment_depth) and not inside_quotes and not appended and token != '':
- params.append(token)
-
- # if this was the last token in the list, and we have more than
- # one item (meaning we split on newlines), add a newline back here
- # to preserve the original structure
- if len(items) > 1 and itemidx != len(items) - 1 and not line_continuation:
- if not params[-1].endswith('\n') or item == '':
- params[-1] += '\n'
-
- # always clear the line continuation flag
- line_continuation = False
-
- # If we're done and things are not at zero depth or we're still inside quotes,
- # raise an error to indicate that the args were unbalanced
- if print_depth or block_depth or comment_depth or inside_quotes:
- raise Exception("error while splitting arguments, either an unbalanced jinja2 block or quotes")
-
- # finally, we decode each param back to the unicode it was in the arg string
- if do_decode:
- params = [x.decode('utf-8') for x in params]
-
- return params
-
-def is_quoted(data):
- return len(data) > 0 and (data[0] == '"' and data[-1] == '"' or data[0] == "'" and data[-1] == "'")
-
-def unquote(data):
- ''' removes first and last quotes from a string, if the string starts and ends with the same quotes '''
- if is_quoted(data):
- return data[1:-1]
- return data
-
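split_args is easiest to understand from a few inputs and outputs. The first pair is the docstring's own example; the second follows from the jinja2 reassembly rules (my reading of the code, not taken from a test suite):

print(split_args('a=b c="foo bar"'))
# -> ['a=b', 'c="foo bar"']

print(split_args('msg={{ item.name }} state=present'))
# -> ['msg={{ item.name }}', 'state=present']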
diff --git a/v1/ansible/module_utils/urls.py b/v1/ansible/module_utils/urls.py
deleted file mode 100644
index 18317e86ae..0000000000
--- a/v1/ansible/module_utils/urls.py
+++ /dev/null
@@ -1,496 +0,0 @@
-# This code is part of Ansible, but is an independent component.
-# This particular file snippet, and this file snippet only, is BSD licensed.
-# Modules you write using this snippet, which is embedded dynamically by Ansible
-# still belong to the author of the module, and may assign their own license
-# to the complete work.
-#
-# Copyright (c), Michael DeHaan <michael.dehaan@gmail.com>, 2012-2013
-# All rights reserved.
-#
-# Redistribution and use in source and binary forms, with or without modification,
-# are permitted provided that the following conditions are met:
-#
-# * Redistributions of source code must retain the above copyright
-# notice, this list of conditions and the following disclaimer.
-# * Redistributions in binary form must reproduce the above copyright notice,
-# this list of conditions and the following disclaimer in the documentation
-# and/or other materials provided with the distribution.
-#
-# THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS" AND
-# ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED
-# WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE DISCLAIMED.
-# IN NO EVENT SHALL THE COPYRIGHT HOLDER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT,
-# INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO,
-# PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS
-# INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
-# LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE
-# USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
-
-try:
- import urllib
- HAS_URLLIB = True
-except:
- HAS_URLLIB = False
-
-try:
- import urllib2
- HAS_URLLIB2 = True
-except:
- HAS_URLLIB2 = False
-
-try:
- import urlparse
- HAS_URLPARSE = True
-except:
- HAS_URLPARSE = False
-
-try:
- import ssl
- HAS_SSL=True
-except:
- HAS_SSL=False
-
-HAS_MATCH_HOSTNAME = True
-try:
- from ssl import match_hostname, CertificateError
-except ImportError:
- try:
- from backports.ssl_match_hostname import match_hostname, CertificateError
- except ImportError:
- HAS_MATCH_HOSTNAME = False
-
-import httplib
-import os
-import re
-import socket
-import tempfile
-
-
-# This is a dummy cacert provided for Mac OS since you need at least 1
-# ca cert, regardless of validity, for Python on Mac OS to use the
-# keychain functionality in OpenSSL for validating SSL certificates.
-# See: http://mercurial.selenic.com/wiki/CACertificates#Mac_OS_X_10.6_and_higher
-DUMMY_CA_CERT = """-----BEGIN CERTIFICATE-----
-MIICvDCCAiWgAwIBAgIJAO8E12S7/qEpMA0GCSqGSIb3DQEBBQUAMEkxCzAJBgNV
-BAYTAlVTMRcwFQYDVQQIEw5Ob3J0aCBDYXJvbGluYTEPMA0GA1UEBxMGRHVyaGFt
-MRAwDgYDVQQKEwdBbnNpYmxlMB4XDTE0MDMxODIyMDAyMloXDTI0MDMxNTIyMDAy
-MlowSTELMAkGA1UEBhMCVVMxFzAVBgNVBAgTDk5vcnRoIENhcm9saW5hMQ8wDQYD
-VQQHEwZEdXJoYW0xEDAOBgNVBAoTB0Fuc2libGUwgZ8wDQYJKoZIhvcNAQEBBQAD
-gY0AMIGJAoGBANtvpPq3IlNlRbCHhZAcP6WCzhc5RbsDqyh1zrkmLi0GwcQ3z/r9
-gaWfQBYhHpobK2Tiq11TfraHeNB3/VfNImjZcGpN8Fl3MWwu7LfVkJy3gNNnxkA1
-4Go0/LmIvRFHhbzgfuo9NFgjPmmab9eqXJceqZIlz2C8xA7EeG7ku0+vAgMBAAGj
-gaswgagwHQYDVR0OBBYEFPnN1nPRqNDXGlCqCvdZchRNi/FaMHkGA1UdIwRyMHCA
-FPnN1nPRqNDXGlCqCvdZchRNi/FaoU2kSzBJMQswCQYDVQQGEwJVUzEXMBUGA1UE
-CBMOTm9ydGggQ2Fyb2xpbmExDzANBgNVBAcTBkR1cmhhbTEQMA4GA1UEChMHQW5z
-aWJsZYIJAO8E12S7/qEpMAwGA1UdEwQFMAMBAf8wDQYJKoZIhvcNAQEFBQADgYEA
-MUB80IR6knq9K/tY+hvPsZer6eFMzO3JGkRFBh2kn6JdMDnhYGX7AXVHGflrwNQH
-qFy+aenWXsC0ZvrikFxbQnX8GVtDADtVznxOi7XzFw7JOxdsVrpXgSN0eh0aMzvV
-zKPZsZ2miVGclicJHzm5q080b1p/sZtuKIEZk6vZqEg=
------END CERTIFICATE-----
-"""
-
-class CustomHTTPSConnection(httplib.HTTPSConnection):
- def connect(self):
- "Connect to a host on a given (SSL) port."
-
- if hasattr(self, 'source_address'):
- sock = socket.create_connection((self.host, self.port), self.timeout, self.source_address)
- else:
- sock = socket.create_connection((self.host, self.port), self.timeout)
- if self._tunnel_host:
- self.sock = sock
- self._tunnel()
- self.sock = ssl.wrap_socket(sock, keyfile=self.key_file, certfile=self.cert_file, ssl_version=ssl.PROTOCOL_TLSv1)
-
-class CustomHTTPSHandler(urllib2.HTTPSHandler):
-
- def https_open(self, req):
- return self.do_open(CustomHTTPSConnection, req)
-
- https_request = urllib2.AbstractHTTPHandler.do_request_
-
-def generic_urlparse(parts):
- '''
- Returns a dictionary of url parts as parsed by urlparse,
- but accounts for the fact that older versions of that
- library do not support named attributes (i.e. .netloc)
- '''
- generic_parts = dict()
- if hasattr(parts, 'netloc'):
- # urlparse is newer, just read the fields straight
- # from the parts object
- generic_parts['scheme'] = parts.scheme
- generic_parts['netloc'] = parts.netloc
- generic_parts['path'] = parts.path
- generic_parts['params'] = parts.params
- generic_parts['query'] = parts.query
- generic_parts['fragment'] = parts.fragment
- generic_parts['username'] = parts.username
- generic_parts['password'] = parts.password
- generic_parts['hostname'] = parts.hostname
- generic_parts['port'] = parts.port
- else:
- # we have to use indexes, and then parse out
- # the other parts not supported by indexing
- generic_parts['scheme'] = parts[0]
- generic_parts['netloc'] = parts[1]
- generic_parts['path'] = parts[2]
- generic_parts['params'] = parts[3]
- generic_parts['query'] = parts[4]
- generic_parts['fragment'] = parts[5]
- # get the username, password, etc.
- try:
- netloc_re = re.compile(r'^((?:\w)+(?::(?:\w)+)?@)?([A-Za-z0-9.-]+)(:\d+)?$')
- (auth, hostname, port) = netloc_re.match(parts[1]).groups()
- if port:
- # the capture group for the port will include the ':',
- # so remove it and convert the port to an integer
- port = int(port[1:])
- if auth:
- # the capture group above includes the @, so remove it
- # and then split it up based on the first ':' found
- auth = auth[:-1]
- username, password = auth.split(':', 1)
- else:
- username = password = None
- generic_parts['username'] = username
- generic_parts['password'] = password
- generic_parts['hostname'] = hostname
- generic_parts['port'] = port
- except:
- generic_parts['username'] = None
- generic_parts['password'] = None
- generic_parts['hostname'] = None
- generic_parts['port'] = None
- return generic_parts
-
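-# Illustrative sketch, not part of the original module: generic_urlparse()
-# normalizes a urlparse result into a plain dict, so callers can use .get()
-# on every Python version. The proxy URL below is made up for illustration.
-def _example_generic_urlparse():
-    parts = generic_urlparse(urlparse.urlparse('http://user:pw@proxy.example.com:3128/path'))
-    assert parts.get('scheme') == 'http'
-    assert parts.get('hostname') == 'proxy.example.com'
-    assert parts.get('port') == 3128
-    assert parts.get('username') == 'user'
-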
-class RequestWithMethod(urllib2.Request):
- '''
- Workaround for using DELETE/PUT/etc with urllib2
- Originally contained in library/net_infrastructure/dnsmadeeasy
- '''
-
- def __init__(self, url, method, data=None, headers={}):
- self._method = method
- urllib2.Request.__init__(self, url, data, headers)
-
- def get_method(self):
- if self._method:
- return self._method
- else:
- return urllib2.Request.get_method(self)
-
-
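-# Usage sketch, not in the original file: RequestWithMethod lets a caller
-# issue verbs that a plain urllib2.Request cannot express directly. The URL
-# is a placeholder.
-def _example_request_with_method():
-    req = RequestWithMethod('https://api.example.com/records/1', 'DELETE')
-    assert req.get_method() == 'DELETE'
-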
-class SSLValidationHandler(urllib2.BaseHandler):
- '''
- A custom handler class for SSL validation.
-
- Based on:
- http://stackoverflow.com/questions/1087227/validate-ssl-certificates-with-python
- http://techknack.net/python-urllib2-handlers/
- '''
- CONNECT_COMMAND = "CONNECT %s:%s HTTP/1.0\r\nConnection: close\r\n"
-
- def __init__(self, module, hostname, port):
- self.module = module
- self.hostname = hostname
- self.port = port
-
- def get_ca_certs(self):
- # tries to find a valid CA cert in one of the
- # standard locations for the current distribution
-
- ca_certs = []
- paths_checked = []
- platform = get_platform()
- distribution = get_distribution()
-
- # build a list of paths to check for .crt/.pem files
- # based on the platform type
- paths_checked.append('/etc/ssl/certs')
- if platform == 'Linux':
- paths_checked.append('/etc/pki/ca-trust/extracted/pem')
- paths_checked.append('/etc/pki/tls/certs')
- paths_checked.append('/usr/share/ca-certificates/cacert.org')
- elif platform == 'FreeBSD':
- paths_checked.append('/usr/local/share/certs')
- elif platform == 'OpenBSD':
- paths_checked.append('/etc/ssl')
- elif platform == 'NetBSD':
- paths_checked.append('/etc/openssl/certs')
- elif platform == 'SunOS':
- paths_checked.append('/opt/local/etc/openssl/certs')
-
- # fall back to a user-deployed cert in a standard
- # location if the OS platform one is not available
- paths_checked.append('/etc/ansible')
-
- tmp_fd, tmp_path = tempfile.mkstemp()
-
- # Write the dummy ca cert if we are running on Mac OS X
- if platform == 'Darwin':
- os.write(tmp_fd, DUMMY_CA_CERT)
- # Default Homebrew path for OpenSSL certs
- paths_checked.append('/usr/local/etc/openssl')
-
- # for all of the paths, find any .crt or .pem files
- # and compile them into single temp file for use
- # in the ssl check to speed up the test
- for path in paths_checked:
- if os.path.exists(path) and os.path.isdir(path):
- dir_contents = os.listdir(path)
- for f in dir_contents:
- full_path = os.path.join(path, f)
- if os.path.isfile(full_path) and os.path.splitext(f)[1] in ('.crt','.pem'):
- try:
- cert_file = open(full_path, 'r')
- os.write(tmp_fd, cert_file.read())
- os.write(tmp_fd, '\n')
- cert_file.close()
- except:
- pass
-
- return (tmp_path, paths_checked)
-
- def validate_proxy_response(self, response, valid_codes=[200]):
- '''
- make sure we get back a valid code from the proxy
- '''
- try:
- (http_version, resp_code, msg) = re.match(r'(HTTP/\d\.\d) (\d\d\d) (.*)', response).groups()
- if int(resp_code) not in valid_codes:
- raise Exception
- except:
- self.module.fail_json(msg='Connection to proxy failed')
-
- def detect_no_proxy(self, url):
- '''
- Detect if the 'no_proxy' environment variable is set and honor those locations.
- '''
- env_no_proxy = os.environ.get('no_proxy')
- if env_no_proxy:
- env_no_proxy = env_no_proxy.split(',')
- netloc = urlparse.urlparse(url).netloc
-
- for host in env_no_proxy:
- if netloc.endswith(host) or netloc.split(':')[0].endswith(host):
- # Our requested URL matches something in no_proxy, so don't
- # use the proxy for this
- return False
- return True
-
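- # Hedged sketch of the rule above, not in the original file: a URL skips
- # the proxy when its netloc, or the host part without the port, ends with
- # an entry from the comma-separated no_proxy value. Hostnames are made up.
- def _example_no_proxy_match():
-     suffix = 'internal.example.com'  # as if no_proxy='internal.example.com'
-     for netloc in ('db.internal.example.com', 'db.internal.example.com:8443'):
-         assert netloc.endswith(suffix) or netloc.split(':')[0].endswith(suffix)
-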
- def http_request(self, req):
- tmp_ca_cert_path, paths_checked = self.get_ca_certs()
- https_proxy = os.environ.get('https_proxy')
-
- # Detect if 'no_proxy' environment variable is set and if our URL is included
- use_proxy = self.detect_no_proxy(req.get_full_url())
-
- if not use_proxy:
- # ignore proxy settings for this host request
- return req
-
- try:
- s = socket.socket(socket.AF_INET, socket.SOCK_STREAM)
- if https_proxy:
- proxy_parts = generic_urlparse(urlparse.urlparse(https_proxy))
- s.connect((proxy_parts.get('hostname'), proxy_parts.get('port')))
- if proxy_parts.get('scheme') == 'http':
- s.sendall(self.CONNECT_COMMAND % (self.hostname, self.port))
- if proxy_parts.get('username'):
- credentials = "%s:%s" % (proxy_parts.get('username',''), proxy_parts.get('password',''))
- s.sendall('Proxy-Authorization: Basic %s\r\n' % credentials.encode('base64').strip())
- s.sendall('\r\n')
- connect_result = s.recv(4096)
- self.validate_proxy_response(connect_result)
- ssl_s = ssl.wrap_socket(s, ca_certs=tmp_ca_cert_path, cert_reqs=ssl.CERT_REQUIRED)
- match_hostname(ssl_s.getpeercert(), self.hostname)
- else:
- self.module.fail_json(msg='Unsupported proxy scheme: %s. Currently ansible only supports HTTP proxies.' % proxy_parts.get('scheme'))
- else:
- s.connect((self.hostname, self.port))
- ssl_s = ssl.wrap_socket(s, ca_certs=tmp_ca_cert_path, cert_reqs=ssl.CERT_REQUIRED)
- match_hostname(ssl_s.getpeercert(), self.hostname)
- # close the ssl connection
- #ssl_s.unwrap()
- s.close()
- except (ssl.SSLError, socket.error), e:
- # fail if we tried all of the certs but none worked
- if 'connection refused' in str(e).lower():
- self.module.fail_json(msg='Failed to connect to %s:%s.' % (self.hostname, self.port))
- else:
- self.module.fail_json(
- msg='Failed to validate the SSL certificate for %s:%s. ' % (self.hostname, self.port) + \
- 'Use validate_certs=no or make sure your managed systems have a valid CA certificate installed. ' + \
- 'Paths checked for this platform: %s' % ", ".join(paths_checked)
- )
- except CertificateError:
- self.module.fail_json(msg="SSL Certificate does not belong to %s. Make sure the url has a certificate that belongs to it or use validate_certs=no (insecure)" % self.hostname)
-
- try:
- # cleanup the temp file created, don't worry
- # if it fails for some reason
- os.remove(tmp_ca_cert_path)
- except:
- pass
-
- return req
-
- https_request = http_request
-
-
-def url_argument_spec():
- '''
- Creates an argument spec that can be used with any module
- that will be requesting content via urllib/urllib2
- '''
- return dict(
- url = dict(),
- force = dict(default='no', aliases=['thirsty'], type='bool'),
- http_agent = dict(default='ansible-httpget'),
- use_proxy = dict(default='yes', type='bool'),
- validate_certs = dict(default='yes', type='bool'),
- url_username = dict(required=False),
- url_password = dict(required=False),
- )
-
-
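-# Usage sketch, not in the original file: a module merges its own options
-# into this base spec so every url-fetching module accepts the same common
-# parameters. 'dest' is a hypothetical module-specific option.
-def _example_module_spec():
-    argument_spec = url_argument_spec()
-    argument_spec.update(dict(
-        dest=dict(required=True),
-    ))
-    return argument_spec
-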
-def fetch_url(module, url, data=None, headers=None, method=None,
- use_proxy=True, force=False, last_mod_time=None, timeout=10):
- '''
- Fetches a file from an HTTP/FTP server using urllib2
- '''
-
- if not HAS_URLLIB:
- module.fail_json(msg='urllib is not installed')
- if not HAS_URLLIB2:
- module.fail_json(msg='urllib2 is not installed')
- elif not HAS_URLPARSE:
- module.fail_json(msg='urlparse is not installed')
-
- r = None
- handlers = []
- info = dict(url=url)
-
- distribution = get_distribution()
- # Get validate_certs from the module params
- validate_certs = module.params.get('validate_certs', True)
-
- # FIXME: change the following to use the generic_urlparse function
- # to remove the indexed references for 'parsed'
- parsed = urlparse.urlparse(url)
- if parsed[0] == 'https' and validate_certs:
- if not HAS_SSL:
- if distribution == 'Redhat':
- module.fail_json(msg='SSL validation is not available in your version of python. You can use validate_certs=no, however this is unsafe and not recommended. You can also install python-ssl from EPEL')
- else:
- module.fail_json(msg='SSL validation is not available in your version of python. You can use validate_certs=no, however this is unsafe and not recommended')
- if not HAS_MATCH_HOSTNAME:
- module.fail_json(msg='Available SSL validation does not check that the certificate matches the hostname. You can install backports.ssl_match_hostname or update your managed machine to python-2.7.9 or newer. You could also use validate_certs=no, however this is unsafe and not recommended')
-
- # do the cert validation
- netloc = parsed[1]
- if '@' in netloc:
- netloc = netloc.split('@', 1)[1]
- if ':' in netloc:
- hostname, port = netloc.split(':', 1)
- port = int(port)
- else:
- hostname = netloc
- port = 443
- # create the SSL validation handler and
- # add it to the list of handlers
- ssl_handler = SSLValidationHandler(module, hostname, port)
- handlers.append(ssl_handler)
-
- if parsed[0] != 'ftp':
- username = module.params.get('url_username', '')
- if username:
- password = module.params.get('url_password', '')
- netloc = parsed[1]
- elif '@' in parsed[1]:
- credentials, netloc = parsed[1].split('@', 1)
- if ':' in credentials:
- username, password = credentials.split(':', 1)
- else:
- username = credentials
- password = ''
-
- parsed = list(parsed)
- parsed[1] = netloc
-
- # reconstruct url without credentials
- url = urlparse.urlunparse(parsed)
-
- if username:
- passman = urllib2.HTTPPasswordMgrWithDefaultRealm()
-
- # this creates a password manager
- passman.add_password(None, netloc, username, password)
-
- # because we have put None at the start it will always
- # use this username/password combination for urls
- # for which `netloc` is a super-url
- authhandler = urllib2.HTTPBasicAuthHandler(passman)
-
- # create the AuthHandler
- handlers.append(authhandler)
-
- if not use_proxy:
- proxyhandler = urllib2.ProxyHandler({})
- handlers.append(proxyhandler)
-
- # pre-2.6 versions of python cannot use the custom https
- # handler, since the socket class is lacking this method
- if hasattr(socket, 'create_connection'):
- handlers.append(CustomHTTPSHandler)
-
- opener = urllib2.build_opener(*handlers)
- urllib2.install_opener(opener)
-
- if method:
- if method.upper() not in ('OPTIONS','GET','HEAD','POST','PUT','DELETE','TRACE','CONNECT'):
- module.fail_json(msg='invalid HTTP request method: %s' % method.upper())
- request = RequestWithMethod(url, method.upper(), data)
- else:
- request = urllib2.Request(url, data)
-
- # add the custom agent header, to help prevent issues
- # with sites that block the default urllib agent string
- request.add_header('User-agent', module.params.get('http_agent'))
-
- # if we're ok with getting a 304, set the timestamp in the
- # header, otherwise make sure we don't get a cached copy
- if last_mod_time and not force:
- tstamp = last_mod_time.strftime('%a, %d %b %Y %H:%M:%S +0000')
- request.add_header('If-Modified-Since', tstamp)
- else:
- request.add_header('cache-control', 'no-cache')
-
- # user defined headers now, which may override things we've set above
- if headers:
- if not isinstance(headers, dict):
- module.fail_json("headers provided to fetch_url() must be a dict")
- for header in headers:
- request.add_header(header, headers[header])
-
- try:
- if sys.version_info < (2,6,0):
- # urlopen in python prior to 2.6.0 did not
- # have a timeout parameter
- r = urllib2.urlopen(request, None)
- else:
- r = urllib2.urlopen(request, None, timeout)
- info.update(r.info())
- info['url'] = r.geturl() # The URL goes in too, because of redirects.
- info.update(dict(msg="OK (%s bytes)" % r.headers.get('Content-Length', 'unknown'), status=200))
- except urllib2.HTTPError, e:
- info.update(dict(msg=str(e), status=e.code))
- except urllib2.URLError, e:
- code = int(getattr(e, 'code', -1))
- info.update(dict(msg="Request failed: %s" % str(e), status=code))
- except socket.error, e:
- info.update(dict(msg="Connection failure: %s" % str(e), status=-1))
- except Exception, e:
- info.update(dict(msg="An unknown error occurred: %s" % str(e), status=-1))
-
- return r, info
-
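-# Hedged usage sketch, not part of the original file: a caller passes its
-# AnsibleModule instance so fetch_url() can read url_username, validate_certs
-# and friends from module.params. Treating anything outside 200/304 as a
-# failure mirrors how the returned info dict is typically consumed; the URL
-# is a placeholder.
-def _example_fetch_url(module):
-    response, info = fetch_url(module, 'https://releases.example.com/file.tar.gz')
-    if info['status'] not in (200, 304):
-        module.fail_json(msg='request failed: %s' % info['msg'])
-    return response.read()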
diff --git a/v1/ansible/modules/core b/v1/ansible/modules/core
deleted file mode 160000
-Subproject f8d8af17cdc72500af8319c96004b86ac702a0a
diff --git a/v1/ansible/modules/extras b/v1/ansible/modules/extras
deleted file mode 160000
-Subproject 495ad450e53feb1cd26218dc68056cc34d1ea9f
diff --git a/v1/ansible/playbook/__init__.py b/v1/ansible/playbook/__init__.py
deleted file mode 100644
index 24ba2d3c6e..0000000000
--- a/v1/ansible/playbook/__init__.py
+++ /dev/null
@@ -1,874 +0,0 @@
-# (c) 2012-2014, Michael DeHaan <michael.dehaan@gmail.com>
-#
-# This file is part of Ansible
-#
-# Ansible is free software: you can redistribute it and/or modify
-# it under the terms of the GNU General Public License as published by
-# the Free Software Foundation, either version 3 of the License, or
-# (at your option) any later version.
-#
-# Ansible is distributed in the hope that it will be useful,
-# but WITHOUT ANY WARRANTY; without even the implied warranty of
-# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
-# GNU General Public License for more details.
-#
-# You should have received a copy of the GNU General Public License
-# along with Ansible. If not, see <http://www.gnu.org/licenses/>.
-
-import ansible.inventory
-import ansible.constants as C
-import ansible.runner
-from ansible.utils.template import template
-from ansible import utils
-from ansible import errors
-from ansible.module_utils.splitter import split_args, unquote
-import ansible.callbacks
-import ansible.cache
-import os
-import shlex
-import collections
-from play import Play
-import StringIO
-import pipes
-
-# the setup cache stores all variables about a host
-# gathered during the setup step, while the vars cache
-# holds all other variables about a host
-SETUP_CACHE = ansible.cache.FactCache()
-VARS_CACHE = collections.defaultdict(dict)
-RESERVED_TAGS = ['all','tagged','untagged','always']
-
-
-class PlayBook(object):
- '''
- runs an ansible playbook, given as a datastructure or YAML filename.
- A playbook is a deployment, config management, or automation-based
- set of commands to run in series.
-
- multiple plays/tasks do not execute simultaneously, but tasks in each
- pattern do execute in parallel (according to the number of forks
- requested) among the hosts they address
- '''
-
- # *****************************************************
-
- def __init__(self,
- playbook = None,
- host_list = C.DEFAULT_HOST_LIST,
- module_path = None,
- forks = C.DEFAULT_FORKS,
- timeout = C.DEFAULT_TIMEOUT,
- remote_user = C.DEFAULT_REMOTE_USER,
- remote_pass = C.DEFAULT_REMOTE_PASS,
- remote_port = None,
- transport = C.DEFAULT_TRANSPORT,
- private_key_file = C.DEFAULT_PRIVATE_KEY_FILE,
- callbacks = None,
- runner_callbacks = None,
- stats = None,
- extra_vars = None,
- only_tags = None,
- skip_tags = None,
- subset = C.DEFAULT_SUBSET,
- inventory = None,
- check = False,
- diff = False,
- any_errors_fatal = False,
- vault_password = False,
- force_handlers = False,
- # privilege escalation
- become = C.DEFAULT_BECOME,
- become_method = C.DEFAULT_BECOME_METHOD,
- become_user = C.DEFAULT_BECOME_USER,
- become_pass = None,
- ):
-
- """
- playbook: path to a playbook file
- host_list: path to a file like /etc/ansible/hosts
- module_path: path to ansible modules, like /usr/share/ansible/
- forks: desired level of parallelism
- timeout: connection timeout
- remote_user: run as this user if not specified in a particular play
- remote_pass: use this remote password (for all plays) vs using SSH keys
- remote_port: default remote port to use if not specified with the host or play
- transport: how to connect to hosts that don't specify a transport (local, paramiko, etc)
- callbacks: output callbacks for the playbook
- runner_callbacks: more callbacks, this time for the runner API
- stats: holds aggregate data about events occurring to each host
- inventory: can be specified instead of host_list to use a pre-existing inventory object
- check: don't change anything, just try to detect some potential changes
- any_errors_fatal: terminate the entire execution immediately when one of the hosts has failed
- force_handlers: continue to notify and run handlers even if a task fails
- """
-
- self.SETUP_CACHE = SETUP_CACHE
- self.VARS_CACHE = VARS_CACHE
-
- arguments = []
- if playbook is None:
- arguments.append('playbook')
- if callbacks is None:
- arguments.append('callbacks')
- if runner_callbacks is None:
- arguments.append('runner_callbacks')
- if stats is None:
- arguments.append('stats')
- if arguments:
- raise Exception('PlayBook missing required arguments: %s' % ', '.join(arguments))
-
- if extra_vars is None:
- extra_vars = {}
- if only_tags is None:
- only_tags = [ 'all' ]
- if skip_tags is None:
- skip_tags = []
-
- self.check = check
- self.diff = diff
- self.module_path = module_path
- self.forks = forks
- self.timeout = timeout
- self.remote_user = remote_user
- self.remote_pass = remote_pass
- self.remote_port = remote_port
- self.transport = transport
- self.callbacks = callbacks
- self.runner_callbacks = runner_callbacks
- self.stats = stats
- self.extra_vars = extra_vars
- self.global_vars = {}
- self.private_key_file = private_key_file
- self.only_tags = only_tags
- self.skip_tags = skip_tags
- self.any_errors_fatal = any_errors_fatal
- self.vault_password = vault_password
- self.force_handlers = force_handlers
-
- self.become = become
- self.become_method = become_method
- self.become_user = become_user
- self.become_pass = become_pass
-
- self.callbacks.playbook = self
- self.runner_callbacks.playbook = self
-
- if inventory is None:
- self.inventory = ansible.inventory.Inventory(host_list)
- self.inventory.subset(subset)
- else:
- self.inventory = inventory
-
- if self.module_path is not None:
- utils.plugins.module_finder.add_directory(self.module_path)
-
- self.basedir = os.path.dirname(playbook) or '.'
- utils.plugins.push_basedir(self.basedir)
-
- # let inventory know the playbook basedir so it can load more vars
- self.inventory.set_playbook_basedir(self.basedir)
-
- vars = extra_vars.copy()
- vars['playbook_dir'] = os.path.abspath(self.basedir)
- if self.inventory.basedir() is not None:
- vars['inventory_dir'] = self.inventory.basedir()
-
- if self.inventory.src() is not None:
- vars['inventory_file'] = self.inventory.src()
-
- self.filename = playbook
- (self.playbook, self.play_basedirs) = self._load_playbook_from_file(playbook, vars)
- ansible.callbacks.load_callback_plugins()
- ansible.callbacks.set_playbook(self.callbacks, self)
-
- self._ansible_version = utils.version_info(gitinfo=True)
-
- # *****************************************************
-
- def _get_playbook_vars(self, play_ds, existing_vars):
- '''
- Gets the vars specified with the play and blends them
- with any existing vars that have already been read in
- '''
- new_vars = existing_vars.copy()
- if 'vars' in play_ds:
- if isinstance(play_ds['vars'], dict):
- new_vars.update(play_ds['vars'])
- elif isinstance(play_ds['vars'], list):
- for v in play_ds['vars']:
- new_vars.update(v)
- return new_vars
-
- # *****************************************************
-
- def _get_include_info(self, play_ds, basedir, existing_vars={}):
- '''
- Gets any key=value pairs specified with the included file
- name and returns the merged vars along with the path
- '''
- new_vars = existing_vars.copy()
- tokens = split_args(play_ds.get('include', ''))
- for t in tokens[1:]:
- try:
- (k,v) = unquote(t).split("=", 1)
- new_vars[k] = template(basedir, v, new_vars)
- except ValueError, e:
- raise errors.AnsibleError('included playbook variables must be in the form k=v, got: %s' % t)
-
- return (new_vars, unquote(tokens[0]))
-
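- # Worked sketch, not in the original file: it mirrors only the token
- # parsing above (no templating of the values) for an include line such as
- # 'other.yml user=deploy port=8080'.
- def _example_include_tokens():
-     tokens = split_args('other.yml user=deploy port=8080')
-     path = unquote(tokens[0])
-     extra = dict(unquote(t).split('=', 1) for t in tokens[1:])
-     assert path == 'other.yml' and extra == {'user': 'deploy', 'port': '8080'}
-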
- # *****************************************************
-
- def _get_playbook_vars_files(self, play_ds, existing_vars_files):
- new_vars_files = list(existing_vars_files)
- if 'vars_files' in play_ds:
- new_vars_files = utils.list_union(new_vars_files, play_ds['vars_files'])
- return new_vars_files
-
- # *****************************************************
-
- def _extend_play_vars(self, play, vars={}):
- '''
- Extends the given play's variables with the additional specified vars.
- '''
-
- if 'vars' not in play or not play['vars']:
- # someone left out or put an empty "vars:" entry in their playbook
- return vars.copy()
-
- play_vars = None
- if isinstance(play['vars'], dict):
- play_vars = play['vars'].copy()
- play_vars.update(vars)
- elif isinstance(play['vars'], list):
- # nobody should really do this, but handle vars: a=1 b=2
- play_vars = play['vars'][:]
- play_vars.extend([{k:v} for k,v in vars.iteritems()])
-
- return play_vars
-
- # *****************************************************
-
- def _load_playbook_from_file(self, path, vars={}, vars_files=[]):
- '''
- run top level error checking on playbooks and allow them to include other playbooks.
- '''
-
- playbook_data = utils.parse_yaml_from_file(path, vault_password=self.vault_password)
- accumulated_plays = []
- play_basedirs = []
-
- if type(playbook_data) != list:
- raise errors.AnsibleError("parse error: playbooks must be formatted as a YAML list, got %s" % type(playbook_data))
-
- basedir = os.path.dirname(path) or '.'
- utils.plugins.push_basedir(basedir)
- for play in playbook_data:
- if type(play) != dict:
- raise errors.AnsibleError("parse error: each play in a playbook must be a YAML dictionary (hash), received: %s" % play)
-
- if 'include' in play:
- # a playbook (list of plays) decided to include some other list of plays
- # from another file. The result is a flat list of plays in the end.
-
- play_vars = self._get_playbook_vars(play, vars)
- play_vars_files = self._get_playbook_vars_files(play, vars_files)
- inc_vars, inc_path = self._get_include_info(play, basedir, play_vars)
- play_vars.update(inc_vars)
-
- included_path = utils.path_dwim(basedir, template(basedir, inc_path, play_vars))
- (plays, basedirs) = self._load_playbook_from_file(included_path, vars=play_vars, vars_files=play_vars_files)
- for p in plays:
- # support for parameterized play includes works by passing
- # those variables along to the subservient play
- p['vars'] = self._extend_play_vars(p, play_vars)
- # now add in the vars_files
- p['vars_files'] = utils.list_union(p.get('vars_files', []), play_vars_files)
-
- accumulated_plays.extend(plays)
- play_basedirs.extend(basedirs)
-
- else:
-
- # this is a normal (non-included play)
- accumulated_plays.append(play)
- play_basedirs.append(basedir)
-
- return (accumulated_plays, play_basedirs)
-
- # *****************************************************
-
- def run(self):
- ''' run all patterns in the playbook '''
- plays = []
- matched_tags_all = set()
- unmatched_tags_all = set()
-
- # loop through all patterns and run them
- self.callbacks.on_start()
- for (play_ds, play_basedir) in zip(self.playbook, self.play_basedirs):
- play = Play(self, play_ds, play_basedir, vault_password=self.vault_password)
- assert play is not None
-
- matched_tags, unmatched_tags = play.compare_tags(self.only_tags)
-
- matched_tags_all = matched_tags_all | matched_tags
- unmatched_tags_all = unmatched_tags_all | unmatched_tags
-
- # Remove tasks we wish to skip
- matched_tags = matched_tags - set(self.skip_tags)
-
- # if we have matched_tags, the play must be run.
- # if the play contains no tasks, assume we just want to gather facts
- # in this case there are actually 3 meta tasks (handler flushes), not 0
- # tasks, which is why the check is against 3
- if (len(matched_tags) > 0 or len(play.tasks()) == 3):
- plays.append(play)
-
- # if the playbook is invoked with --tags or --skip-tags that don't
- # exist at all in the playbooks then we need to raise an error so that
- # the user can correct the arguments.
- unknown_tags = ((set(self.only_tags) | set(self.skip_tags)) -
- (matched_tags_all | unmatched_tags_all))
-
- for t in RESERVED_TAGS:
- unknown_tags.discard(t)
-
- if len(unknown_tags) > 0:
- for t in RESERVED_TAGS:
- unmatched_tags_all.discard(t)
- msg = 'tag(s) not found in playbook: %s. possible values: %s'
- unknown = ','.join(sorted(unknown_tags))
- unmatched = ','.join(sorted(unmatched_tags_all))
- raise errors.AnsibleError(msg % (unknown, unmatched))
-
- for play in plays:
- ansible.callbacks.set_play(self.callbacks, play)
- ansible.callbacks.set_play(self.runner_callbacks, play)
- if not self._run_play(play):
- break
-
- ansible.callbacks.set_play(self.callbacks, None)
- ansible.callbacks.set_play(self.runner_callbacks, None)
-
- # summarize the results
- results = {}
- for host in self.stats.processed.keys():
- results[host] = self.stats.summarize(host)
- return results
-
- # *****************************************************
-
- def _async_poll(self, poller, async_seconds, async_poll_interval):
- ''' poll a launched async job and, if poll_interval is set, wait for its completion '''
-
- results = poller.wait(async_seconds, async_poll_interval)
-
- # mark any hosts that are still listed as started as failed
- # since these likely got killed by async_wrapper
- for host in poller.hosts_to_poll:
- reason = { 'failed' : 1, 'rc' : None, 'msg' : 'timed out' }
- self.runner_callbacks.on_async_failed(host, reason, poller.runner.vars_cache[host]['ansible_job_id'])
- results['contacted'][host] = reason
-
- return results
-
- # *****************************************************
-
- def _trim_unavailable_hosts(self, hostlist=[], keep_failed=False):
- ''' returns a list of hosts that haven't failed and aren't dark '''
-
- return [ h for h in hostlist if (keep_failed or h not in self.stats.failures) and (h not in self.stats.dark)]
-
- # *****************************************************
-
- def _run_task_internal(self, task, include_failed=False):
- ''' run a particular module step in a playbook '''
-
- hosts = self._trim_unavailable_hosts(self.inventory.list_hosts(task.play._play_hosts), keep_failed=include_failed)
- self.inventory.restrict_to(hosts)
-
- runner = ansible.runner.Runner(
- pattern=task.play.hosts,
- inventory=self.inventory,
- module_name=task.module_name,
- module_args=task.module_args,
- forks=self.forks,
- remote_pass=self.remote_pass,
- module_path=self.module_path,
- timeout=self.timeout,
- remote_user=task.remote_user,
- remote_port=task.play.remote_port,
- module_vars=task.module_vars,
- play_vars=task.play_vars,
- play_file_vars=task.play_file_vars,
- role_vars=task.role_vars,
- role_params=task.role_params,
- default_vars=task.default_vars,
- extra_vars=self.extra_vars,
- private_key_file=self.private_key_file,
- setup_cache=self.SETUP_CACHE,
- vars_cache=self.VARS_CACHE,
- basedir=task.play.basedir,
- conditional=task.when,
- callbacks=self.runner_callbacks,
- transport=task.transport,
- is_playbook=True,
- check=self.check,
- diff=self.diff,
- environment=task.environment,
- complex_args=task.args,
- accelerate=task.play.accelerate,
- accelerate_port=task.play.accelerate_port,
- accelerate_ipv6=task.play.accelerate_ipv6,
- error_on_undefined_vars=C.DEFAULT_UNDEFINED_VAR_BEHAVIOR,
- vault_pass = self.vault_password,
- run_hosts=hosts,
- no_log=task.no_log,
- run_once=task.run_once,
- become=task.become,
- become_method=task.become_method,
- become_user=task.become_user,
- become_pass=task.become_pass,
- )
-
- runner.module_vars.update({'play_hosts': hosts})
- runner.module_vars.update({'ansible_version': self._ansible_version})
-
- if task.async_seconds == 0:
- results = runner.run()
- else:
- results, poller = runner.run_async(task.async_seconds)
- self.stats.compute(results)
- if task.async_poll_interval > 0:
- # poll the async job at the requested interval until it completes
- results = self._async_poll(poller, task.async_seconds, task.async_poll_interval)
- else:
- for (host, res) in results.get('contacted', {}).iteritems():
- self.runner_callbacks.on_async_ok(host, res, poller.runner.vars_cache[host]['ansible_job_id'])
-
- contacted = results.get('contacted',{})
- dark = results.get('dark', {})
-
- self.inventory.lift_restriction()
-
- if len(contacted.keys()) == 0 and len(dark.keys()) == 0:
- return None
-
- return results
-
- # *****************************************************
-
- def _run_task(self, play, task, is_handler):
- ''' run a single task in the playbook and recursively run any subtasks. '''
-
- ansible.callbacks.set_task(self.callbacks, task)
- ansible.callbacks.set_task(self.runner_callbacks, task)
-
- if task.role_name:
- name = '%s | %s' % (task.role_name, task.name)
- else:
- name = task.name
-
- try:
- # v1 HACK: we don't have enough information to template many names
- # at this point. Rather than making this work for all cases in
- # v1, just make this degrade gracefully. Will fix in v2
- name = template(play.basedir, name, task.module_vars, lookup_fatal=False, filter_fatal=False)
- except:
- pass
-
- self.callbacks.on_task_start(name, is_handler)
- if hasattr(self.callbacks, 'skip_task') and self.callbacks.skip_task:
- ansible.callbacks.set_task(self.callbacks, None)
- ansible.callbacks.set_task(self.runner_callbacks, None)
- return True
-
- # template ignore_errors
- # TODO: Is this needed here? cond is templated again in
- # check_conditional after some more manipulations.
- # TODO: we don't have enough information here to template cond either
- # (see note on templating name above)
- cond = template(play.basedir, task.ignore_errors, task.module_vars, expand_lists=False)
- task.ignore_errors = utils.check_conditional(cond, play.basedir, task.module_vars, fail_on_undefined=C.DEFAULT_UNDEFINED_VAR_BEHAVIOR)
-
- # load up an appropriate ansible runner to run the task in parallel
- include_failed = is_handler and play.force_handlers
- results = self._run_task_internal(task, include_failed=include_failed)
-
- # if no hosts are matched, carry on
- hosts_remaining = True
- if results is None:
- hosts_remaining = False
- results = {}
-
- contacted = results.get('contacted', {})
- self.stats.compute(results, ignore_errors=task.ignore_errors)
-
- def _register_play_vars(host, result):
- # when 'register' is used, persist the result in the vars cache
- # rather than the setup cache - vars should be transient between
- # playbook executions
- if 'stdout' in result and 'stdout_lines' not in result:
- result['stdout_lines'] = result['stdout'].splitlines()
- utils.update_hash(self.VARS_CACHE, host, {task.register: result})
-
- def _save_play_facts(host, facts):
- # saves play facts in SETUP_CACHE, unless the module executed was
- # set_fact or include_vars, in which case we add them to the VARS_CACHE
- if task.module_name in ('set_fact', 'include_vars'):
- utils.update_hash(self.VARS_CACHE, host, facts)
- else:
- utils.update_hash(self.SETUP_CACHE, host, facts)
-
- # add facts to the global setup cache
- for host, result in contacted.iteritems():
- if 'results' in result:
- # task ran with_ lookup plugin, so facts are encapsulated in
- # multiple list items in the results key
- for res in result['results']:
- if type(res) == dict:
- facts = res.get('ansible_facts', {})
- _save_play_facts(host, facts)
- else:
- # when facts are returned, persist them in the setup cache
- facts = result.get('ansible_facts', {})
- _save_play_facts(host, facts)
-
- # if requested, save the result into the registered variable name
- if task.register:
- _register_play_vars(host, result)
-
- # also have to register some failed, but ignored, tasks
- if task.ignore_errors and task.register:
- failed = results.get('failed', {})
- for host, result in failed.iteritems():
- _register_play_vars(host, result)
-
- # flag which notify handlers need to be run
- if len(task.notify) > 0:
- for host, results in results.get('contacted',{}).iteritems():
- if results.get('changed', False):
- for handler_name in task.notify:
- self._flag_handler(play, template(play.basedir, handler_name, task.module_vars), host)
-
- ansible.callbacks.set_task(self.callbacks, None)
- ansible.callbacks.set_task(self.runner_callbacks, None)
- return hosts_remaining
-
- # *****************************************************
-
- def _flag_handler(self, play, handler_name, host):
- '''
- if a task has any notify elements, flag handlers for run
- at end of execution cycle for hosts that have indicated
- changes have been made
- '''
-
- found = False
- for x in play.handlers():
- if handler_name == template(play.basedir, x.name, x.module_vars):
- found = True
- self.callbacks.on_notify(host, x.name)
- x.notified_by.append(host)
- if not found:
- raise errors.AnsibleError("change handler (%s) is not defined" % handler_name)
-
- # *****************************************************
-
- def _do_setup_step(self, play):
- ''' get facts from the remote system '''
-
- host_list = self._trim_unavailable_hosts(play._play_hosts)
-
- if play.gather_facts is None and C.DEFAULT_GATHERING == 'smart':
- host_list = [h for h in host_list if h not in self.SETUP_CACHE or 'module_setup' not in self.SETUP_CACHE[h]]
- if len(host_list) == 0:
- return {}
- elif play.gather_facts is False or (play.gather_facts is None and C.DEFAULT_GATHERING == 'explicit'):
- return {}
-
- self.callbacks.on_setup()
- self.inventory.restrict_to(host_list)
-
- ansible.callbacks.set_task(self.callbacks, None)
- ansible.callbacks.set_task(self.runner_callbacks, None)
-
- # push any variables down to the system
- setup_results = ansible.runner.Runner(
- basedir=self.basedir,
- pattern=play.hosts,
- module_name='setup',
- module_args={},
- inventory=self.inventory,
- forks=self.forks,
- module_path=self.module_path,
- timeout=self.timeout,
- remote_user=play.remote_user,
- remote_pass=self.remote_pass,
- remote_port=play.remote_port,
- private_key_file=self.private_key_file,
- setup_cache=self.SETUP_CACHE,
- vars_cache=self.VARS_CACHE,
- callbacks=self.runner_callbacks,
- become=play.become,
- become_method=play.become_method,
- become_user=play.become_user,
- become_pass=self.become_pass,
- vault_pass=self.vault_password,
- transport=play.transport,
- is_playbook=True,
- module_vars=play.vars,
- play_vars=play.vars,
- play_file_vars=play.vars_file_vars,
- role_vars=play.role_vars,
- default_vars=play.default_vars,
- check=self.check,
- diff=self.diff,
- accelerate=play.accelerate,
- accelerate_port=play.accelerate_port,
- ).run()
- self.stats.compute(setup_results, setup=True)
-
- self.inventory.lift_restriction()
-
- # now for each result, load into the setup cache so we can
- # let runner template out future commands
- setup_ok = setup_results.get('contacted', {})
- for (host, result) in setup_ok.iteritems():
- utils.update_hash(self.SETUP_CACHE, host, {'module_setup': True})
- utils.update_hash(self.SETUP_CACHE, host, result.get('ansible_facts', {}))
- return setup_results
-
- # *****************************************************
-
-
- def generate_retry_inventory(self, replay_hosts):
- '''
- called by /usr/bin/ansible-playbook when a playbook run fails. It generates an inventory
- that allows re-running on ONLY the failed hosts. This may duplicate some
- variable information in group_vars/host_vars but that is ok, and expected.
- '''
-
- buf = StringIO.StringIO()
- for x in replay_hosts:
- buf.write("%s\n" % x)
- basedir = C.shell_expand_path(C.RETRY_FILES_SAVE_PATH)
- filename = "%s.retry" % os.path.basename(self.filename)
- filename = filename.replace(".yml","")
- filename = os.path.join(basedir, filename)
-
- try:
- if not os.path.exists(basedir):
- os.makedirs(basedir)
-
- fd = open(filename, 'w')
- fd.write(buf.getvalue())
- fd.close()
- except:
- ansible.callbacks.display(
- "\nERROR: could not create retry file. Check the value of \n"
- + "the configuration variable 'retry_files_save_path' or set \n"
- + "'retry_files_enabled' to False to avoid this message.\n",
- color='red'
- )
- return None
-
- return filename
-
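- # Worked sketch, not in the original file: with self.filename set to
- # '/srv/plays/site.yml' and retry_files_save_path '~/.ansible-retry'
- # (both hypothetical), the logic above yields '~/.ansible-retry/site.retry'.
- def _example_retry_filename():
-     filename = '%s.retry' % os.path.basename('/srv/plays/site.yml')
-     filename = filename.replace('.yml', '')
-     return os.path.join(os.path.expanduser('~/.ansible-retry'), filename)
-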
- # *****************************************************
- def tasks_to_run_in_play(self, play):
-
- tasks = []
-
- for task in play.tasks():
- # only run the task if the requested tags match or it has the 'always' tag
- u = set(['untagged'])
- task_set = set(task.tags)
-
- if 'always' in task.tags:
- should_run = True
- else:
- if 'all' in self.only_tags:
- should_run = True
- else:
- should_run = False
- if 'tagged' in self.only_tags:
- if task_set != u:
- should_run = True
- elif 'untagged' in self.only_tags:
- if task_set == u:
- should_run = True
- else:
- if task_set.intersection(self.only_tags):
- should_run = True
-
- # Check for tags that we need to skip
- if 'all' in self.skip_tags:
- should_run = False
- else:
- if 'tagged' in self.skip_tags:
- if task_set != u:
- should_run = False
- elif 'untagged' in self.skip_tags:
- if task_set == u:
- should_run = False
- else:
- if should_run:
- if task_set.intersection(self.skip_tags):
- should_run = False
-
- if should_run:
- tasks.append(task)
-
- return tasks
-
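- # Hedged summary of the precedence above, not in the original file:
- # 'always' runs unless explicitly skipped, --tags widens the selection,
- # --skip-tags narrows it afterwards, and an untagged task carries the
- # pseudo-tag 'untagged'. A minimal model of the two set checks:
- def _example_tag_filter():
-     only_tags, skip_tags = set(['deploy']), set(['notest'])
-     task_set = set(['deploy', 'notest'])
-     should_run = bool(task_set & only_tags) and not (task_set & skip_tags)
-     assert should_run is False  # skip_tags wins over a matching --tags
-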
- # *****************************************************
- def _run_play(self, play):
- ''' run a list of tasks for a given pattern, in order '''
-
- self.callbacks.on_play_start(play.name)
- # Get the hosts for this play
- play._play_hosts = self.inventory.list_hosts(play.hosts)
- # if no hosts matches this play, drop out
- if not play._play_hosts:
- self.callbacks.on_no_hosts_matched()
- return True
-
- # get facts from system
- self._do_setup_step(play)
-
- # now with that data, handle conditional variable file imports!
- all_hosts = self._trim_unavailable_hosts(play._play_hosts)
- play.update_vars_files(all_hosts, vault_password=self.vault_password)
- hosts_count = len(all_hosts)
-
- if play.serial.endswith("%"):
-
- # This is a percentage, so calculate it based on the
- # number of hosts
- serial_pct = int(play.serial.replace("%",""))
- serial = int((serial_pct/100.0) * len(all_hosts))
-
- # Ensure that no matter how small the percentage, serial
- # can never fall below 1, so that things actually happen
- serial = max(serial, 1)
- else:
- serial = int(play.serial)
-
- serialized_batch = []
- if serial <= 0:
- serialized_batch = [all_hosts]
- else:
- # run each batch of serial hosts all the way through before moving to the next (see the worked sketch after this method)
- while len(all_hosts) > 0:
- play_hosts = []
- for x in range(serial):
- if len(all_hosts) > 0:
- play_hosts.append(all_hosts.pop(0))
- serialized_batch.append(play_hosts)
-
- task_errors = False
- for on_hosts in serialized_batch:
-
- # restrict the play to just the hosts we have in our on_hosts block that are
- # available.
- play._play_hosts = self._trim_unavailable_hosts(on_hosts)
- self.inventory.also_restrict_to(on_hosts)
-
- for task in self.tasks_to_run_in_play(play):
-
- if task.meta is not None:
- # meta tasks can force handlers to run mid-play
- if task.meta == 'flush_handlers':
- self.run_handlers(play)
-
- # skip calling the handler till the play is finished
- continue
-
- if not self._run_task(play, task, False):
- # whether no hosts matched is fatal or not depends on whether it happened on the initial step.
- # if we got exactly no hosts on the first step (setup!) then the host group
- # just didn't match anything and that's ok
- return False
-
- # Get a new list of what hosts are left as available, the ones that
- # did not go fail/dark during the task
- host_list = self._trim_unavailable_hosts(play._play_hosts)
-
- # Set max_fail_pct to 0, so if any host fails, bail out
- if task.any_errors_fatal and len(host_list) < hosts_count:
- play.max_fail_pct = 0
-
- # If threshold for max nodes failed is exceeded, bail out.
- if serial > 0:
- # if serial is set, compare against the size of the current batch rather than the full host count
- play_count = len(play._play_hosts)
- if (play_count - len(host_list)) > int((play.max_fail_pct)/100.0 * play_count):
- host_list = None
- else:
- if (hosts_count - len(host_list)) > int((play.max_fail_pct)/100.0 * hosts_count):
- host_list = None
-
- # if no hosts remain, drop out
- if not host_list:
- if play.force_handlers:
- task_errors = True
- break
- else:
- self.callbacks.on_no_hosts_remaining()
- return False
-
- # lift restrictions after each play finishes
- self.inventory.lift_also_restriction()
-
- if task_errors and not play.force_handlers:
- # if there were failed tasks and handler execution
- # is not forced, quit the play with an error
- return False
- else:
- # no errors, go ahead and execute all handlers
- if not self.run_handlers(play):
- return False
-
- return True
-
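- # Worked sketch of the serial batching in _run_play above, not in the
- # original file: with serial='30%' and ten hosts, serial becomes
- # max(int(0.30 * 10), 1) == 3, so tasks run to completion on batches of
- # sizes 3, 3, 3 and 1 in turn.
- def _example_serial_batches():
-     all_hosts = ['h%d' % i for i in range(10)]
-     serial, batches = 3, []
-     while all_hosts:
-         batches.append(all_hosts[:serial])
-         all_hosts = all_hosts[serial:]
-     assert [len(b) for b in batches] == [3, 3, 3, 1]
-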
-
- def run_handlers(self, play):
- on_hosts = play._play_hosts
- hosts_count = len(on_hosts)
- for task in play.tasks():
- if task.meta is not None:
-
- fired_names = {}
- for handler in play.handlers():
- if len(handler.notified_by) > 0:
- self.inventory.restrict_to(handler.notified_by)
-
- # Resolve the variables first
- handler_name = template(play.basedir, handler.name, handler.module_vars)
- if handler_name not in fired_names:
- self._run_task(play, handler, True)
- # prevent duplicate handler includes from running more than once
- fired_names[handler_name] = 1
-
- host_list = self._trim_unavailable_hosts(play._play_hosts)
- if handler.any_errors_fatal and len(host_list) < hosts_count:
- play.max_fail_pct = 0
- if (hosts_count - len(host_list)) > int((play.max_fail_pct)/100.0 * hosts_count):
- host_list = None
- if not host_list and not play.force_handlers:
- self.callbacks.on_no_hosts_remaining()
- return False
-
- self.inventory.lift_restriction()
- new_list = handler.notified_by[:]
- for host in handler.notified_by:
- if host in on_hosts:
- while host in new_list:
- new_list.remove(host)
- handler.notified_by = new_list
-
- continue
-
- return True
diff --git a/v1/ansible/playbook/play.py b/v1/ansible/playbook/play.py
deleted file mode 100644
index 6ee85e0bf4..0000000000
--- a/v1/ansible/playbook/play.py
+++ /dev/null
@@ -1,949 +0,0 @@
-# (c) 2012-2014, Michael DeHaan <michael.dehaan@gmail.com>
-#
-# This file is part of Ansible
-#
-# Ansible is free software: you can redistribute it and/or modify
-# it under the terms of the GNU General Public License as published by
-# the Free Software Foundation, either version 3 of the License, or
-# (at your option) any later version.
-#
-# Ansible is distributed in the hope that it will be useful,
-# but WITHOUT ANY WARRANTY; without even the implied warranty of
-# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
-# GNU General Public License for more details.
-#
-# You should have received a copy of the GNU General Public License
-# along with Ansible. If not, see <http://www.gnu.org/licenses/>.
-
-#############################################
-
-from ansible.utils.template import template
-from ansible import utils
-from ansible import errors
-from ansible.playbook.task import Task
-from ansible.module_utils.splitter import split_args, unquote
-import ansible.constants as C
-import pipes
-import shlex
-import os
-import sys
-import uuid
-
-
-class Play(object):
-
- _pb_common = [
- 'accelerate', 'accelerate_ipv6', 'accelerate_port', 'any_errors_fatal', 'become',
- 'become_method', 'become_user', 'environment', 'force_handlers', 'gather_facts',
- 'handlers', 'hosts', 'name', 'no_log', 'remote_user', 'roles', 'serial', 'su',
- 'su_user', 'sudo', 'sudo_user', 'tags', 'vars', 'vars_files', 'vars_prompt',
- 'vault_password',
- ]
-
- __slots__ = _pb_common + [
- '_ds', '_handlers', '_play_hosts', '_tasks', 'any_errors_fatal', 'basedir',
- 'default_vars', 'included_roles', 'max_fail_pct', 'playbook', 'remote_port',
- 'role_vars', 'transport', 'vars_file_vars',
- ]
-
- # to catch typos and so forth -- these are userland names
- # and don't line up 1:1 with how they are stored
- VALID_KEYS = frozenset(_pb_common + [
- 'connection', 'include', 'max_fail_percentage', 'port', 'post_tasks',
- 'pre_tasks', 'role_names', 'tasks', 'user',
- ])
-
- # *************************************************
-
- def __init__(self, playbook, ds, basedir, vault_password=None):
- ''' constructor loads from a play datastructure '''
-
- for x in ds.keys():
- if x not in Play.VALID_KEYS:
- raise errors.AnsibleError("%s is not a legal parameter of an Ansible Play" % x)
-
- # allow all playbook keys to be set by --extra-vars
- self.vars = ds.get('vars', {})
- self.vars_prompt = ds.get('vars_prompt', {})
- self.playbook = playbook
- self.vars = self._get_vars()
- self.vars_file_vars = dict() # these are vars read in from vars_files:
- self.role_vars = dict() # these are vars read in from vars/main.yml files in roles
- self.basedir = basedir
- self.roles = ds.get('roles', None)
- self.tags = ds.get('tags', None)
- self.vault_password = vault_password
- self.environment = ds.get('environment', {})
-
- if self.tags is None:
- self.tags = []
- elif type(self.tags) in [ str, unicode ]:
- self.tags = self.tags.split(",")
- elif type(self.tags) != list:
- self.tags = []
-
- # make sure we have some special internal variables set, which
- # we use later when loading tasks and handlers
- load_vars = dict()
- load_vars['playbook_dir'] = os.path.abspath(self.basedir)
- if self.playbook.inventory.basedir() is not None:
- load_vars['inventory_dir'] = self.playbook.inventory.basedir()
- if self.playbook.inventory.src() is not None:
- load_vars['inventory_file'] = self.playbook.inventory.src()
-
- # We first load the vars files from the datastructure
- # so we have the default variables to pass into the roles
- self.vars_files = ds.get('vars_files', [])
- if not isinstance(self.vars_files, list):
- raise errors.AnsibleError('vars_files must be a list')
- processed_vars_files = self._update_vars_files_for_host(None)
-
- # now we load the roles into the datastructure
- self.included_roles = []
- ds = self._load_roles(self.roles, ds)
-
- # and finally re-process the vars files as they may have been updated
- # by the included roles, but exclude any which have been processed
- self.vars_files = utils.list_difference(ds.get('vars_files', []), processed_vars_files)
- if not isinstance(self.vars_files, list):
- raise errors.AnsibleError('vars_files must be a list')
-
- self._update_vars_files_for_host(None)
-
- # template everything to be efficient, but do not prematurely template
- # tasks/handlers as they may have inventory scope overrides. We also
- # create a set of temporary variables for templating, so we don't
- # trample on the existing vars structures
- _tasks = ds.pop('tasks', [])
- _handlers = ds.pop('handlers', [])
-
- temp_vars = utils.combine_vars(self.vars, self.vars_file_vars)
- temp_vars = utils.combine_vars(temp_vars, self.playbook.extra_vars)
-
- try:
- ds = template(basedir, ds, temp_vars)
- except errors.AnsibleError, e:
- utils.warning("non fatal error while trying to template play variables: %s" % (str(e)))
-
- ds['tasks'] = _tasks
- ds['handlers'] = _handlers
-
- self._ds = ds
-
- hosts = ds.get('hosts')
- if hosts is None:
- raise errors.AnsibleError('hosts declaration is required')
- elif isinstance(hosts, list):
- try:
- hosts = ';'.join(hosts)
- except TypeError,e:
- raise errors.AnsibleError('improper host declaration: %s' % str(e))
-
- self.serial = str(ds.get('serial', 0))
- self.hosts = hosts
- self.name = ds.get('name', self.hosts)
- self._tasks = ds.get('tasks', [])
- self._handlers = ds.get('handlers', [])
- self.remote_user = ds.get('remote_user', ds.get('user', self.playbook.remote_user))
- self.remote_port = ds.get('port', self.playbook.remote_port)
- self.transport = ds.get('connection', self.playbook.transport)
- self.any_errors_fatal = utils.boolean(ds.get('any_errors_fatal', 'false'))
- self.accelerate = utils.boolean(ds.get('accelerate', 'false'))
- self.accelerate_port = ds.get('accelerate_port', None)
- self.accelerate_ipv6 = ds.get('accelerate_ipv6', False)
- self.max_fail_pct = int(ds.get('max_fail_percentage', 100))
- self.no_log = utils.boolean(ds.get('no_log', 'false'))
- self.force_handlers = utils.boolean(ds.get('force_handlers', self.playbook.force_handlers))
-
- # Fail out if user specifies conflicting privilege escalations
- if (ds.get('become') or ds.get('become_user')) and (ds.get('sudo') or ds.get('sudo_user')):
- raise errors.AnsibleError('sudo params ("become", "become_user") and su params ("sudo", "sudo_user") cannot be used together')
- if (ds.get('become') or ds.get('become_user')) and (ds.get('su') or ds.get('su_user')):
- raise errors.AnsibleError('sudo params ("become", "become_user") and su params ("su", "su_user") cannot be used together')
- if (ds.get('sudo') or ds.get('sudo_user')) and (ds.get('su') or ds.get('su_user')):
- raise errors.AnsibleError('sudo params ("sudo", "sudo_user") and su params ("su", "su_user") cannot be used together')
-
- # become settings are inherited and updated normally
- self.become = ds.get('become', self.playbook.become)
- self.become_method = ds.get('become_method', self.playbook.become_method)
- self.become_user = ds.get('become_user', self.playbook.become_user)
-
- # Make sure current play settings are reflected in become fields
- if 'sudo' in ds:
- self.become=ds['sudo']
- self.become_method='sudo'
- if 'sudo_user' in ds:
- self.become_user=ds['sudo_user']
- elif 'su' in ds:
- self.become=ds['su']
- self.become_method='su'
- if 'su_user' in ds:
- self.become_user=ds['su_user']
-
- # gather_facts is not a simple boolean, as None means that a 'smart'
- # fact gathering mode will be used, so we need to be careful here as
- # calling utils.boolean(None) returns False
- self.gather_facts = ds.get('gather_facts', None)
- if self.gather_facts is not None:
- self.gather_facts = utils.boolean(self.gather_facts)
-
- load_vars['role_names'] = ds.get('role_names', [])
-
- self._tasks = self._load_tasks(self._ds.get('tasks', []), load_vars)
- self._handlers = self._load_tasks(self._ds.get('handlers', []), load_vars)
-
- # apply any missing tags to role tasks
- self._late_merge_role_tags()
-
- # placeholder for the discovered hosts to be used in this play
- self._play_hosts = None
-
- # *************************************************
-
- def _get_role_path(self, role):
- """
- Returns the path on disk to the directory containing
- the role directories like tasks, templates, etc. Also
- returns any variables that were included with the role
- """
- orig_path = template(self.basedir,role,self.vars)
-
- role_vars = {}
- if type(orig_path) == dict:
- # what, not a path?
- role_name = orig_path.get('role', None)
- if role_name is None:
- raise errors.AnsibleError("expected a role name in dictionary: %s" % orig_path)
- role_vars = orig_path
- else:
- role_name = utils.role_spec_parse(orig_path)["name"]
-
- role_path = None
-
- possible_paths = [
- utils.path_dwim(self.basedir, os.path.join('roles', role_name)),
- utils.path_dwim(self.basedir, role_name)
- ]
-
- if C.DEFAULT_ROLES_PATH:
- search_locations = C.DEFAULT_ROLES_PATH.split(os.pathsep)
- for loc in search_locations:
- loc = os.path.expanduser(loc)
- possible_paths.append(utils.path_dwim(loc, role_name))
-
- for path_option in possible_paths:
- if os.path.isdir(path_option):
- role_path = path_option
- break
-
- if role_path is None:
- raise errors.AnsibleError("cannot find role in %s" % " or ".join(possible_paths))
-
- return (role_path, role_vars)
-
- def _build_role_dependencies(self, roles, dep_stack, passed_vars={}, level=0):
- # this number is arbitrary, but it seems sane
- if level > 20:
- raise errors.AnsibleError("too many levels of recursion while resolving role dependencies")
- for role in roles:
- role_path,role_vars = self._get_role_path(role)
-
- # save just the role params for this role, which exclude the special
- # keywords 'role', 'tags', and 'when'.
- role_params = role_vars.copy()
- for item in ('role', 'tags', 'when'):
- if item in role_params:
- del role_params[item]
-
- role_vars = utils.combine_vars(passed_vars, role_vars)
-
- vars = self._resolve_main(utils.path_dwim(self.basedir, os.path.join(role_path, 'vars')))
- vars_data = {}
- if os.path.isfile(vars):
- vars_data = utils.parse_yaml_from_file(vars, vault_password=self.vault_password)
- if vars_data:
- if not isinstance(vars_data, dict):
- raise errors.AnsibleError("vars from '%s' are not a dict" % vars)
- role_vars = utils.combine_vars(vars_data, role_vars)
-
- defaults = self._resolve_main(utils.path_dwim(self.basedir, os.path.join(role_path, 'defaults')))
- defaults_data = {}
- if os.path.isfile(defaults):
- defaults_data = utils.parse_yaml_from_file(defaults, vault_password=self.vault_password)
-
- # the meta directory contains the yaml that should
- # hold the list of dependencies (if any)
- meta = self._resolve_main(utils.path_dwim(self.basedir, os.path.join(role_path, 'meta')))
- if os.path.isfile(meta):
- data = utils.parse_yaml_from_file(meta, vault_password=self.vault_password)
- if data:
- dependencies = data.get('dependencies',[])
- if dependencies is None:
- dependencies = []
- for dep in dependencies:
- allow_dupes = False
- (dep_path,dep_vars) = self._get_role_path(dep)
-
- # save the dep params, just as we did above
- dep_params = dep_vars.copy()
- for item in ('role', 'tags', 'when'):
- if item in dep_params:
- del dep_params[item]
-
- meta = self._resolve_main(utils.path_dwim(self.basedir, os.path.join(dep_path, 'meta')))
- if os.path.isfile(meta):
- meta_data = utils.parse_yaml_from_file(meta, vault_password=self.vault_password)
- if meta_data:
- allow_dupes = utils.boolean(meta_data.get('allow_duplicates',''))
-
- # if any tags were specified as role/dep variables, merge
- # them into the current dep_vars so they're passed on to any
- # further dependencies too, and so we only have one place
- # (dep_vars) to look for tags going forward
- def __merge_tags(var_obj):
- old_tags = dep_vars.get('tags', [])
- if isinstance(old_tags, basestring):
- old_tags = [old_tags, ]
- if isinstance(var_obj, dict):
- new_tags = var_obj.get('tags', [])
- if isinstance(new_tags, basestring):
- new_tags = [new_tags, ]
- else:
- new_tags = []
- return list(set(old_tags).union(set(new_tags)))
-
- dep_vars['tags'] = __merge_tags(role_vars)
- dep_vars['tags'] = __merge_tags(passed_vars)
-
- # if tags are set from this role, merge them
- # into the tags list for the dependent role
- if "tags" in passed_vars:
- for included_role_dep in dep_stack:
- included_dep_name = included_role_dep[0]
- included_dep_vars = included_role_dep[2]
- if included_dep_name == dep:
- if "tags" in included_dep_vars:
- included_dep_vars["tags"] = list(set(included_dep_vars["tags"]).union(set(passed_vars["tags"])))
- else:
- included_dep_vars["tags"] = passed_vars["tags"][:]
-
- dep_vars = utils.combine_vars(passed_vars, dep_vars)
- dep_vars = utils.combine_vars(role_vars, dep_vars)
-
- vars = self._resolve_main(utils.path_dwim(self.basedir, os.path.join(dep_path, 'vars')))
- vars_data = {}
- if os.path.isfile(vars):
- vars_data = utils.parse_yaml_from_file(vars, vault_password=self.vault_password)
- if vars_data:
- dep_vars = utils.combine_vars(dep_vars, vars_data)
-
- defaults = self._resolve_main(utils.path_dwim(self.basedir, os.path.join(dep_path, 'defaults')))
- dep_defaults_data = {}
- if os.path.isfile(defaults):
- dep_defaults_data = utils.parse_yaml_from_file(defaults, vault_password=self.vault_password)
- if 'role' in dep_vars:
- del dep_vars['role']
-
- if not allow_dupes:
- if dep in self.included_roles:
- # skip back to the top, since we don't want to
- # do anything else with this role
- continue
- else:
- self.included_roles.append(dep)
-
- def _merge_conditional(cur_conditionals, new_conditionals):
- if isinstance(new_conditionals, (basestring, bool)):
- cur_conditionals.append(new_conditionals)
- elif isinstance(new_conditionals, list):
- cur_conditionals.extend(new_conditionals)
-
- # pass along conditionals from roles to dep roles
- passed_when = passed_vars.get('when')
- role_when = role_vars.get('when')
- dep_when = dep_vars.get('when')
-
- tmpcond = []
- _merge_conditional(tmpcond, passed_when)
- _merge_conditional(tmpcond, role_when)
- _merge_conditional(tmpcond, dep_when)
-
- if len(tmpcond) > 0:
- dep_vars['when'] = tmpcond
-
- self._build_role_dependencies([dep], dep_stack, passed_vars=dep_vars, level=level+1)
- dep_stack.append([dep, dep_path, dep_vars, dep_params, dep_defaults_data])
-
- # only add the current role when we're at the top level,
- # otherwise we'll end up in a recursive loop
- if level == 0:
- self.included_roles.append(role)
- dep_stack.append([role, role_path, role_vars, role_params, defaults_data])
- return dep_stack
-
- def _load_role_vars_files(self, vars_files):
- # process variables stored in vars/main.yml files
- role_vars = {}
- for filename in vars_files:
- if os.path.exists(filename):
- new_vars = utils.parse_yaml_from_file(filename, vault_password=self.vault_password)
- if new_vars:
- if type(new_vars) != dict:
- raise errors.AnsibleError("%s must be stored as a dictionary/hash: %s" % (filename, type(new_vars)))
- role_vars = utils.combine_vars(role_vars, new_vars)
-
- return role_vars
-
- def _load_role_defaults(self, defaults_files):
- # process default variables
- default_vars = {}
- for filename in defaults_files:
- if os.path.exists(filename):
- new_default_vars = utils.parse_yaml_from_file(filename, vault_password=self.vault_password)
- if new_default_vars:
- if type(new_default_vars) != dict:
- raise errors.AnsibleError("%s must be stored as a dictionary/hash: %s" % (filename, type(new_default_vars)))
- default_vars = utils.combine_vars(default_vars, new_default_vars)
-
- return default_vars
-
- def _load_roles(self, roles, ds):
- # a role is a name that auto-includes the following if they exist
- # <rolename>/tasks/main.yml
- # <rolename>/handlers/main.yml
- # <rolename>/vars/main.yml
- # <rolename>/library
- # and it auto-extends tasks/handlers/vars_files/module paths as appropriate if found
-
- if roles is None:
- roles = []
- if type(roles) != list:
- raise errors.AnsibleError("value of 'roles:' must be a list")
-
- new_tasks = []
- new_handlers = []
- role_vars_files = []
- defaults_files = []
-
- pre_tasks = ds.get('pre_tasks', None)
- if type(pre_tasks) != list:
- pre_tasks = []
- for x in pre_tasks:
- new_tasks.append(x)
-
- # flush handlers after pre_tasks
- new_tasks.append(dict(meta='flush_handlers'))
-
- roles = self._build_role_dependencies(roles, [], {})
-
- # give each role an uuid and
- # make role_path available as variable to the task
- for idx, val in enumerate(roles):
- this_uuid = str(uuid.uuid4())
- roles[idx][-3]['role_uuid'] = this_uuid
- roles[idx][-3]['role_path'] = roles[idx][1]
-
- role_names = []
-
- for (role, role_path, role_vars, role_params, default_vars) in roles:
- # special vars must be extracted from the dict to the included tasks
- special_keys = [ "sudo", "sudo_user", "when", "with_items", "su", "su_user", "become", "become_user" ]
- special_vars = {}
- for k in special_keys:
- if k in role_vars:
- special_vars[k] = role_vars[k]
-
- task_basepath = utils.path_dwim(self.basedir, os.path.join(role_path, 'tasks'))
- handler_basepath = utils.path_dwim(self.basedir, os.path.join(role_path, 'handlers'))
- vars_basepath = utils.path_dwim(self.basedir, os.path.join(role_path, 'vars'))
- meta_basepath = utils.path_dwim(self.basedir, os.path.join(role_path, 'meta'))
- defaults_basepath = utils.path_dwim(self.basedir, os.path.join(role_path, 'defaults'))
-
- task = self._resolve_main(task_basepath)
- handler = self._resolve_main(handler_basepath)
- vars_file = self._resolve_main(vars_basepath)
- meta_file = self._resolve_main(meta_basepath)
- defaults_file = self._resolve_main(defaults_basepath)
-
- library = utils.path_dwim(self.basedir, os.path.join(role_path, 'library'))
-
- missing = lambda f: not os.path.isfile(f)
- if missing(task) and missing(handler) and missing(vars_file) and missing(defaults_file) and missing(meta_file) and not os.path.isdir(library):
- raise errors.AnsibleError("found role at %s, but cannot find %s or %s or %s or %s or %s or %s" % (role_path, task, handler, vars_file, defaults_file, meta_file, library))
-
- if isinstance(role, dict):
- role_name = role['role']
- else:
- role_name = utils.role_spec_parse(role)["name"]
-
- role_names.append(role_name)
- if os.path.isfile(task):
- nt = dict(include=pipes.quote(task), vars=role_vars, role_params=role_params, default_vars=default_vars, role_name=role_name)
- for k in special_keys:
- if k in special_vars:
- nt[k] = special_vars[k]
- new_tasks.append(nt)
- if os.path.isfile(handler):
- nt = dict(include=pipes.quote(handler), vars=role_vars, role_params=role_params, role_name=role_name)
- for k in special_keys:
- if k in special_vars:
- nt[k] = special_vars[k]
- new_handlers.append(nt)
- if os.path.isfile(vars_file):
- role_vars_files.append(vars_file)
- if os.path.isfile(defaults_file):
- defaults_files.append(defaults_file)
- if os.path.isdir(library):
- utils.plugins.module_finder.add_directory(library)
-
- tasks = ds.get('tasks', None)
- post_tasks = ds.get('post_tasks', None)
- handlers = ds.get('handlers', None)
- vars_files = ds.get('vars_files', None)
-
- if type(tasks) != list:
- tasks = []
- if type(handlers) != list:
- handlers = []
- if type(vars_files) != list:
- vars_files = []
- if type(post_tasks) != list:
- post_tasks = []
-
- new_tasks.extend(tasks)
- # flush handlers after tasks + role tasks
- new_tasks.append(dict(meta='flush_handlers'))
- new_tasks.extend(post_tasks)
- # flush handlers after post tasks
- new_tasks.append(dict(meta='flush_handlers'))
-
- new_handlers.extend(handlers)
-
- ds['tasks'] = new_tasks
- ds['handlers'] = new_handlers
- ds['role_names'] = role_names
-
- self.role_vars = self._load_role_vars_files(role_vars_files)
- self.default_vars = self._load_role_defaults(defaults_files)
-
- return ds
-
- # *************************************************
-
- def _resolve_main(self, basepath):
- ''' flexibly handle variations in main filenames '''
- # these filenames are acceptable:
- mains = (
- os.path.join(basepath, 'main'),
- os.path.join(basepath, 'main.yml'),
- os.path.join(basepath, 'main.yaml'),
- os.path.join(basepath, 'main.json'),
- )
- if sum([os.path.isfile(x) for x in mains]) > 1:
- raise errors.AnsibleError("found multiple main files at %s, only one allowed" % (basepath))
- else:
- for m in mains:
- if os.path.isfile(m):
- return m # exactly one main file
- return mains[0] # zero mains (we still need to return something)
-
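_resolve_main's rule is simple but easy to misread in this layout: at most one of the four candidates may exist, and the extensionless 'main' path is returned as a placeholder when none do. A standalone Python 3 sketch under those assumptions (resolve_main is a hypothetical name):

import os

def resolve_main(basepath):
    # the same four candidates as above: main, main.yml, main.yaml, main.json
    mains = [os.path.join(basepath, n)
             for n in ('main', 'main.yml', 'main.yaml', 'main.json')]
    existing = [m for m in mains if os.path.isfile(m)]
    if len(existing) > 1:
        raise ValueError("found multiple main files at %s, only one allowed" % basepath)
    # exactly one hit, or the bare 'main' path when nothing exists
    return existing[0] if existing else mains[0]
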
- # *************************************************
-
- def _load_tasks(self, tasks, vars=None, role_params=None, default_vars=None, become_vars=None,
- additional_conditions=None, original_file=None, role_name=None):
- ''' handle task and handler include statements '''
-
- results = []
- if tasks is None:
- # support empty handler files, and the like.
- tasks = []
- if additional_conditions is None:
- additional_conditions = []
- if vars is None:
- vars = {}
- if role_params is None:
- role_params = {}
- if default_vars is None:
- default_vars = {}
- if become_vars is None:
- become_vars = {}
-
- old_conditions = list(additional_conditions)
-
- for x in tasks:
-
- # prevent assigning the same conditions to each task on an include
- included_additional_conditions = list(old_conditions)
-
- if not isinstance(x, dict):
- raise errors.AnsibleError("expecting dict; got: %s, error in %s" % (x, original_file))
-
- # evaluate privilege escalation vars for current and child tasks
- included_become_vars = {}
- for k in ["become", "become_user", "become_method", "become_exe", "sudo", "su", "sudo_user", "su_user"]:
- if k in x:
- included_become_vars[k] = x[k]
- elif k in become_vars:
- included_become_vars[k] = become_vars[k]
- x[k] = become_vars[k]
-
- task_vars = vars.copy()
- if original_file:
- task_vars['_original_file'] = original_file
-
- if 'meta' in x:
- if x['meta'] == 'flush_handlers':
- if role_name and 'role_name' not in x:
- x['role_name'] = role_name
- results.append(Task(self, x, module_vars=task_vars, role_name=role_name))
- continue
-
- if 'include' in x:
- tokens = split_args(str(x['include']))
- included_additional_conditions = list(additional_conditions)
- include_vars = {}
- for k in x:
- if k.startswith("with_"):
- if original_file:
- offender = " (in %s)" % original_file
- else:
- offender = ""
- utils.deprecated("include + with_items is a removed deprecated feature" + offender, "1.5", removed=True)
- elif k.startswith("when_"):
- utils.deprecated("\"when_<criteria>:\" is a removed deprecated feature, use the simplified 'when:' conditional directly", None, removed=True)
- elif k == 'when':
- if isinstance(x[k], (basestring, bool)):
- included_additional_conditions.append(x[k])
- elif type(x[k]) is list:
- included_additional_conditions.extend(x[k])
- elif k in ("include", "vars", "role_params", "default_vars", "sudo", "sudo_user", "role_name", "no_log", "become", "become_user", "su", "su_user"):
- continue
- else:
- include_vars[k] = x[k]
-
- # get any role parameters specified
- role_params = x.get('role_params', {})
-
- # get any role default variables specified
- default_vars = x.get('default_vars', {})
- if not default_vars:
- default_vars = self.default_vars
- else:
- default_vars = utils.combine_vars(self.default_vars, default_vars)
-
- # append the vars defined with the include (from above)
- # as well as the old-style 'vars' element. The old-style
- # vars are given higher precedence here (just in case)
- task_vars = utils.combine_vars(task_vars, include_vars)
- if 'vars' in x:
- task_vars = utils.combine_vars(task_vars, x['vars'])
-
- new_role = None
- if 'role_name' in x:
- new_role = x['role_name']
-
- mv = task_vars.copy()
- for t in tokens[1:]:
- (k,v) = t.split("=", 1)
- v = unquote(v)
- mv[k] = template(self.basedir, v, mv)
- dirname = self.basedir
- if original_file:
- dirname = os.path.dirname(original_file)
-
- # temp vars are used here to avoid trampling on the existing vars structures
- temp_vars = utils.combine_vars(self.vars, self.vars_file_vars)
- temp_vars = utils.combine_vars(temp_vars, mv)
- temp_vars = utils.combine_vars(temp_vars, self.playbook.extra_vars)
- include_file = template(dirname, tokens[0], temp_vars)
- include_filename = utils.path_dwim(dirname, include_file)
-
- data = utils.parse_yaml_from_file(include_filename, vault_password=self.vault_password)
- if 'role_name' in x and data is not None:
- for y in data:
- if isinstance(y, dict) and 'include' in y:
- y['role_name'] = new_role
- loaded = self._load_tasks(data, mv, role_params, default_vars, included_become_vars, list(included_additional_conditions), original_file=include_filename, role_name=new_role)
- results += loaded
- elif type(x) == dict:
- task = Task(
- self, x,
- module_vars=task_vars,
- play_vars=self.vars,
- play_file_vars=self.vars_file_vars,
- role_vars=self.role_vars,
- role_params=role_params,
- default_vars=default_vars,
- additional_conditions=list(additional_conditions),
- role_name=role_name
- )
- results.append(task)
- else:
- raise Exception("unexpected task type")
-
- for x in results:
- if self.tags is not None:
- x.tags.extend(self.tags)
-
- return results
-
- # *************************************************
-
- def tasks(self):
- ''' return task objects for this play '''
- return self._tasks
-
- def handlers(self):
- ''' return handler objects for this play '''
- return self._handlers
-
- # *************************************************
-
- def _get_vars(self):
- ''' load the vars section from a play, accounting for all sorts of variable features
- including loading from yaml files, prompting, and conditional includes of the first
- file found in a list. '''
-
- if self.vars is None:
- self.vars = {}
-
- if type(self.vars) not in [dict, list]:
- raise errors.AnsibleError("'vars' section must contain only key/value pairs")
-
- vars = {}
-
- # translate a list of vars into a dict
- if type(self.vars) == list:
- for item in self.vars:
- if getattr(item, 'items', None) is None:
- raise errors.AnsibleError("expecting a key-value pair in 'vars' section")
- k, v = item.items()[0]
- vars[k] = v
- else:
- vars.update(self.vars)
-
- if type(self.vars_prompt) == list:
- for var in self.vars_prompt:
- if not 'name' in var:
- raise errors.AnsibleError("'vars_prompt' item is missing 'name:'")
-
- vname = var['name']
- prompt = var.get("prompt", vname)
- default = var.get("default", None)
- private = var.get("private", True)
-
- confirm = var.get("confirm", False)
- encrypt = var.get("encrypt", None)
- salt_size = var.get("salt_size", None)
- salt = var.get("salt", None)
-
- if vname not in self.playbook.extra_vars:
- vars[vname] = self.playbook.callbacks.on_vars_prompt(
- vname, private, prompt, encrypt, confirm, salt_size, salt, default
- )
-
- elif type(self.vars_prompt) == dict:
- for (vname, prompt) in self.vars_prompt.iteritems():
- prompt_msg = "%s: " % prompt
- if vname not in self.playbook.extra_vars:
- vars[vname] = self.playbook.callbacks.on_vars_prompt(
- varname=vname, private=False, prompt=prompt_msg, default=None
- )
-
- else:
- raise errors.AnsibleError("'vars_prompt' section is malformed, see docs")
-
- if type(self.playbook.extra_vars) == dict:
- vars = utils.combine_vars(vars, self.playbook.extra_vars)
-
- return vars
-
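The list-to-dict translation at the top of _get_vars is worth seeing with data. A Python 3 sketch of just that step (item.items()[0] above is a Python 2 idiom; the function name is hypothetical):

def vars_list_to_dict(vars_section):
    # a play may express vars as a list of one-key mappings; flatten it
    result = {}
    for item in vars_section:
        if not hasattr(item, 'items'):
            raise TypeError("expecting a key-value pair in 'vars' section")
        for k, v in item.items():   # the original reads only the first pair
            result[k] = v
            break
    return result

print(vars_list_to_dict([{'http_port': 80}, {'max_clients': 200}]))
# {'http_port': 80, 'max_clients': 200}
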
- # *************************************************
-
- def update_vars_files(self, hosts, vault_password=None):
- ''' calculate vars_files, which requires that setup runs first so ansible facts can be mixed in '''
-
- # now loop through all the hosts...
- for h in hosts:
- self._update_vars_files_for_host(h, vault_password=vault_password)
-
- # *************************************************
-
- def compare_tags(self, tags):
- ''' given a list of tags that the user has specified, return two lists:
- matched_tags: tags that were found within the current play and match those given
- by the user
- unmatched_tags: tags that were found within the current play but do not match
- any provided by the user '''
-
- # gather all the tags in all the tasks and handlers into one list
- # FIXME: isn't this in self.tags already?
-
- all_tags = []
- for task in self._tasks:
- if not task.meta:
- all_tags.extend(task.tags)
- for handler in self._handlers:
- all_tags.extend(handler.tags)
-
- # compare the lists of tags using sets and return the matched and unmatched
- all_tags_set = set(all_tags)
- tags_set = set(tags)
-
- matched_tags = all_tags_set.intersection(tags_set)
- unmatched_tags = all_tags_set.difference(tags_set)
-
- a = set(['always'])
- u = set(['untagged'])
- if 'always' in all_tags_set:
- matched_tags = matched_tags.union(a)
- unmatched_tags = all_tags_set.difference(a)
-
- if 'all' in tags_set:
- matched_tags = matched_tags.union(all_tags_set)
- unmatched_tags = set()
-
- if 'tagged' in tags_set:
- matched_tags = all_tags_set.difference(u)
- unmatched_tags = u
-
- if 'untagged' in tags_set and 'untagged' in all_tags_set:
- matched_tags = matched_tags.union(u)
- unmatched_tags = unmatched_tags.difference(u)
-
- return matched_tags, unmatched_tags
-
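The set arithmetic in compare_tags is easier to follow with concrete values. A sketch of the core path in Python 3, with the 'always'/'tagged'/'untagged' special cases elided:

def compare_tags(play_tags, user_tags):
    # core set arithmetic only; special pseudo-tags are not handled here
    play_set, user_set = set(play_tags), set(user_tags)
    if 'all' in user_set:            # --tags all selects everything
        return play_set, set()
    return play_set & user_set, play_set - user_set

print(compare_tags(['setup', 'deploy', 'debug'], ['deploy']))
# ({'deploy'}, {'setup', 'debug'})  -- order within the sets may vary
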
- # *************************************************
-
- def _late_merge_role_tags(self):
- # build a local dict of tags for roles
- role_tags = {}
- for task in self._ds['tasks']:
- if 'role_name' in task:
- this_role = task['role_name'] + "-" + task['vars']['role_uuid']
-
- if this_role not in role_tags:
- role_tags[this_role] = []
-
- if 'tags' in task['vars']:
- if isinstance(task['vars']['tags'], basestring):
- role_tags[this_role] += shlex.split(task['vars']['tags'])
- else:
- role_tags[this_role] += task['vars']['tags']
-
- # apply each role's tags to its tasks
- for idx, val in enumerate(self._tasks):
- if getattr(val, 'role_name', None) is not None:
- this_role = val.role_name + "-" + val.module_vars['role_uuid']
- if this_role in role_tags:
- self._tasks[idx].tags = sorted(set(self._tasks[idx].tags + role_tags[this_role]))
-
- # *************************************************
-
- def _update_vars_files_for_host(self, host, vault_password=None):
-
- def generate_filenames(host, inject, filename):
-
- """ Render the raw filename into 3 forms """
-
- # filename2 is the templated version of the filename, which will
- # be fully rendered if any variables contained within it are
- # non-inventory related
- filename2 = template(self.basedir, filename, self.vars)
-
- # filename3 is the same as filename2, but when the host object is
- # available, inventory variables will be expanded as well since the
- # name is templated with the injected variables
- filename3 = filename2
- if host is not None:
- filename3 = template(self.basedir, filename2, inject)
-
- # filename4 is the dwim'd path, but may also be mixed-scope, so we use
- # both play scoped vars and host scoped vars to template the filepath
- if utils.contains_vars(filename3) and host is not None:
- inject.update(self.vars)
- filename4 = template(self.basedir, filename3, inject)
- filename4 = utils.path_dwim(self.basedir, filename4)
- else:
- filename4 = utils.path_dwim(self.basedir, filename3)
-
- return filename2, filename3, filename4
-
-
- def update_vars_cache(host, data, target_filename=None):
-
- """ update a host's varscache with new var data """
-
- self.playbook.VARS_CACHE[host] = utils.combine_vars(self.playbook.VARS_CACHE.get(host, {}), data)
- if target_filename:
- self.playbook.callbacks.on_import_for_host(host, target_filename)
-
- def process_files(filename, filename2, filename3, filename4, host=None):
-
- """ pseudo-algorithm for deciding where new vars should go """
-
- data = utils.parse_yaml_from_file(filename4, vault_password=self.vault_password)
- if data:
- if type(data) != dict:
- raise errors.AnsibleError("%s must be stored as a dictionary/hash" % filename4)
- if host is not None:
- target_filename = None
- if utils.contains_vars(filename2):
- if not utils.contains_vars(filename3):
- target_filename = filename3
- else:
- target_filename = filename4
- update_vars_cache(host, data, target_filename=target_filename)
- else:
- self.vars_file_vars = utils.combine_vars(self.vars_file_vars, data)
- # we did process this file
- return True
- # we did not process this file
- return False
-
- # Enforce that vars_files is always a list
- if type(self.vars_files) != list:
- self.vars_files = [ self.vars_files ]
-
- # Build an inject if this is a host run started by self.update_vars_files
- if host is not None:
- inject = {}
- inject.update(self.playbook.inventory.get_variables(host, vault_password=vault_password))
- inject.update(self.playbook.SETUP_CACHE.get(host, {}))
- inject.update(self.playbook.VARS_CACHE.get(host, {}))
- else:
- inject = None
-
- processed = []
- for filename in self.vars_files:
- if type(filename) == list:
- # loop over all filenames, loading the first one, and failing if none found
- found = False
- sequence = []
- for real_filename in filename:
- filename2, filename3, filename4 = generate_filenames(host, inject, real_filename)
- sequence.append(filename4)
- if os.path.exists(filename4):
- found = True
- if process_files(filename, filename2, filename3, filename4, host=host):
- processed.append(filename)
- elif host is not None:
- self.playbook.callbacks.on_not_import_for_host(host, filename4)
- if found:
- break
- if not found and host is not None:
- raise errors.AnsibleError(
- "%s: FATAL, no files matched for vars_files import sequence: %s" % (host, sequence)
- )
- else:
- # just one filename supplied, load it!
- filename2, filename3, filename4 = generate_filenames(host, inject, filename)
- if utils.contains_vars(filename4):
- continue
- if process_files(filename, filename2, filename3, filename4, host=host):
- processed.append(filename)
-
- return processed
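The three filename forms built by generate_filenames drive this whole method: filename2 is templated with play vars only, filename3 re-templates the result with host-scoped vars, and filename4 is the final on-disk path. A toy Python 3 illustration (render is a hypothetical stand-in for template.template, and the path_dwim step is omitted):

def render(text, variables):
    # toy stand-in for ansible's template(): substitute {{ var }} markers
    for k, v in variables.items():
        text = text.replace('{{ %s }}' % k, str(v))
    return text

play_vars = {'env': 'prod'}
host_vars = {'ansible_hostname': 'web1'}

raw = 'vars/{{ env }}/{{ ansible_hostname }}.yml'
filename2 = render(raw, play_vars)         # play-scoped pass only
filename3 = render(filename2, host_vars)   # host-scoped pass fills the rest
print(filename2)   # vars/prod/{{ ansible_hostname }}.yml
print(filename3)   # vars/prod/web1.yml
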
diff --git a/v1/ansible/playbook/task.py b/v1/ansible/playbook/task.py
deleted file mode 100644
index 70c1bc8df6..0000000000
--- a/v1/ansible/playbook/task.py
+++ /dev/null
@@ -1,346 +0,0 @@
-# (c) 2012-2014, Michael DeHaan <michael.dehaan@gmail.com>
-#
-# This file is part of Ansible
-#
-# Ansible is free software: you can redistribute it and/or modify
-# it under the terms of the GNU General Public License as published by
-# the Free Software Foundation, either version 3 of the License, or
-# (at your option) any later version.
-#
-# Ansible is distributed in the hope that it will be useful,
-# but WITHOUT ANY WARRANTY; without even the implied warranty of
-# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
-# GNU General Public License for more details.
-#
-# You should have received a copy of the GNU General Public License
-# along with Ansible. If not, see <http://www.gnu.org/licenses/>.
-
-from ansible import errors
-from ansible import utils
-from ansible.module_utils.splitter import split_args
-import os
-import ansible.utils.template as template
-import sys
-
-class Task(object):
-
- _t_common = [
- 'action', 'always_run', 'any_errors_fatal', 'args', 'become', 'become_method', 'become_pass',
- 'become_user', 'changed_when', 'delay', 'delegate_to', 'environment', 'failed_when',
- 'first_available_file', 'ignore_errors', 'local_action', 'meta', 'name', 'no_log',
- 'notify', 'register', 'remote_user', 'retries', 'run_once', 'su', 'su_pass', 'su_user',
- 'sudo', 'sudo_pass', 'sudo_user', 'tags', 'transport', 'until', 'when',
- ]
-
- __slots__ = [
- 'async_poll_interval', 'async_seconds', 'default_vars', 'first_available_file',
- 'items_lookup_plugin', 'items_lookup_terms', 'module_args', 'module_name', 'module_vars',
- 'notified_by', 'play', 'play_file_vars', 'play_vars', 'role_name', 'role_params', 'role_vars',
- ] + _t_common
-
- # to prevent typos and such
- VALID_KEYS = frozenset([
- 'async', 'connection', 'include', 'poll',
- ] + _t_common)
-
- def __init__(self, play, ds, module_vars=None, play_vars=None, play_file_vars=None, role_vars=None, role_params=None, default_vars=None, additional_conditions=None, role_name=None):
- ''' constructor loads from a task or handler datastructure '''
-
- # meta directives are used to tell the playbook engine to run
- # operations like handler execution. Meta tasks are not executed
- # normally.
- if 'meta' in ds:
- self.meta = ds['meta']
- self.tags = []
- self.module_vars = module_vars
- self.role_name = role_name
- return
- else:
- self.meta = None
-
-
- library = os.path.join(play.basedir, 'library')
- if os.path.exists(library):
- utils.plugins.module_finder.add_directory(library)
-
- for x in ds.keys():
-
- # code to allow for saying "modulename: args" versus "action: modulename args"
- if x in utils.plugins.module_finder:
-
- if 'action' in ds:
- raise errors.AnsibleError("multiple actions specified in task: '%s' and '%s'" % (x, ds.get('name', ds['action'])))
- if isinstance(ds[x], dict):
- if 'args' in ds:
- raise errors.AnsibleError("can't combine args: and a dict for %s: in task %s" % (x, ds.get('name', "%s: %s" % (x, ds[x]))))
- ds['args'] = ds[x]
- ds[x] = ''
- elif ds[x] is None:
- ds[x] = ''
- if not isinstance(ds[x], basestring):
- raise errors.AnsibleError("action specified for task %s has invalid type %s" % (ds.get('name', "%s: %s" % (x, ds[x])), type(ds[x])))
- ds['action'] = x + " " + ds[x]
- ds.pop(x)
-
- # code to allow "with_glob" and to reference a lookup plugin named glob
- elif x.startswith("with_"):
- if isinstance(ds[x], basestring):
- param = ds[x].strip()
-
- plugin_name = x.replace("with_","")
- if plugin_name in utils.plugins.lookup_loader:
- ds['items_lookup_plugin'] = plugin_name
- ds['items_lookup_terms'] = ds[x]
- ds.pop(x)
- else:
- raise errors.AnsibleError("cannot find lookup plugin named %s for usage in with_%s" % (plugin_name, plugin_name))
-
- elif x in [ 'changed_when', 'failed_when', 'when']:
- if isinstance(ds[x], basestring):
- param = ds[x].strip()
- # Only a variable, no logic
- if (param.startswith('{{') and
- param.find('}}') == len(ds[x]) - 2 and
- param.find('|') == -1):
- utils.warning("It is unnecessary to use '{{' in conditionals; leave variables in loop expressions bare.")
- elif x.startswith("when_"):
- utils.deprecated("The 'when_' conditional has been removed. Switch to using the regular unified 'when' statements as described on docs.ansible.com.","1.5", removed=True)
-
- if 'when' in ds:
- raise errors.AnsibleError("multiple when_* statements specified in task %s" % (ds.get('name', ds['action'])))
- when_name = x.replace("when_","")
- ds['when'] = "%s %s" % (when_name, ds[x])
- ds.pop(x)
- elif not x in Task.VALID_KEYS:
- raise errors.AnsibleError("%s is not a legal parameter in an Ansible task or handler" % x)
-
- self.module_vars = module_vars
- self.play_vars = play_vars
- self.play_file_vars = play_file_vars
- self.role_vars = role_vars
- self.role_params = role_params
- self.default_vars = default_vars
- self.play = play
-
- # load various attributes
- self.name = ds.get('name', None)
- self.tags = [ 'untagged' ]
- self.register = ds.get('register', None)
- self.environment = ds.get('environment', play.environment)
- self.role_name = role_name
- self.no_log = utils.boolean(ds.get('no_log', "false")) or self.play.no_log
- self.run_once = utils.boolean(ds.get('run_once', 'false'))
-
- # code to allow the do/until retry feature in a task
- if 'until' in ds:
- if not ds.get('register'):
- raise errors.AnsibleError("the 'register' keyword is mandatory when using the do/until feature")
- self.module_vars['delay'] = ds.get('delay', 5)
- self.module_vars['retries'] = ds.get('retries', 3)
- self.module_vars['register'] = ds.get('register', None)
- self.until = ds.get('until')
- self.module_vars['until'] = self.until
-
- # rather than simple key=value args on the options line, these represent structured data and the values
- # can be hashes and lists, not just scalars
- self.args = ds.get('args', {})
-
- # get remote_user for task, then play, then playbook
- if ds.get('remote_user') is not None:
- self.remote_user = ds.get('remote_user')
- elif ds.get('remote_user', play.remote_user) is not None:
- self.remote_user = ds.get('remote_user', play.remote_user)
- else:
- self.remote_user = ds.get('remote_user', play.playbook.remote_user)
-
- # Fail out if user specifies privilege escalation params in conflict
- if (ds.get('become') or ds.get('become_user') or ds.get('become_pass')) and (ds.get('sudo') or ds.get('sudo_user') or ds.get('sudo_pass')):
- raise errors.AnsibleError('incompatible parameters ("become", "become_user", "become_pass") and sudo params "sudo", "sudo_user", "sudo_pass" in task: %s' % self.name)
-
- if (ds.get('become') or ds.get('become_user') or ds.get('become_pass')) and (ds.get('su') or ds.get('su_user') or ds.get('su_pass')):
- raise errors.AnsibleError('incompatible parameters ("become", "become_user", "become_pass") and su params "su", "su_user", "su_pass" in task: %s' % self.name)
-
- if (ds.get('sudo') or ds.get('sudo_user') or ds.get('sudo_pass')) and (ds.get('su') or ds.get('su_user') or ds.get('su_pass')):
- raise errors.AnsibleError('incompatible parameters ("su", "su_user", "su_pass") and sudo params "sudo", "sudo_user", "sudo_pass" in task: %s' % self.name)
-
- self.become = utils.boolean(ds.get('become', play.become))
- self.become_method = ds.get('become_method', play.become_method)
- self.become_user = ds.get('become_user', play.become_user)
- self.become_pass = ds.get('become_pass', play.playbook.become_pass)
-
- # set only if passed in current task data
- if 'sudo' in ds or 'sudo_user' in ds:
- self.become_method='sudo'
-
- if 'sudo' in ds:
- self.become=ds['sudo']
- del ds['sudo']
- else:
- self.become=True
- if 'sudo_user' in ds:
- self.become_user = ds['sudo_user']
- del ds['sudo_user']
- if 'sudo_pass' in ds:
- self.become_pass = ds['sudo_pass']
- del ds['sudo_pass']
-
- elif 'su' in ds or 'su_user' in ds:
- self.become_method='su'
-
- if 'su' in ds:
- self.become=ds['su']
- else:
- self.become=True
- del ds['su']
- if 'su_user' in ds:
- self.become_user = ds['su_user']
- del ds['su_user']
- if 'su_pass' in ds:
- self.become_pass = ds['su_pass']
- del ds['su_pass']
-
- # Both are defined
- if ('action' in ds) and ('local_action' in ds):
- raise errors.AnsibleError("the 'action' and 'local_action' attributes can not be used together")
- # Both are NOT defined
- elif (not 'action' in ds) and (not 'local_action' in ds):
- raise errors.AnsibleError("'action' or 'local_action' attribute missing in task \"%s\"" % ds.get('name', '<Unnamed>'))
- # Only one of them is defined
- elif 'local_action' in ds:
- self.action = ds.get('local_action', '')
- self.delegate_to = '127.0.0.1'
- else:
- self.action = ds.get('action', '')
- self.delegate_to = ds.get('delegate_to', None)
- self.transport = ds.get('connection', ds.get('transport', play.transport))
-
- if isinstance(self.action, dict):
- if 'module' not in self.action:
- raise errors.AnsibleError("'module' attribute missing from action in task \"%s\"" % ds.get('name', '%s' % self.action))
- if self.args:
- raise errors.AnsibleError("'args' cannot be combined with dict 'action' in task \"%s\"" % ds.get('name', '%s' % self.action))
- self.args = self.action
- self.action = self.args.pop('module')
-
- # delegate_to can use variables
- if not (self.delegate_to is None):
- # delegate_to: localhost should use local transport
- if self.delegate_to in ['127.0.0.1', 'localhost']:
- self.transport = 'local'
-
- # notified by is used by Playbook code to flag which hosts
- # need to run a notifier
- self.notified_by = []
-
- # if no name is specified, use the action line as the name
- if self.name is None:
- self.name = self.action
-
- # load various attributes
- self.when = ds.get('when', None)
- self.changed_when = ds.get('changed_when', None)
- self.failed_when = ds.get('failed_when', None)
-
- # combine the default and module vars here for use in templating
- all_vars = self.default_vars.copy()
- all_vars = utils.combine_vars(all_vars, self.play_vars)
- all_vars = utils.combine_vars(all_vars, self.play_file_vars)
- all_vars = utils.combine_vars(all_vars, self.role_vars)
- all_vars = utils.combine_vars(all_vars, self.module_vars)
- all_vars = utils.combine_vars(all_vars, self.role_params)
-
- self.async_seconds = ds.get('async', 0) # not async by default
- self.async_seconds = template.template_from_string(play.basedir, self.async_seconds, all_vars)
- self.async_seconds = int(self.async_seconds)
- self.async_poll_interval = ds.get('poll', 10) # default poll = 10 seconds
- self.async_poll_interval = template.template_from_string(play.basedir, self.async_poll_interval, all_vars)
- self.async_poll_interval = int(self.async_poll_interval)
- self.notify = ds.get('notify', [])
- self.first_available_file = ds.get('first_available_file', None)
-
- self.items_lookup_plugin = ds.get('items_lookup_plugin', None)
- self.items_lookup_terms = ds.get('items_lookup_terms', None)
-
-
- self.ignore_errors = ds.get('ignore_errors', False)
- self.any_errors_fatal = ds.get('any_errors_fatal', play.any_errors_fatal)
-
- self.always_run = ds.get('always_run', False)
-
- # action should be a string
- if not isinstance(self.action, basestring):
- raise errors.AnsibleError("action is of type '%s' and not a string in task. name: %s" % (type(self.action).__name__, self.name))
-
- # notify can be a string or a list, store as a list
- if isinstance(self.notify, basestring):
- self.notify = [ self.notify ]
-
- # split the action line into a module name + arguments
- try:
- tokens = split_args(self.action)
- except Exception, e:
- if "unbalanced" in str(e):
- raise errors.AnsibleError("There was an error while parsing the task %s.\n" % repr(self.action) + \
- "Make sure quotes are matched or escaped properly")
- else:
- raise
- if len(tokens) < 1:
- raise errors.AnsibleError("invalid/missing action in task. name: %s" % self.name)
- self.module_name = tokens[0]
- self.module_args = ''
- if len(tokens) > 1:
- self.module_args = " ".join(tokens[1:])
-
- import_tags = self.module_vars.get('tags',[])
- if type(import_tags) in [int,float]:
- import_tags = str(import_tags)
- elif type(import_tags) in [str,unicode]:
- # allow the user to list comma delimited tags
- import_tags = import_tags.split(",")
-
- # handle mutually incompatible options
- incompatibles = [ x for x in [ self.first_available_file, self.items_lookup_plugin ] if x is not None ]
- if len(incompatibles) > 1:
- raise errors.AnsibleError("with_(plugin) and first_available_file are mutually incompatible in a single task")
-
- # make first_available_file accessible to Runner code
- if self.first_available_file:
- self.module_vars['first_available_file'] = self.first_available_file
- # make sure that the 'item' variable is set when using
- # first_available_file (issue #8220)
- if 'item' not in self.module_vars:
- self.module_vars['item'] = ''
-
- if self.items_lookup_plugin is not None:
- self.module_vars['items_lookup_plugin'] = self.items_lookup_plugin
- self.module_vars['items_lookup_terms'] = self.items_lookup_terms
-
- # allow runner to see delegate_to option
- self.module_vars['delegate_to'] = self.delegate_to
-
- # make some task attributes accessible to Runner code
- self.module_vars['ignore_errors'] = self.ignore_errors
- self.module_vars['register'] = self.register
- self.module_vars['changed_when'] = self.changed_when
- self.module_vars['failed_when'] = self.failed_when
- self.module_vars['always_run'] = self.always_run
-
- # tags allow certain parts of a playbook to be run without running the whole playbook
- apply_tags = ds.get('tags', None)
- if apply_tags is not None:
- if type(apply_tags) in [ str, unicode ]:
- self.tags.append(apply_tags)
- elif type(apply_tags) in [ int, float ]:
- self.tags.append(str(apply_tags))
- elif type(apply_tags) == list:
- self.tags.extend(apply_tags)
- self.tags.extend(import_tags)
-
- if len(self.tags) > 1:
- self.tags.remove('untagged')
-
- if additional_conditions:
- new_conditions = additional_conditions[:]
- if self.when:
- new_conditions.append(self.when)
- self.when = new_conditions
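The key-scanning loop near the top of the constructor is what lets a playbook say 'copy: src=a dest=b' instead of 'action: copy src=a dest=b'. A reduced Python 3 sketch of that normalization (KNOWN_MODULES is a hypothetical stand-in for utils.plugins.module_finder; the dict-valued args case is elided):

KNOWN_MODULES = {'copy', 'shell', 'command'}   # stand-in for the module finder

def normalize_action(ds):
    # rewrite "modulename: args" into the canonical "action: modulename args"
    for key in list(ds):
        if key in KNOWN_MODULES:
            if 'action' in ds:
                raise ValueError("multiple actions specified in task")
            value = ds.pop(key) or ''
            ds['action'] = ('%s %s' % (key, value)).strip()
    return ds

print(normalize_action({'name': 'fetch it', 'copy': 'src=a dest=b'}))
# {'name': 'fetch it', 'action': 'copy src=a dest=b'}
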
diff --git a/v1/ansible/runner/__init__.py b/v1/ansible/runner/__init__.py
deleted file mode 100644
index 4ff273778c..0000000000
--- a/v1/ansible/runner/__init__.py
+++ /dev/null
@@ -1,1517 +0,0 @@
-# (c) 2012-2014, Michael DeHaan <michael.dehaan@gmail.com>
-#
-# This file is part of Ansible
-#
-# Ansible is free software: you can redistribute it and/or modify
-# it under the terms of the GNU General Public License as published by
-# the Free Software Foundation, either version 3 of the License, or
-# (at your option) any later version.
-#
-# Ansible is distributed in the hope that it will be useful,
-# but WITHOUT ANY WARRANTY; without even the implied warranty of
-# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
-# GNU General Public License for more details.
-#
-# You should have received a copy of the GNU General Public License
-# along with Ansible. If not, see <http://www.gnu.org/licenses/>.
-
-import multiprocessing
-import signal
-import os
-import pwd
-import Queue
-import random
-import traceback
-import tempfile
-import time
-import collections
-import socket
-import base64
-import sys
-import pipes
-import jinja2
-import subprocess
-import getpass
-
-import ansible.constants as C
-import ansible.inventory
-from ansible import utils
-from ansible.utils import template
-from ansible.utils import check_conditional
-from ansible.utils import string_functions
-from ansible import errors
-from ansible import module_common
-import poller
-import connection
-from return_data import ReturnData
-from ansible.callbacks import DefaultRunnerCallbacks, vv
-from ansible.module_common import ModuleReplacer
-from ansible.module_utils.splitter import split_args, unquote
-from ansible.cache import FactCache
-from ansible.utils import update_hash
-
-module_replacer = ModuleReplacer(strip_comments=False)
-
-try:
- from hashlib import sha1
-except ImportError:
- from sha import sha as sha1
-
-HAS_ATFORK=True
-try:
- from Crypto.Random import atfork
-except ImportError:
- HAS_ATFORK=False
-
-multiprocessing_runner = None
-
-OUTPUT_LOCKFILE = tempfile.TemporaryFile()
-PROCESS_LOCKFILE = tempfile.TemporaryFile()
-
-################################################
-
-def _executor_hook(job_queue, result_queue, new_stdin):
-
- # attempt workaround of https://github.com/newsapps/beeswithmachineguns/issues/17
- # this function is also not present in CentOS 6
- if HAS_ATFORK:
- atfork()
-
- signal.signal(signal.SIGINT, signal.SIG_IGN)
- while not job_queue.empty():
- try:
- host = job_queue.get(block=False)
- return_data = multiprocessing_runner._executor(host, new_stdin)
- result_queue.put(return_data)
- except Queue.Empty:
- pass
- except:
- traceback.print_exc()
-
-class HostVars(dict):
- ''' A special view of vars_cache that adds values from the inventory when needed. '''
-
- def __init__(self, vars_cache, inventory, vault_password=None):
- self.vars_cache = vars_cache
- self.inventory = inventory
- self.lookup = {}
- self.update(vars_cache)
- self.vault_password = vault_password
-
- def __getitem__(self, host):
- if host not in self.lookup:
- result = self.inventory.get_variables(host, vault_password=self.vault_password).copy()
- result.update(self.vars_cache.get(host, {}))
- self.lookup[host] = template.template('.', result, self.vars_cache)
- return self.lookup[host]
-
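HostVars resolves a host's variables only on first subscript and memoizes the result in self.lookup. The same lazy-dict pattern in miniature (Python 3; the templating pass is reduced to a plain copy, and the toy inventory is just a dict):

class LazyHostVars(dict):
    # resolve a host's variables on first access, then cache them
    def __init__(self, inventory):
        super().__init__()
        self.inventory = inventory
        self.lookup = {}

    def __getitem__(self, host):
        if host not in self.lookup:
            # the real class also templates the result here
            self.lookup[host] = dict(self.inventory[host])
        return self.lookup[host]

hv = LazyHostVars({'web1': {'ansible_ssh_host': '10.0.0.5'}})
print(hv['web1']['ansible_ssh_host'])   # resolved and cached on this first access
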
-
-class Runner(object):
- ''' core API interface to ansible '''
-
- # see bin/ansible for how this is used...
-
- def __init__(self,
- host_list=C.DEFAULT_HOST_LIST, # ex: /etc/ansible/hosts, legacy usage
- module_path=None, # ex: /usr/share/ansible
- module_name=C.DEFAULT_MODULE_NAME, # ex: copy
- module_args=C.DEFAULT_MODULE_ARGS, # ex: "src=/tmp/a dest=/tmp/b"
- forks=C.DEFAULT_FORKS, # parallelism level
- timeout=C.DEFAULT_TIMEOUT, # SSH timeout
- pattern=C.DEFAULT_PATTERN, # which hosts? ex: 'all', 'acme.example.org'
- remote_user=C.DEFAULT_REMOTE_USER, # ex: 'username'
- remote_pass=C.DEFAULT_REMOTE_PASS, # ex: 'password123' or None if using key
- remote_port=None, # if SSH on different ports
- private_key_file=C.DEFAULT_PRIVATE_KEY_FILE, # if not using keys/passwords
- background=0, # async poll every X seconds, else 0 for non-async
- basedir=None, # directory of playbook, if applicable
- setup_cache=None, # used to share fact data w/ other tasks
- vars_cache=None, # used to store variables about hosts
- transport=C.DEFAULT_TRANSPORT, # 'ssh', 'paramiko', 'local'
- conditional='True', # run only if this fact expression evals to true
- callbacks=None, # used for output
- module_vars=None, # a playbooks internals thing
- play_vars=None, #
- play_file_vars=None, #
- role_vars=None, #
- role_params=None, #
- default_vars=None, #
- extra_vars=None, # extra vars specified with the playbook(s)
- is_playbook=False, # running from playbook or not?
- inventory=None, # reference to Inventory object
- subset=None, # subset pattern
- check=False, # don't make any changes, just try to probe for potential changes
- diff=False, # whether to show diffs for template files that change
- environment=None, # environment variables (as dict) to use inside the command
- complex_args=None, # structured data in addition to module_args, must be a dict
- error_on_undefined_vars=C.DEFAULT_UNDEFINED_VAR_BEHAVIOR, # ex. False
- accelerate=False, # use accelerated connection
- accelerate_ipv6=False, # accelerated connection w/ IPv6
- accelerate_port=None, # port to use with accelerated connection
- vault_pass=None,
- run_hosts=None, # an optional list of pre-calculated hosts to run on
- no_log=False, # option to enable/disable logging for a given task
- run_once=False, # option to enable/disable host bypass loop for a given task
- become=False, # whether to run privilege escalation or not
- become_method=C.DEFAULT_BECOME_METHOD,
- become_user=C.DEFAULT_BECOME_USER, # ex: 'root'
- become_pass=C.DEFAULT_BECOME_PASS, # ex: 'password123' or None
- become_exe=C.DEFAULT_BECOME_EXE, # ex: /usr/local/bin/sudo
- ):
-
- # used to lock multiprocess inputs and outputs at various levels
- self.output_lockfile = OUTPUT_LOCKFILE
- self.process_lockfile = PROCESS_LOCKFILE
-
- if not complex_args:
- complex_args = {}
-
- # storage & defaults
- self.check = check
- self.diff = diff
- self.setup_cache = utils.default(setup_cache, lambda: ansible.cache.FactCache())
- self.vars_cache = utils.default(vars_cache, lambda: collections.defaultdict(dict))
- self.basedir = utils.default(basedir, lambda: os.getcwd())
- self.callbacks = utils.default(callbacks, lambda: DefaultRunnerCallbacks())
- self.generated_jid = str(random.randint(0, 999999999999))
- self.transport = transport
- self.inventory = utils.default(inventory, lambda: ansible.inventory.Inventory(host_list))
-
- self.module_vars = utils.default(module_vars, lambda: {})
- self.play_vars = utils.default(play_vars, lambda: {})
- self.play_file_vars = utils.default(play_file_vars, lambda: {})
- self.role_vars = utils.default(role_vars, lambda: {})
- self.role_params = utils.default(role_params, lambda: {})
- self.default_vars = utils.default(default_vars, lambda: {})
- self.extra_vars = utils.default(extra_vars, lambda: {})
-
- self.always_run = None
- self.connector = connection.Connector(self)
- self.conditional = conditional
- self.delegate_to = None
- self.module_name = module_name
- self.forks = int(forks)
- self.pattern = pattern
- self.module_args = module_args
- self.timeout = timeout
- self.remote_user = remote_user
- self.remote_pass = remote_pass
- self.remote_port = remote_port
- self.private_key_file = private_key_file
- self.background = background
- self.become = become
- self.become_method = become_method
- self.become_user_var = become_user
- self.become_user = None
- self.become_pass = become_pass
- self.become_exe = become_exe
- self.is_playbook = is_playbook
- self.environment = environment
- self.complex_args = complex_args
- self.error_on_undefined_vars = error_on_undefined_vars
- self.accelerate = accelerate
- self.accelerate_port = accelerate_port
- self.accelerate_ipv6 = accelerate_ipv6
- self.callbacks.runner = self
- self.omit_token = '__omit_place_holder__%s' % sha1(os.urandom(64)).hexdigest()
- self.vault_pass = vault_pass
- self.no_log = no_log
- self.run_once = run_once
-
- if self.transport == 'smart':
- # If the transport is 'smart', check to see if certain conditions
- # would prevent us from using ssh, and fallback to paramiko.
- # 'smart' is the default since 1.2.1/1.3
- self.transport = "ssh"
- if sys.platform.startswith('darwin') and self.remote_pass:
- # due to a current bug in sshpass on OSX, which can trigger
- # a kernel panic even for non-privileged users, we revert to
- # paramiko on that OS when an SSH password is specified
- self.transport = "paramiko"
- else:
- # see if SSH can support ControlPersist if not use paramiko
- cmd = subprocess.Popen(['ssh','-o','ControlPersist'], stdout=subprocess.PIPE, stderr=subprocess.PIPE)
- (out, err) = cmd.communicate()
- if "Bad configuration option" in err:
- self.transport = "paramiko"
-
- # save the original transport, in case it gets
- # changed later via options like accelerate
- self.original_transport = self.transport
-
- # misc housekeeping
- if subset and self.inventory._subset is None:
- # don't override subset when passed from playbook
- self.inventory.subset(subset)
-
- # If we get a pre-built list of hosts to run on, from say a playbook, use them.
- # Also where we will store the hosts to run on once discovered
- self.run_hosts = run_hosts
-
- if self.transport == 'local':
- self.remote_user = pwd.getpwuid(os.geteuid())[0]
-
- if module_path is not None:
- for i in module_path.split(os.pathsep):
- utils.plugins.module_finder.add_directory(i)
-
- utils.plugins.push_basedir(self.basedir)
-
- # ensure we are using unique tmp paths
- random.seed()
- # *****************************************************
-
- def _complex_args_hack(self, complex_args, module_args):
- """
- ansible-playbook allows both key=value string arguments and complex (structured) arguments,
- but not all modules use our python common module system, so they cannot
- access the structured form. An example might be a Bash module. This short-term hack lets users
- still pass "args" as a hash of simple scalars to those modules. We could technically
- just feed JSON to the module, but that makes it hard on Bash consumers. As implemented,
- values in 'args' have LOWER priority than those on the key=value line, making
- args yet another way to provide pluggable defaults.
- """
- if complex_args is None:
- return module_args
- if not isinstance(complex_args, dict):
- raise errors.AnsibleError("complex arguments are not a dictionary: %s" % complex_args)
- for (k,v) in complex_args.iteritems():
- if isinstance(v, basestring):
- module_args = "%s=%s %s" % (k, pipes.quote(v), module_args)
- return module_args
-
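Because _complex_args_hack prepends each scalar from complex_args, any key repeated on the key=value line still wins when the arguments are parsed, which is exactly the LOWER-priority behavior the docstring promises. A standalone Python 3 sketch (shlex.quote in place of pipes.quote):

import shlex

def complex_args_hack(complex_args, module_args):
    # prepend scalar args so inline key=value pairs keep higher priority
    if not complex_args:
        return module_args
    for k, v in complex_args.items():
        if isinstance(v, str):
            module_args = '%s=%s %s' % (k, shlex.quote(v), module_args)
    return module_args

print(complex_args_hack({'dest': '/tmp/x'}, 'src=/tmp/a dest=/tmp/b'))
# dest=/tmp/x src=/tmp/a dest=/tmp/b  (the later, inline dest wins when parsed)
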
- # *****************************************************
-
- def _transfer_str(self, conn, tmp, name, data):
- ''' transfer string to remote file '''
-
- if type(data) == dict:
- data = utils.jsonify(data)
-
- afd, afile = tempfile.mkstemp()
- afo = os.fdopen(afd, 'w')
- try:
- if not isinstance(data, unicode):
- #ensure the data is valid UTF-8
- data.decode('utf-8')
- else:
- data = data.encode('utf-8')
- afo.write(data)
- except:
- raise errors.AnsibleError("failure encoding into utf-8")
- afo.flush()
- afo.close()
-
- remote = conn.shell.join_path(tmp, name)
- try:
- conn.put_file(afile, remote)
- finally:
- os.unlink(afile)
- return remote
-
- # *****************************************************
-
- def _compute_environment_string(self, conn, inject=None):
- ''' what environment variables to use when running the command? '''
-
- enviro = {}
- if self.environment:
- enviro = template.template(self.basedir, self.environment, inject, convert_bare=True)
- enviro = utils.safe_eval(enviro)
- if type(enviro) != dict:
- raise errors.AnsibleError("environment must be a dictionary, received %s" % enviro)
-
- return conn.shell.env_prefix(**enviro)
-
- # *****************************************************
-
- def _compute_delegate(self, password, remote_inject):
-
- """ Build a dictionary of all attributes for the delegate host """
-
- delegate = {}
-
- # allow delegated host to be templated
- delegate['inject'] = remote_inject.copy()
-
- # set any interpreters
- interpreters = []
- for i in delegate['inject']:
- if i.startswith("ansible_") and i.endswith("_interpreter"):
- interpreters.append(i)
- for i in interpreters:
- del delegate['inject'][i]
- port = C.DEFAULT_REMOTE_PORT
-
- # get the vars for the delegate by its name
- try:
- this_info = delegate['inject']['hostvars'][self.delegate_to]
- except:
- # make sure the inject is empty for non-inventory hosts
- this_info = {}
-
- # get the real ssh_address for the delegate
- # and allow ansible_ssh_host to be templated
- delegate['ssh_host'] = template.template(
- self.basedir,
- this_info.get('ansible_ssh_host', self.delegate_to),
- this_info,
- fail_on_undefined=True
- )
-
- delegate['port'] = this_info.get('ansible_ssh_port', port)
- delegate['user'] = self._compute_delegate_user(self.delegate_to, delegate['inject'])
- delegate['pass'] = this_info.get('ansible_ssh_pass', password)
- delegate['private_key_file'] = this_info.get('ansible_ssh_private_key_file', self.private_key_file)
- delegate['transport'] = this_info.get('ansible_connection', self.transport)
- delegate['become_pass'] = this_info.get('ansible_become_pass', this_info.get('ansible_ssh_pass', self.become_pass))
-
- # Last chance to get private_key_file from global variables.
- # this is useful if delegated host is not defined in the inventory
- if delegate['private_key_file'] is None:
- delegate['private_key_file'] = remote_inject.get('ansible_ssh_private_key_file', None)
-
- if delegate['private_key_file'] is not None:
- delegate['private_key_file'] = os.path.expanduser(delegate['private_key_file'])
-
- for i in this_info:
- if i.startswith("ansible_") and i.endswith("_interpreter"):
- delegate['inject'][i] = this_info[i]
-
- return delegate
-
- def _compute_delegate_user(self, host, inject):
-
- """ Calculate the remote user based on an order of preference """
-
- # inventory > playbook > original_host
-
- actual_user = inject.get('ansible_ssh_user', self.remote_user)
- thisuser = None
-
- try:
- if host in inject['hostvars']:
- if inject['hostvars'][host].get('ansible_ssh_user'):
- # user for delegate host in inventory
- thisuser = inject['hostvars'][host].get('ansible_ssh_user')
- else:
- # look up the variables for the host directly from inventory
- host_vars = self.inventory.get_variables(host, vault_password=self.vault_pass)
- if 'ansible_ssh_user' in host_vars:
- thisuser = host_vars['ansible_ssh_user']
- except errors.AnsibleError, e:
- # the hostname was not found in the inventory, so
- # we just ignore this and try the next method
- pass
-
- if thisuser is None and self.remote_user:
- # user defined by play/runner
- thisuser = self.remote_user
-
- if thisuser is not None:
- actual_user = thisuser
- else:
- # fallback to the inventory user of the play host
- #actual_user = inject.get('ansible_ssh_user', actual_user)
- actual_user = inject.get('ansible_ssh_user', self.remote_user)
-
- return actual_user
-
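The preference chain in _compute_delegate_user, condensed: the delegate host's own inventory user beats the play/runner user, which beats whatever ansible_ssh_user the inject already carried. A simplified Python 3 sketch under those assumptions (the error handling and the hostvars/inventory split above are collapsed into one lookup):

def compute_delegate_user(host, inject, inventory_vars, runner_user):
    # preference: delegate host's inventory user > play/runner user > inject default
    hostvars = inject.get('hostvars', {}).get(host, {})
    user = hostvars.get('ansible_ssh_user') or inventory_vars.get('ansible_ssh_user')
    if user is None:
        user = runner_user or inject.get('ansible_ssh_user')
    return user

print(compute_delegate_user('db1', {'hostvars': {'db1': {}}},
                            {'ansible_ssh_user': 'postgres'}, 'deploy'))
# postgres  (inventory wins over the runner's 'deploy')
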
- def _count_module_args(self, args, allow_dupes=False):
- '''
- Count the number of k=v pairs in the supplied module args. This is
- basically a specialized version of parse_kv() from utils with a few
- minor changes.
- '''
- options = {}
- if args is not None:
- try:
- vargs = split_args(args)
- except Exception, e:
- if "unbalanced jinja2 block or quotes" in str(e):
- raise errors.AnsibleError("error parsing argument string '%s', try quoting the entire line." % args)
- else:
- raise
- for x in vargs:
- quoted = x.startswith('"') and x.endswith('"') or x.startswith("'") and x.endswith("'")
- if "=" in x and not quoted:
- k, v = x.split("=",1)
- is_shell_module = self.module_name in ('command', 'shell')
- is_shell_param = k in ('creates', 'removes', 'chdir', 'executable')
- if k in options and not allow_dupes:
- if not(is_shell_module and not is_shell_param):
- raise errors.AnsibleError("a duplicate parameter was found in the argument string (%s)" % k)
- if is_shell_module and is_shell_param or not is_shell_module:
- options[k] = v
- return len(options)
-
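_count_module_args exists mostly to catch the same key given twice on one line, with a carve-out so free-form command/shell text is not mistaken for parameters. A compact Python 3 sketch (shlex.split standing in for split_args, the quoted-token check elided):

import shlex

def count_module_args(args, module_name='copy'):
    # duplicate k=v detection, with the command/shell free-form carve-out
    shell_params = {'creates', 'removes', 'chdir', 'executable'}
    is_shell = module_name in ('command', 'shell')
    options = {}
    for token in shlex.split(args or ''):
        if '=' not in token:
            continue
        k, v = token.split('=', 1)
        is_real_param = (not is_shell) or k in shell_params
        if k in options and is_real_param:
            raise ValueError("a duplicate parameter was found in the argument string (%s)" % k)
        if is_real_param:
            options[k] = v
    return len(options)

print(count_module_args('echo a=1 a=2', module_name='shell'))   # 0: treated as free text
# count_module_args('src=/a dest=/b dest=/c') would raise on the duplicate dest
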
-
- # *****************************************************
-
- def _execute_module(self, conn, tmp, module_name, args,
- async_jid=None, async_module=None, async_limit=None, inject=None, persist_files=False, complex_args=None, delete_remote_tmp=True):
-
- ''' transfer and run a module along with its arguments on the remote side'''
-
- # hack to support fireball mode
- if module_name == 'fireball':
- args = "%s password=%s" % (args, base64.b64encode(str(utils.key_for_hostname(conn.host))))
- if 'port' not in args:
- args += " port=%s" % C.ZEROMQ_PORT
-
- (
- module_style,
- shebang,
- module_data
- ) = self._configure_module(conn, module_name, args, inject, complex_args)
-
- # a remote tmp path may be necessary and not already created
- if self._late_needs_tmp_path(conn, tmp, module_style):
- tmp = self._make_tmp_path(conn)
-
- remote_module_path = conn.shell.join_path(tmp, module_name)
-
- if (module_style != 'new'
- or async_jid is not None
- or not conn.has_pipelining
- or not C.ANSIBLE_SSH_PIPELINING
- or C.DEFAULT_KEEP_REMOTE_FILES
- or self.become_method == 'su'):
- self._transfer_str(conn, tmp, module_name, module_data)
-
- environment_string = self._compute_environment_string(conn, inject)
-
- if "tmp" in tmp and (self.become and self.become_user != 'root'):
- # deal with possible umask issues once you become another user
- self._remote_chmod(conn, 'a+r', remote_module_path, tmp)
-
- cmd = ""
- in_data = None
- if module_style != 'new':
- if 'CHECKMODE=True' in args:
- # if the module isn't using the AnsibleModuleCommon infrastructure we can't be certain it knows how to
- # do --check mode, so to be safe we will not run it.
- return ReturnData(conn=conn, result=dict(skipped=True, msg="cannot yet run check mode against old-style modules"))
- elif 'NO_LOG' in args:
- return ReturnData(conn=conn, result=dict(skipped=True, msg="cannot use no_log: with old-style modules"))
-
- args = template.template(self.basedir, args, inject)
-
- # decide whether we need to transfer JSON or key=value
- argsfile = None
- if module_style == 'non_native_want_json':
- if complex_args:
- complex_args.update(utils.parse_kv(args))
- argsfile = self._transfer_str(conn, tmp, 'arguments', utils.jsonify(complex_args))
- else:
- argsfile = self._transfer_str(conn, tmp, 'arguments', utils.jsonify(utils.parse_kv(args)))
-
- else:
- argsfile = self._transfer_str(conn, tmp, 'arguments', args)
-
- if self.become and self.become_user != 'root':
- # deal with possible umask issues once become another user
- self._remote_chmod(conn, 'a+r', argsfile, tmp)
-
- if async_jid is None:
- cmd = "%s %s" % (remote_module_path, argsfile)
- else:
- cmd = " ".join([str(x) for x in [remote_module_path, async_jid, async_limit, async_module, argsfile]])
- else:
- if async_jid is None:
- if conn.has_pipelining and C.ANSIBLE_SSH_PIPELINING and not C.DEFAULT_KEEP_REMOTE_FILES and not self.become_method == 'su':
- in_data = module_data
- else:
- cmd = "%s" % (remote_module_path)
- else:
- cmd = " ".join([str(x) for x in [remote_module_path, async_jid, async_limit, async_module]])
-
- if not shebang:
- raise errors.AnsibleError("module is missing interpreter line")
-
- rm_tmp = None
- if "tmp" in tmp and not C.DEFAULT_KEEP_REMOTE_FILES and not persist_files and delete_remote_tmp:
- if not self.become or self.become_user == 'root':
- # not sudoing or sudoing to root, so can cleanup files in the same step
- rm_tmp = tmp
-
- cmd = conn.shell.build_module_command(environment_string, shebang, cmd, rm_tmp)
- cmd = cmd.strip()
-
- sudoable = True
- if module_name == "accelerate":
- # always run the accelerate module as the user
- # specified in the play, not the become_user
- sudoable = False
-
- res = self._low_level_exec_command(conn, cmd, tmp, become=self.become, sudoable=sudoable, in_data=in_data)
-
- if "tmp" in tmp and not C.DEFAULT_KEEP_REMOTE_FILES and not persist_files and delete_remote_tmp:
- if self.become and self.become_user != 'root':
- # not becoming root, so maybe can't delete files as that other user
- # have to clean up temp files as original user in a second step
- cmd2 = conn.shell.remove(tmp, recurse=True)
- self._low_level_exec_command(conn, cmd2, tmp, sudoable=False)
-
- data = utils.parse_json(res['stdout'], from_remote=True, no_exceptions=True)
- if 'parsed' in data and data['parsed'] == False:
- data['msg'] += res['stderr']
- return ReturnData(conn=conn, result=data)
-
- # *****************************************************
-
- def _executor(self, host, new_stdin):
- ''' handler for multiprocessing library '''
-
- try:
- fileno = sys.stdin.fileno()
- except ValueError:
- fileno = None
-
- try:
- self._new_stdin = new_stdin
- if not new_stdin and fileno is not None:
- try:
- self._new_stdin = os.fdopen(os.dup(fileno))
- except OSError, e:
- # couldn't dupe stdin, most likely because it's
- # not a valid file descriptor, so we just rely on
- # using the one that was passed in
- pass
-
- exec_rc = self._executor_internal(host, new_stdin)
- if type(exec_rc) != ReturnData:
- raise Exception("unexpected return type: %s" % type(exec_rc))
- # redundant, right?
- if not exec_rc.comm_ok:
- self.callbacks.on_unreachable(host, exec_rc.result)
- return exec_rc
- except errors.AnsibleError, ae:
- msg = str(ae)
- self.callbacks.on_unreachable(host, msg)
- return ReturnData(host=host, comm_ok=False, result=dict(failed=True, msg=msg))
- except Exception:
- msg = traceback.format_exc()
- self.callbacks.on_unreachable(host, msg)
- return ReturnData(host=host, comm_ok=False, result=dict(failed=True, msg=msg))
-
- # *****************************************************
-
- def get_combined_cache(self):
- # merge the VARS and SETUP caches for this host
- combined_cache = self.setup_cache.copy()
- return utils.merge_hash(combined_cache, self.vars_cache)
-
- def get_inject_vars(self, host):
- host_variables = self.inventory.get_variables(host, vault_password=self.vault_pass)
- combined_cache = self.get_combined_cache()
-
- # use combined_cache and host_variables to template the module_vars
- # we update the inject variables with the data we're about to template
- # since some of the variables we'll be replacing may be contained there too
- module_vars_inject = utils.combine_vars(host_variables, combined_cache.get(host, {}))
- module_vars_inject = utils.combine_vars(self.module_vars, module_vars_inject)
- module_vars = template.template(self.basedir, self.module_vars, module_vars_inject)
-
- # remove bad variables from the module vars, which may be in there due
- # the way role declarations are specified in playbooks
- if 'tags' in module_vars:
- del module_vars['tags']
- if 'when' in module_vars:
- del module_vars['when']
-
- # start building the dictionary of injected variables
- inject = {}
-
- # default vars are the lowest priority
- inject = utils.combine_vars(inject, self.default_vars)
- # next come inventory variables for the host
- inject = utils.combine_vars(inject, host_variables)
- # then the setup_cache which contains facts gathered
- inject = utils.combine_vars(inject, self.setup_cache.get(host, {}))
- # next come variables from vars and vars files
- inject = utils.combine_vars(inject, self.play_vars)
- inject = utils.combine_vars(inject, self.play_file_vars)
- # next come variables from role vars/main.yml files
- inject = utils.combine_vars(inject, self.role_vars)
- # then come the module variables
- inject = utils.combine_vars(inject, module_vars)
- # followed by vars_cache things (set_fact, include_vars, and
- # vars_files which had host-specific templating done)
- inject = utils.combine_vars(inject, self.vars_cache.get(host, {}))
- # role parameters next
- inject = utils.combine_vars(inject, self.role_params)
- # and finally -e vars are the highest priority
- inject = utils.combine_vars(inject, self.extra_vars)
- # and then special vars
- inject.setdefault('ansible_ssh_user', self.remote_user)
- inject['group_names'] = host_variables.get('group_names', [])
- inject['groups'] = self.inventory.groups_list()
- inject['vars'] = self.module_vars
- inject['defaults'] = self.default_vars
- inject['environment'] = self.environment
- inject['playbook_dir'] = os.path.abspath(self.basedir)
- inject['omit'] = self.omit_token
- inject['combined_cache'] = combined_cache
-
- return inject
-
- def _executor_internal(self, host, new_stdin):
- ''' executes any module one or more times '''
-
- # We build the proper injected dictionary for all future
- # templating operations in this run
- inject = self.get_inject_vars(host)
-
- # Then we selectively merge some variable dictionaries down to a
- # single dictionary, used to template the HostVars for this host
- temp_vars = self.inventory.get_variables(host, vault_password=self.vault_pass)
- temp_vars = utils.combine_vars(temp_vars, inject['combined_cache'] )
- temp_vars = utils.combine_vars(temp_vars, {'groups': inject['groups']})
- temp_vars = utils.combine_vars(temp_vars, self.play_vars)
- temp_vars = utils.combine_vars(temp_vars, self.play_file_vars)
- temp_vars = utils.combine_vars(temp_vars, self.extra_vars)
-
- hostvars = HostVars(temp_vars, self.inventory, vault_password=self.vault_pass)
-
- # and we save the HostVars in the injected dictionary so they
- # may be referenced from playbooks/templates
- inject['hostvars'] = hostvars
-
- host_connection = inject.get('ansible_connection', self.transport)
- if host_connection in [ 'paramiko', 'ssh', 'accelerate' ]:
- port = hostvars.get('ansible_ssh_port', self.remote_port)
- if port is None:
- port = C.DEFAULT_REMOTE_PORT
- else:
- # fireball, local, etc
- port = self.remote_port
-
- if self.inventory.basedir() is not None:
- inject['inventory_dir'] = self.inventory.basedir()
-
- if self.inventory.src() is not None:
- inject['inventory_file'] = self.inventory.src()
-
- # may already have been set by playbook code
- inject.setdefault('ansible_version', utils.version_info(gitinfo=False))
-
- # allow with_foo to work in playbooks...
- items = None
- items_plugin = self.module_vars.get('items_lookup_plugin', None)
-
- if items_plugin is not None and items_plugin in utils.plugins.lookup_loader:
-
- basedir = self.basedir
- if '_original_file' in inject:
- basedir = os.path.dirname(inject['_original_file'])
- filesdir = os.path.join(basedir, '..', 'files')
- if os.path.exists(filesdir):
- basedir = filesdir
-
- try:
- items_terms = self.module_vars.get('items_lookup_terms', '')
- items_terms = template.template(basedir, items_terms, inject)
- items = utils.plugins.lookup_loader.get(items_plugin, runner=self, basedir=basedir).run(items_terms, inject=inject)
- except errors.AnsibleUndefinedVariable, e:
- if 'has no attribute' in str(e):
- # the undefined variable was an attribute of a variable that does
- # exist, so try and run this through the conditional check to see
- # if the user wanted to skip something on being undefined
- if utils.check_conditional(self.conditional, self.basedir, inject, fail_on_undefined=True):
- # the conditional check passed, so we have to fail here
- raise
- else:
- # the conditional failed, so we skip this task
- result = utils.jsonify(dict(changed=False, skipped=True))
- self.callbacks.on_skipped(host, None)
- return ReturnData(host=host, result=result)
- except errors.AnsibleError, e:
- raise
- except Exception, e:
- raise errors.AnsibleError("Unexpected error while executing task: %s" % str(e))
-
- # strip out any jinja2 template syntax within
- # the data returned by the lookup plugin
- items = utils._clean_data_struct(items, from_remote=True)
- if items is None:
- items = []
- else:
- if type(items) != list:
- raise errors.AnsibleError("lookup plugins have to return a list: %r" % items)
-
- if len(items) and utils.is_list_of_strings(items) and self.module_name in ( 'apt', 'yum', 'pkgng', 'zypper', 'dnf' ):
- # hack for package modules (apt, yum, pkgng, zypper, dnf) so that with_items collapses into a single module call
- use_these_items = []
- for x in items:
- inject['item'] = x
- if not self.conditional or utils.check_conditional(self.conditional, self.basedir, inject, fail_on_undefined=self.error_on_undefined_vars):
- use_these_items.append(x)
- inject['item'] = ",".join(use_these_items)
- items = None
-
- def _safe_template_complex_args(args, inject):
- # Ensure the complex args here are a dictionary, but
- # first template them if they contain a variable
-
- returned_args = args
- if isinstance(args, basestring):
- # If the complex_args were evaluated to a dictionary and there are
- # more keys in the templated version than the evaled version, some
- # param inserted additional keys (the template() call also runs
- # safe_eval on the var if it looks like it's a datastructure). If the
- # evaled_args are not a dict, it's most likely a whole variable (ie.
- # args: {{var}}), in which case there's no way to detect the proper
- # count of params in the dictionary.
-
- templated_args = template.template(self.basedir, args, inject, convert_bare=True)
- evaled_args = utils.safe_eval(args)
-
- if isinstance(evaled_args, dict) and len(evaled_args) > 0 and len(evaled_args) != len(templated_args):
- raise errors.AnsibleError("a variable tried to insert extra parameters into the args for this task")
-
- # set the returned_args to the templated_args
- returned_args = templated_args
-
- # and a final check to make sure the complex args are a dict
- if returned_args is not None and not isinstance(returned_args, dict):
- raise errors.AnsibleError("args must be a dictionary, received %s" % returned_args)
-
- return returned_args
-
- # logic to decide how to run things depends on whether with_items is used
- if items is None:
- complex_args = _safe_template_complex_args(self.complex_args, inject)
- return self._executor_internal_inner(host, self.module_name, self.module_args, inject, port, complex_args=complex_args)
- elif len(items) > 0:
-
- # executing using with_items, so make multiple calls
- # TODO: refactor
-
- if self.background > 0:
- raise errors.AnsibleError("lookup plugins (with_*) cannot be used with async tasks")
-
- all_comm_ok = True
- all_changed = False
- all_failed = False
- results = []
- for x in items:
- # use a fresh inject for each item
- this_inject = inject.copy()
- this_inject['item'] = x
-
- complex_args = _safe_template_complex_args(self.complex_args, this_inject)
-
- result = self._executor_internal_inner(
- host,
- self.module_name,
- self.module_args,
- this_inject,
- port,
- complex_args=complex_args
- )
-
- if 'stdout' in result.result and 'stdout_lines' not in result.result:
- result.result['stdout_lines'] = result.result['stdout'].splitlines()
-
- results.append(result.result)
- if result.comm_ok == False:
- all_comm_ok = False
- all_failed = True
- break
- for x in results:
- if x.get('changed') == True:
- all_changed = True
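- # the list-wrapping "A and [x] or [y]" construct below is the pre-Python-2.5
- # ternary idiom: it picks x when A is truthy and y otherwise; wrapping the
- # operands in one-element lists keeps it correct even when x itself is falsy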
- if (x.get('failed') == True) or ('failed_when_result' in x and [x['failed_when_result']] or [('rc' in x) and (x['rc'] != 0)])[0]:
- all_failed = True
- break
- msg = 'All items completed'
- if all_failed:
- msg = "One or more items failed."
- rd_result = dict(failed=all_failed, changed=all_changed, results=results, msg=msg)
- if not all_failed:
- del rd_result['failed']
- return ReturnData(host=host, comm_ok=all_comm_ok, result=rd_result)
- else:
- self.callbacks.on_skipped(host, None)
- return ReturnData(host=host, comm_ok=True, result=dict(changed=False, skipped=True))
-
- # *****************************************************
-
- def _executor_internal_inner(self, host, module_name, module_args, inject, port, is_chained=False, complex_args=None):
- ''' decides how to invoke a module '''
-
- # late processing of parameterized become_user (with_items,..)
- if self.become_user_var is not None:
- self.become_user = template.template(self.basedir, self.become_user_var, inject)
-
- # module_name may be dynamic (but cannot contain {{ ansible_ssh_user }})
- module_name = template.template(self.basedir, module_name, inject)
-
- if module_name in utils.plugins.action_loader:
- if self.background != 0:
- raise errors.AnsibleError("async mode is not supported with the %s module" % module_name)
- handler = utils.plugins.action_loader.get(module_name, self)
- elif self.background == 0:
- handler = utils.plugins.action_loader.get('normal', self)
- else:
- handler = utils.plugins.action_loader.get('async', self)
-
- if type(self.conditional) != list:
- self.conditional = [ self.conditional ]
-
- for cond in self.conditional:
-
- if not utils.check_conditional(cond, self.basedir, inject, fail_on_undefined=self.error_on_undefined_vars):
- result = dict(changed=False, skipped=True)
- if self.no_log:
- result = utils.censor_unlogged_data(result)
- self.callbacks.on_skipped(host, result)
- else:
- self.callbacks.on_skipped(host, inject.get('item',None))
- return ReturnData(host=host, result=utils.jsonify(result))
-
- if getattr(handler, 'setup', None) is not None:
- handler.setup(module_name, inject)
- conn = None
- actual_host = inject.get('ansible_ssh_host', host)
- # allow ansible_ssh_host to be templated
- actual_host = template.template(self.basedir, actual_host, inject, fail_on_undefined=True)
- actual_port = port
- actual_user = inject.get('ansible_ssh_user', self.remote_user)
- actual_pass = inject.get('ansible_ssh_pass', self.remote_pass)
- actual_transport = inject.get('ansible_connection', self.transport)
- actual_private_key_file = inject.get('ansible_ssh_private_key_file', self.private_key_file)
- actual_private_key_file = template.template(self.basedir, actual_private_key_file, inject, fail_on_undefined=True)
-
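- # honor the newer ansible_become_* vars first, falling back to the
- # legacy ansible_sudo_* / ansible_su_* equivalents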
- self.become = utils.boolean(inject.get('ansible_become', inject.get('ansible_sudo', inject.get('ansible_su', self.become))))
- self.become_user = inject.get('ansible_become_user', inject.get('ansible_sudo_user', inject.get('ansible_su_user',self.become_user)))
- self.become_pass = inject.get('ansible_become_pass', inject.get('ansible_sudo_pass', inject.get('ansible_su_pass', self.become_pass)))
- self.become_exe = inject.get('ansible_become_exe', inject.get('ansible_sudo_exe', self.become_exe))
- self.become_method = inject.get('ansible_become_method', self.become_method)
-
- # select default root user in case self.become requested
- # but no user specified; happens e.g. in host vars when
- # just ansible_become=True is specified
- if self.become and self.become_user is None:
- self.become_user = 'root'
-
- if actual_private_key_file is not None:
- actual_private_key_file = os.path.expanduser(actual_private_key_file)
-
- if self.accelerate and actual_transport != 'local':
- # fix: pass the inventory name of the host through to the accelerate plugin
- if inject.get('ansible_ssh_host', None):
- self.accelerate_inventory_host = host
- else:
- self.accelerate_inventory_host = None
- # if we're using accelerated mode, force the
- # transport to accelerate
- actual_transport = "accelerate"
- if not self.accelerate_port:
- self.accelerate_port = C.ACCELERATE_PORT
-
- actual_port = inject.get('ansible_ssh_port', port)
-
- # the delegated host may have a different SSH port configured, etc.,
- # and we need to transfer those, and only those, variables
- self.delegate_to = inject.get('delegate_to', None)
- if self.delegate_to:
- self.delegate_to = template.template(self.basedir, self.delegate_to, inject)
-
- if self.delegate_to is not None:
- delegate = self._compute_delegate(actual_pass, inject)
- actual_transport = delegate['transport']
- actual_host = delegate['ssh_host']
- actual_port = delegate['port']
- actual_user = delegate['user']
- actual_pass = delegate['pass']
- actual_private_key_file = delegate['private_key_file']
- self.become_pass = delegate.get('become_pass',delegate.get('sudo_pass'))
- inject = delegate['inject']
- # set resolved delegate_to into inject so modules can call _remote_checksum
- inject['delegate_to'] = self.delegate_to
-
- # user/pass may still contain variables at this stage
- actual_user = template.template(self.basedir, actual_user, inject)
- try:
- actual_pass = template.template(self.basedir, actual_pass, inject)
- self.become_pass = template.template(self.basedir, self.become_pass, inject)
- except:
- # ignore password templating errors, which can be triggered by special characters in passwords (see #10468)
- pass
-
- # make actual_user available as __magic__ ansible_ssh_user variable
- inject['ansible_ssh_user'] = actual_user
-
- try:
- if actual_transport == 'accelerate':
- # for accelerate, we stuff both ports into a single
- # variable so that we don't have to mangle other function
- # calls just to accommodate this one case
- actual_port = [actual_port, self.accelerate_port]
- elif actual_port is not None:
- actual_port = int(template.template(self.basedir, actual_port, inject))
- except ValueError, e:
- result = dict(failed=True, msg="FAILED: Configured port \"%s\" is not a valid port, expected integer" % actual_port)
- return ReturnData(host=host, comm_ok=False, result=result)
-
- try:
- if self.delegate_to or host != actual_host:
- delegate_host = host
- else:
- delegate_host = None
- conn = self.connector.connect(actual_host, actual_port, actual_user, actual_pass, actual_transport, actual_private_key_file, delegate_host)
-
- default_shell = getattr(conn, 'default_shell', '')
- shell_type = inject.get('ansible_shell_type')
- if not shell_type:
- if default_shell:
- shell_type = default_shell
- else:
- shell_type = os.path.basename(C.DEFAULT_EXECUTABLE)
-
- shell_plugin = utils.plugins.shell_loader.get(shell_type)
- if shell_plugin is None:
- shell_plugin = utils.plugins.shell_loader.get('sh')
- conn.shell = shell_plugin
-
- except errors.AnsibleConnectionFailed, e:
- result = dict(failed=True, msg="FAILED: %s" % str(e))
- return ReturnData(host=host, comm_ok=False, result=result)
-
- tmp = ''
- # action plugins may declare, via TRANSFERS_FILES = True, that they need a remote tmp path as a working dir
- if self._early_needs_tmp_path(module_name, handler):
- tmp = self._make_tmp_path(conn)
-
- # allow module args to work as a dictionary
- # though it is usually a string
- if isinstance(module_args, dict):
- module_args = utils.serialize_args(module_args)
-
- # render module_args and complex_args templates
- try:
- # When templating module_args, we need to be careful to ensure
- # that no variables inadvertently (or maliciously) add params
- # to the list of args. We do this by counting the number of k=v
- # pairs before and after templating.
- num_args_pre = self._count_module_args(module_args, allow_dupes=True)
- module_args = template.template(self.basedir, module_args, inject, fail_on_undefined=self.error_on_undefined_vars)
- num_args_post = self._count_module_args(module_args)
- if num_args_pre != num_args_post:
- raise errors.AnsibleError("A variable inserted a new parameter into the module args. " + \
- "Be sure to quote variables if they contain equal signs (for example: \"{{var}}\").")
- # And we also make sure nothing added in special flags for things
- # like the command/shell module (ie. #USE_SHELL)
- if '#USE_SHELL' in module_args:
- raise errors.AnsibleError("A variable tried to add #USE_SHELL to the module arguments.")
- complex_args = template.template(self.basedir, complex_args, inject, fail_on_undefined=self.error_on_undefined_vars)
- except jinja2.exceptions.UndefinedError, e:
- raise errors.AnsibleUndefinedVariable("One or more undefined variables: %s" % str(e))
-
- # filter omitted arguments out from complex_args
- if complex_args:
- complex_args = dict(filter(lambda x: x[1] != self.omit_token, complex_args.iteritems()))
-
- # Filter omitted arguments out from module_args.
- # We do this with split_args instead of parse_kv to ensure
- # that things are not unquoted/requoted incorrectly
- args = split_args(module_args)
- final_args = []
- for arg in args:
- if '=' in arg:
- k,v = arg.split('=', 1)
- if unquote(v) != self.omit_token:
- final_args.append(arg)
- else:
- # not a k=v param, append it
- final_args.append(arg)
- module_args = ' '.join(final_args)
-
- result = handler.run(conn, tmp, module_name, module_args, inject, complex_args)
- # Code for the do-until feature: re-run the task until the 'until' condition passes or retries are exhausted
- until = self.module_vars.get('until', None)
- if until is not None and result.comm_ok:
- inject[self.module_vars.get('register')] = result.result
-
- cond = template.template(self.basedir, until, inject, expand_lists=False)
- if not utils.check_conditional(cond, self.basedir, inject, fail_on_undefined=self.error_on_undefined_vars):
- retries = template.template(self.basedir, self.module_vars.get('retries'), inject, expand_lists=False)
- delay = self.module_vars.get('delay')
- for x in range(1, int(retries) + 1):
- # template the delay, cast to float and sleep
- delay = template.template(self.basedir, delay, inject, expand_lists=False)
- delay = float(delay)
- time.sleep(delay)
- tmp = ''
- if self._early_needs_tmp_path(module_name, handler):
- tmp = self._make_tmp_path(conn)
- result = handler.run(conn, tmp, module_name, module_args, inject, complex_args)
- result.result['attempts'] = x
- vv("Result from run %i is: %s" % (x, result.result))
- inject[self.module_vars.get('register')] = result.result
- cond = template.template(self.basedir, until, inject, expand_lists=False)
- if utils.check_conditional(cond, self.basedir, inject, fail_on_undefined=self.error_on_undefined_vars):
- break
- if result.result['attempts'] == retries and not utils.check_conditional(cond, self.basedir, inject, fail_on_undefined=self.error_on_undefined_vars):
- result.result['failed'] = True
- result.result['msg'] = "Task failed as maximum retries was encountered"
- else:
- result.result['attempts'] = 0
- conn.close()
-
- if not result.comm_ok:
- # connection or parsing errors...
- self.callbacks.on_unreachable(host, result.result)
- else:
- data = result.result
-
- # https://github.com/ansible/ansible/issues/4958
- if hasattr(sys.stdout, "isatty"):
- if "stdout" in data and sys.stdout.isatty():
- if not string_functions.isprintable(data['stdout']):
- data['stdout'] = ''.join(c for c in data['stdout'] if string_functions.isprintable(c))
-
- if 'item' in inject:
- result.result['item'] = inject['item']
-
- result.result['invocation'] = dict(
- module_args=module_args,
- module_name=module_name
- )
-
- changed_when = self.module_vars.get('changed_when')
- failed_when = self.module_vars.get('failed_when')
- if (changed_when is not None or failed_when is not None) and self.background == 0:
- register = self.module_vars.get('register')
- if register is not None:
- if 'stdout' in data:
- data['stdout_lines'] = data['stdout'].splitlines()
- inject[register] = data
- # only run the final checks if the async_status has finished,
- # or if we're not running an async_status check at all
- if (module_name == 'async_status' and "finished" in data) or module_name != 'async_status':
- if changed_when is not None and 'skipped' not in data:
- data['changed'] = utils.check_conditional(changed_when, self.basedir, inject, fail_on_undefined=self.error_on_undefined_vars)
- if failed_when is not None and 'skipped' not in data:
- data['failed_when_result'] = data['failed'] = utils.check_conditional(failed_when, self.basedir, inject, fail_on_undefined=self.error_on_undefined_vars)
-
-
- if is_chained:
- # no callbacks
- return result
- if 'skipped' in data:
- self.callbacks.on_skipped(host, inject.get('item',None))
-
- if self.no_log:
- data = utils.censor_unlogged_data(data)
-
- if not result.is_successful():
- ignore_errors = self.module_vars.get('ignore_errors', False)
- self.callbacks.on_failed(host, data, ignore_errors)
- else:
- if self.diff:
- self.callbacks.on_file_diff(conn.host, result.diff)
- self.callbacks.on_ok(host, data)
-
- return result
-
- def _early_needs_tmp_path(self, module_name, handler):
- ''' detect if a tmp path should be created before the handler is called '''
- if module_name in utils.plugins.action_loader:
- return getattr(handler, 'TRANSFERS_FILES', False)
- # other modules never need tmp path at early stage
- return False
-
- def _late_needs_tmp_path(self, conn, tmp, module_style):
- if "tmp" in tmp:
- # tmp has already been created
- return False
- if not conn.has_pipelining or not C.ANSIBLE_SSH_PIPELINING or C.DEFAULT_KEEP_REMOTE_FILES or self.become_method == 'su':
- # tmp is necessary to store the module source code when pipelining
- # is unavailable or disabled, when remote files are being kept, or
- # when becoming via su (which cannot pipeline)
- return True
- if module_style != "new":
- # even when conn has pipelining, old style modules need tmp to store arguments
- return True
- return False
-
-
- # *****************************************************
-
- def _low_level_exec_command(self, conn, cmd, tmp, sudoable=False,
- executable=None, become=False, in_data=None):
- ''' execute a command string over SSH, return the output '''
- # this can be skipped for powershell modules when there is no Windows analog to a command (like chmod)
- if cmd:
-
- if executable is None:
- executable = C.DEFAULT_EXECUTABLE
-
- become_user = self.become_user
-
- # compare connection user to (su|sudo)_user and disable if the same
- # assume connection type is local if no user attribute
- this_user = getattr(conn, 'user', getpass.getuser())
- if (not become and this_user == become_user):
- sudoable = False
- become = False
-
- rc, stdin, stdout, stderr = conn.exec_command(cmd,
- tmp,
- become_user=become_user,
- sudoable=sudoable,
- executable=executable,
- in_data=in_data)
-
- if type(stdout) not in [ str, unicode ]:
- out = ''.join(stdout.readlines())
- else:
- out = stdout
-
- if type(stderr) not in [ str, unicode ]:
- err = ''.join(stderr.readlines())
- else:
- err = stderr
-
- if rc is not None:
- return dict(rc=rc, stdout=out, stderr=err)
- else:
- return dict(stdout=out, stderr=err)
-
- return dict(rc=None, stdout='', stderr='')
-
-
- # *****************************************************
-
- def _remote_chmod(self, conn, mode, path, tmp, sudoable=False, become=False):
- ''' issue a remote chmod command '''
- cmd = conn.shell.chmod(mode, path)
- return self._low_level_exec_command(conn, cmd, tmp, sudoable=sudoable, become=become)
-
- # *****************************************************
-
- def _remote_expand_user(self, conn, path, tmp):
- ''' takes a remote path and performs tilde expansion on the remote host '''
- if not path.startswith('~'):
- return path
-
- split_path = path.split(os.path.sep, 1)
- expand_path = split_path[0]
- if expand_path == '~':
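- # when privilege escalation is in play, expand '~' relative to the
- # become user's home rather than the login user's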
- if self.become and self.become_user:
- expand_path = '~%s' % self.become_user
-
- cmd = conn.shell.expand_user(expand_path)
- data = self._low_level_exec_command(conn, cmd, tmp, sudoable=False, become=False)
- initial_fragment = utils.last_non_blank_line(data['stdout'])
-
- if not initial_fragment:
- # Something went wrong trying to expand the path remotely. Return
- # the original string
- return path
-
- if len(split_path) > 1:
- return conn.shell.join_path(initial_fragment, *split_path[1:])
- else:
- return initial_fragment
-
- # *****************************************************
-
- def _remote_checksum(self, conn, tmp, path, inject):
- ''' computes the checksum of a remote file; returns '1' if the file does not exist '''
-
- # Lookup the python interp from the host or delegate
-
- # host == inven_host when there is no delegate
- host = inject['inventory_hostname']
- if 'delegate_to' in inject:
- delegate = inject['delegate_to']
- if delegate:
- # host == None when the delegate is not in inventory
- host = None
- # delegate set, check whether the delegate has inventory vars
- delegate = template.template(self.basedir, delegate, inject)
- if delegate in inject['hostvars']:
- # host == delegate if we need to lookup the
- # python_interpreter from the delegate's inventory vars
- host = delegate
-
- if host:
- python_interp = inject['hostvars'][host].get('ansible_python_interpreter', 'python')
- else:
- python_interp = 'python'
-
- cmd = conn.shell.checksum(path, python_interp)
-
- # TODO: remove this horrible hack and find a way to get checksum to work with other privilege escalation methods
- if self.become_method == 'sudo':
- sudoable = True
- else:
- sudoable = False
- data = self._low_level_exec_command(conn, cmd, tmp, sudoable=sudoable)
- data2 = utils.last_non_blank_line(data['stdout'])
- try:
- if data2 == '':
- # this may happen if the connection to the remote server
- # failed, so just return "INVALIDCHECKSUM" to avoid errors
- return "INVALIDCHECKSUM"
- else:
- return data2.split()[0]
- except IndexError:
- sys.stderr.write("warning: Calculating checksum failed unusually, please report this to the list so it can be fixed\n")
- sys.stderr.write("command: %s\n" % cmd)
- sys.stderr.write("----\n")
- sys.stderr.write("output: %s\n" % data)
- sys.stderr.write("----\n")
- # this will signal that it changed and allow things to keep going
- return "INVALIDCHECKSUM"
-
- # *****************************************************
-
- def _make_tmp_path(self, conn):
- ''' make and return a temporary path on a remote box '''
- basefile = 'ansible-tmp-%s-%s' % (time.time(), random.randint(0, 2**48))
- use_system_tmp = False
- if self.become and self.become_user != 'root':
- use_system_tmp = True
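- # likely so that the unprivileged become user can still reach the
- # directory; the login user's default remote tmp may not be readable
- # by that user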
-
- tmp_mode = None
- if self.remote_user != 'root' or (self.become and self.become_user != 'root'):
- tmp_mode = 'a+rx'
-
- cmd = conn.shell.mkdtemp(basefile, use_system_tmp, tmp_mode)
- result = self._low_level_exec_command(conn, cmd, None, sudoable=False)
-
- # error handling on this seems a little aggressive?
- if result['rc'] != 0:
- if result['rc'] == 5:
- output = 'Authentication failure.'
- elif result['rc'] == 255 and self.transport in ['ssh']:
- if utils.VERBOSITY > 3:
- output = 'SSH encountered an unknown error. The output was:\n%s' % (result['stdout']+result['stderr'])
- else:
- output = 'SSH encountered an unknown error during the connection. We recommend you re-run the command using -vvvv, which will enable SSH debugging output to help diagnose the issue'
- elif 'No space left on device' in result['stderr']:
- output = result['stderr']
- else:
- output = 'Authentication or permission failure. In some cases, you may have been able to authenticate and did not have permissions on the remote directory. Consider changing the remote temp path in ansible.cfg to a path rooted in "/tmp". Failed command was: %s, exited with result %d' % (cmd, result['rc'])
- if 'stdout' in result and result['stdout'] != '':
- output = output + ": %s" % result['stdout']
- raise errors.AnsibleError(output)
-
- rc = conn.shell.join_path(utils.last_non_blank_line(result['stdout']).strip(), '')
- # Catch failure conditions, files should never be
- # written to locations in /.
- if rc == '/':
- raise errors.AnsibleError('failed to resolve remote temporary directory from %s: `%s` returned empty string' % (basefile, cmd))
- return rc
-
- # *****************************************************
-
- def _remove_tmp_path(self, conn, tmp_path):
- ''' Remove a tmp_path. '''
- if "-tmp-" in tmp_path:
- cmd = conn.shell.remove(tmp_path, recurse=True)
- self._low_level_exec_command(conn, cmd, None, sudoable=False)
- # If we have gotten here we have a working ssh configuration.
- # If ssh breaks we could leave tmp directories out on the remote system.
-
- # *****************************************************
-
- def _copy_module(self, conn, tmp, module_name, module_args, inject, complex_args=None):
- ''' transfer a module over SFTP, does not run it '''
- (
- module_style,
- module_shebang,
- module_data
- ) = self._configure_module(conn, module_name, module_args, inject, complex_args)
- module_remote_path = conn.shell.join_path(tmp, module_name)
-
- self._transfer_str(conn, tmp, module_name, module_data)
-
- return (module_remote_path, module_style, module_shebang)
-
- # *****************************************************
-
- def _configure_module(self, conn, module_name, module_args, inject, complex_args=None):
- ''' find module and configure it '''
-
- # Search module path(s) for named module.
- module_suffixes = getattr(conn, 'default_suffixes', None)
- module_path = utils.plugins.module_finder.find_plugin(module_name, module_suffixes)
- if module_path is None:
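- # probe for the always-present 'ping' module to distinguish a single
- # missing module from a completely missing core module tree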
- module_path2 = utils.plugins.module_finder.find_plugin('ping', module_suffixes)
- if module_path2 is not None:
- raise errors.AnsibleFileNotFound("module %s not found in configured module paths" % (module_name))
- else:
- raise errors.AnsibleFileNotFound("module %s not found in configured module paths. Additionally, core modules are missing. If this is a checkout, run 'git submodule update --init --recursive' to correct this problem." % (module_name))
-
-
- # insert shared code and arguments into the module
- (module_data, module_style, module_shebang) = module_replacer.modify_module(
- module_path, complex_args, module_args, inject
- )
-
- return (module_style, module_shebang, module_data)
-
-
- # *****************************************************
-
-
- def _parallel_exec(self, hosts):
- ''' handles multiprocessing when more than 1 fork is required '''
-
- manager = multiprocessing.Manager()
- job_queue = manager.Queue()
- for host in hosts:
- job_queue.put(host)
- result_queue = manager.Queue()
-
- try:
- fileno = sys.stdin.fileno()
- except ValueError:
- fileno = None
-
- workers = []
- for i in range(self.forks):
- new_stdin = None
- if fileno is not None:
- try:
- new_stdin = os.fdopen(os.dup(fileno))
- except OSError, e:
- # couldn't dupe stdin, most likely because it's
- # not a valid file descriptor, so we just rely on
- # using the one that was passed in
- pass
- prc = multiprocessing.Process(target=_executor_hook,
- args=(job_queue, result_queue, new_stdin))
- prc.start()
- workers.append(prc)
-
- try:
- for worker in workers:
- worker.join()
- except KeyboardInterrupt:
- for worker in workers:
- worker.terminate()
- worker.join()
-
- results = []
- try:
- while not result_queue.empty():
- results.append(result_queue.get(block=False))
- except socket.error:
- raise errors.AnsibleError("<interrupted>")
- return results
-
- # *****************************************************
-
- def _partition_results(self, results):
- ''' separate results by ones we contacted & ones we didn't '''
-
- if results is None:
- return None
- results2 = dict(contacted={}, dark={})
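- # 'contacted' collects hosts that returned a result;
- # 'dark' collects hosts we could not reach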
-
- for result in results:
- host = result.host
- if host is None:
- raise Exception("internal error, host not set")
- if result.communicated_ok():
- results2["contacted"][host] = result.result
- else:
- results2["dark"][host] = result.result
-
- # hosts which never returned a result (e.g. the run was interrupted) are marked dark
- for host in self.run_hosts:
- if not (host in results2['dark'] or host in results2['contacted']):
- results2["dark"][host] = {}
- return results2
-
- # *****************************************************
-
- def run(self):
- ''' xfer & run module on all matched hosts '''
-
- # find hosts that match the pattern
- if not self.run_hosts:
- self.run_hosts = self.inventory.list_hosts(self.pattern)
- hosts = self.run_hosts
- if len(hosts) == 0:
- self.callbacks.on_no_hosts()
- return dict(contacted={}, dark={})
-
- global multiprocessing_runner
- multiprocessing_runner = self
- results = None
-
- # Check if this is an action plugin. Some of them are designed
- # to be run once per group of hosts. The pause module, for example,
- # runs once per host group rather than pausing once for each host.
- p = utils.plugins.action_loader.get(self.module_name, self)
-
- if self.forks == 0 or self.forks > len(hosts):
- self.forks = len(hosts)
-
- if (p and (getattr(p, 'BYPASS_HOST_LOOP', None)) or self.run_once):
-
- # Expose the current hostgroup to the bypassing plugins
- self.host_set = hosts
- # We aren't iterating over all the hosts in this
- # group. So, just choose the "delegate_to" host if that is defined and is
- # one of the targeted hosts, otherwise pick the first host in our group to
- # construct the conn object with.
- if self.delegate_to is not None and self.delegate_to in hosts:
- host = self.delegate_to
- else:
- host = hosts[0]
-
- result_data = self._executor(host, None).result
- # Create a ResultData item for each host in this group
- # using the returned result. If we didn't do this we would
- # get false reports of dark hosts.
- results = [ ReturnData(host=h, result=result_data, comm_ok=True) \
- for h in hosts ]
- del self.host_set
-
- elif self.forks > 1:
- try:
- results = self._parallel_exec(hosts)
- except IOError, ie:
- print ie.errno
- if ie.errno == 32:
- # broken pipe from Ctrl+C
- raise errors.AnsibleError("interrupted")
- raise
- else:
- results = [ self._executor(h, None) for h in hosts ]
-
- return self._partition_results(results)
-
- # *****************************************************
-
- def run_async(self, time_limit):
- ''' Run this module asynchronously and return a poller. '''
-
- self.background = time_limit
- results = self.run()
- return results, poller.AsyncPoller(results, self)
-
- # *****************************************************
-
- def noop_on_check(self, inject):
- ''' Should the runner run in check mode or not ? '''
-
- # initialize self.always_run on first call
- if self.always_run is None:
- self.always_run = self.module_vars.get('always_run', False)
- self.always_run = check_conditional(
- self.always_run, self.basedir, inject, fail_on_undefined=True)
-
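- # in check mode the runner no-ops unless the task was marked always_run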
- return (self.check and not self.always_run)
diff --git a/v1/ansible/runner/action_plugins/__init__.py b/v1/ansible/runner/action_plugins/__init__.py
deleted file mode 100644
index e69de29bb2..0000000000
--- a/v1/ansible/runner/action_plugins/__init__.py
+++ /dev/null
diff --git a/v1/ansible/runner/action_plugins/add_host.py b/v1/ansible/runner/action_plugins/add_host.py
deleted file mode 100644
index 995b205b62..0000000000
--- a/v1/ansible/runner/action_plugins/add_host.py
+++ /dev/null
@@ -1,111 +0,0 @@
-# Copyright 2012, Seth Vidal <skvidal@fedoraproject.org>
-#
-# This file is part of Ansible
-#
-# Ansible is free software: you can redistribute it and/or modify
-# it under the terms of the GNU General Public License as published by
-# the Free Software Foundation, either version 3 of the License, or
-# (at your option) any later version.
-#
-# Ansible is distributed in the hope that it will be useful,
-# but WITHOUT ANY WARRANTY; without even the implied warranty of
-# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
-# GNU General Public License for more details.
-#
-# You should have received a copy of the GNU General Public License
-# along with Ansible. If not, see <http://www.gnu.org/licenses/>.
-
-import ansible
-
-from ansible.callbacks import vv
-from ansible.errors import AnsibleError as ae
-from ansible.runner.return_data import ReturnData
-from ansible.utils import parse_kv, combine_vars
-from ansible.inventory.host import Host
-from ansible.inventory.group import Group
-
-class ActionModule(object):
- ''' Create inventory hosts and groups in the in-memory inventory '''
-
- ### We need to be able to modify the inventory
- BYPASS_HOST_LOOP = True
- TRANSFERS_FILES = False
-
- def __init__(self, runner):
- self.runner = runner
-
- def run(self, conn, tmp, module_name, module_args, inject, complex_args=None, **kwargs):
-
- if self.runner.noop_on_check(inject):
- return ReturnData(conn=conn, comm_ok=True, result=dict(skipped=True, msg='check mode not supported for this module'))
-
- args = {}
- if complex_args:
- args.update(complex_args)
- args.update(parse_kv(module_args))
- if not 'hostname' in args and not 'name' in args:
- raise ae("'name' (or 'hostname') is a required argument.")
-
- result = {}
-
- # Parse out any hostname:port patterns
- new_name = args.get('name', args.get('hostname', None))
- vv("creating host via 'add_host': hostname=%s" % new_name)
-
- if ":" in new_name:
- new_name, new_port = new_name.split(":")
- args['ansible_ssh_port'] = new_port
-
- # redefine inventory and get group "all"
- inventory = self.runner.inventory
- allgroup = inventory.get_group('all')
-
- # check if host in cache, add if not
- if new_name in inventory._hosts_cache:
- new_host = inventory._hosts_cache[new_name]
- else:
- new_host = Host(new_name)
- # only groups can be added directly to inventory
- inventory._hosts_cache[new_name] = new_host
- allgroup.add_host(new_host)
-
- groupnames = args.get('groupname', args.get('groups', args.get('group', '')))
- # add it to the group if that was specified
- if groupnames:
- for group_name in groupnames.split(","):
- group_name = group_name.strip()
- if not inventory.get_group(group_name):
- new_group = Group(group_name)
- inventory.add_group(new_group)
- new_group.vars = inventory.get_group_variables(group_name, vault_password=inventory._vault_password)
- grp = inventory.get_group(group_name)
- grp.add_host(new_host)
-
- # add this host to the group cache
- if inventory._groups_list is not None:
- if group_name in inventory._groups_list:
- if new_host.name not in inventory._groups_list[group_name]:
- inventory._groups_list[group_name].append(new_host.name)
-
- vv("added host to group via add_host module: %s" % group_name)
- result['new_groups'] = groupnames.split(",")
-
-
- # actually load host vars
- new_host.vars = combine_vars(new_host.vars, inventory.get_host_variables(new_name, update_cached=True, vault_password=inventory._vault_password))
-
- # Add any passed variables to the new_host
- for k in args.keys():
- if not k in [ 'name', 'hostname', 'groupname', 'groups', 'group' ]:
- new_host.set_variable(k, args[k])
-
- result['new_host'] = new_name
-
- # clear pattern caching completely since it's unpredictable what
- # patterns may have referenced the group
- inventory.clear_pattern_cache()
-
- return ReturnData(conn=conn, comm_ok=True, result=result)
-
-
-
diff --git a/v1/ansible/runner/action_plugins/assemble.py b/v1/ansible/runner/action_plugins/assemble.py
deleted file mode 100644
index 33a4838e32..0000000000
--- a/v1/ansible/runner/action_plugins/assemble.py
+++ /dev/null
@@ -1,158 +0,0 @@
-# (c) 2013-2014, Michael DeHaan <michael.dehaan@gmail.com>
-# Stephen Fromm <sfromm@gmail.com>
-# Brian Coca <briancoca+dev@gmail.com>
-#
-# This file is part of Ansible
-#
-# Ansible is free software: you can redistribute it and/or modify
-# it under the terms of the GNU General Public License as published by
-# the Free Software Foundation, either version 3 of the License, or
-# (at your option) any later version.
-#
-# Ansible is distributed in the hope that it will be useful,
-# but WITHOUT ANY WARRANTY; without even the implied warranty of
-# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
-# GNU General Public License for more details.
-#
-# You should have received a copy of the GNU General Public License
-
-import os
-import os.path
-import pipes
-import shutil
-import tempfile
-import base64
-import re
-from ansible import utils
-from ansible.runner.return_data import ReturnData
-
-class ActionModule(object):
-
- TRANSFERS_FILES = True
-
- def __init__(self, runner):
- self.runner = runner
-
- def _assemble_from_fragments(self, src_path, delimiter=None, compiled_regexp=None):
- ''' assemble a file from a directory of fragments '''
- tmpfd, temp_path = tempfile.mkstemp()
- tmp = os.fdopen(tmpfd,'w')
- delimit_me = False
- add_newline = False
-
- for f in sorted(os.listdir(src_path)):
- if compiled_regexp and not compiled_regexp.search(f):
- continue
- fragment = "%s/%s" % (src_path, f)
- if not os.path.isfile(fragment):
- continue
- fragment_content = file(fragment).read()
-
- # always put a newline between fragments if the previous fragment didn't end with a newline.
- if add_newline:
- tmp.write('\n')
-
- # delimiters should only appear between fragments
- if delimit_me:
- if delimiter:
- # un-escape anything like newlines
- delimiter = delimiter.decode('unicode-escape')
- tmp.write(delimiter)
- # always make sure there's a newline after the
- # delimiter, so lines don't run together
- if delimiter[-1] != '\n':
- tmp.write('\n')
-
- tmp.write(fragment_content)
- delimit_me = True
- if fragment_content.endswith('\n'):
- add_newline = False
- else:
- add_newline = True
-
- tmp.close()
- return temp_path
-
- def run(self, conn, tmp, module_name, module_args, inject, complex_args=None, **kwargs):
-
- # load up options
- options = {}
- if complex_args:
- options.update(complex_args)
-
- options.update(utils.parse_kv(module_args))
-
- src = options.get('src', None)
- dest = options.get('dest', None)
- delimiter = options.get('delimiter', None)
- remote_src = utils.boolean(options.get('remote_src', 'yes'))
- regexp = options.get('regexp', None)
-
-
- if src is None or dest is None:
- result = dict(failed=True, msg="src and dest are required")
- return ReturnData(conn=conn, comm_ok=False, result=result)
-
- if remote_src:
- return self.runner._execute_module(conn, tmp, 'assemble', module_args, inject=inject, complex_args=complex_args)
- elif '_original_file' in inject:
- src = utils.path_dwim_relative(inject['_original_file'], 'files', src, self.runner.basedir)
- else:
- # the source is local, so expand it here
- src = os.path.expanduser(src)
-
- _re = None
- if regexp is not None:
- _re = re.compile(regexp)
-
- # Does all work assembling the file
- path = self._assemble_from_fragments(src, delimiter, _re)
-
- path_checksum = utils.checksum_s(path)
- dest = self.runner._remote_expand_user(conn, dest, tmp)
- remote_checksum = self.runner._remote_checksum(conn, tmp, dest, inject)
-
- if path_checksum != remote_checksum:
- resultant = file(path).read()
- if self.runner.diff:
- dest_result = self.runner._execute_module(conn, tmp, 'slurp', "path=%s" % dest, inject=inject, persist_files=True)
- if 'content' in dest_result.result:
- dest_contents = dest_result.result['content']
- if dest_result.result['encoding'] == 'base64':
- dest_contents = base64.b64decode(dest_contents)
- else:
- raise Exception("unknown encoding, failed: %s" % dest_result.result)
- xfered = self.runner._transfer_str(conn, tmp, 'src', resultant)
-
- # fix file permissions when the copy is done as a different user
- if self.runner.become and self.runner.become_user != 'root':
- self.runner._remote_chmod(conn, 'a+r', xfered, tmp)
-
- # run the copy module
- new_module_args = dict(
- src=xfered,
- dest=dest,
- original_basename=os.path.basename(src),
- )
- module_args_tmp = utils.merge_module_args(module_args, new_module_args)
-
- if self.runner.noop_on_check(inject):
- return ReturnData(conn=conn, comm_ok=True, result=dict(changed=True), diff=dict(before_header=dest, after_header=src, after=resultant))
- else:
- res = self.runner._execute_module(conn, tmp, 'copy', module_args_tmp, inject=inject)
- res.diff = dict(after=resultant)
- return res
- else:
- new_module_args = dict(
- src=xfered,
- dest=dest,
- original_basename=os.path.basename(src),
- )
-
- # make sure check mode is passed on correctly
- if self.runner.noop_on_check(inject):
- new_module_args['CHECKMODE'] = True
-
- module_args_tmp = utils.merge_module_args(module_args, new_module_args)
-
- return self.runner._execute_module(conn, tmp, 'file', module_args_tmp, inject=inject)
diff --git a/v1/ansible/runner/action_plugins/assert.py b/v1/ansible/runner/action_plugins/assert.py
deleted file mode 100644
index a0e02dedb0..0000000000
--- a/v1/ansible/runner/action_plugins/assert.py
+++ /dev/null
@@ -1,64 +0,0 @@
-# Copyright 2012, Dag Wieers <dag@wieers.com>
-#
-# This file is part of Ansible
-#
-# Ansible is free software: you can redistribute it and/or modify
-# it under the terms of the GNU General Public License as published by
-# the Free Software Foundation, either version 3 of the License, or
-# (at your option) any later version.
-#
-# Ansible is distributed in the hope that it will be useful,
-# but WITHOUT ANY WARRANTY; without even the implied warranty of
-# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
-# GNU General Public License for more details.
-#
-# You should have received a copy of the GNU General Public License
-# along with Ansible. If not, see <http://www.gnu.org/licenses/>.
-
-import ansible
-
-from ansible import utils, errors
-from ansible.runner.return_data import ReturnData
-
-class ActionModule(object):
- ''' Assert given expressions are true, failing with a custom message '''
-
- TRANSFERS_FILES = False
-
- def __init__(self, runner):
- self.runner = runner
-
- def run(self, conn, tmp, module_name, module_args, inject, complex_args=None, **kwargs):
-
- # note: the assert module does not need to pay attention to check mode;
- # it always runs.
-
- args = {}
- if complex_args:
- args.update(complex_args)
- args.update(utils.parse_kv(module_args))
-
- msg = None
- if 'msg' in args:
- msg = args['msg']
-
- if not 'that' in args:
- raise errors.AnsibleError('conditional required in "that" string')
-
- if not isinstance(args['that'], list):
- args['that'] = [ args['that'] ]
-
- for that in args['that']:
- test_result = utils.check_conditional(that, self.runner.basedir, inject, fail_on_undefined=True)
- if not test_result:
- result = dict(
- failed = True,
- evaluated_to = test_result,
- assertion = that,
- )
- if msg:
- result['msg'] = msg
- return ReturnData(conn=conn, result=result)
-
- return ReturnData(conn=conn, result=dict(msg='all assertions passed'))
-
diff --git a/v1/ansible/runner/action_plugins/async.py b/v1/ansible/runner/action_plugins/async.py
deleted file mode 100644
index dc53d6fa6c..0000000000
--- a/v1/ansible/runner/action_plugins/async.py
+++ /dev/null
@@ -1,48 +0,0 @@
-# (c) 2012-2014, Michael DeHaan <michael.dehaan@gmail.com>
-#
-# This file is part of Ansible
-#
-# Ansible is free software: you can redistribute it and/or modify
-# it under the terms of the GNU General Public License as published by
-# the Free Software Foundation, either version 3 of the License, or
-# (at your option) any later version.
-#
-# Ansible is distributed in the hope that it will be useful,
-# but WITHOUT ANY WARRANTY; without even the implied warranty of
-# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
-# GNU General Public License for more details.
-#
-# You should have received a copy of the GNU General Public License
-# along with Ansible. If not, see <http://www.gnu.org/licenses/>.
-
-from ansible.runner.return_data import ReturnData
-
-class ActionModule(object):
-
- def __init__(self, runner):
- self.runner = runner
-
- def run(self, conn, tmp, module_name, module_args, inject, complex_args=None, **kwargs):
- ''' transfer the given module name, plus the async module, then run it '''
-
- if self.runner.noop_on_check(inject):
- return ReturnData(conn=conn, comm_ok=True, result=dict(skipped=True, msg='check mode not supported for this module'))
-
- # shell and command module are the same
- if module_name == 'shell':
- module_name = 'command'
- module_args += " #USE_SHELL"
-
- if "tmp" not in tmp:
- tmp = self.runner._make_tmp_path(conn)
-
- (module_path, is_new_style, shebang) = self.runner._copy_module(conn, tmp, module_name, module_args, inject, complex_args=complex_args)
- self.runner._remote_chmod(conn, 'a+rx', module_path, tmp)
-
- return self.runner._execute_module(conn, tmp, 'async_wrapper', module_args,
- async_module=module_path,
- async_jid=self.runner.generated_jid,
- async_limit=self.runner.background,
- inject=inject
- )
-
diff --git a/v1/ansible/runner/action_plugins/copy.py b/v1/ansible/runner/action_plugins/copy.py
deleted file mode 100644
index a6a5cb5a27..0000000000
--- a/v1/ansible/runner/action_plugins/copy.py
+++ /dev/null
@@ -1,381 +0,0 @@
-# (c) 2012-2014, Michael DeHaan <michael.dehaan@gmail.com>
-#
-# This file is part of Ansible
-#
-# Ansible is free software: you can redistribute it and/or modify
-# it under the terms of the GNU General Public License as published by
-# the Free Software Foundation, either version 3 of the License, or
-# (at your option) any later version.
-#
-# Ansible is distributed in the hope that it will be useful,
-# but WITHOUT ANY WARRANTY; without even the implied warranty of
-# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
-# GNU General Public License for more details.
-#
-# You should have received a copy of the GNU General Public License
-# along with Ansible. If not, see <http://www.gnu.org/licenses/>.
-
-import os
-
-from ansible import utils
-import ansible.constants as C
-import ansible.utils.template as template
-from ansible import errors
-from ansible.runner.return_data import ReturnData
-import base64
-import json
-import stat
-import tempfile
-import pipes
-
-## fixes https://github.com/ansible/ansible/issues/3518
-# http://mypy.pythonblogs.com/12_mypy/archive/1253_workaround_for_python_bug_ascii_codec_cant_encode_character_uxa0_in_position_111_ordinal_not_in_range128.html
-import sys
-reload(sys)
-sys.setdefaultencoding("utf8")
-
-
-class ActionModule(object):
-
- def __init__(self, runner):
- self.runner = runner
-
- def run(self, conn, tmp_path, module_name, module_args, inject, complex_args=None, **kwargs):
- ''' handler for file transfer operations '''
-
- # load up options
- options = {}
- if complex_args:
- options.update(complex_args)
- options.update(utils.parse_kv(module_args))
- source = options.get('src', None)
- content = options.get('content', None)
- dest = options.get('dest', None)
- raw = utils.boolean(options.get('raw', 'no'))
- force = utils.boolean(options.get('force', 'yes'))
-
- # content with newlines will have been escaped so that it loads safely in yaml;
- # unescape it here so the newlines are evaluated properly when writing
- # the file to disk
- if content:
- if isinstance(content, unicode):
- try:
- content = content.decode('unicode-escape')
- except UnicodeDecodeError:
- pass
-
- if (source is None and content is None and not 'first_available_file' in inject) or dest is None:
- result=dict(failed=True, msg="src (or content) and dest are required")
- return ReturnData(conn=conn, result=result)
- elif (source is not None or 'first_available_file' in inject) and content is not None:
- result=dict(failed=True, msg="src and content are mutually exclusive")
- return ReturnData(conn=conn, result=result)
-
- # Check if the source ends with a "/"
- source_trailing_slash = False
- if source:
- source_trailing_slash = source.endswith("/")
-
- # Define content_tempfile in case we set it after finding content populated.
- content_tempfile = None
-
- # If content is defined make a temp file and write the content into it.
- if content is not None:
- try:
- # If content comes to us as a dict it should be decoded json.
- # We need to encode it back into a string to write it out.
- if type(content) is dict:
- content_tempfile = self._create_content_tempfile(json.dumps(content))
- else:
- content_tempfile = self._create_content_tempfile(content)
- source = content_tempfile
- except Exception, err:
- result = dict(failed=True, msg="could not write content temp file: %s" % err)
- return ReturnData(conn=conn, result=result)
- # if we have first_available_file in our vars
- # look up the files and use the first one we find as src
- elif 'first_available_file' in inject:
- found = False
- for fn in inject.get('first_available_file'):
- fn_orig = fn
- fnt = template.template(self.runner.basedir, fn, inject)
- fnd = utils.path_dwim(self.runner.basedir, fnt)
- if not os.path.exists(fnd) and '_original_file' in inject:
- fnd = utils.path_dwim_relative(inject['_original_file'], 'files', fnt, self.runner.basedir, check=False)
- if os.path.exists(fnd):
- source = fnd
- found = True
- break
- if not found:
- results = dict(failed=True, msg="could not find src in first_available_file list")
- return ReturnData(conn=conn, result=results)
- else:
- source = template.template(self.runner.basedir, source, inject)
- if '_original_file' in inject:
- source = utils.path_dwim_relative(inject['_original_file'], 'files', source, self.runner.basedir)
- else:
- source = utils.path_dwim(self.runner.basedir, source)
-
- # A list of source file tuples (full_path, relative_path) which will try to copy to the destination
- source_files = []
-
- # If source is a directory populate our list else source is a file and translate it to a tuple.
- if os.path.isdir(source):
- # Compute how many leading characters to strip to get the relative path.
- if source_trailing_slash:
- sz = len(source) + 1
- else:
- sz = len(source.rsplit('/', 1)[0]) + 1
-
- # Walk the directory and append the file tuples to source_files.
- for base_path, sub_folders, files in os.walk(source):
- for file in files:
- full_path = os.path.join(base_path, file)
- rel_path = full_path[sz:]
- source_files.append((full_path, rel_path))
-
- # If it's recursive copy, destination is always a dir,
- # explicitly mark it so (note - copy module relies on this).
- if not conn.shell.path_has_trailing_slash(dest):
- dest = conn.shell.join_path(dest, '')
- else:
- source_files.append((source, os.path.basename(source)))
-
- changed = False
- diffs = []
- module_result = {"changed": False}
-
- # A register for if we executed a module.
- # Used to cut down on command calls when not recursive.
- module_executed = False
-
- # Tell _execute_module to delete the file if there is one file.
- delete_remote_tmp = (len(source_files) == 1)
-
- # If this is a recursive action, create a tmp_path now so it can be shared; the one _execute_module would create comes too late.
- if not delete_remote_tmp:
- if "-tmp-" not in tmp_path:
- tmp_path = self.runner._make_tmp_path(conn)
-
- # expand any user home dir specifier
- dest = self.runner._remote_expand_user(conn, dest, tmp_path)
-
- for source_full, source_rel in source_files:
- # Generate a hash of the local file.
- local_checksum = utils.checksum(source_full)
-
- # If local_checksum is not defined we can't find the file so we should fail out.
- if local_checksum is None:
- result = dict(failed=True, msg="could not find src=%s" % source_full)
- return ReturnData(conn=conn, result=result)
-
- # This is a kind of optimization: if the user told us the destination is
- # a directory, do the path manipulation right away; otherwise we still
- # check whether dest is a directory via the remote call below.
- if conn.shell.path_has_trailing_slash(dest):
- dest_file = conn.shell.join_path(dest, source_rel)
- else:
- dest_file = conn.shell.join_path(dest)
-
- # Attempt to get the remote checksum
- remote_checksum = self.runner._remote_checksum(conn, tmp_path, dest_file, inject)
-
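- # _remote_checksum returns sentinel strings rather than raising:
- # '1' means the file does not exist, '3' the path is a directory,
- # '4' python is missing on the remote side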
- if remote_checksum == '3':
- # The remote_checksum was executed on a directory.
- if content is not None:
- # If source was defined as content remove the temporary file and fail out.
- self._remove_tempfile_if_content_defined(content, content_tempfile)
- result = dict(failed=True, msg="can not use content with a dir as dest")
- return ReturnData(conn=conn, result=result)
- else:
- # Append the relative source location to the destination and retry remote_checksum
- dest_file = conn.shell.join_path(dest, source_rel)
- remote_checksum = self.runner._remote_checksum(conn, tmp_path, dest_file, inject)
-
- if remote_checksum == '4':
- result = dict(msg="python isn't present on the system. Unable to compute checksum", failed=True)
- return ReturnData(conn=conn, result=result)
-
- if remote_checksum != '1' and not force:
- # remote_file exists so continue to next iteration.
- continue
-
- if local_checksum != remote_checksum:
- # The checksums don't match and we will change or error out.
- changed = True
-
- # Create a tmp_path if missing only if this is not recursive.
- # If this is recursive we already have a tmp_path.
- if delete_remote_tmp:
- if "-tmp-" not in tmp_path:
- tmp_path = self.runner._make_tmp_path(conn)
-
- if self.runner.diff and not raw:
- diff = self._get_diff_data(conn, tmp_path, inject, dest_file, source_full)
- else:
- diff = {}
-
- if self.runner.noop_on_check(inject):
- self._remove_tempfile_if_content_defined(content, content_tempfile)
- diffs.append(diff)
- changed = True
- module_result = dict(changed=True)
- continue
-
- # Define a remote directory that we will copy the file to.
- tmp_src = tmp_path + 'source'
-
- if not raw:
- conn.put_file(source_full, tmp_src)
- else:
- conn.put_file(source_full, dest_file)
-
- # We have copied the file remotely and no longer require our content_tempfile
- self._remove_tempfile_if_content_defined(content, content_tempfile)
-
- # fix file permissions when the copy is done as a different user
- if self.runner.become and self.runner.become_user != 'root' and not raw:
- self.runner._remote_chmod(conn, 'a+r', tmp_src, tmp_path)
-
- if raw:
- # Continue to next iteration if raw is defined.
- continue
-
- # Run the copy module
-
- # src and dest here come after the originals and override them;
- # we pass dest only to make sure it includes a trailing slash in the case of a recursive copy
- new_module_args = dict(
- src=tmp_src,
- dest=dest,
- original_basename=source_rel
- )
- if self.runner.noop_on_check(inject):
- new_module_args['CHECKMODE'] = True
- if self.runner.no_log:
- new_module_args['NO_LOG'] = True
-
- module_args_tmp = utils.merge_module_args(module_args, new_module_args)
-
- module_return = self.runner._execute_module(conn, tmp_path, 'copy', module_args_tmp, inject=inject, complex_args=complex_args, delete_remote_tmp=delete_remote_tmp)
- module_executed = True
-
- else:
- # no need to transfer the file: the hash already matches, but we still need to call
- # the file module in case we want to change attributes
- self._remove_tempfile_if_content_defined(content, content_tempfile)
-
- if raw:
- # Continue to next iteration if raw is defined.
- # self.runner._remove_tmp_path(conn, tmp_path)
- continue
-
- tmp_src = tmp_path + source_rel
-
- # Build temporary module_args.
- new_module_args = dict(
- src=tmp_src,
- dest=dest,
- original_basename=source_rel
- )
- if self.runner.noop_on_check(inject):
- new_module_args['CHECKMODE'] = True
- if self.runner.no_log:
- new_module_args['NO_LOG'] = True
-
- module_args_tmp = utils.merge_module_args(module_args, new_module_args)
-
- # Execute the file module.
- module_return = self.runner._execute_module(conn, tmp_path, 'file', module_args_tmp, inject=inject, complex_args=complex_args, delete_remote_tmp=delete_remote_tmp)
- module_executed = True
-
- module_result = module_return.result
- if not module_result.get('checksum'):
- module_result['checksum'] = local_checksum
- if module_result.get('failed') == True:
- return module_return
- if module_result.get('changed') == True:
- changed = True
-
- # Delete tmp_path if we were recursive or if we did not execute a module.
- if (not C.DEFAULT_KEEP_REMOTE_FILES and not delete_remote_tmp) \
- or (not C.DEFAULT_KEEP_REMOTE_FILES and delete_remote_tmp and not module_executed):
- self.runner._remove_tmp_path(conn, tmp_path)
-
- # the file module returns the file path as 'path', but
- # the copy module uses 'dest', so add it if it's not there
- if 'path' in module_result and 'dest' not in module_result:
- module_result['dest'] = module_result['path']
-
- # TODO: Support detailed status/diff for multiple files
- if len(source_files) == 1:
- result = module_result
- else:
- result = dict(dest=dest, src=source, changed=changed)
- if len(diffs) == 1:
- return ReturnData(conn=conn, result=result, diff=diffs[0])
- else:
- return ReturnData(conn=conn, result=result)
-
- def _create_content_tempfile(self, content):
- ''' Create a tempfile containing defined content '''
- fd, content_tempfile = tempfile.mkstemp()
- f = os.fdopen(fd, 'w')
- try:
- f.write(content)
- except Exception, err:
- os.remove(content_tempfile)
- raise Exception(err)
- finally:
- f.close()
- return content_tempfile
-
- def _get_diff_data(self, conn, tmp, inject, destination, source):
- peek_result = self.runner._execute_module(conn, tmp, 'file', "path=%s diff_peek=1" % destination, inject=inject, persist_files=True)
-
- if not peek_result.is_successful():
- return {}
-
- diff = {}
- if peek_result.result['state'] == 'absent':
- diff['before'] = ''
- elif peek_result.result['appears_binary']:
- diff['dst_binary'] = 1
- elif peek_result.result['size'] > utils.MAX_FILE_SIZE_FOR_DIFF:
- diff['dst_larger'] = utils.MAX_FILE_SIZE_FOR_DIFF
- else:
- dest_result = self.runner._execute_module(conn, tmp, 'slurp', "path=%s" % destination, inject=inject, persist_files=True)
- if 'content' in dest_result.result:
- dest_contents = dest_result.result['content']
- if dest_result.result['encoding'] == 'base64':
- dest_contents = base64.b64decode(dest_contents)
- else:
- raise Exception("unknown encoding, failed: %s" % dest_result.result)
- diff['before_header'] = destination
- diff['before'] = dest_contents
-
- src = open(source)
- src_contents = src.read(8192)
- st = os.stat(source)
- if "\x00" in src_contents:
- diff['src_binary'] = 1
- elif st[stat.ST_SIZE] > utils.MAX_FILE_SIZE_FOR_DIFF:
- diff['src_larger'] = utils.MAX_FILE_SIZE_FOR_DIFF
- else:
- src.seek(0)
- diff['after_header'] = source
- diff['after'] = src.read()
-
- return diff
-
- def _remove_tempfile_if_content_defined(self, content, content_tempfile):
- if content is not None:
- os.remove(content_tempfile)
-
-
- def _result_key_merge(self, options, results):
- # add keys to file module results to mimic copy
- if 'path' in results.result and 'dest' not in results.result:
- results.result['dest'] = results.result['path']
- del results.result['path']
- return results
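The heart of the v1 copy plugin deleted above is its compare-then-transfer loop: checksum the local file, checksum the remote file, and only transfer when the two differ. A minimal standalone sketch of that idempotence check, assuming SHA-1 (what the v1 utils.checksum helper computed) and using a second local path to stand in for the remote call:

    import hashlib
    import os

    def checksum(path):
        # SHA-1 of the file contents; None when the file is missing
        if not os.path.exists(path):
            return None
        h = hashlib.sha1()
        with open(path, 'rb') as f:
            for block in iter(lambda: f.read(65536), b''):
                h.update(block)
        return h.hexdigest()

    def needs_transfer(src, dest, force=True):
        local = checksum(src)
        if local is None:
            raise IOError("could not find src=%s" % src)
        remote = checksum(dest)      # the plugin does this via a remote module call
        if remote is not None and not force:
            return False             # dest exists and force=no: skip it
        return local != remote       # transfer only when the hashes differ

Everything else in the plugin (tmp paths, check mode, the hand-off to the copy or file module) hangs off the boolean this loop computes per file.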
diff --git a/v1/ansible/runner/action_plugins/debug.py b/v1/ansible/runner/action_plugins/debug.py
deleted file mode 100644
index eaf1364c3f..0000000000
--- a/v1/ansible/runner/action_plugins/debug.py
+++ /dev/null
@@ -1,60 +0,0 @@
-# Copyright 2012, Dag Wieers <dag@wieers.com>
-#
-# This file is part of Ansible
-#
-# Ansible is free software: you can redistribute it and/or modify
-# it under the terms of the GNU General Public License as published by
-# the Free Software Foundation, either version 3 of the License, or
-# (at your option) any later version.
-#
-# Ansible is distributed in the hope that it will be useful,
-# but WITHOUT ANY WARRANTY; without even the implied warranty of
-# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
-# GNU General Public License for more details.
-#
-# You should have received a copy of the GNU General Public License
-# along with Ansible. If not, see <http://www.gnu.org/licenses/>.
-
-import ansible
-
-from ansible import utils
-from ansible.utils import template
-from ansible.runner.return_data import ReturnData
-
-class ActionModule(object):
- ''' Print statements during execution '''
-
- TRANSFERS_FILES = False
-
- def __init__(self, runner):
- self.runner = runner
- self.basedir = runner.basedir
-
- def run(self, conn, tmp, module_name, module_args, inject, complex_args=None, **kwargs):
- args = {}
- if complex_args:
- args.update(complex_args)
-
- # attempt to prevent confusing messages when the variable didn't interpolate
- module_args = module_args.replace("{{ ","{{").replace(" }}","}}")
-
- kv = utils.parse_kv(module_args)
- args.update(kv)
-
- if not 'msg' in args and not 'var' in args:
- args['msg'] = 'Hello world!'
-
- result = {}
- if 'msg' in args:
- if 'fail' in args and utils.boolean(args['fail']):
- result = dict(failed=True, msg=args['msg'])
- else:
- result = dict(msg=args['msg'])
- elif 'var' in args and not utils.LOOKUP_REGEX.search(args['var']):
- results = template.template(self.basedir, args['var'], inject, convert_bare=True)
- result['var'] = { args['var']: results }
-
- # force flag to make debug output module always verbose
- result['verbose_always'] = True
-
- return ReturnData(conn=conn, result=result)
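Stripped of runner plumbing, the deleted debug plugin's argument handling is small: default to a hello-world msg, fail on request, or report a templated var. An illustrative re-implementation, with templating stubbed out as an identity function (the real plugin called ansible.utils.template):

    def debug_action(args, template=lambda expr: expr):
        # default message when neither 'msg' nor 'var' was given
        if 'msg' not in args and 'var' not in args:
            args['msg'] = 'Hello world!'

        result = {}
        if 'msg' in args:
            if str(args.get('fail', '')).lower() in ('true', 'yes', '1'):
                result = dict(failed=True, msg=args['msg'])
            else:
                result = dict(msg=args['msg'])
        elif 'var' in args:
            result['var'] = {args['var']: template(args['var'])}

        result['verbose_always'] = True   # debug output is always shown
        return result

    print(debug_action({}))   # {'msg': 'Hello world!', 'verbose_always': True}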
diff --git a/v1/ansible/runner/action_plugins/fail.py b/v1/ansible/runner/action_plugins/fail.py
deleted file mode 100644
index 2bbaf40313..0000000000
--- a/v1/ansible/runner/action_plugins/fail.py
+++ /dev/null
@@ -1,44 +0,0 @@
-# Copyright 2012, Dag Wieers <dag@wieers.com>
-#
-# This file is part of Ansible
-#
-# Ansible is free software: you can redistribute it and/or modify
-# it under the terms of the GNU General Public License as published by
-# the Free Software Foundation, either version 3 of the License, or
-# (at your option) any later version.
-#
-# Ansible is distributed in the hope that it will be useful,
-# but WITHOUT ANY WARRANTY; without even the implied warranty of
-# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
-# GNU General Public License for more details.
-#
-# You should have received a copy of the GNU General Public License
-# along with Ansible. If not, see <http://www.gnu.org/licenses/>.
-
-import ansible
-
-from ansible import utils
-from ansible.runner.return_data import ReturnData
-
-class ActionModule(object):
- ''' Fail with custom message '''
-
- TRANSFERS_FILES = False
-
- def __init__(self, runner):
- self.runner = runner
-
- def run(self, conn, tmp, module_name, module_args, inject, complex_args=None, **kwargs):
-
- # note: the fail module does not need to pay attention to check mode
- # it always runs.
-
- args = {}
- if complex_args:
- args.update(complex_args)
- args.update(utils.parse_kv(module_args))
- if not 'msg' in args:
- args['msg'] = 'Failed as requested from task'
-
- result = dict(failed=True, msg=args['msg'])
- return ReturnData(conn=conn, result=result)
diff --git a/v1/ansible/runner/action_plugins/fetch.py b/v1/ansible/runner/action_plugins/fetch.py
deleted file mode 100644
index 27d2f6b3c6..0000000000
--- a/v1/ansible/runner/action_plugins/fetch.py
+++ /dev/null
@@ -1,173 +0,0 @@
-# (c) 2012-2014, Michael DeHaan <michael.dehaan@gmail.com>
-#
-# This file is part of Ansible
-#
-# Ansible is free software: you can redistribute it and/or modify
-# it under the terms of the GNU General Public License as published by
-# the Free Software Foundation, either version 3 of the License, or
-# (at your option) any later version.
-#
-# Ansible is distributed in the hope that it will be useful,
-# but WITHOUT ANY WARRANTY; without even the implied warranty of
-# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
-# GNU General Public License for more details.
-#
-# You should have received a copy of the GNU General Public License
-# along with Ansible. If not, see <http://www.gnu.org/licenses/>.
-
-import os
-import pwd
-import random
-import traceback
-import tempfile
-import base64
-
-import ansible.constants as C
-from ansible import utils
-from ansible import errors
-from ansible import module_common
-from ansible.runner.return_data import ReturnData
-
-class ActionModule(object):
-
- def __init__(self, runner):
- self.runner = runner
-
- def run(self, conn, tmp, module_name, module_args, inject, complex_args=None, **kwargs):
- ''' handler for fetch operations '''
-
- if self.runner.noop_on_check(inject):
- return ReturnData(conn=conn, comm_ok=True, result=dict(skipped=True, msg='check mode not (yet) supported for this module'))
-
- # load up options
- options = {}
- if complex_args:
- options.update(complex_args)
- options.update(utils.parse_kv(module_args))
- source = options.get('src', None)
- dest = options.get('dest', None)
- flat = options.get('flat', False)
- flat = utils.boolean(flat)
- fail_on_missing = options.get('fail_on_missing', False)
- fail_on_missing = utils.boolean(fail_on_missing)
- validate_checksum = options.get('validate_checksum', None)
- if validate_checksum is not None:
- validate_checksum = utils.boolean(validate_checksum)
- # Alias for validate_checksum (old way of specifying it)
- validate_md5 = options.get('validate_md5', None)
- if validate_md5 is not None:
- validate_md5 = utils.boolean(validate_md5)
- if validate_md5 is None and validate_checksum is None:
- # Default
- validate_checksum = True
- elif validate_checksum is None:
- validate_checksum = validate_md5
- elif validate_md5 is not None and validate_checksum is not None:
- results = dict(failed=True, msg="validate_checksum and validate_md5 cannot both be specified")
- return ReturnData(conn, result=results)
-
- if source is None or dest is None:
- results = dict(failed=True, msg="src and dest are required")
- return ReturnData(conn=conn, result=results)
-
- source = conn.shell.join_path(source)
- source = self.runner._remote_expand_user(conn, source, tmp)
-
- # calculate checksum for the remote file
- remote_checksum = self.runner._remote_checksum(conn, tmp, source, inject)
-
- # use slurp when running with become or when permissions are lacking
- remote_data = None
- if remote_checksum in ('1', '2') or self.runner.become:
- slurpres = self.runner._execute_module(conn, tmp, 'slurp', 'src=%s' % source, inject=inject)
- if slurpres.is_successful():
- if slurpres.result['encoding'] == 'base64':
- remote_data = base64.b64decode(slurpres.result['content'])
- if remote_data is not None:
- remote_checksum = utils.checksum_s(remote_data)
- # the source path may have been expanded on the
- # target system, so we compare it here and use the
- # expanded version if it's different
- remote_source = slurpres.result.get('source')
- if remote_source and remote_source != source:
- source = remote_source
-
- # calculate the destination name
- if os.path.sep not in conn.shell.join_path('a', ''):
- source_local = source.replace('\\', '/')
- else:
- source_local = source
-
- dest = os.path.expanduser(dest)
- if flat:
- if dest.endswith("/"):
- # if the path ends with "/", we'll use the source filename as the
- # destination filename
- base = os.path.basename(source_local)
- dest = os.path.join(dest, base)
- if not dest.startswith("/"):
- # if dest does not start with "/", we'll assume a relative path
- dest = utils.path_dwim(self.runner.basedir, dest)
- else:
- # files are saved in dest dir, with a subdir for each host, then the filename
- dest = "%s/%s/%s" % (utils.path_dwim(self.runner.basedir, dest), inject['inventory_hostname'], source_local)
-
- dest = dest.replace("//","/")
-
- if remote_checksum in ('0', '1', '2', '3', '4'):
- # these don't fail outright, because you may want to fetch a log file that
- # may not exist yet, and keep going to fetch the other log files
- if remote_checksum == '0':
- result = dict(msg="unable to calculate the checksum of the remote file", file=source, changed=False)
- elif remote_checksum == '1':
- if fail_on_missing:
- result = dict(failed=True, msg="the remote file does not exist", file=source)
- else:
- result = dict(msg="the remote file does not exist, not transferring, ignored", file=source, changed=False)
- elif remote_checksum == '2':
- result = dict(msg="no read permission on remote file, not transferring, ignored", file=source, changed=False)
- elif remote_checksum == '3':
- result = dict(msg="remote file is a directory, fetch cannot work on directories", file=source, changed=False)
- elif remote_checksum == '4':
- result = dict(msg="python isn't present on the system. Unable to compute checksum", file=source, changed=False)
- return ReturnData(conn=conn, result=result)
-
- # calculate checksum for the local file
- local_checksum = utils.checksum(dest)
-
- if remote_checksum != local_checksum:
- # create the containing directories, if needed
- if not os.path.isdir(os.path.dirname(dest)):
- os.makedirs(os.path.dirname(dest))
-
- # fetch the file and check for changes
- if remote_data is None:
- conn.fetch_file(source, dest)
- else:
- f = open(dest, 'w')
- f.write(remote_data)
- f.close()
- new_checksum = utils.secure_hash(dest)
- # For backwards compatibility. We'll return None on FIPS enabled
- # systems
- try:
- new_md5 = utils.md5(dest)
- except ValueError:
- new_md5 = None
-
- if validate_checksum and new_checksum != remote_checksum:
- result = dict(failed=True, md5sum=new_md5, msg="checksum mismatch", file=source, dest=dest, remote_md5sum=None, checksum=new_checksum, remote_checksum=remote_checksum)
- return ReturnData(conn=conn, result=result)
- result = dict(changed=True, md5sum=new_md5, dest=dest, remote_md5sum=None, checksum=new_checksum, remote_checksum=remote_checksum)
- return ReturnData(conn=conn, result=result)
- else:
- # For backwards compatibility. We'll return None on FIPS enabled
- # systems
- try:
- local_md5 = utils.md5(dest)
- except ValueError:
- local_md5 = None
-
- result = dict(changed=False, md5sum=local_md5, file=source, dest=dest, checksum=local_checksum)
- return ReturnData(conn=conn, result=result)
-
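The subtlest part of the deleted fetch plugin is how it names the local destination: flat=yes keeps just the filename, while the default saves under dest/<hostname>/<full remote path>. That naming rule in isolation (hypothetical inputs):

    import os

    def fetch_dest(source, dest, hostname, flat=False):
        dest = os.path.expanduser(dest)
        if flat:
            if dest.endswith('/'):
                # trailing slash: reuse the source filename locally
                dest = os.path.join(dest, os.path.basename(source))
        else:
            # default layout: dest dir, one subdir per host, then the
            # full remote path of the fetched file
            dest = "%s/%s/%s" % (dest, hostname, source)
        return dest.replace('//', '/')

    print(fetch_dest('/var/log/syslog', 'backups/', 'web01'))
    # -> backups/web01/var/log/syslog
    print(fetch_dest('/var/log/syslog', 'backups/', 'web01', flat=True))
    # -> backups/syslog

The checksum comparison that follows is what makes fetch idempotent: the file is only written when the remote and local hashes disagree.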
diff --git a/v1/ansible/runner/action_plugins/group_by.py b/v1/ansible/runner/action_plugins/group_by.py
deleted file mode 100644
index 25c2073fa0..0000000000
--- a/v1/ansible/runner/action_plugins/group_by.py
+++ /dev/null
@@ -1,108 +0,0 @@
-# Copyright 2012, Jeroen Hoekx <jeroen@hoekx.be>
-#
-# This file is part of Ansible
-#
-# Ansible is free software: you can redistribute it and/or modify
-# it under the terms of the GNU General Public License as published by
-# the Free Software Foundation, either version 3 of the License, or
-# (at your option) any later version.
-#
-# Ansible is distributed in the hope that it will be useful,
-# but WITHOUT ANY WARRANTY; without even the implied warranty of
-# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
-# GNU General Public License for more details.
-#
-# You should have received a copy of the GNU General Public License
-# along with Ansible. If not, see <http://www.gnu.org/licenses/>.
-
-import ansible
-
-from ansible.callbacks import vv
-from ansible.errors import AnsibleError as ae
-from ansible.runner.return_data import ReturnData
-from ansible.utils import parse_kv, check_conditional
-import ansible.utils.template as template
-
-class ActionModule(object):
- ''' Create inventory groups based on variables '''
-
- ### We need to be able to modify the inventory
- BYPASS_HOST_LOOP = True
- TRANSFERS_FILES = False
-
- def __init__(self, runner):
- self.runner = runner
-
- def run(self, conn, tmp, module_name, module_args, inject, complex_args=None, **kwargs):
-
- # the group_by module does not need to pay attention to check mode.
- # it always runs.
-
- # module_args and complex_args have already been templated for the first host.
- # Use them here only to check that a key argument is provided.
- args = {}
- if complex_args:
- args.update(complex_args)
- args.update(parse_kv(module_args))
- if not 'key' in args:
- raise ae("'key' is a required argument.")
-
- vv("created 'group_by' ActionModule: key=%s"%(args['key']))
-
- inventory = self.runner.inventory
-
- result = {'changed': False}
-
- ### find all groups
- groups = {}
-
- for host in self.runner.host_set:
- data = {}
- data.update(inject)
- data.update(inject['hostvars'][host])
- conds = self.runner.conditional
- if type(conds) != list:
- conds = [ conds ]
- next_host = False
- for cond in conds:
- if not check_conditional(cond, self.runner.basedir, data, fail_on_undefined=self.runner.error_on_undefined_vars):
- next_host = True
- break
- if next_host:
- continue
-
- # Template original module_args and complex_args from runner for each host.
- host_module_args = template.template(self.runner.basedir, self.runner.module_args, data)
- host_complex_args = template.template(self.runner.basedir, self.runner.complex_args, data)
- host_args = {}
- if host_complex_args:
- host_args.update(host_complex_args)
- host_args.update(parse_kv(host_module_args))
-
- group_name = host_args['key']
- group_name = group_name.replace(' ','-')
- if group_name not in groups:
- groups[group_name] = []
- groups[group_name].append(host)
-
- result['groups'] = groups
-
- ### add to inventory
- for group, hosts in groups.items():
- inv_group = inventory.get_group(group)
- if not inv_group:
- inv_group = ansible.inventory.Group(name=group)
- inventory.add_group(inv_group)
- inventory.get_group('all').add_child_group(inv_group)
- inv_group.vars = inventory.get_group_variables(group, update_cached=False, vault_password=inventory._vault_password)
- for host in hosts:
- if host in self.runner.inventory._vars_per_host:
- del self.runner.inventory._vars_per_host[host]
- inv_host = inventory.get_host(host)
- if not inv_host:
- inv_host = ansible.inventory.Host(name=host)
- if inv_group not in inv_host.get_groups():
- result['changed'] = True
- inv_group.add_host(inv_host)
-
- return ReturnData(conn=conn, comm_ok=True, result=result)
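What the deleted group_by plugin ultimately computes is a bucketing of hosts by the per-host rendering of the key expression, with spaces replaced by dashes so the result is a valid group name. The core transform, separated from the inventory mutation (hypothetical host data):

    def group_hosts(hostvars, key):
        # hostvars: {hostname: {var: value, ...}}; key: the grouping variable
        groups = {}
        for host, data in hostvars.items():
            group_name = str(data[key]).replace(' ', '-')
            groups.setdefault(group_name, []).append(host)
        return groups

    hosts = {
        'web01': {'distro': 'Red Hat'},
        'web02': {'distro': 'Red Hat'},
        'db01':  {'distro': 'Debian'},
    }
    print(group_hosts(hosts, 'distro'))
    # -> {'Red-Hat': ['web01', 'web02'], 'Debian': ['db01']}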
diff --git a/v1/ansible/runner/action_plugins/include_vars.py b/v1/ansible/runner/action_plugins/include_vars.py
deleted file mode 100644
index d6ce52cf00..0000000000
--- a/v1/ansible/runner/action_plugins/include_vars.py
+++ /dev/null
@@ -1,56 +0,0 @@
-# (c) 2013-2014, Benno Joy <benno@ansible.com>
-#
-# This file is part of Ansible
-#
-# Ansible is free software: you can redistribute it and/or modify
-# it under the terms of the GNU General Public License as published by
-# the Free Software Foundation, either version 3 of the License, or
-# (at your option) any later version.
-#
-# Ansible is distributed in the hope that it will be useful,
-# but WITHOUT ANY WARRANTY; without even the implied warranty of
-# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
-# GNU General Public License for more details.
-#
-# You should have received a copy of the GNU General Public License
-# along with Ansible. If not, see <http://www.gnu.org/licenses/>.
-
-import os
-from ansible.utils import template
-from ansible import utils
-from ansible import errors
-from ansible.runner.return_data import ReturnData
-
-class ActionModule(object):
-
- TRANSFERS_FILES = False
-
- def __init__(self, runner):
- self.runner = runner
-
- def run(self, conn, tmp, module_name, module_args, inject, complex_args=None, **kwargs):
-
- if not module_args:
- result = dict(failed=True, msg="No source file given")
- return ReturnData(conn=conn, comm_ok=True, result=result)
-
- source = module_args
- source = template.template(self.runner.basedir, source, inject)
-
- if '_original_file' in inject:
- source = utils.path_dwim_relative(inject['_original_file'], 'vars', source, self.runner.basedir)
- else:
- source = utils.path_dwim(self.runner.basedir, source)
-
- if os.path.exists(source):
- data = utils.parse_yaml_from_file(source, vault_password=self.runner.vault_pass)
- if data and type(data) != dict:
- raise errors.AnsibleError("%s must be stored as a dictionary/hash" % source)
- elif data is None:
- data = {}
- result = dict(ansible_facts=data)
- return ReturnData(conn=conn, comm_ok=True, result=result)
- else:
- result = dict(failed=True, msg="Source file not found.", file=source)
- return ReturnData(conn=conn, comm_ok=True, result=result)
-
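The deleted include_vars plugin boils down to: resolve the file path, parse YAML, insist on a mapping, and return it as ansible_facts. A minimal sketch with PyYAML standing in for the v1 utils.parse_yaml_from_file helper (no vault support):

    import os
    import yaml   # PyYAML

    def include_vars(source):
        if not os.path.exists(source):
            return dict(failed=True, msg="Source file not found.", file=source)
        with open(source) as f:
            data = yaml.safe_load(f)
        if data is None:
            data = {}                 # an empty file yields no facts
        if not isinstance(data, dict):
            raise TypeError("%s must be stored as a dictionary/hash" % source)
        return dict(ansible_facts=data)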
diff --git a/v1/ansible/runner/action_plugins/normal.py b/v1/ansible/runner/action_plugins/normal.py
deleted file mode 100644
index 8500c6641c..0000000000
--- a/v1/ansible/runner/action_plugins/normal.py
+++ /dev/null
@@ -1,59 +0,0 @@
-# (c) 2012, Michael DeHaan <michael.dehaan@gmail.com>
-#
-# This file is part of Ansible
-#
-# Ansible is free software: you can redistribute it and/or modify
-# it under the terms of the GNU General Public License as published by
-# the Free Software Foundation, either version 3 of the License, or
-# (at your option) any later version.
-#
-# Ansible is distributed in the hope that it will be useful,
-# but WITHOUT ANY WARRANTY; without even the implied warranty of
-# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
-# GNU General Public License for more details.
-#
-# You should have received a copy of the GNU General Public License
-# along with Ansible. If not, see <http://www.gnu.org/licenses/>.
-
-import os
-import pwd
-import random
-import traceback
-import tempfile
-
-import ansible.constants as C
-from ansible import utils
-from ansible import errors
-from ansible import module_common
-from ansible.runner.return_data import ReturnData
-from ansible.callbacks import vv, vvv
-
-class ActionModule(object):
-
- def __init__(self, runner):
- self.runner = runner
-
- def run(self, conn, tmp, module_name, module_args, inject, complex_args=None, **kwargs):
- ''' transfer & execute a module that is not 'copy' or 'template' '''
-
- module_args = self.runner._complex_args_hack(complex_args, module_args)
-
- if self.runner.noop_on_check(inject):
- if module_name in [ 'shell', 'command' ]:
- return ReturnData(conn=conn, comm_ok=True, result=dict(skipped=True, msg='check mode not supported for %s' % module_name))
- # else let the module parsing code decide; for now this is only
- # supported for python modules using AnsibleModuleCommon
- module_args += " CHECKMODE=True"
-
- if self.runner.no_log:
- module_args += " NO_LOG=True"
-
- # shell and command are the same module
- if module_name == 'shell':
- module_name = 'command'
- module_args += " #USE_SHELL"
-
- vv("REMOTE_MODULE %s %s" % (module_name, module_args), host=conn.host)
- return self.runner._execute_module(conn, tmp, module_name, module_args, inject=inject, complex_args=complex_args)
-
-
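One detail worth noting in the deleted normal plugin: in v1, shell was not a separate remote module. The plugin rewrote it to command plus a #USE_SHELL marker appended to the k=v argument string, and check mode and no_log were likewise smuggled in as extra k=v tokens. Roughly:

    def normalize_module(module_name, module_args, check_mode=False, no_log=False):
        if check_mode:
            module_args += " CHECKMODE=True"
        if no_log:
            module_args += " NO_LOG=True"
        # shell and command are the same module on the remote side
        if module_name == 'shell':
            module_name = 'command'
            module_args += " #USE_SHELL"
        return module_name, module_args

    print(normalize_module('shell', 'echo hi | wc -c'))
    # -> ('command', 'echo hi | wc -c #USE_SHELL')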
diff --git a/v1/ansible/runner/action_plugins/patch.py b/v1/ansible/runner/action_plugins/patch.py
deleted file mode 100644
index 29d4f7eca5..0000000000
--- a/v1/ansible/runner/action_plugins/patch.py
+++ /dev/null
@@ -1,69 +0,0 @@
-# (c) 2015, Brian Coca <briancoca+dev@gmail.com>
-#
-# This file is part of Ansible
-#
-# Ansible is free software: you can redistribute it and/or modify
-# it under the terms of the GNU General Public License as published by
-# the Free Software Foundation, either version 3 of the License, or
-# (at your option) any later version.
-#
-# Ansible is distributed in the hope that it will be useful,
-# but WITHOUT ANY WARRANTY; without even the implied warranty of
-# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
-# GNU General Public License for more details.
-#
-# You should have received a copy of the GNU General Public License
-
-import os
-from ansible import utils
-from ansible.runner.return_data import ReturnData
-
-class ActionModule(object):
-
- def __init__(self, runner):
- self.runner = runner
-
- def run(self, conn, tmp, module_name, module_args, inject, complex_args=None, **kwargs):
-
- options = {}
- if complex_args:
- options.update(complex_args)
- options.update(utils.parse_kv(module_args))
-
- src = options.get('src', None)
- dest = options.get('dest', None)
- remote_src = utils.boolean(options.get('remote_src', 'no'))
-
- if src is None:
- result = dict(failed=True, msg="src is required")
- return ReturnData(conn=conn, comm_ok=False, result=result)
-
- if remote_src:
- return self.runner._execute_module(conn, tmp, 'patch', module_args, inject=inject, complex_args=complex_args)
-
- # Source is local
- if '_original_file' in inject:
- src = utils.path_dwim_relative(inject['_original_file'], 'files', src, self.runner.basedir)
- else:
- src = utils.path_dwim(self.runner.basedir, src)
-
- if tmp is None or "-tmp-" not in tmp:
- tmp = self.runner._make_tmp_path(conn)
-
- tmp_src = conn.shell.join_path(tmp, os.path.basename(src))
- conn.put_file(src, tmp_src)
-
- if self.runner.become and self.runner.become_user != 'root':
- if not self.runner.noop_on_check(inject):
- self.runner._remote_chmod(conn, 'a+r', tmp_src, tmp)
-
- new_module_args = dict(
- src=tmp_src,
- )
-
- if self.runner.noop_on_check(inject):
- new_module_args['CHECKMODE'] = True
-
- module_args = utils.merge_module_args(module_args, new_module_args)
-
- return self.runner._execute_module(conn, tmp, 'patch', module_args, inject=inject, complex_args=complex_args)
diff --git a/v1/ansible/runner/action_plugins/pause.py b/v1/ansible/runner/action_plugins/pause.py
deleted file mode 100644
index d0c9b53db2..0000000000
--- a/v1/ansible/runner/action_plugins/pause.py
+++ /dev/null
@@ -1,139 +0,0 @@
-# Copyright 2012, Tim Bielawa <tbielawa@redhat.com>
-#
-# This file is part of Ansible
-#
-# Ansible is free software: you can redistribute it and/or modify
-# it under the terms of the GNU General Public License as published by
-# the Free Software Foundation, either version 3 of the License, or
-# (at your option) any later version.
-#
-# Ansible is distributed in the hope that it will be useful,
-# but WITHOUT ANY WARRANTY; without even the implied warranty of
-# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
-# GNU General Public License for more details.
-#
-# You should have received a copy of the GNU General Public License
-# along with Ansible. If not, see <http://www.gnu.org/licenses/>.
-
-from ansible.callbacks import vv
-from ansible.errors import AnsibleError as ae
-from ansible.runner.return_data import ReturnData
-from ansible.utils import getch, parse_kv
-import ansible.utils.template as template
-from termios import tcflush, TCIFLUSH
-import datetime
-import sys
-import time
-
-
-class ActionModule(object):
- ''' pauses execution for a length of time, or until input is received '''
-
- PAUSE_TYPES = ['seconds', 'minutes', 'prompt', '']
- BYPASS_HOST_LOOP = True
-
- def __init__(self, runner):
- self.runner = runner
- # Set defaults
- self.duration_unit = 'minutes'
- self.prompt = None
- self.seconds = None
- self.result = {'changed': False,
- 'rc': 0,
- 'stderr': '',
- 'stdout': '',
- 'start': None,
- 'stop': None,
- 'delta': None,
- }
-
- def run(self, conn, tmp, module_name, module_args, inject, complex_args=None, **kwargs):
- ''' run the pause action module '''
-
- # note: this module does not need to pay attention to the 'check'
- # flag, it always runs
-
- hosts = ', '.join(self.runner.host_set)
- args = {}
- if complex_args:
- args.update(complex_args)
- # extra template call unneeded?
- args.update(parse_kv(template.template(self.runner.basedir, module_args, inject)))
-
- # Is 'minutes' or 'seconds' a key in 'args'?
- if 'minutes' in args or 'seconds' in args:
- try:
- if 'minutes' in args:
- self.pause_type = 'minutes'
- # time.sleep() operates in seconds, so we need to
- # recalculate for minutes=X values.
- self.seconds = int(args['minutes']) * 60
- else:
- self.pause_type = 'seconds'
- self.seconds = int(args['seconds'])
- self.duration_unit = 'seconds'
- except ValueError, e:
- raise ae("non-integer value given for prompt duration:\n%s" % str(e))
- # Is 'prompt' a key in 'args'?
- elif 'prompt' in args:
- self.pause_type = 'prompt'
- self.prompt = "[%s]\n%s:\n" % (hosts, args['prompt'])
- # If 'args' is empty, this is the default prompted pause
- elif len(args.keys()) == 0:
- self.pause_type = 'prompt'
- self.prompt = "[%s]\nPress enter to continue:\n" % hosts
- # I have no idea what you're trying to do. But it's so wrong.
- else:
- raise ae("invalid pause type given. must be one of: %s" % \
- ", ".join(self.PAUSE_TYPES))
-
- vv("created 'pause' ActionModule: pause_type=%s, duration_unit=%s, calculated_seconds=%s, prompt=%s" % \
- (self.pause_type, self.duration_unit, self.seconds, self.prompt))
-
- ########################################################################
- # Begin the hard work!
- try:
- self._start()
- if not self.pause_type == 'prompt':
- print "[%s]\nPausing for %s seconds" % (hosts, self.seconds)
- time.sleep(self.seconds)
- else:
- # Clear out any unflushed buffered input which would
- # otherwise be consumed by raw_input() prematurely.
- tcflush(sys.stdin, TCIFLUSH)
- self.result['user_input'] = raw_input(self.prompt.encode(sys.stdout.encoding))
- except KeyboardInterrupt:
- while True:
- print '\nAction? (a)bort/(c)ontinue: '
- c = getch()
- if c == 'c':
- # continue playbook evaluation
- break
- elif c == 'a':
- # abort further playbook evaluation
- raise ae('user requested abort!')
- finally:
- self._stop()
-
- return ReturnData(conn=conn, result=self.result)
-
- def _start(self):
- ''' mark the time of execution for duration calculations later '''
- self.start = time.time()
- self.result['start'] = str(datetime.datetime.now())
- if not self.pause_type == 'prompt':
- print "(^C-c = continue early, ^C-a = abort)"
-
- def _stop(self):
- ''' calculate the duration we actually paused for and then
- finish building the task result string '''
- duration = time.time() - self.start
- self.result['stop'] = str(datetime.datetime.now())
- self.result['delta'] = int(duration)
-
- if self.duration_unit == 'minutes':
- duration = round(duration / 60.0, 2)
- else:
- duration = round(duration, 2)
-
- self.result['stdout'] = "Paused for %s %s" % (duration, self.duration_unit)
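The timing logic of the deleted pause plugin is simple once extracted: minutes are converted to seconds up front (sleep works in seconds), and the measured duration is rounded back into the requested unit for the result string. An isolated sketch:

    import time

    def pause(seconds=None, minutes=None):
        if minutes is not None:
            unit, total = 'minutes', int(minutes) * 60
        else:
            unit, total = 'seconds', int(seconds)
        start = time.time()
        time.sleep(total)
        delta = time.time() - start
        shown = round(delta / 60.0, 2) if unit == 'minutes' else round(delta, 2)
        return dict(delta=int(delta), stdout="Paused for %s %s" % (shown, unit))

    print(pause(seconds=1))
    # e.g. {'delta': 1, 'stdout': 'Paused for 1.0 seconds'}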
diff --git a/v1/ansible/runner/action_plugins/raw.py b/v1/ansible/runner/action_plugins/raw.py
deleted file mode 100644
index e52296b2e7..0000000000
--- a/v1/ansible/runner/action_plugins/raw.py
+++ /dev/null
@@ -1,54 +0,0 @@
-# (c) 2012, Michael DeHaan <michael.dehaan@gmail.com>
-#
-# This file is part of Ansible
-#
-# Ansible is free software: you can redistribute it and/or modify
-# it under the terms of the GNU General Public License as published by
-# the Free Software Foundation, either version 3 of the License, or
-# (at your option) any later version.
-#
-# Ansible is distributed in the hope that it will be useful,
-# but WITHOUT ANY WARRANTY; without even the implied warranty of
-# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
-# GNU General Public License for more details.
-#
-# You should have received a copy of the GNU General Public License
-# along with Ansible. If not, see <http://www.gnu.org/licenses/>.
-
-import re
-
-import ansible.constants as C
-from ansible import utils
-from ansible import errors
-from ansible.runner.return_data import ReturnData
-
-class ActionModule(object):
- TRANSFERS_FILES = False
-
- def __init__(self, runner):
- self.runner = runner
-
- def run(self, conn, tmp, module_name, module_args, inject, complex_args=None, **kwargs):
-
- if self.runner.noop_on_check(inject):
- # in --check mode, always skip this module execution
- return ReturnData(conn=conn, comm_ok=True, result=dict(skipped=True))
-
- executable = ''
- # From library/command, keep in sync
- r = re.compile(r'(^|\s)(executable)=(?P<quote>[\'"])?(.*?)(?(quote)(?<!\\)(?P=quote))((?<!\\)\s|$)')
- for m in r.finditer(module_args):
- v = m.group(4).replace("\\", "")
- if m.group(2) == "executable":
- executable = v
- module_args = r.sub("", module_args)
-
- result = self.runner._low_level_exec_command(conn, module_args, tmp, sudoable=True, executable=executable,
- become=self.runner.become)
- # for some modules (script, raw), the sudo success key
- # may leak into the stdout due to the way the sudo/su
- # command is constructed, so we filter that out here
- if result.get('stdout','').strip().startswith('BECOME-SUCCESS-'):
- result['stdout'] = re.sub(r'^((\r)?\n)?BECOME-SUCCESS.*(\r)?\n', '', result['stdout'])
-
- return ReturnData(conn=conn, result=result)
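Two things happen around the low-level exec in the deleted raw plugin: an optional executable=... token is pulled out of the argument string with the same regex that library/command used, and any BECOME-SUCCESS marker that leaked into stdout from the sudo/su wrapper is scrubbed. The scrub step in isolation:

    import re

    def scrub_become_marker(stdout):
        # sudo/su success markers can leak into stdout; strip the leading one
        if stdout.strip().startswith('BECOME-SUCCESS-'):
            stdout = re.sub(r'^((\r)?\n)?BECOME-SUCCESS.*(\r)?\n', '', stdout)
        return stdout

    print(scrub_become_marker('BECOME-SUCCESS-abc123\nreal output\n'))
    # -> real output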
diff --git a/v1/ansible/runner/action_plugins/script.py b/v1/ansible/runner/action_plugins/script.py
deleted file mode 100644
index 1b1aadc7aa..0000000000
--- a/v1/ansible/runner/action_plugins/script.py
+++ /dev/null
@@ -1,136 +0,0 @@
-# (c) 2012, Michael DeHaan <michael.dehaan@gmail.com>
-#
-# This file is part of Ansible
-#
-# Ansible is free software: you can redistribute it and/or modify
-# it under the terms of the GNU General Public License as published by
-# the Free Software Foundation, either version 3 of the License, or
-# (at your option) any later version.
-#
-# Ansible is distributed in the hope that it will be useful,
-# but WITHOUT ANY WARRANTY; without even the implied warranty of
-# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
-# GNU General Public License for more details.
-#
-# You should have received a copy of the GNU General Public License
-# along with Ansible. If not, see <http://www.gnu.org/licenses/>.
-
-import os
-import re
-import shlex
-
-import ansible.constants as C
-from ansible.utils import template
-from ansible import utils
-from ansible import errors
-from ansible.runner.return_data import ReturnData
-
-
-class ActionModule(object):
- TRANSFERS_FILES = True
-
- def __init__(self, runner):
- self.runner = runner
-
- def run(self, conn, tmp, module_name, module_args, inject, complex_args=None, **kwargs):
- ''' handler for file transfer operations '''
-
- if self.runner.noop_on_check(inject):
- # in check mode, always skip this module
- return ReturnData(conn=conn, comm_ok=True,
- result=dict(skipped=True, msg='check mode not supported for this module'))
-
- # extract ansible reserved parameters
- # From library/command keep in sync
- creates = None
- removes = None
- r = re.compile(r'(^|\s)(creates|removes)=(?P<quote>[\'"])?(.*?)(?(quote)(?<!\\)(?P=quote))((?<!\\)(?=\s)|$)')
- for m in r.finditer(module_args):
- v = m.group(4).replace("\\", "")
- if m.group(2) == "creates":
- creates = v
- elif m.group(2) == "removes":
- removes = v
- module_args = r.sub("", module_args)
-
- if creates:
- # do not run the command if the line contains creates=filename
- # and the filename already exists. This allows idempotence
- # of command executions.
- module_args_tmp = "path=%s" % creates
- module_return = self.runner._execute_module(conn, tmp, 'stat', module_args_tmp, inject=inject,
- complex_args=complex_args, persist_files=True)
- stat = module_return.result.get('stat', None)
- if stat and stat.get('exists', False):
- return ReturnData(
- conn=conn,
- comm_ok=True,
- result=dict(
- changed=False,
- msg=("skipped, since %s exists" % creates)
- )
- )
- if removes:
- # do not run the command if the line contains removes=filename
- # and the filename does not exist. This allows idempotence
- # of command executions.
- module_args_tmp = "path=%s" % removes
- module_return = self.runner._execute_module(conn, tmp, 'stat', module_args_tmp, inject=inject,
- complex_args=complex_args, persist_files=True)
- stat = module_return.result.get('stat', None)
- if stat and not stat.get('exists', False):
- return ReturnData(
- conn=conn,
- comm_ok=True,
- result=dict(
- changed=False,
- msg=("skipped, since %s does not exist" % removes)
- )
- )
-
- # Decode the result of shlex.split() to UTF8 to get around a bug that's been fixed in Python 2.7 but not Python 2.6.
- # See: http://bugs.python.org/issue6988
- tokens = shlex.split(module_args.encode('utf8'))
- tokens = [s.decode('utf8') for s in tokens]
- # extract source script
- source = tokens[0]
-
- # FIXME: error handling
- args = " ".join(tokens[1:])
- source = template.template(self.runner.basedir, source, inject)
- if '_original_file' in inject:
- source = utils.path_dwim_relative(inject['_original_file'], 'files', source, self.runner.basedir)
- else:
- source = utils.path_dwim(self.runner.basedir, source)
-
- # transfer the file to a remote tmp location
- source = source.replace('\x00', '') # why does this happen here?
- args = args.replace('\x00', '') # why does this happen here?
- tmp_src = conn.shell.join_path(tmp, os.path.basename(source))
- tmp_src = tmp_src.replace('\x00', '')
-
- conn.put_file(source, tmp_src)
-
- sudoable = True
- # set file permissions, more permissive when the copy is done as a different user
- if self.runner.become and self.runner.become_user != 'root':
- chmod_mode = 'a+rx'
- sudoable = False
- else:
- chmod_mode = '+rx'
- self.runner._remote_chmod(conn, chmod_mode, tmp_src, tmp, sudoable=sudoable, become=self.runner.become)
-
- # combine the preparation steps and the script execution into one ssh roundtrip
- env_string = self.runner._compute_environment_string(conn, inject)
- module_args = ' '.join([env_string, tmp_src, args])
-
- handler = utils.plugins.action_loader.get('raw', self.runner)
- result = handler.run(conn, tmp, 'raw', module_args, inject)
-
- # clean up after
- if "tmp" in tmp and not C.DEFAULT_KEEP_REMOTE_FILES:
- self.runner._remove_tmp_path(conn, tmp)
-
- result.result['changed'] = True
-
- return result
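The creates=/removes= handling in the deleted script plugin is its idempotence mechanism: stat the named path on the target and skip the run when creates already exists or removes is already gone. A local-filesystem sketch of the decision (the plugin did the stat via a remote module call):

    import os

    def should_skip(creates=None, removes=None):
        if creates and os.path.exists(creates):
            return "skipped, since %s exists" % creates
        if removes and not os.path.exists(removes):
            return "skipped, since %s does not exist" % removes
        return None   # no guard tripped: run the script

    print(should_skip(creates='/etc'))   # skipped, since /etc exists
    print(should_skip(removes='/etc'))   # None: path exists, so run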
diff --git a/v1/ansible/runner/action_plugins/set_fact.py b/v1/ansible/runner/action_plugins/set_fact.py
deleted file mode 100644
index 7ac972cac6..0000000000
--- a/v1/ansible/runner/action_plugins/set_fact.py
+++ /dev/null
@@ -1,47 +0,0 @@
-# Copyright 2013 Dag Wieers <dag@wieers.com>
-#
-# This file is part of Ansible
-#
-# Ansible is free software: you can redistribute it and/or modify
-# it under the terms of the GNU General Public License as published by
-# the Free Software Foundation, either version 3 of the License, or
-# (at your option) any later version.
-#
-# Ansible is distributed in the hope that it will be useful,
-# but WITHOUT ANY WARRANTY; without even the implied warranty of
-# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
-# GNU General Public License for more details.
-#
-# You should have received a copy of the GNU General Public License
-# along with Ansible. If not, see <http://www.gnu.org/licenses/>.
-
-from ansible import utils
-from ansible.runner.return_data import ReturnData
-
-class ActionModule(object):
-
- TRANSFERS_FILES = False
-
- def __init__(self, runner):
- self.runner = runner
-
- def run(self, conn, tmp, module_name, module_args, inject, complex_args=None, **kwargs):
- ''' handler for running operations on master '''
-
- # load up options
- options = {}
- if complex_args:
- options.update(complex_args)
-
- # parse the k=v arguments and convert any special boolean
- # strings into proper booleans (issue #8629)
- parsed_args = utils.parse_kv(module_args)
- for k,v in parsed_args.iteritems():
- # convert certain strings to boolean values
- if isinstance(v, basestring) and v.lower() in ('true', 'false', 'yes', 'no'):
- parsed_args[k] = utils.boolean(v)
-
- # and finally update the options with the parsed/modified args
- options.update(parsed_args)
-
- return ReturnData(conn=conn, result=dict(ansible_facts=options))
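The only non-trivial behaviour in the deleted set_fact plugin is the fix for issue #8629: bare k=v strings such as 'true' or 'no' are coerced into real booleans before being returned as facts. A Python 3 rendering of the same idea (the original used basestring and iteritems):

    def coerce_booleans(parsed_args):
        for k, v in list(parsed_args.items()):
            if isinstance(v, str) and v.lower() in ('true', 'false', 'yes', 'no'):
                parsed_args[k] = v.lower() in ('true', 'yes')
        return parsed_args

    print(coerce_booleans({'enabled': 'yes', 'count': '3', 'debug': 'False'}))
    # -> {'enabled': True, 'count': '3', 'debug': False}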
diff --git a/v1/ansible/runner/action_plugins/synchronize.py b/v1/ansible/runner/action_plugins/synchronize.py
deleted file mode 100644
index fb82194b00..0000000000
--- a/v1/ansible/runner/action_plugins/synchronize.py
+++ /dev/null
@@ -1,218 +0,0 @@
-#!/usr/bin/python
-# -*- coding: utf-8 -*-
-
-# (c) 2012-2013, Timothy Appnel <tim@appnel.com>
-#
-# Ansible is free software: you can redistribute it and/or modify
-# it under the terms of the GNU General Public License as published by
-# the Free Software Foundation, either version 3 of the License, or
-# (at your option) any later version.
-#
-# Ansible is distributed in the hope that it will be useful,
-# but WITHOUT ANY WARRANTY; without even the implied warranty of
-# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
-# GNU General Public License for more details.
-#
-# You should have received a copy of the GNU General Public License
-# along with Ansible. If not, see <http://www.gnu.org/licenses/>.
-
-import os.path
-
-from ansible import utils
-from ansible import constants
-from ansible.runner.return_data import ReturnData
-import ansible.utils.template as template
-
-class ActionModule(object):
-
- def __init__(self, runner):
- self.runner = runner
- self.inject = None
-
- def _get_absolute_path(self, path=None):
- if 'vars' in self.inject:
- if '_original_file' in self.inject['vars']:
- # roles
- original_path = path
- path = utils.path_dwim_relative(self.inject['_original_file'], 'files', path, self.runner.basedir)
- if original_path and original_path[-1] == '/' and path[-1] != '/':
- # make sure the dwim'd path ends in a trailing "/"
- # if the original path did
- path += '/'
-
- return path
-
- def _process_origin(self, host, path, user):
-
- if not host in ['127.0.0.1', 'localhost']:
- if user:
- return '%s@%s:%s' % (user, host, path)
- else:
- return '%s:%s' % (host, path)
- else:
- if not ':' in path:
- if not path.startswith('/'):
- path = self._get_absolute_path(path=path)
- return path
-
- def _process_remote(self, host, path, user):
- transport = self.runner.transport
- return_data = None
- if not host in ['127.0.0.1', 'localhost'] or transport != "local":
- if user:
- return_data = '%s@%s:%s' % (user, host, path)
- else:
- return_data = '%s:%s' % (host, path)
- else:
- return_data = path
-
- if not ':' in return_data:
- if not return_data.startswith('/'):
- return_data = self._get_absolute_path(path=return_data)
-
- return return_data
-
- def setup(self, module_name, inject):
- ''' Always default the delegate to localhost if none is defined '''
-
- self.inject = inject
-
- # Store original transport and sudo values.
- self.original_transport = inject.get('ansible_connection', self.runner.transport)
- self.original_become = self.runner.become
- self.transport_overridden = False
-
- if inject.get('delegate_to') is None:
- inject['delegate_to'] = '127.0.0.1'
- # IF original transport is not local, override transport and disable sudo.
- if self.original_transport != 'local':
- inject['ansible_connection'] = 'local'
- self.transport_overridden = True
- self.runner.become = False
-
- def run(self, conn, tmp, module_name, module_args,
- inject, complex_args=None, **kwargs):
-
- ''' generates params and passes them on to the rsync module '''
-
- self.inject = inject
-
- # load up options
- options = {}
- if complex_args:
- options.update(complex_args)
- options.update(utils.parse_kv(module_args))
-
- src = options.get('src', None)
- dest = options.get('dest', None)
- use_ssh_args = options.pop('use_ssh_args', None)
-
- src = template.template(self.runner.basedir, src, inject)
- dest = template.template(self.runner.basedir, dest, inject)
- use_ssh_args = template.template(self.runner.basedir, use_ssh_args, inject)
-
- try:
- options['local_rsync_path'] = inject['ansible_rsync_path']
- except KeyError:
- pass
-
- # from the perspective of the rsync call the delegate is the localhost
- src_host = '127.0.0.1'
- dest_host = inject.get('ansible_ssh_host', inject['inventory_hostname'])
-
- # allow ansible_ssh_host to be templated
- dest_host = template.template(self.runner.basedir, dest_host, inject, fail_on_undefined=True)
- dest_is_local = dest_host in ['127.0.0.1', 'localhost']
-
- # CHECK FOR NON-DEFAULT SSH PORT
- dest_port = options.get('dest_port')
- inv_port = inject.get('ansible_ssh_port', inject['inventory_hostname'])
- if inv_port != dest_port and inv_port != inject['inventory_hostname']:
- options['dest_port'] = inv_port
-
- # edge case: explicit delegate and dest_host are the same
- if dest_host == inject['delegate_to']:
- dest_host = '127.0.0.1'
-
- # SWITCH SRC AND DEST PER MODE
- if options.get('mode', 'push') == 'pull':
- (dest_host, src_host) = (src_host, dest_host)
-
- # CHECK DELEGATE HOST INFO
- use_delegate = False
- if conn.delegate != conn.host:
- if 'hostvars' in inject:
- if conn.delegate in inject['hostvars'] and self.original_transport != 'local':
- # use a delegate host instead of localhost
- use_delegate = True
-
- # COMPARE DELEGATE, HOST AND TRANSPORT
- process_args = False
- if not dest_host is src_host and self.original_transport != 'local':
- # interpret and inject remote host info into src or dest
- process_args = True
-
- # MUNGE SRC AND DEST PER REMOTE_HOST INFO
- if process_args or use_delegate:
-
- user = None
- if utils.boolean(options.get('set_remote_user', 'yes')):
- if use_delegate:
- user = inject['hostvars'][conn.delegate].get('ansible_ssh_user')
-
- if not use_delegate or not user:
- user = inject.get('ansible_ssh_user',
- self.runner.remote_user)
-
- if use_delegate:
- # FIXME
- private_key = inject.get('ansible_ssh_private_key_file', self.runner.private_key_file)
- else:
- private_key = inject.get('ansible_ssh_private_key_file', self.runner.private_key_file)
-
- private_key = template.template(self.runner.basedir, private_key, inject, fail_on_undefined=True)
-
- if not private_key is None:
- private_key = os.path.expanduser(private_key)
- options['private_key'] = private_key
-
- # use the mode to define src and dest's url
- if options.get('mode', 'push') == 'pull':
- # src is a remote path: <user>@<host>, dest is a local path
- src = self._process_remote(src_host, src, user)
- dest = self._process_origin(dest_host, dest, user)
- else:
- # src is a local path, dest is a remote path: <user>@<host>
- src = self._process_origin(src_host, src, user)
- dest = self._process_remote(dest_host, dest, user)
-
- options['src'] = src
- options['dest'] = dest
- if 'mode' in options:
- del options['mode']
- if use_ssh_args:
- options['ssh_args'] = constants.ANSIBLE_SSH_ARGS
-
- # Allow custom rsync path argument.
- rsync_path = options.get('rsync_path', None)
-
- # If no rsync_path is set, sudo was originally set, and dest is remote, then add the 'sudo rsync' argument.
- if not rsync_path and self.transport_overridden and self.original_become and not dest_is_local and self.runner.become_method == 'sudo':
- rsync_path = 'sudo rsync'
-
- # make sure rsync path is quoted.
- if rsync_path:
- options['rsync_path'] = '"' + rsync_path + '"'
-
- module_args = ""
- if self.runner.noop_on_check(inject):
- module_args = "CHECKMODE=True"
-
- # run the module and store the result
- result = self.runner._execute_module(conn, tmp, 'synchronize', module_args, complex_args=options, inject=inject)
-
- # reset the sudo property
- self.runner.become = self.original_become
-
- return result
-
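The bulk of the deleted synchronize plugin is deciding which side of the rsync invocation is remote: push mode makes dest the remote end, pull mode swaps the ends, and a remote end gets a user@host: prefix. The addressing rule reduced to a function (hypothetical hosts):

    def rsync_endpoints(src, dest, remote_host, user=None, mode='push'):
        src_host, dest_host = '127.0.0.1', remote_host
        if mode == 'pull':
            src_host, dest_host = dest_host, src_host   # swap ends for pull

        def addr(host, path):
            if host in ('127.0.0.1', 'localhost'):
                return path                              # local side: bare path
            prefix = '%s@%s' % (user, host) if user else host
            return '%s:%s' % (prefix, path)

        return addr(src_host, src), addr(dest_host, dest)

    print(rsync_endpoints('app/', '/srv/app/', 'web01', user='deploy'))
    # -> ('app/', 'deploy@web01:/srv/app/')
    print(rsync_endpoints('/srv/app/', 'backup/', 'web01', mode='pull'))
    # -> ('web01:/srv/app/', 'backup/')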
diff --git a/v1/ansible/runner/action_plugins/template.py b/v1/ansible/runner/action_plugins/template.py
deleted file mode 100644
index 5c9be9e079..0000000000
--- a/v1/ansible/runner/action_plugins/template.py
+++ /dev/null
@@ -1,179 +0,0 @@
-# (c) 2012, Michael DeHaan <michael.dehaan@gmail.com>
-#
-# This file is part of Ansible
-#
-# Ansible is free software: you can redistribute it and/or modify
-# it under the terms of the GNU General Public License as published by
-# the Free Software Foundation, either version 3 of the License, or
-# (at your option) any later version.
-#
-# Ansible is distributed in the hope that it will be useful,
-# but WITHOUT ANY WARRANTY; without even the implied warranty of
-# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
-# GNU General Public License for more details.
-#
-# You should have received a copy of the GNU General Public License
-# along with Ansible. If not, see <http://www.gnu.org/licenses/>.
-
-import os
-import pipes
-from ansible.utils import template
-from ansible import utils
-from ansible import errors
-from ansible.runner.return_data import ReturnData
-import base64
-
-class ActionModule(object):
-
- TRANSFERS_FILES = True
-
- def __init__(self, runner):
- self.runner = runner
-
- def get_checksum(self, conn, tmp, dest, inject, try_directory=False, source=None):
- remote_checksum = self.runner._remote_checksum(conn, tmp, dest, inject)
-
- if remote_checksum in ('0', '2', '3', '4'):
- # Note: 1 means the file is not present which is fine; template
- # will create it. 3 means directory was specified instead of file
- # which requires special handling
- if try_directory and remote_checksum == '3' and source:
- # If the user specified a directory name as their dest then we
- # have to check the checksum of dest/basename(src). This is
- # the same behaviour as cp foo.txt /var/tmp/ so users expect
- # it to work.
- base = os.path.basename(source)
- dest = os.path.join(dest, base)
- remote_checksum = self.get_checksum(conn, tmp, dest, inject, try_directory=False)
- if remote_checksum not in ('0', '2', '3', '4'):
- return remote_checksum
-
- result = dict(failed=True, msg="failed to checksum remote file."
- " Checksum error code: %s" % remote_checksum)
- return ReturnData(conn=conn, comm_ok=True, result=result)
-
- return remote_checksum
-
- def run(self, conn, tmp, module_name, module_args, inject, complex_args=None, **kwargs):
- ''' handler for template operations '''
-
- if not self.runner.is_playbook:
- raise errors.AnsibleError("in current versions of ansible, templates are only usable in playbooks")
-
- # load up options
- options = {}
- if complex_args:
- options.update(complex_args)
- options.update(utils.parse_kv(module_args))
-
- source = options.get('src', None)
- dest = options.get('dest', None)
-
- if (source is None and 'first_available_file' not in inject) or dest is None:
- result = dict(failed=True, msg="src and dest are required")
- return ReturnData(conn=conn, comm_ok=False, result=result)
-
- # if we have first_available_file in our vars
- # look up the files and use the first one we find as src
-
- if 'first_available_file' in inject:
- found = False
- for fn in self.runner.module_vars.get('first_available_file'):
- fn_orig = fn
- fnt = template.template(self.runner.basedir, fn, inject)
- fnd = utils.path_dwim(self.runner.basedir, fnt)
- if not os.path.exists(fnd) and '_original_file' in inject:
- fnd = utils.path_dwim_relative(inject['_original_file'], 'templates', fnt, self.runner.basedir, check=False)
- if os.path.exists(fnd):
- source = fnd
- found = True
- break
- if not found:
- result = dict(failed=True, msg="could not find src in first_available_file list")
- return ReturnData(conn=conn, comm_ok=False, result=result)
- else:
- source = template.template(self.runner.basedir, source, inject)
-
- if '_original_file' in inject:
- source = utils.path_dwim_relative(inject['_original_file'], 'templates', source, self.runner.basedir)
- else:
- source = utils.path_dwim(self.runner.basedir, source)
-
- # template the source data locally & get ready to transfer
- try:
- resultant = template.template_from_file(self.runner.basedir, source, inject, vault_password=self.runner.vault_pass)
- except Exception, e:
- result = dict(failed=True, msg=type(e).__name__ + ": " + str(e))
- return ReturnData(conn=conn, comm_ok=False, result=result)
-
- # Expand any user home dir specification
- dest = self.runner._remote_expand_user(conn, dest, tmp)
-
- directory_prepended = False
- if dest.endswith("/"): # CCTODO: Fix path for Windows hosts.
- directory_prepended = True
- base = os.path.basename(source)
- dest = os.path.join(dest, base)
-
- local_checksum = utils.checksum_s(resultant)
- remote_checksum = self.get_checksum(conn, tmp, dest, inject, not directory_prepended, source=source)
-
- if local_checksum != remote_checksum:
-
- # template is different from the remote value
-
- # if showing diffs, we need to get the remote value
- dest_contents = ''
-
- if self.runner.diff:
- # using persist_files to keep the temp directory around to avoid needing to grab another
- dest_result = self.runner._execute_module(conn, tmp, 'slurp', "path=%s" % dest, inject=inject, persist_files=True)
- if 'content' in dest_result.result:
- dest_contents = dest_result.result['content']
- if dest_result.result['encoding'] == 'base64':
- dest_contents = base64.b64decode(dest_contents)
- else:
- raise Exception("unknown encoding, failed: %s" % dest_result.result)
-
- xfered = self.runner._transfer_str(conn, tmp, 'source', resultant)
-
- # fix file permissions when the copy is done as a different user
- if self.runner.become and self.runner.become_user != 'root':
- self.runner._remote_chmod(conn, 'a+r', xfered, tmp)
-
- # run the copy module
- new_module_args = dict(
- src=xfered,
- dest=dest,
- original_basename=os.path.basename(source),
- follow=True,
- )
- module_args_tmp = utils.merge_module_args(module_args, new_module_args)
-
- if self.runner.noop_on_check(inject):
- return ReturnData(conn=conn, comm_ok=True, result=dict(changed=True), diff=dict(before_header=dest, after_header=source, before=dest_contents, after=resultant))
- else:
- res = self.runner._execute_module(conn, tmp, 'copy', module_args_tmp, inject=inject, complex_args=complex_args)
- if res.result.get('changed', False):
- res.diff = dict(before=dest_contents, after=resultant)
- return res
- else:
- # when running the file module based on the template data, we do
- # not want the source filename (the name of the template) to be used,
- # since this would mess up links, so we clear the src param and tell
- # the module to follow links. When doing that, we have to set
- # original_basename to the template just in case the dest is
- # a directory.
- module_args = ''
- new_module_args = dict(
- src=None,
- original_basename=os.path.basename(source),
- follow=True,
- )
- # be sure to inject the check mode param into the module args and
- # rely on the file module to report its changed status
- if self.runner.noop_on_check(inject):
- new_module_args['CHECKMODE'] = True
- options.update(new_module_args)
- return self.runner._execute_module(conn, tmp, 'file', module_args, inject=inject, complex_args=options)
-
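The deleted template plugin mirrors copy, except the content is rendered locally first: template the source with Jinja2, checksum the rendered string against the remote file, and only transfer on a mismatch (otherwise only the file module runs, to enforce ownership and mode). A sketch with jinja2 standing in for v1's template.template_from_file:

    import hashlib
    from jinja2 import Template

    def render_and_diff(template_text, variables, remote_checksum):
        resultant = Template(template_text).render(**variables)
        local_checksum = hashlib.sha1(resultant.encode('utf-8')).hexdigest()
        # transfer and the copy-module call happen only when the hashes differ
        return resultant, local_checksum != remote_checksum

    text, changed = render_and_diff('Hello {{ name }}\n', {'name': 'world'},
                                    remote_checksum='0' * 40)
    print(changed)   # True: rendered content differs from the remote file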
diff --git a/v1/ansible/runner/action_plugins/unarchive.py b/v1/ansible/runner/action_plugins/unarchive.py
deleted file mode 100644
index 312a2265c0..0000000000
--- a/v1/ansible/runner/action_plugins/unarchive.py
+++ /dev/null
@@ -1,121 +0,0 @@
-# (c) 2012, Michael DeHaan <michael.dehaan@gmail.com>
-# (c) 2013, Dylan Martin <dmartin@seattlecentral.edu>
-#
-# This file is part of Ansible
-#
-# Ansible is free software: you can redistribute it and/or modify
-# it under the terms of the GNU General Public License as published by
-# the Free Software Foundation, either version 3 of the License, or
-# (at your option) any later version.
-#
-# Ansible is distributed in the hope that it will be useful,
-# but WITHOUT ANY WARRANTY; without even the implied warranty of
-# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
-# GNU General Public License for more details.
-#
-# You should have received a copy of the GNU General Public License
-# along with Ansible. If not, see <http://www.gnu.org/licenses/>.
-
-import os
-
-from ansible import utils
-import ansible.utils.template as template
-from ansible import errors
-from ansible.runner.return_data import ReturnData
-
-## fixes https://github.com/ansible/ansible/issues/3518
-# http://mypy.pythonblogs.com/12_mypy/archive/1253_workaround_for_python_bug_ascii_codec_cant_encode_character_uxa0_in_position_111_ordinal_not_in_range128.html
-import sys
-reload(sys)
-sys.setdefaultencoding("utf8")
-import pipes
-
-
-class ActionModule(object):
-
- TRANSFERS_FILES = True
-
- def __init__(self, runner):
- self.runner = runner
-
- def run(self, conn, tmp, module_name, module_args, inject, complex_args=None, **kwargs):
- ''' handler for file transfer operations '''
-
- # load up options
- options = {}
- if complex_args:
- options.update(complex_args)
- options.update(utils.parse_kv(module_args))
- source = options.get('src', None)
- dest = options.get('dest', None)
- copy = utils.boolean(options.get('copy', 'yes'))
- creates = options.get('creates', None)
-
- if source is None or dest is None:
- result = dict(failed=True, msg="src (or content) and dest are required")
- return ReturnData(conn=conn, result=result)
-
- if creates:
- # do not run the command if the line contains creates=filename
- # and that filename already exists. This allows idempotent
- # command executions.
- module_args_tmp = ""
- complex_args_tmp = dict(path=creates, get_md5=False, get_checksum=False)
- module_return = self.runner._execute_module(conn, tmp, 'stat', module_args_tmp, inject=inject,
- complex_args=complex_args_tmp, delete_remote_tmp=False)
- stat = module_return.result.get('stat', None)
- if stat and stat.get('exists', False):
- return ReturnData(
- conn=conn,
- comm_ok=True,
- result=dict(
- changed=False,
- msg=("skipped, since %s exists" % creates)
- )
- )
-
- dest = self.runner._remote_expand_user(conn, dest, tmp) # CCTODO: Fix path for Windows hosts.
- source = template.template(self.runner.basedir, os.path.expanduser(source), inject)
- if copy:
- if '_original_file' in inject:
- source = utils.path_dwim_relative(inject['_original_file'], 'files', source, self.runner.basedir)
- else:
- source = utils.path_dwim(self.runner.basedir, source)
-
- remote_checksum = self.runner._remote_checksum(conn, tmp, dest, inject)
- if remote_checksum == '4':
- result = dict(failed=True, msg="python isn't present on the system. Unable to compute checksum")
- return ReturnData(conn=conn, result=result)
- if remote_checksum != '3':
- result = dict(failed=True, msg="dest '%s' must be an existing dir" % dest)
- return ReturnData(conn=conn, result=result)
-
- if copy:
- # transfer the file to a remote tmp location
- tmp_src = tmp + 'source'
- conn.put_file(source, tmp_src)
-
- # handle diff mode client side
- # handle check mode client side
- # fix file permissions when the copy is done as a different user
- if copy:
- if self.runner.become and self.runner.become_user != 'root':
- if not self.runner.noop_on_check(inject):
- self.runner._remote_chmod(conn, 'a+r', tmp_src, tmp)
- # Build temporary module_args.
- new_module_args = dict(
- src=tmp_src,
- original_basename=os.path.basename(source),
- )
-
- # make sure check mode is passed on correctly
- if self.runner.noop_on_check(inject):
- new_module_args['CHECKMODE'] = True
-
- module_args = utils.merge_module_args(module_args, new_module_args)
- else:
- module_args = "%s original_basename=%s" % (module_args, pipes.quote(os.path.basename(source)))
- # make sure check mode is passed on correctly
- if self.runner.noop_on_check(inject):
- module_args += " CHECKMODE=True"
- return self.runner._execute_module(conn, tmp, 'unarchive', module_args, inject=inject, complex_args=complex_args)
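The `creates` guard above is the standard idempotence pattern: stat the marker path first and skip the whole action when it already exists. A minimal local sketch of the same guard, assuming a plain-filesystem stand-in for the remote stat call (run_unless_creates is an illustrative name):

    import os
    from typing import Callable, Optional

    def run_unless_creates(creates: Optional[str], action: Callable[[], dict]) -> dict:
        # skip the action entirely when the marker path already exists
        if creates and os.path.exists(creates):
            return {"changed": False, "msg": "skipped, since %s exists" % creates}
        return action()

    print(run_unless_creates("/etc/hosts", lambda: {"changed": True}))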
diff --git a/v1/ansible/runner/action_plugins/win_copy.py b/v1/ansible/runner/action_plugins/win_copy.py
deleted file mode 100644
index a62dfb9985..0000000000
--- a/v1/ansible/runner/action_plugins/win_copy.py
+++ /dev/null
@@ -1,377 +0,0 @@
-# (c) 2012-2014, Michael DeHaan <michael.dehaan@gmail.com>
-#
-# This file is part of Ansible
-#
-# Ansible is free software: you can redistribute it and/or modify
-# it under the terms of the GNU General Public License as published by
-# the Free Software Foundation, either version 3 of the License, or
-# (at your option) any later version.
-#
-# Ansible is distributed in the hope that it will be useful,
-# but WITHOUT ANY WARRANTY; without even the implied warranty of
-# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
-# GNU General Public License for more details.
-#
-# You should have received a copy of the GNU General Public License
-# along with Ansible. If not, see <http://www.gnu.org/licenses/>.
-
-import os
-
-from ansible import utils
-import ansible.constants as C
-import ansible.utils.template as template
-from ansible import errors
-from ansible.runner.return_data import ReturnData
-import base64
-import json
-import stat
-import tempfile
-import pipes
-
-## fixes https://github.com/ansible/ansible/issues/3518
-# http://mypy.pythonblogs.com/12_mypy/archive/1253_workaround_for_python_bug_ascii_codec_cant_encode_character_uxa0_in_position_111_ordinal_not_in_range128.html
-import sys
-reload(sys)
-sys.setdefaultencoding("utf8")
-
-
-class ActionModule(object):
-
- def __init__(self, runner):
- self.runner = runner
-
- def run(self, conn, tmp_path, module_name, module_args, inject, complex_args=None, **kwargs):
- ''' handler for file transfer operations '''
-
- # load up options
- options = {}
- if complex_args:
- options.update(complex_args)
- options.update(utils.parse_kv(module_args))
- source = options.get('src', None)
- content = options.get('content', None)
- dest = options.get('dest', None)
- raw = utils.boolean(options.get('raw', 'no'))
- force = utils.boolean(options.get('force', 'yes'))
-
- # content with newlines is going to be escaped to safely load in yaml
- # now we need to unescape it so that the newlines are evaluated properly
- # when writing the file to disk
- if content:
- if isinstance(content, unicode):
- try:
- content = content.decode('unicode-escape')
- except UnicodeDecodeError:
- pass
-
- if (source is None and content is None and 'first_available_file' not in inject) or dest is None:
- result=dict(failed=True, msg="src (or content) and dest are required")
- return ReturnData(conn=conn, result=result)
- elif (source is not None or 'first_available_file' in inject) and content is not None:
- result=dict(failed=True, msg="src and content are mutually exclusive")
- return ReturnData(conn=conn, result=result)
-
- # Check if the source ends with a "/"
- source_trailing_slash = False
- if source:
- source_trailing_slash = source.endswith("/")
-
- # Define content_tempfile up front; it is set only when content is populated.
- content_tempfile = None
-
- # If content is defined make a temp file and write the content into it.
- if content is not None:
- try:
- # If content comes to us as a dict it should be decoded json.
- # We need to encode it back into a string to write it out.
- if type(content) is dict:
- content_tempfile = self._create_content_tempfile(json.dumps(content))
- else:
- content_tempfile = self._create_content_tempfile(content)
- source = content_tempfile
- except Exception, err:
- result = dict(failed=True, msg="could not write content temp file: %s" % err)
- return ReturnData(conn=conn, result=result)
- # if we have first_available_file in our vars
- # look up the files and use the first one we find as src
- elif 'first_available_file' in inject:
- found = False
- for fn in inject.get('first_available_file'):
- fn_orig = fn
- fnt = template.template(self.runner.basedir, fn, inject)
- fnd = utils.path_dwim(self.runner.basedir, fnt)
- if not os.path.exists(fnd) and '_original_file' in inject:
- fnd = utils.path_dwim_relative(inject['_original_file'], 'files', fnt, self.runner.basedir, check=False)
- if os.path.exists(fnd):
- source = fnd
- found = True
- break
- if not found:
- results = dict(failed=True, msg="could not find src in first_available_file list")
- return ReturnData(conn=conn, result=results)
- else:
- source = template.template(self.runner.basedir, source, inject)
- if '_original_file' in inject:
- source = utils.path_dwim_relative(inject['_original_file'], 'files', source, self.runner.basedir)
- else:
- source = utils.path_dwim(self.runner.basedir, source)
-
- # A list of source file tuples (full_path, relative_path) which we will try to copy to the destination
- source_files = []
-
- # If source is a directory populate our list else source is a file and translate it to a tuple.
- if os.path.isdir(source):
- # Get the number of leading characters to strip to get the relative path.
- if source_trailing_slash:
- sz = len(source) + 1
- else:
- sz = len(source.rsplit('/', 1)[0]) + 1
-
- # Walk the directory and append the file tuples to source_files.
- for base_path, sub_folders, files in os.walk(source):
- for file in files:
- full_path = os.path.join(base_path, file)
- rel_path = full_path[sz:]
- source_files.append((full_path, rel_path))
-
- # If it's a recursive copy, the destination is always a dir;
- # explicitly mark it so (note - the copy module relies on this).
- if not conn.shell.path_has_trailing_slash(dest):
- dest = conn.shell.join_path(dest, '')
- else:
- source_files.append((source, os.path.basename(source)))
-
- changed = False
- diffs = []
- module_result = {"changed": False}
-
- # A flag recording whether we executed a module.
- # Used to cut down on command calls when not recursive.
- module_executed = False
-
- # Tell _execute_module to delete the remote tmp path when there is only one file to copy.
- delete_remote_tmp = (len(source_files) == 1)
-
- # If this is a recursive action, create a tmp_path that we can share, as the one _execute_module creates would come too late.
- if not delete_remote_tmp:
- if "-tmp-" not in tmp_path:
- tmp_path = self.runner._make_tmp_path(conn)
-
- # expand any user home dir specifier
- dest = self.runner._remote_expand_user(conn, dest, tmp_path)
-
- for source_full, source_rel in source_files:
- # Generate a hash of the local file.
- local_checksum = utils.checksum(source_full)
-
- # If local_checksum is not defined we can't find the file so we should fail out.
- if local_checksum is None:
- result = dict(failed=True, msg="could not find src=%s" % source_full)
- return ReturnData(conn=conn, result=result)
-
- # This is a kind of optimization - if the user told us the destination
- # is a dir, do the path manipulation right away; otherwise we still
- # check for dest being a dir via the remote call below.
- if conn.shell.path_has_trailing_slash(dest):
- dest_file = conn.shell.join_path(dest, source_rel)
- else:
- dest_file = conn.shell.join_path(dest)
-
- # Attempt to get the remote checksum
- remote_checksum = self.runner._remote_checksum(conn, tmp_path, dest_file, inject)
-
- if remote_checksum == '3':
- # The remote_checksum was executed on a directory.
- if content is not None:
- # If source was defined as content remove the temporary file and fail out.
- self._remove_tempfile_if_content_defined(content, content_tempfile)
- result = dict(failed=True, msg="can not use content with a dir as dest")
- return ReturnData(conn=conn, result=result)
- else:
- # Append the relative source location to the destination and retry remote_checksum.
- dest_file = conn.shell.join_path(dest, source_rel)
- remote_checksum = self.runner._remote_checksum(conn, tmp_path, dest_file, inject)
-
- if remote_checksum != '1' and not force:
- # remote file already exists and force=no, so continue to next iteration.
- continue
-
- if local_checksum != remote_checksum:
- # The checksums don't match and we will change or error out.
- changed = True
-
- # Create a tmp_path if missing only if this is not recursive.
- # If this is recursive we already have a tmp_path.
- if delete_remote_tmp:
- if "-tmp-" not in tmp_path:
- tmp_path = self.runner._make_tmp_path(conn)
-
- if self.runner.diff and not raw:
- diff = self._get_diff_data(conn, tmp_path, inject, dest_file, source_full)
- else:
- diff = {}
-
- if self.runner.noop_on_check(inject):
- self._remove_tempfile_if_content_defined(content, content_tempfile)
- diffs.append(diff)
- changed = True
- module_result = dict(changed=True)
- continue
-
- # Define a remote directory that we will copy the file to.
- tmp_src = tmp_path + 'source'
-
- if not raw:
- conn.put_file(source_full, tmp_src)
- else:
- conn.put_file(source_full, dest_file)
-
- # We have copied the file remotely and no longer require our content_tempfile
- self._remove_tempfile_if_content_defined(content, content_tempfile)
-
- # fix file permissions when the copy is done as a different user
- if self.runner.become and self.runner.become_user != 'root' and not raw:
- self.runner._remote_chmod(conn, 'a+r', tmp_src, tmp_path)
-
- if raw:
- # Continue to next iteration if raw is defined.
- continue
-
- # Run the copy module
-
- # src and dest here come after the original args and override them;
- # we pass dest only to make sure it includes a trailing slash in case of a recursive copy
- new_module_args = dict(
- src=tmp_src,
- dest=dest,
- original_basename=source_rel
- )
- if self.runner.noop_on_check(inject):
- new_module_args['CHECKMODE'] = True
- if self.runner.no_log:
- new_module_args['NO_LOG'] = True
-
- module_args_tmp = utils.merge_module_args(module_args, new_module_args)
-
- module_return = self.runner._execute_module(conn, tmp_path, 'win_copy', module_args_tmp, inject=inject, complex_args=complex_args, delete_remote_tmp=delete_remote_tmp)
- module_executed = True
-
- else:
- # no need to transfer the file, the checksum already matches, but still need to call
- # the file module in case we want to change attributes
- self._remove_tempfile_if_content_defined(content, content_tempfile)
-
- if raw:
- # Continue to next iteration if raw is defined.
- # self.runner._remove_tmp_path(conn, tmp_path)
- continue
-
- tmp_src = tmp_path + source_rel
-
- # Build temporary module_args.
- new_module_args = dict(
- src=tmp_src,
- dest=dest,
- original_basename=source_rel
- )
- if self.runner.noop_on_check(inject):
- new_module_args['CHECKMODE'] = True
- if self.runner.no_log:
- new_module_args['NO_LOG'] = True
-
- module_args_tmp = utils.merge_module_args(module_args, new_module_args)
-
- # Execute the file module.
- module_return = self.runner._execute_module(conn, tmp_path, 'win_file', module_args_tmp, inject=inject, complex_args=complex_args, delete_remote_tmp=delete_remote_tmp)
- module_executed = True
-
- module_result = module_return.result
- if not module_result.get('checksum'):
- module_result['checksum'] = local_checksum
- if module_result.get('failed'):
- return module_return
- if module_result.get('changed'):
- changed = True
-
- # Delete tmp_path if we were recursive or if we did not execute a module.
- if (not C.DEFAULT_KEEP_REMOTE_FILES and not delete_remote_tmp) \
- or (not C.DEFAULT_KEEP_REMOTE_FILES and delete_remote_tmp and not module_executed):
- self.runner._remove_tmp_path(conn, tmp_path)
-
- # the file module returns the file path as 'path', but
- # the copy module uses 'dest', so add it if it's not there
- if 'path' in module_result and 'dest' not in module_result:
- module_result['dest'] = module_result['path']
-
- # TODO: Support detailed status/diff for multiple files
- if len(source_files) == 1:
- result = module_result
- else:
- result = dict(dest=dest, src=source, changed=changed)
- if len(diffs) == 1:
- return ReturnData(conn=conn, result=result, diff=diffs[0])
- else:
- return ReturnData(conn=conn, result=result)
-
- def _create_content_tempfile(self, content):
- ''' Create a tempfile containing defined content '''
- fd, content_tempfile = tempfile.mkstemp()
- f = os.fdopen(fd, 'w')
- try:
- f.write(content)
- except Exception, err:
- os.remove(content_tempfile)
- raise Exception(err)
- finally:
- f.close()
- return content_tempfile
-
- def _get_diff_data(self, conn, tmp, inject, destination, source):
- peek_result = self.runner._execute_module(conn, tmp, 'win_file', "path=%s diff_peek=1" % destination, inject=inject, persist_files=True)
-
- if not peek_result.is_successful():
- return {}
-
- diff = {}
- if peek_result.result['state'] == 'absent':
- diff['before'] = ''
- elif peek_result.result['appears_binary']:
- diff['dst_binary'] = 1
- elif peek_result.result['size'] > utils.MAX_FILE_SIZE_FOR_DIFF:
- diff['dst_larger'] = utils.MAX_FILE_SIZE_FOR_DIFF
- else:
- dest_result = self.runner._execute_module(conn, tmp, 'slurp', "path=%s" % destination, inject=inject, persist_files=True)
- if 'content' in dest_result.result:
- dest_contents = dest_result.result['content']
- if dest_result.result['encoding'] == 'base64':
- dest_contents = base64.b64decode(dest_contents)
- else:
- raise Exception("unknown encoding, failed: %s" % dest_result.result)
- diff['before_header'] = destination
- diff['before'] = dest_contents
-
- src = open(source)
- src_contents = src.read(8192)
- st = os.stat(source)
- if "\x00" in src_contents:
- diff['src_binary'] = 1
- elif st[stat.ST_SIZE] > utils.MAX_FILE_SIZE_FOR_DIFF:
- diff['src_larger'] = utils.MAX_FILE_SIZE_FOR_DIFF
- else:
- src.seek(0)
- diff['after_header'] = source
- diff['after'] = src.read()
-
- return diff
-
- def _remove_tempfile_if_content_defined(self, content, content_tempfile):
- if content is not None:
- os.remove(content_tempfile)
-
-
- def _result_key_merge(self, options, results):
- # add keys to file module results to mimic copy
- if 'path' in results.result and 'dest' not in results.result:
- results.result['dest'] = results.result['path']
- del results.result['path']
- return results
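The directory walk above builds (full_path, relative_path) tuples, stripping either the whole source prefix (trailing slash, rsync-like "copy the contents") or everything above the last path component. A standalone sketch of the same bookkeeping, using os.path.relpath in place of the manual length arithmetic (collect_source_files is an illustrative name):

    import os
    from typing import List, Tuple

    def collect_source_files(source: str) -> List[Tuple[str, str]]:
        # a single file maps to one (full_path, basename) tuple
        if not os.path.isdir(source):
            return [(source, os.path.basename(source))]
        # trailing slash means "copy the contents"; otherwise include the
        # directory's own name in the relative paths
        root = source if source.endswith("/") else os.path.dirname(source)
        out = []
        for base, _dirs, files in os.walk(source):
            for name in files:
                full = os.path.join(base, name)
                out.append((full, os.path.relpath(full, root)))
        return out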
diff --git a/v1/ansible/runner/action_plugins/win_template.py b/v1/ansible/runner/action_plugins/win_template.py
deleted file mode 100644
index 7bde4bd510..0000000000
--- a/v1/ansible/runner/action_plugins/win_template.py
+++ /dev/null
@@ -1,146 +0,0 @@
-# (c) 2012, Michael DeHaan <michael.dehaan@gmail.com>
-#
-# This file is part of Ansible
-#
-# Ansible is free software: you can redistribute it and/or modify
-# it under the terms of the GNU General Public License as published by
-# the Free Software Foundation, either version 3 of the License, or
-# (at your option) any later version.
-#
-# Ansible is distributed in the hope that it will be useful,
-# but WITHOUT ANY WARRANTY; without even the implied warranty of
-# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
-# GNU General Public License for more details.
-#
-# You should have received a copy of the GNU General Public License
-# along with Ansible. If not, see <http://www.gnu.org/licenses/>.
-
-import os
-import pipes
-from ansible.utils import template
-from ansible import utils
-from ansible import errors
-from ansible.runner.return_data import ReturnData
-import base64
-
-class ActionModule(object):
-
- TRANSFERS_FILES = True
-
- def __init__(self, runner):
- self.runner = runner
-
- def run(self, conn, tmp, module_name, module_args, inject, complex_args=None, **kwargs):
- ''' handler for template operations '''
-
- if not self.runner.is_playbook:
- raise errors.AnsibleError("in current versions of ansible, templates are only usable in playbooks")
-
- # load up options
- options = {}
- if complex_args:
- options.update(complex_args)
- options.update(utils.parse_kv(module_args))
-
- source = options.get('src', None)
- dest = options.get('dest', None)
-
- if (source is None and 'first_available_file' not in inject) or dest is None:
- result = dict(failed=True, msg="src and dest are required")
- return ReturnData(conn=conn, comm_ok=False, result=result)
-
- # if we have first_available_file in our vars
- # look up the files and use the first one we find as src
-
- if 'first_available_file' in inject:
- found = False
- for fn in self.runner.module_vars.get('first_available_file'):
- fn_orig = fn
- fnt = template.template(self.runner.basedir, fn, inject)
- fnd = utils.path_dwim(self.runner.basedir, fnt)
- if not os.path.exists(fnd) and '_original_file' in inject:
- fnd = utils.path_dwim_relative(inject['_original_file'], 'templates', fnt, self.runner.basedir, check=False)
- if os.path.exists(fnd):
- source = fnd
- found = True
- break
- if not found:
- result = dict(failed=True, msg="could not find src in first_available_file list")
- return ReturnData(conn=conn, comm_ok=False, result=result)
- else:
- source = template.template(self.runner.basedir, source, inject)
-
- if '_original_file' in inject:
- source = utils.path_dwim_relative(inject['_original_file'], 'templates', source, self.runner.basedir)
- else:
- source = utils.path_dwim(self.runner.basedir, source)
-
- if conn.shell.path_has_trailing_slash(dest):
- base = os.path.basename(source)
- dest = conn.shell.join_path(dest, base)
-
- # template the source data locally & get ready to transfer
- try:
- resultant = template.template_from_file(self.runner.basedir, source, inject, vault_password=self.runner.vault_pass)
- except Exception, e:
- result = dict(failed=True, msg=type(e).__name__ + ": " + str(e))
- return ReturnData(conn=conn, comm_ok=False, result=result)
-
- local_checksum = utils.checksum_s(resultant)
- remote_checksum = self.runner._remote_checksum(conn, tmp, dest, inject)
-
- if local_checksum != remote_checksum:
-
- # template is different from the remote value
-
- # if showing diffs, we need to get the remote value
- dest_contents = ''
-
- if self.runner.diff:
- # using persist_files to keep the temp directory around to avoid needing to grab another
- dest_result = self.runner._execute_module(conn, tmp, 'slurp', "path=%s" % dest, inject=inject, persist_files=True)
- if 'content' in dest_result.result:
- dest_contents = dest_result.result['content']
- if dest_result.result['encoding'] == 'base64':
- dest_contents = base64.b64decode(dest_contents)
- else:
- raise Exception("unknown encoding, failed: %s" % dest_result.result)
-
- xfered = self.runner._transfer_str(conn, tmp, 'source', resultant)
-
- # fix file permissions when the copy is done as a different user
- if self.runner.become and self.runner.become_user != 'root':
- self.runner._remote_chmod(conn, 'a+r', xfered, tmp)
-
- # run the copy module
- new_module_args = dict(
- src=xfered,
- dest=dest,
- original_basename=os.path.basename(source),
- follow=True,
- )
- module_args_tmp = utils.merge_module_args(module_args, new_module_args)
-
- if self.runner.noop_on_check(inject):
- return ReturnData(conn=conn, comm_ok=True, result=dict(changed=True), diff=dict(before_header=dest, after_header=source, before=dest_contents, after=resultant))
- else:
- res = self.runner._execute_module(conn, tmp, 'win_copy', module_args_tmp, inject=inject, complex_args=complex_args)
- if res.result.get('changed', False):
- res.diff = dict(before=dest_contents, after=resultant)
- return res
- else:
- # when running the file module based on the template data, we do
- # not want the source filename (the name of the template) to be used,
- # since this would mess up links, so we clear the src param and tell
- # the module to follow links
- new_module_args = dict(
- src=None,
- follow=True,
- )
- # be sure to inject the check mode param into the module args and
- # rely on the file module to report its changed status
- if self.runner.noop_on_check(inject):
- new_module_args['CHECKMODE'] = True
- module_args = utils.merge_module_args(module_args, new_module_args)
- return self.runner._execute_module(conn, tmp, 'win_file', module_args, inject=inject, complex_args=complex_args)
-
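Both template plugins fetch the current destination contents via the slurp module when diffing; slurp returns base64-encoded content that must be decoded before comparison, which is what the encoding check above does. A small sketch of that consumer side, where the dict shape mirrors the slurp result fields used above:

    import base64

    def decode_slurp(result: dict) -> str:
        # slurp-style results carry base64-encoded content plus an encoding tag
        if result.get("encoding") == "base64":
            return base64.b64decode(result["content"]).decode("utf-8")
        raise ValueError("unknown encoding: %r" % result)

    sample = {"content": base64.b64encode(b"hello\n").decode(), "encoding": "base64"}
    print(decode_slurp(sample))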
diff --git a/v1/ansible/runner/connection.py b/v1/ansible/runner/connection.py
deleted file mode 100644
index 2ea484f70b..0000000000
--- a/v1/ansible/runner/connection.py
+++ /dev/null
@@ -1,53 +0,0 @@
-# (c) 2012-2013, Michael DeHaan <michael.dehaan@gmail.com>
-#
-# This file is part of Ansible
-#
-# Ansible is free software: you can redistribute it and/or modify
-# it under the terms of the GNU General Public License as published by
-# the Free Software Foundation, either version 3 of the License, or
-# (at your option) any later version.
-#
-# Ansible is distributed in the hope that it will be useful,
-# but WITHOUT ANY WARRANTY; without even the implied warranty of
-# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
-# GNU General Public License for more details.
-#
-# You should have received a copy of the GNU General Public License
-# along with Ansible. If not, see <http://www.gnu.org/licenses/>.
-#
-
-################################################
-
-import os
-import stat
-import errno
-
-from ansible import utils
-from ansible.errors import AnsibleError
-
-class Connector(object):
- ''' Handles abstract connections to remote hosts '''
-
- def __init__(self, runner):
- self.runner = runner
-
- def connect(self, host, port, user, password, transport, private_key_file, delegate_host):
- conn = utils.plugins.connection_loader.get(transport, self.runner, host, port, user=user, password=password, private_key_file=private_key_file)
- if conn is None:
- raise AnsibleError("unsupported connection type: %s" % transport)
- conn.delegate = delegate_host
- if private_key_file:
- # If private key is readable by user other than owner, flag an error
- st = None
- try:
- st = os.stat(private_key_file)
- except (IOError, OSError), e:
- if e.errno != errno.ENOENT: # tolerate a missing file, the key may live in an agent
- raise e
-
- if st is not None and st.st_mode & (stat.S_IRGRP | stat.S_IROTH):
- raise AnsibleError("private_key_file (%s) is group-readable or world-readable and thus insecure - "
- "you will probably get an SSH failure"
- % (private_key_file,))
- self.active = conn.connect()
- return self.active
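The key-permission check above is a self-contained pattern: stat the key, tolerate ENOENT (the key may live in ssh-agent), and refuse group- or world-readable files. The same logic as a standalone Python 3 sketch (check_private_key is an illustrative name):

    import errno
    import os
    import stat

    def check_private_key(path: str) -> None:
        try:
            st = os.stat(path)
        except OSError as e:
            if e.errno == errno.ENOENT:
                return  # missing file may mean the key lives in an agent
            raise
        # refuse keys readable by group or others, as sshd itself would
        if st.st_mode & (stat.S_IRGRP | stat.S_IROTH):
            raise RuntimeError("%s is group- or world-readable and thus insecure" % path)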
diff --git a/v1/ansible/runner/connection_plugins/__init__.py b/v1/ansible/runner/connection_plugins/__init__.py
deleted file mode 100644
index e69de29bb2..0000000000
--- a/v1/ansible/runner/connection_plugins/__init__.py
+++ /dev/null
diff --git a/v1/ansible/runner/connection_plugins/accelerate.py b/v1/ansible/runner/connection_plugins/accelerate.py
deleted file mode 100644
index 0627267c16..0000000000
--- a/v1/ansible/runner/connection_plugins/accelerate.py
+++ /dev/null
@@ -1,372 +0,0 @@
-# (c) 2012, Michael DeHaan <michael.dehaan@gmail.com>
-#
-# This file is part of Ansible
-#
-# Ansible is free software: you can redistribute it and/or modify
-# it under the terms of the GNU General Public License as published by
-# the Free Software Foundation, either version 3 of the License, or
-# (at your option) any later version.
-#
-# Ansible is distributed in the hope that it will be useful,
-# but WITHOUT ANY WARRANTY; without even the implied warranty of
-# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
-# GNU General Public License for more details.
-#
-# You should have received a copy of the GNU General Public License
-# along with Ansible. If not, see <http://www.gnu.org/licenses/>.
-
-import json
-import os
-import base64
-import socket
-import struct
-import time
-from ansible.callbacks import vvv, vvvv
-from ansible.errors import AnsibleError, AnsibleFileNotFound
-from ansible.runner.connection_plugins.ssh import Connection as SSHConnection
-from ansible.runner.connection_plugins.paramiko_ssh import Connection as ParamikoConnection
-from ansible import utils
-from ansible import constants
-
-# the chunk size to read and send, assuming mtu 1500 and
-# leaving room for base64 (+33%) encoding and header (8 bytes)
-# ((1400-8)/4)*3) = 1044
-# which leaves room for the TCP/IP header. We set this to a
-# multiple of the value to speed up file reads.
-CHUNK_SIZE=1044*20
-
-class Connection(object):
- ''' raw socket accelerated connection '''
-
- def __init__(self, runner, host, port, user, password, private_key_file, *args, **kwargs):
-
- self.runner = runner
- self.host = host
- self.context = None
- self.conn = None
- self.user = user
- self.key = utils.key_for_hostname(host)
- self.port = port[0]
- self.accport = port[1]
- self.is_connected = False
- self.has_pipelining = False
- self.become_methods_supported=['sudo']
-
- if not self.port:
- self.port = constants.DEFAULT_REMOTE_PORT
- elif not isinstance(self.port, int):
- self.port = int(self.port)
-
- if not self.accport:
- self.accport = constants.ACCELERATE_PORT
- elif not isinstance(self.accport, int):
- self.accport = int(self.accport)
-
- if self.runner.original_transport == "paramiko":
- self.ssh = ParamikoConnection(
- runner=self.runner,
- host=self.host,
- port=self.port,
- user=self.user,
- password=password,
- private_key_file=private_key_file
- )
- else:
- self.ssh = SSHConnection(
- runner=self.runner,
- host=self.host,
- port=self.port,
- user=self.user,
- password=password,
- private_key_file=private_key_file
- )
-
- if not getattr(self.ssh, 'shell', None):
- self.ssh.shell = utils.plugins.shell_loader.get('sh')
-
- # attempt to work around shared-memory funness
- if getattr(self.runner, 'aes_keys', None):
- utils.AES_KEYS = self.runner.aes_keys
-
- def _execute_accelerate_module(self):
- args = "password=%s port=%s minutes=%d debug=%d ipv6=%s" % (
- base64.b64encode(self.key.__str__()),
- str(self.accport),
- constants.ACCELERATE_DAEMON_TIMEOUT,
- int(utils.VERBOSITY),
- self.runner.accelerate_ipv6,
- )
- if constants.ACCELERATE_MULTI_KEY:
- args += " multi_key=yes"
- inject = dict(password=self.key)
- if getattr(self.runner, 'accelerate_inventory_host', False):
- inject = utils.combine_vars(inject, self.runner.inventory.get_variables(self.runner.accelerate_inventory_host))
- else:
- inject = utils.combine_vars(inject, self.runner.inventory.get_variables(self.host))
- vvvv("attempting to start up the accelerate daemon...")
- self.ssh.connect()
- tmp_path = self.runner._make_tmp_path(self.ssh)
- return self.runner._execute_module(self.ssh, tmp_path, 'accelerate', args, inject=inject)
-
- def connect(self, allow_ssh=True):
- ''' activates the connection object '''
-
- try:
- if not self.is_connected:
- wrong_user = False
- tries = 3
- self.conn = socket.socket()
- self.conn.settimeout(constants.ACCELERATE_CONNECT_TIMEOUT)
- vvvv("attempting connection to %s via the accelerated port %d" % (self.host,self.accport))
- while tries > 0:
- try:
- self.conn.connect((self.host,self.accport))
- break
- except socket.error:
- vvvv("connection to %s failed, retrying..." % self.host)
- time.sleep(0.1)
- tries -= 1
- if tries == 0:
- vvv("Could not connect via the accelerated connection, exceeded # of tries")
- raise AnsibleError("FAILED")
- elif wrong_user:
- vvv("Restarting daemon with a different remote_user")
- raise AnsibleError("WRONG_USER")
-
- self.conn.settimeout(constants.ACCELERATE_TIMEOUT)
- if not self.validate_user():
- # the accelerated daemon was started with a
- # different remote_user. The above command
- # should have caused the accelerate daemon to
- # shut down, so we'll reconnect.
- wrong_user = True
-
- except AnsibleError, e:
- if allow_ssh:
- if "WRONG_USER" in e:
- vvv("Switching users, waiting for the daemon on %s to shutdown completely..." % self.host)
- time.sleep(5)
- vvv("Falling back to ssh to startup accelerated mode")
- res = self._execute_accelerate_module()
- if not res.is_successful():
- raise AnsibleError("Failed to launch the accelerated daemon on %s (reason: %s)" % (self.host,res.result.get('msg')))
- return self.connect(allow_ssh=False)
- else:
- raise AnsibleError("Failed to connect to %s:%s" % (self.host,self.accport))
- self.is_connected = True
- return self
-
- def send_data(self, data):
- packed_len = struct.pack('!Q',len(data))
- return self.conn.sendall(packed_len + data)
-
- def recv_data(self):
- header_len = 8 # size of a packed unsigned long long
- data = b""
- try:
- vvvv("%s: in recv_data(), waiting for the header" % self.host)
- while len(data) < header_len:
- d = self.conn.recv(header_len - len(data))
- if not d:
- vvvv("%s: received nothing, bailing out" % self.host)
- return None
- data += d
- vvvv("%s: got the header, unpacking" % self.host)
- data_len = struct.unpack('!Q',data[:header_len])[0]
- data = data[header_len:]
- vvvv("%s: data received so far (expecting %d): %d" % (self.host,data_len,len(data)))
- while len(data) < data_len:
- d = self.conn.recv(data_len - len(data))
- if not d:
- vvvv("%s: received nothing, bailing out" % self.host)
- return None
- vvvv("%s: received %d bytes" % (self.host, len(d)))
- data += d
- vvvv("%s: received all of the data, returning" % self.host)
- return data
- except socket.timeout:
- raise AnsibleError("timed out while waiting to receive data")
-
- def validate_user(self):
- '''
- Checks the remote uid of the accelerated daemon vs. the
- one specified for this play and will cause the accel
- daemon to exit if they don't match
- '''
-
- vvvv("%s: sending request for validate_user" % self.host)
- data = dict(
- mode='validate_user',
- username=self.user,
- )
- data = utils.jsonify(data)
- data = utils.encrypt(self.key, data)
- if self.send_data(data):
- raise AnsibleError("Failed to send command to %s" % self.host)
-
- vvvv("%s: waiting for validate_user response" % self.host)
- while True:
- # we loop here while waiting for the response, because a
- # long running command may cause us to receive keepalive packets
- # ({"pong":"true"}) rather than the response we want.
- response = self.recv_data()
- if not response:
- raise AnsibleError("Failed to get a response from %s" % self.host)
- response = utils.decrypt(self.key, response)
- response = utils.parse_json(response)
- if "pong" in response:
- # it's a keepalive, go back to waiting
- vvvv("%s: received a keepalive packet" % self.host)
- continue
- else:
- vvvv("%s: received the validate_user response: %s" % (self.host, response))
- break
-
- if response.get('failed'):
- return False
- else:
- return response.get('rc') == 0
-
- def exec_command(self, cmd, tmp_path, become_user=None, sudoable=False, executable='/bin/sh', in_data=None):
- ''' run a command on the remote host '''
-
- if sudoable and self.runner.become and self.runner.become_method not in self.become_methods_supported:
- raise errors.AnsibleError("Internal Error: this module does not support running commands via %s" % self.runner.become_method)
-
- if in_data:
- raise AnsibleError("Internal Error: this module does not support optimized module pipelining")
-
- if executable == "":
- executable = constants.DEFAULT_EXECUTABLE
-
- if self.runner.become and sudoable:
- cmd, prompt, success_key = utils.make_become_cmd(cmd, become_user, executable, self.runner.become_method, '', self.runner.become_exe)
-
- vvv("EXEC COMMAND %s" % cmd)
-
- data = dict(
- mode='command',
- cmd=cmd,
- tmp_path=tmp_path,
- executable=executable,
- )
- data = utils.jsonify(data)
- data = utils.encrypt(self.key, data)
- if self.send_data(data):
- raise AnsibleError("Failed to send command to %s" % self.host)
-
- while True:
- # we loop here while waiting for the response, because a
- # long running command may cause us to receive keepalive packets
- # ({"pong":"true"}) rather than the response we want.
- response = self.recv_data()
- if not response:
- raise AnsibleError("Failed to get a response from %s" % self.host)
- response = utils.decrypt(self.key, response)
- response = utils.parse_json(response)
- if "pong" in response:
- # it's a keepalive, go back to waiting
- vvvv("%s: received a keepalive packet" % self.host)
- continue
- else:
- vvvv("%s: received the response" % self.host)
- break
-
- return (response.get('rc',None), '', response.get('stdout',''), response.get('stderr',''))
-
- def put_file(self, in_path, out_path):
-
- ''' transfer a file from local to remote '''
- vvv("PUT %s TO %s" % (in_path, out_path), host=self.host)
-
- if not os.path.exists(in_path):
- raise AnsibleFileNotFound("file or module does not exist: %s" % in_path)
-
- fd = file(in_path, 'rb')
- fstat = os.stat(in_path)
- try:
- vvv("PUT file is %d bytes" % fstat.st_size)
- last = False
- while fd.tell() <= fstat.st_size and not last:
- vvvv("file position currently %ld, file size is %ld" % (fd.tell(), fstat.st_size))
- data = fd.read(CHUNK_SIZE)
- if fd.tell() >= fstat.st_size:
- last = True
- data = dict(mode='put', data=base64.b64encode(data), out_path=out_path, last=last)
- if self.runner.become:
- data['user'] = self.runner.become_user
- data = utils.jsonify(data)
- data = utils.encrypt(self.key, data)
-
- if self.send_data(data):
- raise AnsibleError("failed to send the file to %s" % self.host)
-
- response = self.recv_data()
- if not response:
- raise AnsibleError("Failed to get a response from %s" % self.host)
- response = utils.decrypt(self.key, response)
- response = utils.parse_json(response)
-
- if response.get('failed',False):
- raise AnsibleError("failed to put the file in the requested location")
- finally:
- fd.close()
- vvvv("waiting for final response after PUT")
- response = self.recv_data()
- if not response:
- raise AnsibleError("Failed to get a response from %s" % self.host)
- response = utils.decrypt(self.key, response)
- response = utils.parse_json(response)
-
- if response.get('failed',False):
- raise AnsibleError("failed to put the file in the requested location")
-
- def fetch_file(self, in_path, out_path):
- ''' save a remote file to the specified path '''
- vvv("FETCH %s TO %s" % (in_path, out_path), host=self.host)
-
- data = dict(mode='fetch', in_path=in_path)
- data = utils.jsonify(data)
- data = utils.encrypt(self.key, data)
- if self.send_data(data):
- raise AnsibleError("failed to initiate the file fetch with %s" % self.host)
-
- fh = open(out_path, "w")
- try:
- bytes = 0
- while True:
- response = self.recv_data()
- if not response:
- raise AnsibleError("Failed to get a response from %s" % self.host)
- response = utils.decrypt(self.key, response)
- response = utils.parse_json(response)
- if response.get('failed', False):
- raise AnsibleError("Error during file fetch, aborting")
- out = base64.b64decode(response['data'])
- fh.write(out)
- bytes += len(out)
- # send an empty response back to signify we
- # received the last chunk without errors
- data = utils.jsonify(dict())
- data = utils.encrypt(self.key, data)
- if self.send_data(data):
- raise AnsibleError("failed to send ack during file fetch")
- if response.get('last', False):
- break
- finally:
- # we don't currently care about this final response,
- # we just receive it and drop it. It may be used at some
- # point in the future or we may just have the put/fetch
- # operations not send back a final response at all
- response = self.recv_data()
- vvv("FETCH wrote %d bytes to %s" % (bytes, out_path))
- fh.close()
-
- def close(self):
- ''' terminate the connection '''
- # Be a good citizen
- try:
- self.conn.close()
- except:
- pass
-
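send_data/recv_data above implement classic length-prefixed framing: an 8-byte big-endian unsigned length ('!Q') followed by the payload, with the reader looping because a single recv() may return only part of a message. A minimal standalone sketch of the same framing over a plain socket:

    import socket
    import struct

    def send_msg(conn: socket.socket, data: bytes) -> None:
        # prefix each message with its length as an 8-byte big-endian integer
        conn.sendall(struct.pack("!Q", len(data)) + data)

    def recv_exact(conn: socket.socket, n: int) -> bytes:
        # recv() may return short reads, so loop until n bytes arrive
        buf = b""
        while len(buf) < n:
            chunk = conn.recv(n - len(buf))
            if not chunk:
                raise ConnectionError("peer closed mid-message")
            buf += chunk
        return buf

    def recv_msg(conn: socket.socket) -> bytes:
        (length,) = struct.unpack("!Q", recv_exact(conn, 8))
        return recv_exact(conn, length)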
diff --git a/v1/ansible/runner/connection_plugins/chroot.py b/v1/ansible/runner/connection_plugins/chroot.py
deleted file mode 100644
index 3e96047287..0000000000
--- a/v1/ansible/runner/connection_plugins/chroot.py
+++ /dev/null
@@ -1,132 +0,0 @@
-# Based on local.py (c) 2012, Michael DeHaan <michael.dehaan@gmail.com>
-# (c) 2013, Maykel Moya <mmoya@speedyrails.com>
-#
-# This file is part of Ansible
-#
-# Ansible is free software: you can redistribute it and/or modify
-# it under the terms of the GNU General Public License as published by
-# the Free Software Foundation, either version 3 of the License, or
-# (at your option) any later version.
-#
-# Ansible is distributed in the hope that it will be useful,
-# but WITHOUT ANY WARRANTY; without even the implied warranty of
-# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
-# GNU General Public License for more details.
-#
-# You should have received a copy of the GNU General Public License
-# along with Ansible. If not, see <http://www.gnu.org/licenses/>.
-
-import distutils.spawn
-import traceback
-import os
-import shutil
-import subprocess
-from ansible import errors
-from ansible import utils
-from ansible.callbacks import vvv
-import ansible.constants as C
-
-class Connection(object):
- ''' Local chroot based connections '''
-
- def __init__(self, runner, host, port, *args, **kwargs):
- self.chroot = host
- self.has_pipelining = False
- self.become_methods_supported=C.BECOME_METHODS
-
- if os.geteuid() != 0:
- raise errors.AnsibleError("chroot connection requires running as root")
-
- # we're running as root on the local system so do some
- # trivial checks for ensuring 'host' is actually a chroot'able dir
- if not os.path.isdir(self.chroot):
- raise errors.AnsibleError("%s is not a directory" % self.chroot)
-
- chrootsh = os.path.join(self.chroot, 'bin/sh')
- if not utils.is_executable(chrootsh):
- raise errors.AnsibleError("%s does not look like a chrootable dir (/bin/sh missing)" % self.chroot)
-
- self.chroot_cmd = distutils.spawn.find_executable('chroot')
- if not self.chroot_cmd:
- raise errors.AnsibleError("chroot command not found in PATH")
-
- self.runner = runner
- self.host = host
- # port is unused, since this is local
- self.port = port
-
- def connect(self, port=None):
- ''' connect to the chroot; nothing to do here '''
-
- vvv("THIS IS A LOCAL CHROOT DIR", host=self.chroot)
-
- return self
-
- def exec_command(self, cmd, tmp_path, become_user=None, sudoable=False, executable='/bin/sh', in_data=None):
- ''' run a command on the chroot '''
-
- if sudoable and self.runner.become and self.runner.become_method not in self.become_methods_supported:
- raise errors.AnsibleError("Internal Error: this module does not support running commands via %s" % self.runner.become_method)
-
- if in_data:
- raise errors.AnsibleError("Internal Error: this module does not support optimized module pipelining")
-
- # We enter the chroot as root, so privilege escalation is ignored.
-
- if executable:
- local_cmd = [self.chroot_cmd, self.chroot, executable, '-c', cmd]
- else:
- local_cmd = '%s "%s" %s' % (self.chroot_cmd, self.chroot, cmd)
-
- vvv("EXEC %s" % (local_cmd), host=self.chroot)
- p = subprocess.Popen(local_cmd, shell=isinstance(local_cmd, basestring),
- cwd=self.runner.basedir,
- stdin=subprocess.PIPE,
- stdout=subprocess.PIPE, stderr=subprocess.PIPE)
-
- stdout, stderr = p.communicate()
- return (p.returncode, '', stdout, stderr)
-
- def put_file(self, in_path, out_path):
- ''' transfer a file from local to chroot '''
-
- if not out_path.startswith(os.path.sep):
- out_path = os.path.join(os.path.sep, out_path)
- normpath = os.path.normpath(out_path)
- out_path = os.path.join(self.chroot, normpath[1:])
-
- vvv("PUT %s TO %s" % (in_path, out_path), host=self.chroot)
- if not os.path.exists(in_path):
- raise errors.AnsibleFileNotFound("file or module does not exist: %s" % in_path)
- try:
- shutil.copyfile(in_path, out_path)
- except shutil.Error:
- traceback.print_exc()
- raise errors.AnsibleError("failed to copy: %s and %s are the same" % (in_path, out_path))
- except IOError:
- traceback.print_exc()
- raise errors.AnsibleError("failed to transfer file to %s" % out_path)
-
- def fetch_file(self, in_path, out_path):
- ''' fetch a file from chroot to local '''
-
- if not in_path.startswith(os.path.sep):
- in_path = os.path.join(os.path.sep, in_path)
- normpath = os.path.normpath(in_path)
- in_path = os.path.join(self.chroot, normpath[1:])
-
- vvv("FETCH %s TO %s" % (in_path, out_path), host=self.chroot)
- if not os.path.exists(in_path):
- raise errors.AnsibleFileNotFound("file or module does not exist: %s" % in_path)
- try:
- shutil.copyfile(in_path, out_path)
- except shutil.Error:
- traceback.print_exc()
- raise errors.AnsibleError("failed to copy: %s and %s are the same" % (in_path, out_path))
- except IOError:
- traceback.print_exc()
- raise errors.AnsibleError("failed to transfer file to %s" % out_path)
-
- def close(self):
- ''' terminate the connection; nothing to do here '''
- pass
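put_file/fetch_file above map client paths into the chroot by normalizing to an absolute path and then re-rooting it under the chroot directory, which also keeps '..' segments from escaping. The same mapping as a standalone helper (chroot_path is an illustrative name):

    import os

    def chroot_path(chroot: str, path: str) -> str:
        # force the path absolute, collapse '..' and '.', then re-root it
        if not path.startswith(os.path.sep):
            path = os.path.join(os.path.sep, path)
        normpath = os.path.normpath(path)
        return os.path.join(chroot, normpath[1:])

    print(chroot_path("/srv/jail", "etc/../etc/hosts"))  # /srv/jail/etc/hosts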
diff --git a/v1/ansible/runner/connection_plugins/fireball.py b/v1/ansible/runner/connection_plugins/fireball.py
deleted file mode 100644
index 562fc2eccf..0000000000
--- a/v1/ansible/runner/connection_plugins/fireball.py
+++ /dev/null
@@ -1,153 +0,0 @@
-# (c) 2012, Michael DeHaan <michael.dehaan@gmail.com>
-#
-# This file is part of Ansible
-#
-# Ansible is free software: you can redistribute it and/or modify
-# it under the terms of the GNU General Public License as published by
-# the Free Software Foundation, either version 3 of the License, or
-# (at your option) any later version.
-#
-# Ansible is distributed in the hope that it will be useful,
-# but WITHOUT ANY WARRANTY; without even the implied warranty of
-# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
-# GNU General Public License for more details.
-#
-# You should have received a copy of the GNU General Public License
-# along with Ansible. If not, see <http://www.gnu.org/licenses/>.
-
-import json
-import os
-import base64
-from ansible.callbacks import vvv
-from ansible import utils
-from ansible import errors
-from ansible import constants
-
-HAVE_ZMQ=False
-
-try:
- import zmq
- HAVE_ZMQ=True
-except ImportError:
- pass
-
-class Connection(object):
- ''' ZeroMQ accelerated connection '''
-
- def __init__(self, runner, host, port, *args, **kwargs):
-
- self.runner = runner
- self.has_pipelining = False
-
- # attempt to work around shared-memory funness
- if getattr(self.runner, 'aes_keys', None):
- utils.AES_KEYS = self.runner.aes_keys
-
- self.host = host
- self.key = utils.key_for_hostname(host)
- self.context = None
- self.socket = None
-
- if port is None:
- self.port = constants.ZEROMQ_PORT
- else:
- self.port = port
-
- self.become_methods_supported=[]
-
- def connect(self):
- ''' activates the connection object '''
-
- if not HAVE_ZMQ:
- raise errors.AnsibleError("zmq is not installed")
-
- # this is rough/temporary and will likely be optimized later ...
- self.context = zmq.Context()
- socket = self.context.socket(zmq.REQ)
- addr = "tcp://%s:%s" % (self.host, self.port)
- socket.connect(addr)
- self.socket = socket
-
- return self
-
- def exec_command(self, cmd, tmp_path, become_user, sudoable=False, executable='/bin/sh', in_data=None):
- ''' run a command on the remote host '''
-
- if in_data:
- raise errors.AnsibleError("Internal Error: this module does not support optimized module pipelining")
-
- vvv("EXEC COMMAND %s" % cmd)
-
- if self.runner.become and sudoable:
- raise errors.AnsibleError(
- "When using fireball, do not specify sudo or su to run your tasks. " +
- "Instead sudo the fireball action with sudo. " +
- "Task will communicate with the fireball already running in sudo mode."
- )
-
- data = dict(
- mode='command',
- cmd=cmd,
- tmp_path=tmp_path,
- executable=executable,
- )
- data = utils.jsonify(data)
- data = utils.encrypt(self.key, data)
- self.socket.send(data)
-
- response = self.socket.recv()
- response = utils.decrypt(self.key, response)
- response = utils.parse_json(response)
-
- return (response.get('rc',None), '', response.get('stdout',''), response.get('stderr',''))
-
- def put_file(self, in_path, out_path):
-
- ''' transfer a file from local to remote '''
- vvv("PUT %s TO %s" % (in_path, out_path), host=self.host)
-
- if not os.path.exists(in_path):
- raise errors.AnsibleFileNotFound("file or module does not exist: %s" % in_path)
- data = file(in_path).read()
- data = base64.b64encode(data)
-
- data = dict(mode='put', data=data, out_path=out_path)
- # TODO: support chunked file transfer
- data = utils.jsonify(data)
- data = utils.encrypt(self.key, data)
- self.socket.send(data)
-
- response = self.socket.recv()
- response = utils.decrypt(self.key, response)
- response = utils.parse_json(response)
-
- # no meaningful response needed for this
-
- def fetch_file(self, in_path, out_path):
- ''' save a remote file to the specified path '''
- vvv("FETCH %s TO %s" % (in_path, out_path), host=self.host)
-
- data = dict(mode='fetch', in_path=in_path)
- data = utils.jsonify(data)
- data = utils.encrypt(self.key, data)
- self.socket.send(data)
-
- response = self.socket.recv()
- response = utils.decrypt(self.key, response)
- response = utils.parse_json(response)
- response = response['data']
- response = base64.b64decode(response)
-
- fh = open(out_path, "w")
- fh.write(response)
- fh.close()
-
- def close(self):
- ''' terminate the connection '''
- # Be a good citizen
- try:
- self.socket.close()
- self.context.term()
- except:
- pass
-
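The fireball transport is a plain ZeroMQ REQ/REP round trip: connect a REQ socket, send one encrypted JSON request, and block on the single reply. A hedged sketch of the unencrypted skeleton, requiring pyzmq; the payload shape here is illustrative, not the fireball wire format:

    import json
    import zmq

    def fireball_style_request(host: str, port: int, payload: dict) -> dict:
        context = zmq.Context()
        sock = context.socket(zmq.REQ)
        try:
            sock.connect("tcp://%s:%s" % (host, port))
            sock.send(json.dumps(payload).encode("utf-8"))
            # a REQ socket blocks until exactly one reply arrives
            return json.loads(sock.recv().decode("utf-8"))
        finally:
            sock.close()
            context.term()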
diff --git a/v1/ansible/runner/connection_plugins/funcd.py b/v1/ansible/runner/connection_plugins/funcd.py
deleted file mode 100644
index 92b7f53605..0000000000
--- a/v1/ansible/runner/connection_plugins/funcd.py
+++ /dev/null
@@ -1,97 +0,0 @@
-# Based on local.py (c) 2012, Michael DeHaan <michael.dehaan@gmail.com>
-# Based on chroot.py (c) 2013, Maykel Moya <mmoya@speedyrails.com>
-# (c) 2013, Michael Scherer <misc@zarb.org>
-#
-# This file is part of Ansible
-#
-# Ansible is free software: you can redistribute it and/or modify
-# it under the terms of the GNU General Public License as published by
-# the Free Software Foundation, either version 3 of the License, or
-# (at your option) any later version.
-#
-# Ansible is distributed in the hope that it will be useful,
-# but WITHOUT ANY WARRANTY; without even the implied warranty of
-# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
-# GNU General Public License for more details.
-#
-# You should have received a copy of the GNU General Public License
-# along with Ansible. If not, see <http://www.gnu.org/licenses/>.
-
-# ---
- # The func transport permits using ansible over func. People who have already set up
- # func and wish to experiment with ansible can move to it gradually,
- # without having to completely redo their network setup.
-
-HAVE_FUNC=False
-try:
- import func.overlord.client as fc
- HAVE_FUNC=True
-except ImportError:
- pass
-
-import os
-from ansible.callbacks import vvv
-from ansible import errors
-import tempfile
-import shutil
-
-
-class Connection(object):
- ''' Func-based connections '''
-
- def __init__(self, runner, host, port, *args, **kwargs):
- self.runner = runner
- self.host = host
- self.has_pipelining = False
- # port is unused; traffic goes over func
- self.port = port
-
- def connect(self, port=None):
- if not HAVE_FUNC:
- raise errors.AnsibleError("func is not installed")
-
- self.client = fc.Client(self.host)
- return self
-
- def exec_command(self, cmd, tmp_path, become_user=None, sudoable=False,
- executable='/bin/sh', in_data=None):
- ''' run a command on the remote minion '''
-
- if in_data:
- raise errors.AnsibleError("Internal Error: this module does not support optimized module pipelining")
-
- # totally ignores privilege escalation
- vvv("EXEC %s" % (cmd), host=self.host)
- p = self.client.command.run(cmd)[self.host]
- return (p[0], '', p[1], p[2])
-
- def _normalize_path(self, path, prefix):
- if not path.startswith(os.path.sep):
- path = os.path.join(os.path.sep, path)
- normpath = os.path.normpath(path)
- return os.path.join(prefix, normpath[1:])
-
- def put_file(self, in_path, out_path):
- ''' transfer a file from local to remote '''
-
- out_path = self._normalize_path(out_path, '/')
- vvv("PUT %s TO %s" % (in_path, out_path), host=self.host)
- self.client.local.copyfile.send(in_path, out_path)
-
- def fetch_file(self, in_path, out_path):
- ''' fetch a file from remote to local '''
-
- in_path = self._normalize_path(in_path, '/')
- vvv("FETCH %s TO %s" % (in_path, out_path), host=self.host)
- # need to use a tmp dir due to the difference in semantics between
- # getfile (which takes a directory as destination) and fetch_file,
- # which takes a file directly
- tmpdir = tempfile.mkdtemp(prefix="func_ansible")
- self.client.local.getfile.get(in_path, tmpdir)
- shutil.move(os.path.join(tmpdir, self.host, os.path.basename(in_path)),
- out_path)
- shutil.rmtree(tmpdir)
-
- def close(self):
- ''' terminate the connection; nothing to do here '''
- pass
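fetch_file above works around an API mismatch: func's getfile can only download into a directory, so the plugin stages into a temp dir and moves the single file out. The pattern, generalized into a sketch where download_into_dir stands in for func's getfile:

    import os
    import shutil
    import tempfile

    def fetch_via_tmpdir(download_into_dir, host: str, in_path: str, out_path: str) -> None:
        # stage into a throwaway directory, then move the one file we wanted
        tmpdir = tempfile.mkdtemp(prefix="func_ansible")
        try:
            download_into_dir(in_path, tmpdir)  # e.g. func's getfile
            shutil.move(os.path.join(tmpdir, host, os.path.basename(in_path)), out_path)
        finally:
            shutil.rmtree(tmpdir, ignore_errors=True)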
diff --git a/v1/ansible/runner/connection_plugins/jail.py b/v1/ansible/runner/connection_plugins/jail.py
deleted file mode 100644
index c7b61bc638..0000000000
--- a/v1/ansible/runner/connection_plugins/jail.py
+++ /dev/null
@@ -1,153 +0,0 @@
-# Based on local.py (c) 2012, Michael DeHaan <michael.dehaan@gmail.com>
-# and chroot.py (c) 2013, Maykel Moya <mmoya@speedyrails.com>
-# (c) 2013, Michael Scherer <misc@zarb.org>
-#
-# This file is part of Ansible
-#
-# Ansible is free software: you can redistribute it and/or modify
-# it under the terms of the GNU General Public License as published by
-# the Free Software Foundation, either version 3 of the License, or
-# (at your option) any later version.
-#
-# Ansible is distributed in the hope that it will be useful,
-# but WITHOUT ANY WARRANTY; without even the implied warranty of
-# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
-# GNU General Public License for more details.
-#
-# You should have received a copy of the GNU General Public License
-# along with Ansible. If not, see <http://www.gnu.org/licenses/>.
-
-import distutils.spawn
-import traceback
-import os
-import shutil
-import subprocess
-from ansible import errors
-from ansible.callbacks import vvv
-import ansible.constants as C
-
-class Connection(object):
- ''' Local jail based connections '''
-
- def _search_executable(self, executable):
- cmd = distutils.spawn.find_executable(executable)
- if not cmd:
- raise errors.AnsibleError("%s command not found in PATH") % executable
- return cmd
-
- def list_jails(self):
- p = subprocess.Popen([self.jls_cmd, '-q', 'name'],
- cwd=self.runner.basedir,
- stdin=subprocess.PIPE,
- stdout=subprocess.PIPE, stderr=subprocess.PIPE)
-
- stdout, stderr = p.communicate()
-
- return stdout.split()
-
- def get_jail_path(self):
- p = subprocess.Popen([self.jls_cmd, '-j', self.jail, '-q', 'path'],
- cwd=self.runner.basedir,
- stdin=subprocess.PIPE,
- stdout=subprocess.PIPE, stderr=subprocess.PIPE)
-
- stdout, stderr = p.communicate()
- # remove \n
- return stdout[:-1]
-
- def __init__(self, runner, host, port, *args, **kwargs):
- self.jail = host
- self.runner = runner
- self.host = host
- self.has_pipelining = False
- self.become_methods_supported=C.BECOME_METHODS
-
- if os.geteuid() != 0:
- raise errors.AnsibleError("jail connection requires running as root")
-
- self.jls_cmd = self._search_executable('jls')
- self.jexec_cmd = self._search_executable('jexec')
-
- if self.jail not in self.list_jails():
- raise errors.AnsibleError("incorrect jail name %s" % self.jail)
-
- self.host = host
- # port is unused, since this is local
- self.port = port
-
- def connect(self, port=None):
- ''' connect to the jail; nothing to do here '''
-
- vvv("THIS IS A LOCAL JAIL DIR", host=self.jail)
-
- return self
-
- # build the local command that runs inside the jail
- def _generate_cmd(self, executable, cmd):
- if executable:
- local_cmd = [self.jexec_cmd, self.jail, executable, '-c', cmd]
- else:
- local_cmd = '%s "%s" %s' % (self.jexec_cmd, self.jail, cmd)
- return local_cmd
-
- def exec_command(self, cmd, tmp_path, become_user=None, sudoable=False, executable='/bin/sh', in_data=None):
- ''' run a command in the jail '''
-
- if sudoable and self.runner.become and self.runner.become_method not in self.become_methods_supported:
- raise errors.AnsibleError("Internal Error: this module does not support running commands via %s" % self.runner.become_method)
-
- if in_data:
- raise errors.AnsibleError("Internal Error: this module does not support optimized module pipelining")
-
- # Ignores privilege escalation
- local_cmd = self._generate_cmd(executable, cmd)
-
- vvv("EXEC %s" % (local_cmd), host=self.jail)
- p = subprocess.Popen(local_cmd, shell=isinstance(local_cmd, basestring),
- cwd=self.runner.basedir,
- stdin=subprocess.PIPE,
- stdout=subprocess.PIPE, stderr=subprocess.PIPE)
-
- stdout, stderr = p.communicate()
- return (p.returncode, '', stdout, stderr)
-
- def _normalize_path(self, path, prefix):
- if not path.startswith(os.path.sep):
- path = os.path.join(os.path.sep, path)
- normpath = os.path.normpath(path)
- return os.path.join(prefix, normpath[1:])
-
- def _copy_file(self, in_path, out_path):
- if not os.path.exists(in_path):
- raise errors.AnsibleFileNotFound("file or module does not exist: %s" % in_path)
- try:
- shutil.copyfile(in_path, out_path)
- except shutil.Error:
- traceback.print_exc()
- raise errors.AnsibleError("failed to copy: %s and %s are the same" % (in_path, out_path))
- except IOError:
- traceback.print_exc()
- raise errors.AnsibleError("failed to transfer file to %s" % out_path)
-
- def put_file(self, in_path, out_path):
- ''' transfer a file from local to chroot '''
-
- out_path = self._normalize_path(out_path, self.get_jail_path())
- vvv("PUT %s TO %s" % (in_path, out_path), host=self.jail)
-
- self._copy_file(in_path, out_path)
-
- def fetch_file(self, in_path, out_path):
- ''' fetch a file from chroot to local '''
-
- in_path = self._normalize_path(in_path, self.get_jail_path())
- vvv("FETCH %s TO %s" % (in_path, out_path), host=self.jail)
-
- self._copy_file(in_path, out_path)
-
- def close(self):
- ''' terminate the connection; nothing to do here '''
- pass
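list_jails/get_jail_path above shell out to FreeBSD's jls(8) in quiet mode and parse its stdout. A compact modern equivalent of the discovery step, assuming a host where jls exists:

    import subprocess

    def list_jails(jls_cmd: str = "jls") -> list:
        # `jls -q name` prints one jail name per line; check=True raises
        # CalledProcessError if jls fails
        out = subprocess.run([jls_cmd, "-q", "name"],
                             capture_output=True, text=True, check=True).stdout
        return out.split()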
diff --git a/v1/ansible/runner/connection_plugins/libvirt_lxc.py b/v1/ansible/runner/connection_plugins/libvirt_lxc.py
deleted file mode 100644
index 832b78251c..0000000000
--- a/v1/ansible/runner/connection_plugins/libvirt_lxc.py
+++ /dev/null
@@ -1,129 +0,0 @@
-# Based on local.py (c) 2012, Michael DeHaan <michael.dehaan@gmail.com>
-# Based on chroot.py (c) 2013, Maykel Moya <mmoya@speedyrails.com>
-# (c) 2013, Michael Scherer <misc@zarb.org>
-#
-# This file is part of Ansible
-#
-# Ansible is free software: you can redistribute it and/or modify
-# it under the terms of the GNU General Public License as published by
-# the Free Software Foundation, either version 3 of the License, or
-# (at your option) any later version.
-#
-# Ansible is distributed in the hope that it will be useful,
-# but WITHOUT ANY WARRANTY; without even the implied warranty of
-# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
-# GNU General Public License for more details.
-#
-# You should have received a copy of the GNU General Public License
-# along with Ansible. If not, see <http://www.gnu.org/licenses/>.
-
-import distutils.spawn
-import os
-import subprocess
-from ansible import errors
-from ansible.callbacks import vvv
-import ansible.constants as C
-
-class Connection(object):
- ''' Local lxc based connections '''
-
- def _search_executable(self, executable):
- cmd = distutils.spawn.find_executable(executable)
- if not cmd:
- raise errors.AnsibleError("%s command not found in PATH") % executable
- return cmd
-
- def _check_domain(self, domain):
- p = subprocess.Popen([self.cmd, '-q', '-c', 'lxc:///', 'dominfo', domain],
- stdout=subprocess.PIPE, stderr=subprocess.PIPE)
- p.communicate()
- if p.returncode:
- raise errors.AnsibleError("%s is not a lxc defined in libvirt" % domain)
-
- def __init__(self, runner, host, port, *args, **kwargs):
- self.lxc = host
-
- self.cmd = self._search_executable('virsh')
-
- self._check_domain(host)
-
- self.runner = runner
- self.host = host
- # port is unused, since this is local
- self.port = port
- self.become_methods_supported=C.BECOME_METHODS
-
- def connect(self, port=None):
- ''' connect to the lxc; nothing to do here '''
-
- vvv("THIS IS A LOCAL LXC DIR", host=self.lxc)
-
- return self
-
- def _generate_cmd(self, executable, cmd):
- if executable:
-            local_cmd = [self.cmd, '-q', '-c', 'lxc:///', 'lxc-enter-namespace', self.lxc, '--', executable, '-c', cmd]
- else:
- local_cmd = '%s -q -c lxc:/// lxc-enter-namespace %s -- %s' % (self.cmd, self.lxc, cmd)
- return local_cmd
-
- def exec_command(self, cmd, tmp_path, become_user, sudoable=False, executable='/bin/sh', in_data=None):
-        ''' run a command in the lxc container '''
-
- if sudoable and self.runner.become and self.runner.become_method not in self.become_methods_supported:
- raise errors.AnsibleError("Internal Error: this module does not support running commands via %s" % self.runner.become_method)
-
- if in_data:
- raise errors.AnsibleError("Internal Error: this module does not support optimized module pipelining")
-
- # We ignore privilege escalation!
- local_cmd = self._generate_cmd(executable, cmd)
-
- vvv("EXEC %s" % (local_cmd), host=self.lxc)
- p = subprocess.Popen(local_cmd, shell=isinstance(local_cmd, basestring),
- cwd=self.runner.basedir,
- stdin=subprocess.PIPE,
- stdout=subprocess.PIPE, stderr=subprocess.PIPE)
-
- stdout, stderr = p.communicate()
- return (p.returncode, '', stdout, stderr)
-
- def _normalize_path(self, path, prefix):
- if not path.startswith(os.path.sep):
- path = os.path.join(os.path.sep, path)
- normpath = os.path.normpath(path)
- return os.path.join(prefix, normpath[1:])
-
- def put_file(self, in_path, out_path):
- ''' transfer a file from local to lxc '''
-
- out_path = self._normalize_path(out_path, '/')
- vvv("PUT %s TO %s" % (in_path, out_path), host=self.lxc)
-
- local_cmd = [self.cmd, '-q', '-c', 'lxc:///', 'lxc-enter-namespace', self.lxc, '--', '/bin/tee', out_path]
- vvv("EXEC %s" % (local_cmd), host=self.lxc)
-
- p = subprocess.Popen(local_cmd, cwd=self.runner.basedir,
- stdin=subprocess.PIPE,
- stdout=subprocess.PIPE, stderr=subprocess.PIPE)
- stdout, stderr = p.communicate(open(in_path,'rb').read())
-
- def fetch_file(self, in_path, out_path):
- ''' fetch a file from lxc to local '''
-
- in_path = self._normalize_path(in_path, '/')
- vvv("FETCH %s TO %s" % (in_path, out_path), host=self.lxc)
-
- local_cmd = [self.cmd, '-q', '-c', 'lxc:///', 'lxc-enter-namespace', self.lxc, '--', '/bin/cat', in_path]
- vvv("EXEC %s" % (local_cmd), host=self.lxc)
-
- p = subprocess.Popen(local_cmd, cwd=self.runner.basedir,
- stdin=subprocess.PIPE,
- stdout=subprocess.PIPE, stderr=subprocess.PIPE)
- stdout, stderr = p.communicate()
- open(out_path,'wb').write(stdout)
-
-
- def close(self):
- ''' terminate the connection; nothing to do here '''
- pass
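
put_file() above copies a file into the container by piping its bytes into
/bin/tee running inside the namespace; fetch_file() does the reverse with
/bin/cat. A minimal Python 3 sketch of the push half, assuming a generic
command list; unlike the original it closes the source file and checks the
exit status:

    import subprocess

    def push_bytes(cmd, in_path):
        # stream a local file into a command's stdin (the plugin feeds
        # /bin/tee inside the container namespace this way)
        with open(in_path, 'rb') as f:
            p = subprocess.Popen(cmd, stdin=subprocess.PIPE,
                                 stdout=subprocess.PIPE, stderr=subprocess.PIPE)
            out, err = p.communicate(f.read())
        if p.returncode != 0:
            raise RuntimeError(err.decode(errors='replace'))

    # e.g. push_bytes(['tee', '/tmp/copy.txt'], 'local.txt')
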
diff --git a/v1/ansible/runner/connection_plugins/local.py b/v1/ansible/runner/connection_plugins/local.py
deleted file mode 100644
index beaeb1ae50..0000000000
--- a/v1/ansible/runner/connection_plugins/local.py
+++ /dev/null
@@ -1,129 +0,0 @@
-# (c) 2012, Michael DeHaan <michael.dehaan@gmail.com>
-#
-# This file is part of Ansible
-#
-# Ansible is free software: you can redistribute it and/or modify
-# it under the terms of the GNU General Public License as published by
-# the Free Software Foundation, either version 3 of the License, or
-# (at your option) any later version.
-#
-# Ansible is distributed in the hope that it will be useful,
-# but WITHOUT ANY WARRANTY; without even the implied warranty of
-# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
-# GNU General Public License for more details.
-#
-# You should have received a copy of the GNU General Public License
-# along with Ansible. If not, see <http://www.gnu.org/licenses/>.
-
-import traceback
-import os
-import pipes
-import shutil
-import subprocess
-import select
-import fcntl
-from ansible import errors
-from ansible import utils
-from ansible.callbacks import vvv
-
-
-class Connection(object):
- ''' Local based connections '''
-
- def __init__(self, runner, host, port, *args, **kwargs):
- self.runner = runner
- self.host = host
- # port is unused, since this is local
- self.port = port
- self.has_pipelining = False
-
-        # TODO: add su (needs a tty), pbrun, pfexec
- self.become_methods_supported=['sudo']
-
- def connect(self, port=None):
- ''' connect to the local host; nothing to do here '''
-
- return self
-
- def exec_command(self, cmd, tmp_path, become_user=None, sudoable=False, executable='/bin/sh', in_data=None):
- ''' run a command on the local host '''
-
-        # su must be run from a terminal, and therefore isn't supported here (yet?)
- if sudoable and self.runner.become and self.runner.become_method not in self.become_methods_supported:
- raise errors.AnsibleError("Internal Error: this module does not support running commands via %s" % self.runner.become_method)
-
- if in_data:
- raise errors.AnsibleError("Internal Error: this module does not support optimized module pipelining")
-
- if self.runner.become and sudoable:
- local_cmd, prompt, success_key = utils.make_become_cmd(cmd, become_user, executable, self.runner.become_method, '-H', self.runner.become_exe)
- else:
- if executable:
- local_cmd = executable.split() + ['-c', cmd]
- else:
- local_cmd = cmd
- executable = executable.split()[0] if executable else None
-
- vvv("EXEC %s" % (local_cmd), host=self.host)
- p = subprocess.Popen(local_cmd, shell=isinstance(local_cmd, basestring),
- cwd=self.runner.basedir, executable=executable,
- stdin=subprocess.PIPE,
- stdout=subprocess.PIPE, stderr=subprocess.PIPE)
-
- if self.runner.become and sudoable and self.runner.become_pass:
- fcntl.fcntl(p.stdout, fcntl.F_SETFL,
- fcntl.fcntl(p.stdout, fcntl.F_GETFL) | os.O_NONBLOCK)
- fcntl.fcntl(p.stderr, fcntl.F_SETFL,
- fcntl.fcntl(p.stderr, fcntl.F_GETFL) | os.O_NONBLOCK)
- become_output = ''
- while success_key not in become_output:
-
- if prompt and become_output.endswith(prompt):
- break
- if utils.su_prompts.check_su_prompt(become_output):
- break
-
- rfd, wfd, efd = select.select([p.stdout, p.stderr], [],
- [p.stdout, p.stderr], self.runner.timeout)
- if p.stdout in rfd:
- chunk = p.stdout.read()
- elif p.stderr in rfd:
- chunk = p.stderr.read()
- else:
- stdout, stderr = p.communicate()
- raise errors.AnsibleError('timeout waiting for %s password prompt:\n' % self.runner.become_method + become_output)
- if not chunk:
- stdout, stderr = p.communicate()
- raise errors.AnsibleError('%s output closed while waiting for password prompt:\n' % self.runner.become_method + become_output)
- become_output += chunk
- if success_key not in become_output:
- p.stdin.write(self.runner.become_pass + '\n')
- fcntl.fcntl(p.stdout, fcntl.F_SETFL, fcntl.fcntl(p.stdout, fcntl.F_GETFL) & ~os.O_NONBLOCK)
- fcntl.fcntl(p.stderr, fcntl.F_SETFL, fcntl.fcntl(p.stderr, fcntl.F_GETFL) & ~os.O_NONBLOCK)
-
- stdout, stderr = p.communicate()
- return (p.returncode, '', stdout, stderr)
-
- def put_file(self, in_path, out_path):
- ''' transfer a file from local to local '''
-
- vvv("PUT %s TO %s" % (in_path, out_path), host=self.host)
- if not os.path.exists(in_path):
- raise errors.AnsibleFileNotFound("file or module does not exist: %s" % in_path)
- try:
- shutil.copyfile(in_path, out_path)
- except shutil.Error:
- traceback.print_exc()
- raise errors.AnsibleError("failed to copy: %s and %s are the same" % (in_path, out_path))
- except IOError:
- traceback.print_exc()
- raise errors.AnsibleError("failed to transfer file to %s" % out_path)
-
- def fetch_file(self, in_path, out_path):
- vvv("FETCH %s TO %s" % (in_path, out_path), host=self.host)
- ''' fetch a file from local to local -- for copatibility '''
- self.put_file(in_path, out_path)
-
- def close(self):
- ''' terminate the connection; nothing to do here '''
- pass
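
The sudo-password branch above flips the child's stdout/stderr to non-blocking
with fcntl, then select()s until the prompt or success marker appears. A
minimal Python 3 sketch of that read loop, with a hypothetical needle and
timeout; os.read is used because a buffered .read() can raise BlockingIOError
on a non-blocking descriptor:

    import fcntl, os, select

    def read_until(pipe, needle, timeout=10):
        # switch the pipe to non-blocking, then poll with select() until
        # the expected prompt bytes arrive or the stream closes
        fd = pipe.fileno()
        fcntl.fcntl(fd, fcntl.F_SETFL, fcntl.fcntl(fd, fcntl.F_GETFL) | os.O_NONBLOCK)
        buf = b''
        while needle not in buf:
            rfd, _, _ = select.select([fd], [], [], timeout)
            if not rfd:
                raise TimeoutError('no prompt within %ss' % timeout)
            chunk = os.read(fd, 4096)
            if not chunk:
                break  # EOF before the prompt appeared
            buf += chunk
        return buf
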
diff --git a/v1/ansible/runner/connection_plugins/paramiko_ssh.py b/v1/ansible/runner/connection_plugins/paramiko_ssh.py
deleted file mode 100644
index 8eaf97c3f6..0000000000
--- a/v1/ansible/runner/connection_plugins/paramiko_ssh.py
+++ /dev/null
@@ -1,419 +0,0 @@
-# (c) 2012, Michael DeHaan <michael.dehaan@gmail.com>
-#
-# This file is part of Ansible
-#
-# Ansible is free software: you can redistribute it and/or modify
-# it under the terms of the GNU General Public License as published by
-# the Free Software Foundation, either version 3 of the License, or
-# (at your option) any later version.
-#
-# Ansible is distributed in the hope that it will be useful,
-# but WITHOUT ANY WARRANTY; without even the implied warranty of
-# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
-# GNU General Public License for more details.
-#
-# You should have received a copy of the GNU General Public License
-# along with Ansible. If not, see <http://www.gnu.org/licenses/>.
-
-
-# ---
-# The paramiko transport is provided because many distributions, in particular EL6 and before,
-# do not support ControlPersist in their SSH implementations. ControlPersist is needed on the Ansible
-# control machine to be reasonably efficient with connections. Thus paramiko is faster
-# for most users on these platforms. Users with ControlPersist capability can consider
-# using -c ssh or configuring the transport in ansible.cfg.
-
-import warnings
-import os
-import pipes
-import socket
-import random
-import logging
-import tempfile
-import traceback
-import fcntl
-import re
-import sys
-from termios import tcflush, TCIFLUSH
-from binascii import hexlify
-from ansible.callbacks import vvv
-from ansible import errors
-from ansible import utils
-from ansible import constants as C
-
-AUTHENTICITY_MSG="""
-paramiko: The authenticity of host '%s' can't be established.
-The %s key fingerprint is %s.
-Are you sure you want to continue connecting (yes/no)?
-"""
-
-# prevent paramiko warning noise -- see http://stackoverflow.com/questions/3920502/
-HAVE_PARAMIKO=False
-with warnings.catch_warnings():
- warnings.simplefilter("ignore")
- try:
- import paramiko
- HAVE_PARAMIKO=True
- logging.getLogger("paramiko").setLevel(logging.WARNING)
- except ImportError:
- pass
-
-class MyAddPolicy(object):
- """
- Based on AutoAddPolicy in paramiko so we can determine when keys are added
- and also prompt for input.
-
- Policy for automatically adding the hostname and new host key to the
- local L{HostKeys} object, and saving it. This is used by L{SSHClient}.
- """
-
- def __init__(self, runner):
- self.runner = runner
-
- def missing_host_key(self, client, hostname, key):
-
- if C.HOST_KEY_CHECKING:
-
- fcntl.lockf(self.runner.process_lockfile, fcntl.LOCK_EX)
- fcntl.lockf(self.runner.output_lockfile, fcntl.LOCK_EX)
-
- old_stdin = sys.stdin
- sys.stdin = self.runner._new_stdin
- fingerprint = hexlify(key.get_fingerprint())
- ktype = key.get_name()
-
- # clear out any premature input on sys.stdin
- tcflush(sys.stdin, TCIFLUSH)
-
- inp = raw_input(AUTHENTICITY_MSG % (hostname, ktype, fingerprint))
- sys.stdin = old_stdin
- if inp not in ['yes','y','']:
- fcntl.flock(self.runner.output_lockfile, fcntl.LOCK_UN)
- fcntl.flock(self.runner.process_lockfile, fcntl.LOCK_UN)
- raise errors.AnsibleError("host connection rejected by user")
-
- fcntl.lockf(self.runner.output_lockfile, fcntl.LOCK_UN)
- fcntl.lockf(self.runner.process_lockfile, fcntl.LOCK_UN)
-
-
- key._added_by_ansible_this_time = True
-
- # existing implementation below:
- client._host_keys.add(hostname, key.get_name(), key)
-
- # host keys are actually saved in close() function below
- # in order to control ordering.
-
-
-# keep connection objects on a per host basis to avoid repeated attempts to reconnect
-
-SSH_CONNECTION_CACHE = {}
-SFTP_CONNECTION_CACHE = {}
-
-class Connection(object):
- ''' SSH based connections with Paramiko '''
-
- def __init__(self, runner, host, port, user, password, private_key_file, *args, **kwargs):
-
- self.ssh = None
- self.sftp = None
- self.runner = runner
- self.host = host
- self.port = port or 22
- self.user = user
- self.password = password
- self.private_key_file = private_key_file
- self.has_pipelining = False
-
-        # TODO: add pfexec
- self.become_methods_supported=['sudo', 'su', 'pbrun']
-
- def _cache_key(self):
- return "%s__%s__" % (self.host, self.user)
-
- def connect(self):
- cache_key = self._cache_key()
- if cache_key in SSH_CONNECTION_CACHE:
- self.ssh = SSH_CONNECTION_CACHE[cache_key]
- else:
- self.ssh = SSH_CONNECTION_CACHE[cache_key] = self._connect_uncached()
- return self
-
- def _connect_uncached(self):
- ''' activates the connection object '''
-
- if not HAVE_PARAMIKO:
- raise errors.AnsibleError("paramiko is not installed")
-
- vvv("ESTABLISH CONNECTION FOR USER: %s on PORT %s TO %s" % (self.user, self.port, self.host), host=self.host)
-
- ssh = paramiko.SSHClient()
-
- self.keyfile = os.path.expanduser("~/.ssh/known_hosts")
-
- if C.HOST_KEY_CHECKING:
- ssh.load_system_host_keys()
-
- ssh.set_missing_host_key_policy(MyAddPolicy(self.runner))
-
- allow_agent = True
-
- if self.password is not None:
- allow_agent = False
-
- try:
-
- if self.private_key_file:
- key_filename = os.path.expanduser(self.private_key_file)
- elif self.runner.private_key_file:
- key_filename = os.path.expanduser(self.runner.private_key_file)
- else:
- key_filename = None
- ssh.connect(self.host, username=self.user, allow_agent=allow_agent, look_for_keys=True,
- key_filename=key_filename, password=self.password,
- timeout=self.runner.timeout, port=self.port)
-
- except Exception, e:
-
- msg = str(e)
- if "PID check failed" in msg:
- raise errors.AnsibleError("paramiko version issue, please upgrade paramiko on the machine running ansible")
- elif "Private key file is encrypted" in msg:
- msg = 'ssh %s@%s:%s : %s\nTo connect as a different user, use -u <username>.' % (
- self.user, self.host, self.port, msg)
- raise errors.AnsibleConnectionFailed(msg)
- else:
- raise errors.AnsibleConnectionFailed(msg)
-
- return ssh
-
- def exec_command(self, cmd, tmp_path, become_user=None, sudoable=False, executable='/bin/sh', in_data=None):
- ''' run a command on the remote host '''
-
- if self.runner.become and sudoable and self.runner.become_method not in self.become_methods_supported:
- raise errors.AnsibleError("Internal Error: this module does not support running commands via %s" % self.runner.become_method)
-
- if in_data:
- raise errors.AnsibleError("Internal Error: this module does not support optimized module pipelining")
-
- bufsize = 4096
-
- try:
-
- self.ssh.get_transport().set_keepalive(5)
- chan = self.ssh.get_transport().open_session()
-
- except Exception, e:
-
- msg = "Failed to open session"
- if len(str(e)) > 0:
- msg += ": %s" % str(e)
- raise errors.AnsibleConnectionFailed(msg)
-
- no_prompt_out = ''
- no_prompt_err = ''
- if not (self.runner.become and sudoable):
-
- if executable:
- quoted_command = executable + ' -c ' + pipes.quote(cmd)
- else:
- quoted_command = cmd
- vvv("EXEC %s" % quoted_command, host=self.host)
- chan.exec_command(quoted_command)
-
- else:
-
- # sudo usually requires a PTY (cf. requiretty option), therefore
-            # we give it one by default (pty=True in ansible.cfg), and we try
- # to initialise from the calling environment
- if C.PARAMIKO_PTY:
- chan.get_pty(term=os.getenv('TERM', 'vt100'),
- width=int(os.getenv('COLUMNS', 0)),
- height=int(os.getenv('LINES', 0)))
- if self.runner.become and sudoable:
- shcmd, prompt, success_key = utils.make_become_cmd(cmd, become_user, executable, self.runner.become_method, '', self.runner.become_exe)
-
- vvv("EXEC %s" % shcmd, host=self.host)
- become_output = ''
-
- try:
-
- chan.exec_command(shcmd)
-
- if self.runner.become_pass:
-
- while True:
-
- if success_key in become_output or \
- (prompt and become_output.endswith(prompt)) or \
- utils.su_prompts.check_su_prompt(become_output):
- break
- chunk = chan.recv(bufsize)
-
- if not chunk:
- if 'unknown user' in become_output:
- raise errors.AnsibleError(
- 'user %s does not exist' % become_user)
- else:
- raise errors.AnsibleError('ssh connection ' +
- 'closed waiting for password prompt')
- become_output += chunk
-
- if success_key not in become_output:
-
- if sudoable:
- chan.sendall(self.runner.become_pass + '\n')
- else:
- no_prompt_out += become_output
- no_prompt_err += become_output
-
- except socket.timeout:
-
- raise errors.AnsibleError('ssh timed out waiting for privilege escalation.\n' + become_output)
-
- stdout = ''.join(chan.makefile('rb', bufsize))
- stderr = ''.join(chan.makefile_stderr('rb', bufsize))
-
-        return (chan.recv_exit_status(), '', no_prompt_out + stdout, no_prompt_err + stderr)
-
- def put_file(self, in_path, out_path):
- ''' transfer a file from local to remote '''
-
- vvv("PUT %s TO %s" % (in_path, out_path), host=self.host)
-
- if not os.path.exists(in_path):
- raise errors.AnsibleFileNotFound("file or module does not exist: %s" % in_path)
-
- try:
- self.sftp = self.ssh.open_sftp()
- except Exception, e:
- raise errors.AnsibleError("failed to open a SFTP connection (%s)" % e)
-
- try:
- self.sftp.put(in_path, out_path)
- except IOError:
- raise errors.AnsibleError("failed to transfer file to %s" % out_path)
-
- def _connect_sftp(self):
-
- cache_key = "%s__%s__" % (self.host, self.user)
- if cache_key in SFTP_CONNECTION_CACHE:
- return SFTP_CONNECTION_CACHE[cache_key]
- else:
- result = SFTP_CONNECTION_CACHE[cache_key] = self.connect().ssh.open_sftp()
- return result
-
- def fetch_file(self, in_path, out_path):
- ''' save a remote file to the specified path '''
-
- vvv("FETCH %s TO %s" % (in_path, out_path), host=self.host)
-
- try:
- self.sftp = self._connect_sftp()
- except Exception, e:
- raise errors.AnsibleError("failed to open a SFTP connection (%s)", e)
-
- try:
- self.sftp.get(in_path, out_path)
- except IOError:
- raise errors.AnsibleError("failed to transfer file from %s" % in_path)
-
- def _any_keys_added(self):
-
- added_any = False
- for hostname, keys in self.ssh._host_keys.iteritems():
- for keytype, key in keys.iteritems():
- added_this_time = getattr(key, '_added_by_ansible_this_time', False)
- if added_this_time:
- return True
- return False
-
- def _save_ssh_host_keys(self, filename):
- '''
- not using the paramiko save_ssh_host_keys function as we want to add new SSH keys at the bottom so folks
- don't complain about it :)
- '''
-
- if not self._any_keys_added():
- return False
-
- path = os.path.expanduser("~/.ssh")
- if not os.path.exists(path):
- os.makedirs(path)
-
- f = open(filename, 'w')
-
- for hostname, keys in self.ssh._host_keys.iteritems():
-
- for keytype, key in keys.iteritems():
-
-                # write pre-existing keys first; keys added this run are appended below
- added_this_time = getattr(key, '_added_by_ansible_this_time', False)
- if not added_this_time:
- f.write("%s %s %s\n" % (hostname, keytype, key.get_base64()))
-
- for hostname, keys in self.ssh._host_keys.iteritems():
-
- for keytype, key in keys.iteritems():
- added_this_time = getattr(key, '_added_by_ansible_this_time', False)
- if added_this_time:
- f.write("%s %s %s\n" % (hostname, keytype, key.get_base64()))
-
- f.close()
-
- def close(self):
- ''' terminate the connection '''
-
- cache_key = self._cache_key()
- SSH_CONNECTION_CACHE.pop(cache_key, None)
- SFTP_CONNECTION_CACHE.pop(cache_key, None)
-
- if self.sftp is not None:
- self.sftp.close()
-
- if C.HOST_KEY_CHECKING and C.PARAMIKO_RECORD_HOST_KEYS and self._any_keys_added():
-
- # add any new SSH host keys -- warning -- this could be slow
- lockfile = self.keyfile.replace("known_hosts",".known_hosts.lock")
- dirname = os.path.dirname(self.keyfile)
- if not os.path.exists(dirname):
- os.makedirs(dirname)
-
- KEY_LOCK = open(lockfile, 'w')
- fcntl.lockf(KEY_LOCK, fcntl.LOCK_EX)
-
- try:
- # just in case any were added recently
-
- self.ssh.load_system_host_keys()
- self.ssh._host_keys.update(self.ssh._system_host_keys)
-
- # gather information about the current key file, so
- # we can ensure the new file has the correct mode/owner
-
- key_dir = os.path.dirname(self.keyfile)
- key_stat = os.stat(self.keyfile)
-
- # Save the new keys to a temporary file and move it into place
- # rather than rewriting the file. We set delete=False because
- # the file will be moved into place rather than cleaned up.
-
- tmp_keyfile = tempfile.NamedTemporaryFile(dir=key_dir, delete=False)
- os.chmod(tmp_keyfile.name, key_stat.st_mode & 07777)
- os.chown(tmp_keyfile.name, key_stat.st_uid, key_stat.st_gid)
-
- self._save_ssh_host_keys(tmp_keyfile.name)
- tmp_keyfile.close()
-
- os.rename(tmp_keyfile.name, self.keyfile)
-
- except:
-
- # unable to save keys, including scenario when key was invalid
- # and caught earlier
- traceback.print_exc()
- pass
- fcntl.lockf(KEY_LOCK, fcntl.LOCK_UN)
-
- self.ssh.close()
-
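
close() above avoids rewriting ~/.ssh/known_hosts in place: it writes a
sibling temp file with the original mode and owner, then os.rename()s it over
the old file, which is atomic on POSIX so readers never see a half-written
file. The same pattern in isolation (Python 3; chown generally requires
owning the file or running as root):

    import os, tempfile

    def atomic_rewrite(path, data):
        st = os.stat(path)
        tmp = tempfile.NamedTemporaryFile(mode='w', delete=False,
                                          dir=os.path.dirname(path))
        try:
            os.chmod(tmp.name, st.st_mode & 0o7777)   # copy permissions
            os.chown(tmp.name, st.st_uid, st.st_gid)  # copy ownership
            tmp.write(data)
            tmp.close()
            os.rename(tmp.name, path)  # atomic swap into place
        except Exception:
            os.unlink(tmp.name)
            raise
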
diff --git a/v1/ansible/runner/connection_plugins/ssh.py b/v1/ansible/runner/connection_plugins/ssh.py
deleted file mode 100644
index 036175f6a9..0000000000
--- a/v1/ansible/runner/connection_plugins/ssh.py
+++ /dev/null
@@ -1,460 +0,0 @@
-# (c) 2012, Michael DeHaan <michael.dehaan@gmail.com>
-#
-# This file is part of Ansible
-#
-# Ansible is free software: you can redistribute it and/or modify
-# it under the terms of the GNU General Public License as published by
-# the Free Software Foundation, either version 3 of the License, or
-# (at your option) any later version.
-#
-# Ansible is distributed in the hope that it will be useful,
-# but WITHOUT ANY WARRANTY; without even the implied warranty of
-# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
-# GNU General Public License for more details.
-#
-# You should have received a copy of the GNU General Public License
-# along with Ansible. If not, see <http://www.gnu.org/licenses/>.
-#
-
-import os
-import re
-import subprocess
-import shlex
-import pipes
-import random
-import select
-import fcntl
-import hmac
-import pwd
-import gettext
-import pty
-from hashlib import sha1
-import ansible.constants as C
-from ansible.callbacks import vvv
-from ansible import errors
-from ansible import utils
-
-
-class Connection(object):
- ''' ssh based connections '''
-
- def __init__(self, runner, host, port, user, password, private_key_file, *args, **kwargs):
- self.runner = runner
- self.host = host
- self.ipv6 = ':' in self.host
- self.port = port
- self.user = str(user)
- self.password = password
- self.private_key_file = private_key_file
- self.HASHED_KEY_MAGIC = "|1|"
- self.has_pipelining = True
-
-        # TODO: add pfexec
- self.become_methods_supported=['sudo', 'su', 'pbrun']
-
- fcntl.lockf(self.runner.process_lockfile, fcntl.LOCK_EX)
- self.cp_dir = utils.prepare_writeable_dir('$HOME/.ansible/cp',mode=0700)
- fcntl.lockf(self.runner.process_lockfile, fcntl.LOCK_UN)
-
- def connect(self):
- ''' connect to the remote host '''
-
- vvv("ESTABLISH CONNECTION FOR USER: %s" % self.user, host=self.host)
-
- self.common_args = []
- extra_args = C.ANSIBLE_SSH_ARGS
- if extra_args is not None:
- # make sure there is no empty string added as this can produce weird errors
- self.common_args += [x.strip() for x in shlex.split(extra_args) if x.strip()]
- else:
- self.common_args += ["-o", "ControlMaster=auto",
- "-o", "ControlPersist=60s",
- "-o", "ControlPath=\"%s\"" % (C.ANSIBLE_SSH_CONTROL_PATH % dict(directory=self.cp_dir))]
-
- cp_in_use = False
- cp_path_set = False
- for arg in self.common_args:
- if "ControlPersist" in arg:
- cp_in_use = True
- if "ControlPath" in arg:
- cp_path_set = True
-
- if cp_in_use and not cp_path_set:
- self.common_args += ["-o", "ControlPath=\"%s\"" % (C.ANSIBLE_SSH_CONTROL_PATH % dict(directory=self.cp_dir))]
-
- if not C.HOST_KEY_CHECKING:
- self.common_args += ["-o", "StrictHostKeyChecking=no"]
-
- if self.port is not None:
- self.common_args += ["-o", "Port=%d" % (self.port)]
- if self.private_key_file is not None:
- self.common_args += ["-o", "IdentityFile=\"%s\"" % os.path.expanduser(self.private_key_file)]
- elif self.runner.private_key_file is not None:
- self.common_args += ["-o", "IdentityFile=\"%s\"" % os.path.expanduser(self.runner.private_key_file)]
- if self.password:
- self.common_args += ["-o", "GSSAPIAuthentication=no",
- "-o", "PubkeyAuthentication=no"]
- else:
- self.common_args += ["-o", "KbdInteractiveAuthentication=no",
- "-o", "PreferredAuthentications=gssapi-with-mic,gssapi-keyex,hostbased,publickey",
- "-o", "PasswordAuthentication=no"]
- if self.user != pwd.getpwuid(os.geteuid())[0]:
- self.common_args += ["-o", "User="+self.user]
- self.common_args += ["-o", "ConnectTimeout=%d" % self.runner.timeout]
-
- return self
-
- def _run(self, cmd, indata):
- if indata:
- # do not use pseudo-pty
- p = subprocess.Popen(cmd, stdin=subprocess.PIPE,
- stdout=subprocess.PIPE, stderr=subprocess.PIPE)
- stdin = p.stdin
- else:
-            # try to use a pseudo-pty
- try:
- # Make sure stdin is a proper (pseudo) pty to avoid: tcgetattr errors
- master, slave = pty.openpty()
- p = subprocess.Popen(cmd, stdin=slave,
- stdout=subprocess.PIPE, stderr=subprocess.PIPE)
- stdin = os.fdopen(master, 'w', 0)
- os.close(slave)
- except:
- p = subprocess.Popen(cmd, stdin=subprocess.PIPE,
- stdout=subprocess.PIPE, stderr=subprocess.PIPE)
- stdin = p.stdin
-
- return (p, stdin)
-
- def _password_cmd(self):
- if self.password:
- try:
- p = subprocess.Popen(["sshpass"], stdin=subprocess.PIPE,
- stdout=subprocess.PIPE, stderr=subprocess.PIPE)
- p.communicate()
- except OSError:
- raise errors.AnsibleError("to use the 'ssh' connection type with passwords, you must install the sshpass program")
- (self.rfd, self.wfd) = os.pipe()
- return ["sshpass", "-d%d" % self.rfd]
- return []
-
- def _send_password(self):
- if self.password:
- os.close(self.rfd)
- os.write(self.wfd, "%s\n" % self.password)
- os.close(self.wfd)
-
- def _communicate(self, p, stdin, indata, sudoable=False, prompt=None):
- fcntl.fcntl(p.stdout, fcntl.F_SETFL, fcntl.fcntl(p.stdout, fcntl.F_GETFL) & ~os.O_NONBLOCK)
- fcntl.fcntl(p.stderr, fcntl.F_SETFL, fcntl.fcntl(p.stderr, fcntl.F_GETFL) & ~os.O_NONBLOCK)
- # We can't use p.communicate here because the ControlMaster may have stdout open as well
- stdout = ''
- stderr = ''
- rpipes = [p.stdout, p.stderr]
- if indata:
- try:
- stdin.write(indata)
- stdin.close()
- except:
- raise errors.AnsibleError('SSH Error: data could not be sent to the remote host. Make sure this host can be reached over ssh')
- # Read stdout/stderr from process
- while True:
- rfd, wfd, efd = select.select(rpipes, [], rpipes, 1)
-
- # fail early if the become password is wrong
- if self.runner.become and sudoable:
- incorrect_password = gettext.dgettext(self.runner.become_method, C.BECOME_ERROR_STRINGS[self.runner.become_method])
-
- if prompt:
- if self.runner.become_pass:
- if stdout.endswith("%s\r\n%s" % (incorrect_password, prompt)):
- raise errors.AnsibleError('Incorrect become password')
-
- if stdout.endswith(prompt):
- raise errors.AnsibleError('Missing become password')
- elif stdout.endswith("%s\r\n%s" % (incorrect_password, prompt)):
- raise errors.AnsibleError('Incorrect become password')
-
- if p.stdout in rfd:
- dat = os.read(p.stdout.fileno(), 9000)
- stdout += dat
- if dat == '':
- rpipes.remove(p.stdout)
- if p.stderr in rfd:
- dat = os.read(p.stderr.fileno(), 9000)
- stderr += dat
- if dat == '':
- rpipes.remove(p.stderr)
- # only break out if no pipes are left to read or
- # the pipes are completely read and
- # the process is terminated
- if (not rpipes or not rfd) and p.poll() is not None:
- break
- # No pipes are left to read but process is not yet terminated
- # Only then it is safe to wait for the process to be finished
- # NOTE: Actually p.poll() is always None here if rpipes is empty
-            elif not rpipes and p.poll() is None:
- p.wait()
- # The process is terminated. Since no pipes to read from are
- # left, there is no need to call select() again.
- break
- # close stdin after process is terminated and stdout/stderr are read
- # completely (see also issue #848)
- stdin.close()
- return (p.returncode, stdout, stderr)
-
- def not_in_host_file(self, host):
- if 'USER' in os.environ:
- user_host_file = os.path.expandvars("~${USER}/.ssh/known_hosts")
- else:
- user_host_file = "~/.ssh/known_hosts"
- user_host_file = os.path.expanduser(user_host_file)
-
- host_file_list = []
- host_file_list.append(user_host_file)
- host_file_list.append("/etc/ssh/ssh_known_hosts")
- host_file_list.append("/etc/ssh/ssh_known_hosts2")
-
- hfiles_not_found = 0
- for hf in host_file_list:
- if not os.path.exists(hf):
- hfiles_not_found += 1
- continue
- try:
- host_fh = open(hf)
- except IOError, e:
- hfiles_not_found += 1
- continue
- else:
- data = host_fh.read()
- host_fh.close()
-
- for line in data.split("\n"):
- line = line.strip()
- if line is None or " " not in line:
- continue
- tokens = line.split()
- if not tokens:
- continue
- if tokens[0].find(self.HASHED_KEY_MAGIC) == 0:
- # this is a hashed known host entry
- try:
- (kn_salt,kn_host) = tokens[0][len(self.HASHED_KEY_MAGIC):].split("|",2)
- hash = hmac.new(kn_salt.decode('base64'), digestmod=sha1)
- hash.update(host)
- if hash.digest() == kn_host.decode('base64'):
- return False
- except:
- # invalid hashed host key, skip it
- continue
- else:
- # standard host file entry
- if host in tokens[0]:
- return False
-
- if (hfiles_not_found == len(host_file_list)):
- vvv("EXEC previous known host file not found for %s" % host)
- return True
-
- def exec_command(self, cmd, tmp_path, become_user=None, sudoable=False, executable='/bin/sh', in_data=None):
- ''' run a command on the remote host '''
-
- if sudoable and self.runner.become and self.runner.become_method not in self.become_methods_supported:
- raise errors.AnsibleError("Internal Error: this module does not support running commands via %s" % self.runner.become_method)
-
- ssh_cmd = self._password_cmd()
- ssh_cmd += ["ssh", "-C"]
- if not in_data:
-            # we can only use a tty when we are not pipelining the modules. piping data into /usr/bin/python
-            # inside a tty automatically invokes Python's interactive mode, but the modules are not
-            # compatible with it ("unexpected indent" errors, mainly because of empty lines)
- ssh_cmd += ["-tt"]
- if utils.VERBOSITY > 3:
- ssh_cmd += ["-vvv"]
- else:
- if self.runner.module_name == 'raw':
- ssh_cmd += ["-q"]
- else:
- ssh_cmd += ["-v"]
- ssh_cmd += self.common_args
-
- if self.ipv6:
- ssh_cmd += ['-6']
- ssh_cmd += [self.host]
-
- if self.runner.become and sudoable:
- becomecmd, prompt, success_key = utils.make_become_cmd(cmd, become_user, executable, self.runner.become_method, '', self.runner.become_exe)
- ssh_cmd.append(becomecmd)
- else:
- prompt = None
- if executable:
- ssh_cmd.append(executable + ' -c ' + pipes.quote(cmd))
- else:
- ssh_cmd.append(cmd)
-
- vvv("EXEC %s" % ' '.join(ssh_cmd), host=self.host)
-
- not_in_host_file = self.not_in_host_file(self.host)
-
- if C.HOST_KEY_CHECKING and not_in_host_file:
- # lock around the initial SSH connectivity so the user prompt about whether to add
- # the host to known hosts is not intermingled with multiprocess output.
- fcntl.lockf(self.runner.process_lockfile, fcntl.LOCK_EX)
- fcntl.lockf(self.runner.output_lockfile, fcntl.LOCK_EX)
-
- # create process
- (p, stdin) = self._run(ssh_cmd, in_data)
-
- self._send_password()
-
- no_prompt_out = ''
- no_prompt_err = ''
- if sudoable and self.runner.become and self.runner.become_pass:
- # several cases are handled for escalated privileges with password
- # * NOPASSWD (tty & no-tty): detect success_key on stdout
- # * without NOPASSWD:
- # * detect prompt on stdout (tty)
- # * detect prompt on stderr (no-tty)
- fcntl.fcntl(p.stdout, fcntl.F_SETFL,
- fcntl.fcntl(p.stdout, fcntl.F_GETFL) | os.O_NONBLOCK)
- fcntl.fcntl(p.stderr, fcntl.F_SETFL,
- fcntl.fcntl(p.stderr, fcntl.F_GETFL) | os.O_NONBLOCK)
- become_output = ''
- become_errput = ''
-
- while True:
- if success_key in become_output or \
- (prompt and become_output.endswith(prompt)) or \
- utils.su_prompts.check_su_prompt(become_output):
- break
-
- rfd, wfd, efd = select.select([p.stdout, p.stderr], [],
- [p.stdout], self.runner.timeout)
- if p.stderr in rfd:
- chunk = p.stderr.read()
- if not chunk:
- raise errors.AnsibleError('ssh connection closed waiting for a privilege escalation password prompt')
- become_errput += chunk
- incorrect_password = gettext.dgettext(
- "become", "Sorry, try again.")
- if become_errput.strip().endswith("%s%s" % (prompt, incorrect_password)):
- raise errors.AnsibleError('Incorrect become password')
- elif prompt and become_errput.endswith(prompt):
- stdin.write(self.runner.become_pass + '\n')
-
- if p.stdout in rfd:
- chunk = p.stdout.read()
- if not chunk:
- raise errors.AnsibleError('ssh connection closed waiting for %s password prompt' % self.runner.become_method)
- become_output += chunk
-
- if not rfd:
- # timeout. wrap up process communication
-                stdout, stderr = p.communicate()
- raise errors.AnsibleError('ssh connection error while waiting for %s password prompt' % self.runner.become_method)
-
- if success_key in become_output:
- no_prompt_out += become_output
- no_prompt_err += become_errput
- elif sudoable:
- stdin.write(self.runner.become_pass + '\n')
-
- (returncode, stdout, stderr) = self._communicate(p, stdin, in_data, sudoable=sudoable, prompt=prompt)
-
- if C.HOST_KEY_CHECKING and not_in_host_file:
- # lock around the initial SSH connectivity so the user prompt about whether to add
- # the host to known hosts is not intermingled with multiprocess output.
- fcntl.lockf(self.runner.output_lockfile, fcntl.LOCK_UN)
- fcntl.lockf(self.runner.process_lockfile, fcntl.LOCK_UN)
- controlpersisterror = 'Bad configuration option: ControlPersist' in stderr or \
- 'unknown configuration option: ControlPersist' in stderr
-
- if C.HOST_KEY_CHECKING:
- if ssh_cmd[0] == "sshpass" and p.returncode == 6:
-                raise errors.AnsibleError('Using an SSH password instead of a key is not possible because Host Key checking is enabled and sshpass does not support this. Please add this host\'s fingerprint to your known_hosts file to manage this host.')
-
- if p.returncode != 0 and controlpersisterror:
- raise errors.AnsibleError('using -c ssh on certain older ssh versions may not support ControlPersist, set ANSIBLE_SSH_ARGS="" (or ssh_args in [ssh_connection] section of the config file) before running again')
- if p.returncode == 255 and (in_data or self.runner.module_name == 'raw'):
- raise errors.AnsibleError('SSH Error: data could not be sent to the remote host. Make sure this host can be reached over ssh')
- if p.returncode == 255:
- ip = None
- port = None
- for line in stderr.splitlines():
- match = re.search(
- 'Connecting to .*\[(\d+\.\d+\.\d+\.\d+)\] port (\d+)',
- line)
- if match:
- ip = match.group(1)
- port = match.group(2)
- if 'UNPROTECTED PRIVATE KEY FILE' in stderr:
- lines = [line for line in stderr.splitlines()
- if 'ignore key:' in line]
- else:
- lines = stderr.splitlines()[-1:]
- if ip and port:
- lines.append(' while connecting to %s:%s' % (ip, port))
- lines.append(
- 'It is sometimes useful to re-run the command using -vvvv, '
- 'which prints SSH debug output to help diagnose the issue.')
- raise errors.AnsibleError('SSH Error: %s' % '\n'.join(lines))
-
- return (p.returncode, '', no_prompt_out + stdout, no_prompt_err + stderr)
-
- def put_file(self, in_path, out_path):
- ''' transfer a file from local to remote '''
- vvv("PUT %s TO %s" % (in_path, out_path), host=self.host)
- if not os.path.exists(in_path):
- raise errors.AnsibleFileNotFound("file or module does not exist: %s" % in_path)
- cmd = self._password_cmd()
-
- host = self.host
- if self.ipv6:
- host = '[%s]' % host
-
- if C.DEFAULT_SCP_IF_SSH:
- cmd += ["scp"] + self.common_args
- cmd += [in_path,host + ":" + pipes.quote(out_path)]
- indata = None
- else:
- cmd += ["sftp"] + self.common_args + [host]
- indata = "put %s %s\n" % (pipes.quote(in_path), pipes.quote(out_path))
-
- (p, stdin) = self._run(cmd, indata)
-
- self._send_password()
-
- (returncode, stdout, stderr) = self._communicate(p, stdin, indata)
-
- if returncode != 0:
- raise errors.AnsibleError("failed to transfer file to %s:\n%s\n%s" % (out_path, stdout, stderr))
-
- def fetch_file(self, in_path, out_path):
- ''' fetch a file from remote to local '''
- vvv("FETCH %s TO %s" % (in_path, out_path), host=self.host)
- cmd = self._password_cmd()
-
- host = self.host
- if self.ipv6:
- host = '[%s]' % host
-
- if C.DEFAULT_SCP_IF_SSH:
- cmd += ["scp"] + self.common_args
- cmd += [host + ":" + in_path, out_path]
- indata = None
- else:
- cmd += ["sftp"] + self.common_args + [host]
- indata = "get %s %s\n" % (in_path, out_path)
-
- p = subprocess.Popen(cmd, stdin=subprocess.PIPE,
- stdout=subprocess.PIPE, stderr=subprocess.PIPE)
- self._send_password()
- stdout, stderr = p.communicate(indata)
-
- if p.returncode != 0:
- raise errors.AnsibleError("failed to transfer file from %s:\n%s\n%s" % (in_path, stdout, stderr))
-
- def close(self):
- ''' not applicable since we're executing openssh binaries '''
- pass
-
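
_password_cmd() and _send_password() above feed the password to sshpass
through an anonymous pipe (sshpass -d<fd>) so it never appears in argv or on
a tty. A minimal Python 3 sketch of the idea; pass_fds keeps the read end
open across exec, and sshpass must be installed:

    import os, subprocess

    def run_with_sshpass(ssh_argv, password):
        rfd, wfd = os.pipe()
        cmd = ['sshpass', '-d%d' % rfd] + ssh_argv
        # pass_fds marks rfd inheritable for the child; the parent then
        # closes its copy and writes the password into the other end
        p = subprocess.Popen(cmd, pass_fds=(rfd,))
        os.close(rfd)
        os.write(wfd, (password + '\n').encode())
        os.close(wfd)
        return p.wait()
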
diff --git a/v1/ansible/runner/connection_plugins/winrm.py b/v1/ansible/runner/connection_plugins/winrm.py
deleted file mode 100644
index b41a74c8e1..0000000000
--- a/v1/ansible/runner/connection_plugins/winrm.py
+++ /dev/null
@@ -1,270 +0,0 @@
-# (c) 2014, Chris Church <chris@ninemoreminutes.com>
-#
-# This file is part of Ansible.
-#
-# Ansible is free software: you can redistribute it and/or modify
-# it under the terms of the GNU General Public License as published by
-# the Free Software Foundation, either version 3 of the License, or
-# (at your option) any later version.
-#
-# Ansible is distributed in the hope that it will be useful,
-# but WITHOUT ANY WARRANTY; without even the implied warranty of
-# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
-# GNU General Public License for more details.
-#
-# You should have received a copy of the GNU General Public License
-# along with Ansible. If not, see <http://www.gnu.org/licenses/>.
-
-from __future__ import absolute_import
-
-import base64
-import os
-import re
-import shlex
-import traceback
-import urlparse
-from ansible import errors
-from ansible import utils
-from ansible.callbacks import vvv, vvvv, verbose
-from ansible.runner.shell_plugins import powershell
-
-try:
- from winrm import Response
- from winrm.exceptions import WinRMTransportError
- from winrm.protocol import Protocol
-except ImportError:
- raise errors.AnsibleError("winrm is not installed")
-
-HAVE_KERBEROS = False
-try:
- import kerberos
- HAVE_KERBEROS = True
-except ImportError:
- pass
-
-def vvvvv(msg, host=None):
- verbose(msg, host=host, caplevel=4)
-
-class Connection(object):
- '''WinRM connections over HTTP/HTTPS.'''
-
- transport_schemes = {
- 'http': [('kerberos', 'http'), ('plaintext', 'http'), ('plaintext', 'https')],
- 'https': [('kerberos', 'https'), ('plaintext', 'https')],
- }
-
- def __init__(self, runner, host, port, user, password, *args, **kwargs):
- self.runner = runner
- self.host = host
- self.port = port
- self.user = user
- self.password = password
- self.has_pipelining = False
- self.default_shell = 'powershell'
- self.default_suffixes = ['.ps1', '']
- self.protocol = None
- self.shell_id = None
- self.delegate = None
-
- # Add runas support
- #self.become_methods_supported=['runas']
- self.become_methods_supported=[]
-
- def _winrm_connect(self):
- '''
- Establish a WinRM connection over HTTP/HTTPS.
- '''
- port = self.port or 5986
- vvv("ESTABLISH WINRM CONNECTION FOR USER: %s on PORT %s TO %s" % \
- (self.user, port, self.host), host=self.host)
- netloc = '%s:%d' % (self.host, port)
- exc = None
- for transport, scheme in self.transport_schemes['http' if port == 5985 else 'https']:
-            if transport == 'kerberos' and (not HAVE_KERBEROS or '@' not in self.user):
- continue
- if transport == 'kerberos':
- realm = self.user.split('@', 1)[1].strip() or None
- else:
- realm = None
- endpoint = urlparse.urlunsplit((scheme, netloc, '/wsman', '', ''))
- vvvv('WINRM CONNECT: transport=%s endpoint=%s' % (transport, endpoint),
- host=self.host)
- protocol = Protocol(endpoint, transport=transport,
- username=self.user, password=self.password,
- realm=realm)
- try:
- protocol.send_message('')
- return protocol
- except WinRMTransportError, exc:
- err_msg = str(exc)
- if re.search(r'Operation\s+?timed\s+?out', err_msg, re.I):
- raise errors.AnsibleError("the connection attempt timed out")
- m = re.search(r'Code\s+?(\d{3})', err_msg)
- if m:
- code = int(m.groups()[0])
- if code == 401:
- raise errors.AnsibleError("the username/password specified for this server was incorrect")
- elif code == 411:
- return protocol
- vvvv('WINRM CONNECTION ERROR: %s' % err_msg, host=self.host)
- continue
- if exc:
- raise errors.AnsibleError(str(exc))
-
- def _winrm_exec(self, command, args=(), from_exec=False):
- if from_exec:
- vvvv("WINRM EXEC %r %r" % (command, args), host=self.host)
- else:
- vvvvv("WINRM EXEC %r %r" % (command, args), host=self.host)
- if not self.protocol:
- self.protocol = self._winrm_connect()
- if not self.shell_id:
- self.shell_id = self.protocol.open_shell()
- command_id = None
- try:
- command_id = self.protocol.run_command(self.shell_id, command, args)
- response = Response(self.protocol.get_command_output(self.shell_id, command_id))
- if from_exec:
- vvvv('WINRM RESULT %r' % response, host=self.host)
- else:
- vvvvv('WINRM RESULT %r' % response, host=self.host)
- vvvvv('WINRM STDOUT %s' % response.std_out, host=self.host)
- vvvvv('WINRM STDERR %s' % response.std_err, host=self.host)
- return response
- finally:
- if command_id:
- self.protocol.cleanup_command(self.shell_id, command_id)
-
- def connect(self):
- if not self.protocol:
- self.protocol = self._winrm_connect()
- return self
-
- def exec_command(self, cmd, tmp_path, become_user=None, sudoable=False, executable=None, in_data=None):
-
- if sudoable and self.runner.become and self.runner.become_method not in self.become_methods_supported:
- raise errors.AnsibleError("Internal Error: this module does not support running commands via %s" % self.runner.become_method)
-
- cmd = cmd.encode('utf-8')
- cmd_parts = shlex.split(cmd, posix=False)
- if '-EncodedCommand' in cmd_parts:
- encoded_cmd = cmd_parts[cmd_parts.index('-EncodedCommand') + 1]
- decoded_cmd = base64.b64decode(encoded_cmd)
- vvv("EXEC %s" % decoded_cmd, host=self.host)
- else:
- vvv("EXEC %s" % cmd, host=self.host)
- # For script/raw support.
- if cmd_parts and cmd_parts[0].lower().endswith('.ps1'):
- script = powershell._build_file_cmd(cmd_parts, quote_args=False)
- cmd_parts = powershell._encode_script(script, as_list=True)
- try:
- result = self._winrm_exec(cmd_parts[0], cmd_parts[1:], from_exec=True)
- except Exception, e:
- traceback.print_exc()
- raise errors.AnsibleError("failed to exec cmd %s" % cmd)
- return (result.status_code, '', result.std_out.encode('utf-8'), result.std_err.encode('utf-8'))
-
- def put_file(self, in_path, out_path):
- vvv("PUT %s TO %s" % (in_path, out_path), host=self.host)
- if not os.path.exists(in_path):
- raise errors.AnsibleFileNotFound("file or module does not exist: %s" % in_path)
- with open(in_path) as in_file:
- in_size = os.path.getsize(in_path)
- script_template = '''
- $s = [System.IO.File]::OpenWrite("%s");
- [void]$s.Seek(%d, [System.IO.SeekOrigin]::Begin);
- $b = [System.Convert]::FromBase64String("%s");
- [void]$s.Write($b, 0, $b.length);
- [void]$s.SetLength(%d);
- [void]$s.Close();
- '''
- # Determine max size of data we can pass per command.
- script = script_template % (powershell._escape(out_path), in_size, '', in_size)
- cmd = powershell._encode_script(script)
- # Encode script with no data, subtract its length from 8190 (max
- # windows command length), divide by 2.67 (UTF16LE base64 command
- # encoding), then by 1.35 again (data base64 encoding).
- buffer_size = int(((8190 - len(cmd)) / 2.67) / 1.35)
- for offset in xrange(0, in_size, buffer_size):
- try:
- out_data = in_file.read(buffer_size)
- if offset == 0:
- if out_data.lower().startswith('#!powershell') and not out_path.lower().endswith('.ps1'):
- out_path = out_path + '.ps1'
- b64_data = base64.b64encode(out_data)
- script = script_template % (powershell._escape(out_path), offset, b64_data, in_size)
- vvvv("WINRM PUT %s to %s (offset=%d size=%d)" % (in_path, out_path, offset, len(out_data)), host=self.host)
- cmd_parts = powershell._encode_script(script, as_list=True)
- result = self._winrm_exec(cmd_parts[0], cmd_parts[1:])
- if result.status_code != 0:
- raise IOError(result.std_err.encode('utf-8'))
- except Exception:
- traceback.print_exc()
- raise errors.AnsibleError("failed to transfer file to %s" % out_path)
-
- def fetch_file(self, in_path, out_path):
- out_path = out_path.replace('\\', '/')
- vvv("FETCH %s TO %s" % (in_path, out_path), host=self.host)
- buffer_size = 2**19 # 0.5MB chunks
- if not os.path.exists(os.path.dirname(out_path)):
- os.makedirs(os.path.dirname(out_path))
- out_file = None
- try:
- offset = 0
- while True:
- try:
- script = '''
- If (Test-Path -PathType Leaf "%(path)s")
- {
- $stream = [System.IO.File]::OpenRead("%(path)s");
- $stream.Seek(%(offset)d, [System.IO.SeekOrigin]::Begin) | Out-Null;
- $buffer = New-Object Byte[] %(buffer_size)d;
- $bytesRead = $stream.Read($buffer, 0, %(buffer_size)d);
- $bytes = $buffer[0..($bytesRead-1)];
- [System.Convert]::ToBase64String($bytes);
- $stream.Close() | Out-Null;
- }
- ElseIf (Test-Path -PathType Container "%(path)s")
- {
- Write-Host "[DIR]";
- }
- Else
- {
- Write-Error "%(path)s does not exist";
- Exit 1;
- }
- ''' % dict(buffer_size=buffer_size, path=powershell._escape(in_path), offset=offset)
- vvvv("WINRM FETCH %s to %s (offset=%d)" % (in_path, out_path, offset), host=self.host)
- cmd_parts = powershell._encode_script(script, as_list=True)
- result = self._winrm_exec(cmd_parts[0], cmd_parts[1:])
- if result.status_code != 0:
- raise IOError(result.std_err.encode('utf-8'))
- if result.std_out.strip() == '[DIR]':
- data = None
- else:
- data = base64.b64decode(result.std_out.strip())
- if data is None:
- if not os.path.exists(out_path):
- os.makedirs(out_path)
- break
- else:
- if not out_file:
- # If out_path is a directory and we're expecting a file, bail out now.
- if os.path.isdir(out_path):
- break
- out_file = open(out_path, 'wb')
- out_file.write(data)
- if len(data) < buffer_size:
- break
- offset += len(data)
- except Exception:
- traceback.print_exc()
- raise errors.AnsibleError("failed to transfer file to %s" % out_path)
- finally:
- if out_file:
- out_file.close()
-
- def close(self):
- if self.protocol and self.shell_id:
- self.protocol.close_shell(self.shell_id)
- self.shell_id = None
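
put_file() above sizes its chunks from the ~8190-character Windows
command-line limit: subtract the overhead of the encoded script with an empty
payload, divide by ~2.67 for the UTF-16LE+base64 command encoding, then by
~1.35 for the payload's own base64 expansion. The arithmetic in isolation,
using the constants from the comment above:

    def upload_chunks(in_size, overhead_len, max_cmd=8190):
        # raw bytes of file data that fit in one encoded command
        buffer_size = int(((max_cmd - overhead_len) / 2.67) / 1.35)
        return [(off, min(buffer_size, in_size - off))
                for off in range(0, in_size, buffer_size)]

    # e.g. upload_chunks(10000, 1000) -> [(0, 1994), (1994, 1994), ...]
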
diff --git a/v1/ansible/runner/connection_plugins/zone.py b/v1/ansible/runner/connection_plugins/zone.py
deleted file mode 100644
index fd3242cb6e..0000000000
--- a/v1/ansible/runner/connection_plugins/zone.py
+++ /dev/null
@@ -1,162 +0,0 @@
-# Based on local.py (c) 2012, Michael DeHaan <michael.dehaan@gmail.com>
-# and chroot.py (c) 2013, Maykel Moya <mmoya@speedyrails.com>
-# and jail.py (c) 2013, Michael Scherer <misc@zarb.org>
-# (c) 2015, Dagobert Michelsen <dam@baltic-online.de>
-#
-# This file is part of Ansible
-#
-# Ansible is free software: you can redistribute it and/or modify
-# it under the terms of the GNU General Public License as published by
-# the Free Software Foundation, either version 3 of the License, or
-# (at your option) any later version.
-#
-# Ansible is distributed in the hope that it will be useful,
-# but WITHOUT ANY WARRANTY; without even the implied warranty of
-# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
-# GNU General Public License for more details.
-#
-# You should have received a copy of the GNU General Public License
-# along with Ansible. If not, see <http://www.gnu.org/licenses/>.
-
-import distutils.spawn
-import traceback
-import os
-import shutil
-import subprocess
-from subprocess import Popen,PIPE
-from ansible import errors
-from ansible.callbacks import vvv
-import ansible.constants as C
-
-class Connection(object):
- ''' Local zone based connections '''
-
- def _search_executable(self, executable):
- cmd = distutils.spawn.find_executable(executable)
- if not cmd:
- raise errors.AnsibleError("%s command not found in PATH") % executable
- return cmd
-
- def list_zones(self):
- pipe = subprocess.Popen([self.zoneadm_cmd, 'list', '-ip'],
- cwd=self.runner.basedir,
- stdin=subprocess.PIPE,
- stdout=subprocess.PIPE, stderr=subprocess.PIPE)
- #stdout, stderr = p.communicate()
- zones = []
- for l in pipe.stdout.readlines():
- # 1:work:running:/zones/work:3126dc59-9a07-4829-cde9-a816e4c5040e:native:shared
- s = l.split(':')
- if s[1] != 'global':
- zones.append(s[1])
-
- return zones
-
- def get_zone_path(self):
- #solaris10vm# zoneadm -z cswbuild list -p
- #-:cswbuild:installed:/zones/cswbuild:479f3c4b-d0c6-e97b-cd04-fd58f2c0238e:native:shared
- pipe = subprocess.Popen([self.zoneadm_cmd, '-z', self.zone, 'list', '-p'],
- cwd=self.runner.basedir,
- stdin=subprocess.PIPE,
- stdout=subprocess.PIPE, stderr=subprocess.PIPE)
-
- #stdout, stderr = p.communicate()
- path = pipe.stdout.readlines()[0].split(':')[3]
- return path + '/root'
-
- def __init__(self, runner, host, port, *args, **kwargs):
- self.zone = host
- self.runner = runner
- self.host = host
- self.has_pipelining = False
- self.become_methods_supported=C.BECOME_METHODS
-
- if os.geteuid() != 0:
- raise errors.AnsibleError("zone connection requires running as root")
-
- self.zoneadm_cmd = self._search_executable('zoneadm')
- self.zlogin_cmd = self._search_executable('zlogin')
-
-        if self.zone not in self.list_zones():
- raise errors.AnsibleError("incorrect zone name %s" % self.zone)
-
-
- self.host = host
- # port is unused, since this is local
- self.port = port
-
- def connect(self, port=None):
- ''' connect to the zone; nothing to do here '''
-
- vvv("THIS IS A LOCAL ZONE DIR", host=self.zone)
-
- return self
-
-    # build the zlogin invocation; use list form (no shell) when an executable is given
- def _generate_cmd(self, executable, cmd):
- if executable:
- local_cmd = [self.zlogin_cmd, self.zone, executable, cmd]
- else:
- local_cmd = '%s "%s" %s' % (self.zlogin_cmd, self.zone, cmd)
- return local_cmd
-
- def exec_command(self, cmd, tmp_path, become_user=None, sudoable=False, executable=None, in_data=None):
- ''' run a command on the zone '''
-
- if sudoable and self.runner.become and self.runner.become_method not in self.become_methods_supported:
- raise errors.AnsibleError("Internal Error: this module does not support running commands via %s" % self.runner.become_method)
-
- if in_data:
- raise errors.AnsibleError("Internal Error: this module does not support optimized module pipelining")
-
- # We happily ignore privilege escalation
- if executable == '/bin/sh':
- executable = None
- local_cmd = self._generate_cmd(executable, cmd)
-
- vvv("EXEC %s" % (local_cmd), host=self.zone)
- p = subprocess.Popen(local_cmd, shell=isinstance(local_cmd, basestring),
- cwd=self.runner.basedir,
- stdin=subprocess.PIPE,
- stdout=subprocess.PIPE, stderr=subprocess.PIPE)
-
- stdout, stderr = p.communicate()
- return (p.returncode, '', stdout, stderr)
-
- def _normalize_path(self, path, prefix):
- if not path.startswith(os.path.sep):
- path = os.path.join(os.path.sep, path)
- normpath = os.path.normpath(path)
- return os.path.join(prefix, normpath[1:])
-
- def _copy_file(self, in_path, out_path):
- if not os.path.exists(in_path):
- raise errors.AnsibleFileNotFound("file or module does not exist: %s" % in_path)
- try:
- shutil.copyfile(in_path, out_path)
- except shutil.Error:
- traceback.print_exc()
- raise errors.AnsibleError("failed to copy: %s and %s are the same" % (in_path, out_path))
- except IOError:
- traceback.print_exc()
- raise errors.AnsibleError("failed to transfer file to %s" % out_path)
-
- def put_file(self, in_path, out_path):
- ''' transfer a file from local to zone '''
-
- out_path = self._normalize_path(out_path, self.get_zone_path())
- vvv("PUT %s TO %s" % (in_path, out_path), host=self.zone)
-
- self._copy_file(in_path, out_path)
-
- def fetch_file(self, in_path, out_path):
- ''' fetch a file from zone to local '''
-
- in_path = self._normalize_path(in_path, self.get_zone_path())
- vvv("FETCH %s TO %s" % (in_path, out_path), host=self.zone)
-
- self._copy_file(in_path, out_path)
-
- def close(self):
- ''' terminate the connection; nothing to do here '''
- pass
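
list_zones() above parses the colon-separated records of `zoneadm list -ip`,
taking field 1 as the zone name and skipping the global zone (the host
itself). The parsing step on its own, with a sample record from the comment
above:

    def parse_zoneadm(lines):
        zones = []
        for line in lines:
            # 1:work:running:/zones/work:3126dc59-...:native:shared
            fields = line.strip().split(':')
            if len(fields) > 1 and fields[1] != 'global':
                zones.append(fields[1])
        return zones

    parse_zoneadm(['1:work:running:/zones/work:3126dc59:native:shared'])
    # -> ['work']
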
diff --git a/v1/ansible/runner/filter_plugins/__init__.py b/v1/ansible/runner/filter_plugins/__init__.py
deleted file mode 100644
index e69de29bb2..0000000000
--- a/v1/ansible/runner/filter_plugins/__init__.py
+++ /dev/null
diff --git a/v1/ansible/runner/filter_plugins/core.py b/v1/ansible/runner/filter_plugins/core.py
deleted file mode 100644
index f81da6f894..0000000000
--- a/v1/ansible/runner/filter_plugins/core.py
+++ /dev/null
@@ -1,431 +0,0 @@
-# (c) 2012, Jeroen Hoekx <jeroen@hoekx.be>
-#
-# This file is part of Ansible
-#
-# Ansible is free software: you can redistribute it and/or modify
-# it under the terms of the GNU General Public License as published by
-# the Free Software Foundation, either version 3 of the License, or
-# (at your option) any later version.
-#
-# Ansible is distributed in the hope that it will be useful,
-# but WITHOUT ANY WARRANTY; without even the implied warranty of
-# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
-# GNU General Public License for more details.
-#
-# You should have received a copy of the GNU General Public License
-# along with Ansible. If not, see <http://www.gnu.org/licenses/>.
-
-from __future__ import absolute_import
-
-import sys
-import base64
-import json
-import os.path
-import types
-import pipes
-import glob
-import re
-import crypt
-import hashlib
-import string
-from functools import partial
-import operator as py_operator
-from random import SystemRandom, shuffle
-import uuid
-
-import yaml
-from jinja2.filters import environmentfilter
-from distutils.version import LooseVersion, StrictVersion
-
-from ansible import errors
-from ansible.utils.hashing import md5s, checksum_s
-from ansible.utils.unicode import unicode_wrap, to_unicode
-
-
-UUID_NAMESPACE_ANSIBLE = uuid.UUID('361E6D51-FAEC-444A-9079-341386DA8E2E')
-
-
-def to_nice_yaml(*a, **kw):
- '''Make verbose, human readable yaml'''
- transformed = yaml.safe_dump(*a, indent=4, allow_unicode=True, default_flow_style=False, **kw)
- return to_unicode(transformed)
-
-def to_json(a, *args, **kw):
- ''' Convert the value to JSON '''
- return json.dumps(a, *args, **kw)
-
-def to_nice_json(a, *args, **kw):
- '''Make verbose, human readable JSON'''
- # python-2.6's json encoder is buggy (can't encode hostvars)
- if sys.version_info < (2, 7):
- try:
- import simplejson
- except ImportError:
- pass
- else:
- try:
- major = int(simplejson.__version__.split('.')[0])
- except:
- pass
- else:
- if major >= 2:
- return simplejson.dumps(a, indent=4, sort_keys=True, *args, **kw)
- # Fallback to the to_json filter
- return to_json(a, *args, **kw)
- return json.dumps(a, indent=4, sort_keys=True, *args, **kw)
-
-def failed(*a, **kw):
- ''' Test if task result yields failed '''
- item = a[0]
- if type(item) != dict:
- raise errors.AnsibleFilterError("|failed expects a dictionary")
- rc = item.get('rc',0)
- failed = item.get('failed',False)
- if rc != 0 or failed:
- return True
- else:
- return False
-
-def success(*a, **kw):
- ''' Test if task result yields success '''
- return not failed(*a, **kw)
-
-def changed(*a, **kw):
- ''' Test if task result yields changed '''
- item = a[0]
- if type(item) != dict:
- raise errors.AnsibleFilterError("|changed expects a dictionary")
- if not 'changed' in item:
- changed = False
- if ('results' in item # some modules return a 'results' key
- and type(item['results']) == list
- and type(item['results'][0]) == dict):
- for result in item['results']:
- changed = changed or result.get('changed', False)
- else:
- changed = item.get('changed', False)
- return changed
-
-def skipped(*a, **kw):
- ''' Test if task result yields skipped '''
- item = a[0]
- if type(item) != dict:
- raise errors.AnsibleFilterError("|skipped expects a dictionary")
- skipped = item.get('skipped', False)
- return skipped
-
-def mandatory(a):
- ''' Make a variable mandatory '''
- try:
- a
- except NameError:
- raise errors.AnsibleFilterError('Mandatory variable not defined.')
- else:
- return a
-
-def bool(a):
- ''' return a bool for the arg '''
- if a is None or type(a) == bool:
- return a
- if type(a) in types.StringTypes:
- a = a.lower()
- if a in ['yes', 'on', '1', 'true', 1]:
- return True
- else:
- return False
-
-def quote(a):
- ''' return its argument quoted for shell usage '''
- return pipes.quote(a)
-
-def fileglob(pathname):
- ''' return list of matched files for glob '''
- return glob.glob(pathname)
-
-def regex(value='', pattern='', ignorecase=False, match_type='search'):
- ''' Expose `re` as a boolean filter using the `search` method by default.
- This is likely only useful for `search` and `match` which already
- have their own filters.
- '''
- if ignorecase:
- flags = re.I
- else:
- flags = 0
- _re = re.compile(pattern, flags=flags)
- _bool = __builtins__.get('bool')
- return _bool(getattr(_re, match_type, 'search')(value))
-
-def match(value, pattern='', ignorecase=False):
- ''' Perform a `re.match` returning a boolean '''
- return regex(value, pattern, ignorecase, 'match')
-
-def search(value, pattern='', ignorecase=False):
- ''' Perform a `re.search` returning a boolean '''
- return regex(value, pattern, ignorecase, 'search')
-
-def regex_replace(value='', pattern='', replacement='', ignorecase=False):
- ''' Perform a `re.sub` returning a string '''
-
- if not isinstance(value, basestring):
- value = str(value)
-
- if ignorecase:
- flags = re.I
- else:
- flags = 0
- _re = re.compile(pattern, flags=flags)
- return _re.sub(replacement, value)
-
-def ternary(value, true_val, false_val):
- ''' value ? true_val : false_val '''
- if value:
- return true_val
- else:
- return false_val
-
-
-def version_compare(value, version, operator='eq', strict=False):
- ''' Perform a version comparison on a value '''
- op_map = {
- '==': 'eq', '=': 'eq', 'eq': 'eq',
- '<': 'lt', 'lt': 'lt',
- '<=': 'le', 'le': 'le',
- '>': 'gt', 'gt': 'gt',
- '>=': 'ge', 'ge': 'ge',
- '!=': 'ne', '<>': 'ne', 'ne': 'ne'
- }
-
- if strict:
- Version = StrictVersion
- else:
- Version = LooseVersion
-
- if operator in op_map:
- operator = op_map[operator]
- else:
- raise errors.AnsibleFilterError('Invalid operator type')
-
- try:
- method = getattr(py_operator, operator)
- return method(Version(str(value)), Version(str(version)))
- except Exception, e:
- raise errors.AnsibleFilterError('Version comparison: %s' % e)
-
-@environmentfilter
-def rand(environment, end, start=None, step=None):
- r = SystemRandom()
- if isinstance(end, (int, long)):
- if not start:
- start = 0
- if not step:
- step = 1
- return r.randrange(start, end, step)
- elif hasattr(end, '__iter__'):
- if start or step:
- raise errors.AnsibleFilterError('start and step can only be used with integer values')
- return r.choice(end)
- else:
- raise errors.AnsibleFilterError('random can only be used on sequences and integers')
-
-def randomize_list(mylist):
- try:
- mylist = list(mylist)
- shuffle(mylist)
- except:
- pass
- return mylist
-
-def get_hash(data, hashtype='sha1'):
-
- try: # see if hash is supported
- h = hashlib.new(hashtype)
- except:
- return None
-
- h.update(data)
- return h.hexdigest()
-
-def get_encrypted_password(password, hashtype='sha512', salt=None):
-
- # TODO: find a way to construct dynamically from system
- cryptmethod= {
- 'md5': '1',
- 'blowfish': '2a',
- 'sha256': '5',
- 'sha512': '6',
- }
-
-    hashtype = hashtype.lower()
- if hashtype in cryptmethod:
- if salt is None:
- r = SystemRandom()
- salt = ''.join([r.choice(string.ascii_letters + string.digits) for _ in range(16)])
-
- saltstring = "$%s$%s" % (cryptmethod[hashtype],salt)
- encrypted = crypt.crypt(password,saltstring)
- return encrypted
-
- return None
-
-def to_uuid(string):
- return str(uuid.uuid5(UUID_NAMESPACE_ANSIBLE, str(string)))
-
-def comment(text, style='plain', **kw):
- # Predefined comment types
- comment_styles = {
- 'plain': {
- 'decoration': '# '
- },
- 'erlang': {
- 'decoration': '% '
- },
- 'c': {
- 'decoration': '// '
- },
- 'cblock': {
- 'beginning': '/*',
- 'decoration': ' * ',
- 'end': ' */'
- },
- 'xml': {
- 'beginning': '<!--',
- 'decoration': ' - ',
- 'end': '-->'
- }
- }
-
- # Pointer to the right comment type
- style_params = comment_styles[style]
-
- if 'decoration' in kw:
- prepostfix = kw['decoration']
- else:
- prepostfix = style_params['decoration']
-
- # Default params
- p = {
- 'newline': '\n',
- 'beginning': '',
- 'prefix': (prepostfix).rstrip(),
- 'prefix_count': 1,
- 'decoration': '',
- 'postfix': (prepostfix).rstrip(),
- 'postfix_count': 1,
- 'end': ''
- }
-
- # Update default params
- p.update(style_params)
- p.update(kw)
-
- # Compose substrings for the final string
- str_beginning = ''
- if p['beginning']:
- str_beginning = "%s%s" % (p['beginning'], p['newline'])
- str_prefix = str(
- "%s%s" % (p['prefix'], p['newline'])) * int(p['prefix_count'])
- str_text = ("%s%s" % (
- p['decoration'],
- # Prepend each line of the text with the decorator
- text.replace(
- p['newline'], "%s%s" % (p['newline'], p['decoration'])))).replace(
- # Remove trailing spaces when only decorator is on the line
- "%s%s" % (p['decoration'], p['newline']),
- "%s%s" % (p['decoration'].rstrip(), p['newline']))
- str_postfix = p['newline'].join(
- [''] + [p['postfix'] for x in range(p['postfix_count'])])
- str_end = ''
- if p['end']:
- str_end = "%s%s" % (p['newline'], p['end'])
-
- # Return the final string
- return "%s%s%s%s%s" % (
- str_beginning,
- str_prefix,
- str_text,
- str_postfix,
- str_end)
-
-
-class FilterModule(object):
- ''' Ansible core jinja2 filters '''
-
- def filters(self):
- return {
- # base 64
- 'b64decode': partial(unicode_wrap, base64.b64decode),
- 'b64encode': partial(unicode_wrap, base64.b64encode),
-
- # uuid
- 'to_uuid': to_uuid,
-
- # json
- 'to_json': to_json,
- 'to_nice_json': to_nice_json,
- 'from_json': json.loads,
-
- # yaml
- 'to_yaml': yaml.safe_dump,
- 'to_nice_yaml': to_nice_yaml,
- 'from_yaml': yaml.safe_load,
-
- # path
- 'basename': partial(unicode_wrap, os.path.basename),
- 'dirname': partial(unicode_wrap, os.path.dirname),
- 'expanduser': partial(unicode_wrap, os.path.expanduser),
- 'realpath': partial(unicode_wrap, os.path.realpath),
- 'relpath': partial(unicode_wrap, os.path.relpath),
-
- # failure testing
- 'failed' : failed,
- 'success' : success,
-
- # changed testing
- 'changed' : changed,
-
- # skip testing
- 'skipped' : skipped,
-
- # variable existence
- 'mandatory': mandatory,
-
- # value as boolean
- 'bool': bool,
-
- # quote string for shell usage
- 'quote': quote,
-
- # hash filters
- # md5 hex digest of string
- 'md5': md5s,
-            # sha1 hex digest of string
-            'sha1': checksum_s,
-            # checksum of string as used by ansible for checksumming files
- 'checksum': checksum_s,
- # generic hashing
- 'password_hash': get_encrypted_password,
- 'hash': get_hash,
-
- # file glob
- 'fileglob': fileglob,
-
- # regex
- 'match': match,
- 'search': search,
- 'regex': regex,
- 'regex_replace': regex_replace,
-
- # ? : ;
- 'ternary': ternary,
-
- # list
- # version comparison
- 'version_compare': version_compare,
-
- # random stuff
- 'random': rand,
- 'shuffle': randomize_list,
-
- # comment-style decoration of string
- 'comment': comment,
- }
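Aside: the password_hash filter removed above reduces to handing crypt(3) a "$method$salt" string. A minimal standalone sketch of that step (Python 3 on Unix; note the crypt module wraps the system library and was removed in Python 3.13, and the helper name make_sha512_hash is illustrative):

    import crypt
    import string
    from random import SystemRandom

    def make_sha512_hash(password, salt=None):
        # '6' selects SHA-512 in crypt(3), matching the 'sha512' entry above
        if salt is None:
            rng = SystemRandom()
            salt = ''.join(rng.choice(string.ascii_letters + string.digits)
                           for _ in range(16))
        return crypt.crypt(password, '$6$%s' % salt)

    print(make_sha512_hash('secret'))   # -> '$6$<salt>$<digest>'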
diff --git a/v1/ansible/runner/filter_plugins/ipaddr.py b/v1/ansible/runner/filter_plugins/ipaddr.py
deleted file mode 100644
index 5d9d6e3136..0000000000
--- a/v1/ansible/runner/filter_plugins/ipaddr.py
+++ /dev/null
@@ -1,659 +0,0 @@
-# (c) 2014, Maciej Delmanowski <drybjed@gmail.com>
-#
-# This file is part of Ansible
-#
-# Ansible is free software: you can redistribute it and/or modify
-# it under the terms of the GNU General Public License as published by
-# the Free Software Foundation, either version 3 of the License, or
-# (at your option) any later version.
-#
-# Ansible is distributed in the hope that it will be useful,
-# but WITHOUT ANY WARRANTY; without even the implied warranty of
-# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
-# GNU General Public License for more details.
-#
-# You should have received a copy of the GNU General Public License
-# along with Ansible. If not, see <http://www.gnu.org/licenses/>.
-
-from functools import partial
-
-try:
- import netaddr
-except ImportError:
- # in this case, we'll make the filters return error messages (see bottom)
- netaddr = None
-else:
- class mac_linux(netaddr.mac_unix):
- pass
- mac_linux.word_fmt = '%.2x'
-
-from ansible import errors
-
-
-# ---- IP address and network query helpers ----
-
-def _empty_ipaddr_query(v, vtype):
- # We don't have any query to process, so just check what type the user
- # expects, and return the IP address in a correct format
- if v:
- if vtype == 'address':
- return str(v.ip)
- elif vtype == 'network':
- return str(v)
-
-def _6to4_query(v, vtype, value):
- if v.version == 4:
-
- if v.size == 1:
- ipconv = str(v.ip)
- elif v.size > 1:
- if v.ip != v.network:
- ipconv = str(v.ip)
- else:
- ipconv = False
-
- if ipaddr(ipconv, 'public'):
- numbers = list(map(int, ipconv.split('.')))
-
- try:
- return '2002:{:02x}{:02x}:{:02x}{:02x}::1/48'.format(*numbers)
- except:
- return False
-
- elif v.version == 6:
- if vtype == 'address':
- if ipaddr(str(v), '2002::/16'):
- return value
- elif vtype == 'network':
- if v.ip != v.network:
- if ipaddr(str(v.ip), '2002::/16'):
- return value
- else:
- return False
-
-def _ip_query(v):
- if v.size == 1:
- return str(v.ip)
- if v.size > 1:
- if v.ip != v.network:
- return str(v.ip)
-
-def _gateway_query(v):
- if v.size > 1:
- if v.ip != v.network:
- return str(v.ip) + '/' + str(v.prefixlen)
-
-def _bool_ipaddr_query(v):
- if v:
- return True
-
-def _broadcast_query(v):
- if v.size > 1:
- return str(v.broadcast)
-
-def _cidr_query(v):
- return str(v)
-
-def _cidr_lookup_query(v, iplist, value):
- try:
- if v in iplist:
- return value
- except:
- return False
-
-def _host_query(v):
- if v.size == 1:
- return str(v)
- elif v.size > 1:
- if v.ip != v.network:
- return str(v.ip) + '/' + str(v.prefixlen)
-
-def _hostmask_query(v):
- return str(v.hostmask)
-
-def _int_query(v, vtype):
- if vtype == 'address':
- return int(v.ip)
- elif vtype == 'network':
- return str(int(v.ip)) + '/' + str(int(v.prefixlen))
-
-def _ipv4_query(v, value):
- if v.version == 6:
- try:
- return str(v.ipv4())
- except:
- return False
- else:
- return value
-
-def _ipv6_query(v, value):
- if v.version == 4:
- return str(v.ipv6())
- else:
- return value
-
-def _link_local_query(v, value):
- v_ip = netaddr.IPAddress(str(v.ip))
- if v.version == 4:
- if ipaddr(str(v_ip), '169.254.0.0/24'):
- return value
-
- elif v.version == 6:
- if ipaddr(str(v_ip), 'fe80::/10'):
- return value
-
-def _loopback_query(v, value):
- v_ip = netaddr.IPAddress(str(v.ip))
- if v_ip.is_loopback():
- return value
-
-def _multicast_query(v, value):
- if v.is_multicast():
- return value
-
-def _net_query(v):
- if v.size > 1:
- if v.ip == v.network:
- return str(v.network) + '/' + str(v.prefixlen)
-
-def _netmask_query(v):
- if v.size > 1:
- return str(v.netmask)
-
-def _network_query(v):
- if v.size > 1:
- return str(v.network)
-
-def _prefix_query(v):
- return int(v.prefixlen)
-
-def _private_query(v, value):
- if v.is_private():
- return value
-
-def _public_query(v, value):
- v_ip = netaddr.IPAddress(str(v.ip))
- if v_ip.is_unicast() and not v_ip.is_private() and \
- not v_ip.is_loopback() and not v_ip.is_netmask() and \
- not v_ip.is_hostmask():
- return value
-
-def _revdns_query(v):
- v_ip = netaddr.IPAddress(str(v.ip))
- return v_ip.reverse_dns
-
-def _size_query(v):
- return v.size
-
-def _subnet_query(v):
- return str(v.cidr)
-
-def _type_query(v):
- if v.size == 1:
- return 'address'
- if v.size > 1:
- if v.ip != v.network:
- return 'address'
- else:
- return 'network'
-
-def _unicast_query(v, value):
- if v.is_unicast():
- return value
-
-def _version_query(v):
- return v.version
-
-def _wrap_query(v, vtype, value):
- if v.version == 6:
- if vtype == 'address':
- return '[' + str(v.ip) + ']'
- elif vtype == 'network':
- return '[' + str(v.ip) + ']/' + str(v.prefixlen)
- else:
- return value
-
-
-# ---- HWaddr query helpers ----
-def _bare_query(v):
- v.dialect = netaddr.mac_bare
- return str(v)
-
-def _bool_hwaddr_query(v):
- if v:
- return True
-
-def _cisco_query(v):
- v.dialect = netaddr.mac_cisco
- return str(v)
-
-def _empty_hwaddr_query(v, value):
- if v:
- return value
-
-def _linux_query(v):
- v.dialect = mac_linux
- return str(v)
-
-def _postgresql_query(v):
- v.dialect = netaddr.mac_pgsql
- return str(v)
-
-def _unix_query(v):
- v.dialect = netaddr.mac_unix
- return str(v)
-
-def _win_query(v):
- v.dialect = netaddr.mac_eui48
- return str(v)
-
-
-# ---- IP address and network filters ----
-
-def ipaddr(value, query = '', version = False, alias = 'ipaddr'):
- ''' Check if string is an IP address or network and filter it '''
-
- query_func_extra_args = {
- '': ('vtype',),
- '6to4': ('vtype', 'value'),
- 'cidr_lookup': ('iplist', 'value'),
- 'int': ('vtype',),
- 'ipv4': ('value',),
- 'ipv6': ('value',),
- 'link-local': ('value',),
- 'loopback': ('value',),
- 'lo': ('value',),
- 'multicast': ('value',),
- 'private': ('value',),
- 'public': ('value',),
- 'unicast': ('value',),
- 'wrap': ('vtype', 'value'),
- }
- query_func_map = {
- '': _empty_ipaddr_query,
- '6to4': _6to4_query,
- 'address': _ip_query,
- 'address/prefix': _gateway_query,
- 'bool': _bool_ipaddr_query,
- 'broadcast': _broadcast_query,
- 'cidr': _cidr_query,
- 'cidr_lookup': _cidr_lookup_query,
- 'gateway': _gateway_query,
- 'gw': _gateway_query,
- 'host': _host_query,
- 'host/prefix': _gateway_query,
- 'hostmask': _hostmask_query,
- 'hostnet': _gateway_query,
- 'int': _int_query,
- 'ip': _ip_query,
- 'ipv4': _ipv4_query,
- 'ipv6': _ipv6_query,
- 'link-local': _link_local_query,
- 'lo': _loopback_query,
- 'loopback': _loopback_query,
- 'multicast': _multicast_query,
- 'net': _net_query,
- 'netmask': _netmask_query,
- 'network': _network_query,
- 'prefix': _prefix_query,
- 'private': _private_query,
- 'public': _public_query,
- 'revdns': _revdns_query,
- 'router': _gateway_query,
- 'size': _size_query,
- 'subnet': _subnet_query,
- 'type': _type_query,
- 'unicast': _unicast_query,
- 'v4': _ipv4_query,
- 'v6': _ipv6_query,
- 'version': _version_query,
- 'wrap': _wrap_query,
- }
-
- vtype = None
-
- if not value:
- return False
-
- elif value == True:
- return False
-
- # Check if value is a list and parse each element
- elif isinstance(value, (list, tuple)):
-
- _ret = []
- for element in value:
- if ipaddr(element, str(query), version):
- _ret.append(ipaddr(element, str(query), version))
-
- if _ret:
- return _ret
- else:
- return list()
-
- # Check if value is a number and convert it to an IP address
- elif str(value).isdigit():
-
- # We don't know what IP version to assume, so let's check IPv4 first,
- # then IPv6
- try:
- if ((not version) or (version and version == 4)):
- v = netaddr.IPNetwork('0.0.0.0/0')
- v.value = int(value)
- v.prefixlen = 32
- elif version and version == 6:
- v = netaddr.IPNetwork('::/0')
- v.value = int(value)
- v.prefixlen = 128
-
- # IPv4 didn't work the first time, so it definitely has to be IPv6
- except:
- try:
- v = netaddr.IPNetwork('::/0')
- v.value = int(value)
- v.prefixlen = 128
-
- # The value is too big for IPv6. Are you a nanobot?
- except:
- return False
-
- # We got an IP address, let's mark it as such
- value = str(v)
- vtype = 'address'
-
- # value has not been recognized, check if it's a valid IP string
- else:
- try:
- v = netaddr.IPNetwork(value)
-
- # value is a valid IP string, check if user specified
- # CIDR prefix or just an IP address, this will indicate default
- # output format
- try:
- address, prefix = value.split('/')
- vtype = 'network'
- except:
- vtype = 'address'
-
- # value hasn't been recognized, maybe it's a numerical CIDR?
- except:
- try:
- address, prefix = value.split('/')
- address.isdigit()
- address = int(address)
- prefix.isdigit()
- prefix = int(prefix)
-
- # It's not numerical CIDR, give up
- except:
- return False
-
- # It is something, so let's try and build a CIDR from the parts
- try:
- v = netaddr.IPNetwork('0.0.0.0/0')
- v.value = address
- v.prefixlen = prefix
-
- # It's not a valid IPv4 CIDR
- except:
- try:
- v = netaddr.IPNetwork('::/0')
- v.value = address
- v.prefixlen = prefix
-
- # It's not a valid IPv6 CIDR. Give up.
- except:
- return False
-
- # We have a valid CIDR, so let's write it in correct format
- value = str(v)
- vtype = 'network'
-
- # We have a query string but it's not in the known query types. Check if
- # that string is a valid subnet, if so, we can check later if given IP
- # address/network is inside that specific subnet
- try:
- ### ?? 6to4 and link-local were True here before. Should they still?
- if query and (query not in query_func_map or query == 'cidr_lookup') and ipaddr(query, 'network'):
- iplist = netaddr.IPSet([netaddr.IPNetwork(query)])
- query = 'cidr_lookup'
- except:
- pass
-
-    # This code checks if value matches the IP version the user wants, i.e. if
-    # it's any version ("ipaddr()"), IPv4 ("ipv4()") or IPv6 ("ipv6()").
-    # If version does not match, return False
- if version and v.version != version:
- return False
-
- extras = []
- for arg in query_func_extra_args.get(query, tuple()):
- extras.append(locals()[arg])
- try:
- return query_func_map[query](v, *extras)
- except KeyError:
- try:
- float(query)
- if v.size == 1:
- if vtype == 'address':
- return str(v.ip)
- elif vtype == 'network':
- return str(v)
-
- elif v.size > 1:
- try:
- return str(v[query]) + '/' + str(v.prefixlen)
- except:
- return False
-
- else:
- return value
-
- except:
- raise errors.AnsibleFilterError(alias + ': unknown filter type: %s' % query)
-
- return False
-
-
-def ipwrap(value, query = ''):
- try:
- if isinstance(value, (list, tuple)):
- _ret = []
- for element in value:
- if ipaddr(element, query, version = False, alias = 'ipwrap'):
- _ret.append(ipaddr(element, 'wrap'))
- else:
- _ret.append(element)
-
- return _ret
- else:
- _ret = ipaddr(value, query, version = False, alias = 'ipwrap')
- if _ret:
- return ipaddr(_ret, 'wrap')
- else:
- return value
-
- except:
- return value
-
-
-def ipv4(value, query = ''):
- return ipaddr(value, query, version = 4, alias = 'ipv4')
-
-
-def ipv6(value, query = ''):
- return ipaddr(value, query, version = 6, alias = 'ipv6')
-
-
-# Split given subnet into smaller subnets or find out the biggest subnet of
-# a given IP address with given CIDR prefix
-# Usage:
-#
-# - address or address/prefix | ipsubnet
-# returns CIDR subnet of a given input
-#
-# - address/prefix | ipsubnet(cidr)
-# returns number of possible subnets for given CIDR prefix
-#
-# - address/prefix | ipsubnet(cidr, index)
-# returns new subnet with given CIDR prefix
-#
-# - address | ipsubnet(cidr)
-# returns biggest subnet with given CIDR prefix that address belongs to
-#
-# - address | ipsubnet(cidr, index)
-# returns next indexed subnet which contains given address
-def ipsubnet(value, query = '', index = 'x'):
- ''' Manipulate IPv4/IPv6 subnets '''
-
- try:
- vtype = ipaddr(value, 'type')
- if vtype == 'address':
- v = ipaddr(value, 'cidr')
- elif vtype == 'network':
- v = ipaddr(value, 'subnet')
-
- value = netaddr.IPNetwork(v)
- except:
- return False
-
- if not query:
- return str(value)
-
- elif str(query).isdigit():
- vsize = ipaddr(v, 'size')
- query = int(query)
-
- try:
- float(index)
- index = int(index)
-
- if vsize > 1:
- try:
- return str(list(value.subnet(query))[index])
- except:
- return False
-
- elif vsize == 1:
- try:
- return str(value.supernet(query)[index])
- except:
- return False
-
- except:
- if vsize > 1:
- try:
- return str(len(list(value.subnet(query))))
- except:
- return False
-
- elif vsize == 1:
- try:
- return str(value.supernet(query)[0])
- except:
- return False
-
- return False
-
-# Returns the nth host within a network described by value.
-# Usage:
-#
-# - address or address/prefix | nthhost(nth)
-# returns the nth host within the given network
-def nthhost(value, query=''):
- ''' Get the nth host within a given network '''
- try:
- vtype = ipaddr(value, 'type')
- if vtype == 'address':
- v = ipaddr(value, 'cidr')
- elif vtype == 'network':
- v = ipaddr(value, 'subnet')
-
- value = netaddr.IPNetwork(v)
- except:
- return False
-
- if not query:
- return False
-
- try:
- vsize = ipaddr(v, 'size')
- nth = int(query)
- if value.size > nth:
- return value[nth]
-
- except ValueError:
- return False
-
- return False
-
-
-# ---- HWaddr / MAC address filters ----
-
-def hwaddr(value, query = '', alias = 'hwaddr'):
- ''' Check if string is a HW/MAC address and filter it '''
-
- query_func_extra_args = {
- '': ('value',),
- }
- query_func_map = {
- '': _empty_hwaddr_query,
- 'bare': _bare_query,
- 'bool': _bool_hwaddr_query,
- 'cisco': _cisco_query,
- 'eui48': _win_query,
- 'linux': _linux_query,
- 'pgsql': _postgresql_query,
- 'postgresql': _postgresql_query,
- 'psql': _postgresql_query,
- 'unix': _unix_query,
- 'win': _win_query,
- }
-
- try:
- v = netaddr.EUI(value)
- except:
- if query and query != 'bool':
- raise errors.AnsibleFilterError(alias + ': not a hardware address: %s' % value)
-
- extras = []
- for arg in query_func_extra_args.get(query, tuple()):
- extras.append(locals()[arg])
- try:
- return query_func_map[query](v, *extras)
- except KeyError:
- raise errors.AnsibleFilterError(alias + ': unknown filter type: %s' % query)
-
- return False
-
-def macaddr(value, query = ''):
- return hwaddr(value, query, alias = 'macaddr')
-
-def _need_netaddr(f_name, *args, **kwargs):
- raise errors.AnsibleFilterError('The {0} filter requires python-netaddr be'
- ' installed on the ansible controller'.format(f_name))
-
-# ---- Ansible filters ----
-
-class FilterModule(object):
- ''' IP address and network manipulation filters '''
- filter_map = {
- # IP addresses and networks
- 'ipaddr': ipaddr,
- 'ipwrap': ipwrap,
- 'ipv4': ipv4,
- 'ipv6': ipv6,
- 'ipsubnet': ipsubnet,
- 'nthhost': nthhost,
-
- # MAC / HW addresses
- 'hwaddr': hwaddr,
- 'macaddr': macaddr
- }
-
- def filters(self):
- if netaddr:
- return self.filter_map
- else:
- # Need to install python-netaddr for these filters to work
- return dict((f, partial(_need_netaddr, f)) for f in self.filter_map)
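Aside: the closing filters() method above is a reusable graceful-degradation pattern: when an optional dependency is missing, every filter name still resolves, but calling one raises an error naming the missing package. A minimal sketch of the same idea (the single stand-in 'cidr' filter is illustrative):

    from functools import partial

    try:
        import netaddr
    except ImportError:
        netaddr = None

    def _need_netaddr(f_name, *args, **kwargs):
        raise RuntimeError('The %s filter requires python-netaddr' % f_name)

    FILTER_MAP = {'cidr': lambda value: str(netaddr.IPNetwork(value).cidr)}

    def filters():
        if netaddr:
            return FILTER_MAP
        # names still resolve; calling one explains what to install
        return dict((name, partial(_need_netaddr, name)) for name in FILTER_MAP)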
diff --git a/v1/ansible/runner/filter_plugins/mathstuff.py b/v1/ansible/runner/filter_plugins/mathstuff.py
deleted file mode 100644
index c6a49485a4..0000000000
--- a/v1/ansible/runner/filter_plugins/mathstuff.py
+++ /dev/null
@@ -1,126 +0,0 @@
-# (c) 2014, Brian Coca <bcoca@ansible.com>
-#
-# This file is part of Ansible
-#
-# Ansible is free software: you can redistribute it and/or modify
-# it under the terms of the GNU General Public License as published by
-# the Free Software Foundation, either version 3 of the License, or
-# (at your option) any later version.
-#
-# Ansible is distributed in the hope that it will be useful,
-# but WITHOUT ANY WARRANTY; without even the implied warranty of
-# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
-# GNU General Public License for more details.
-#
-# You should have received a copy of the GNU General Public License
-# along with Ansible. If not, see <http://www.gnu.org/licenses/>.
-
-from __future__ import absolute_import
-
-import math
-import collections
-from ansible import errors
-
-def unique(a):
- if isinstance(a,collections.Hashable):
- c = set(a)
- else:
- c = []
- for x in a:
- if x not in c:
- c.append(x)
- return c
-
-def intersect(a, b):
- if isinstance(a,collections.Hashable) and isinstance(b,collections.Hashable):
- c = set(a) & set(b)
- else:
- c = unique(filter(lambda x: x in b, a))
- return c
-
-def difference(a, b):
- if isinstance(a,collections.Hashable) and isinstance(b,collections.Hashable):
- c = set(a) - set(b)
- else:
- c = unique(filter(lambda x: x not in b, a))
- return c
-
-def symmetric_difference(a, b):
- if isinstance(a,collections.Hashable) and isinstance(b,collections.Hashable):
- c = set(a) ^ set(b)
- else:
- c = unique(filter(lambda x: x not in intersect(a,b), union(a,b)))
- return c
-
-def union(a, b):
- if isinstance(a,collections.Hashable) and isinstance(b,collections.Hashable):
- c = set(a) | set(b)
- else:
- c = unique(a + b)
- return c
-
-def min(a):
- _min = __builtins__.get('min')
-    return _min(a)
-
-def max(a):
- _max = __builtins__.get('max')
-    return _max(a)
-
-def isnotanumber(x):
- try:
- return math.isnan(x)
- except TypeError:
- return False
-
-
-def logarithm(x, base=math.e):
- try:
- if base == 10:
- return math.log10(x)
- else:
- return math.log(x, base)
- except TypeError, e:
- raise errors.AnsibleFilterError('log() can only be used on numbers: %s' % str(e))
-
-
-def power(x, y):
- try:
- return math.pow(x, y)
- except TypeError, e:
- raise errors.AnsibleFilterError('pow() can only be used on numbers: %s' % str(e))
-
-
-def inversepower(x, base=2):
- try:
- if base == 2:
- return math.sqrt(x)
- else:
- return math.pow(x, 1.0/float(base))
- except TypeError, e:
- raise errors.AnsibleFilterError('root() can only be used on numbers: %s' % str(e))
-
-
-class FilterModule(object):
- ''' Ansible math jinja2 filters '''
-
- def filters(self):
- return {
- # general math
- 'isnan': isnotanumber,
- 'min' : min,
- 'max' : max,
-
- # exponents and logarithms
- 'log': logarithm,
- 'pow': power,
- 'root': inversepower,
-
- # set theory
- 'unique' : unique,
- 'intersect': intersect,
- 'difference': difference,
- 'symmetric_difference': symmetric_difference,
- 'union': union,
-
- }
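Aside: the set-theory filters deleted above all follow one rule: use real set operations when the elements are hashable, and fall back to order-preserving list scans otherwise. A compact sketch of the difference case (using try/except instead of the isinstance(..., Hashable) check above):

    def unique(seq):
        out = []
        for x in seq:
            if x not in out:      # list membership also works for dicts
                out.append(x)
        return out

    def difference(a, b):
        try:
            return set(a) - set(b)                     # hashable fast path
        except TypeError:
            return unique(x for x in a if x not in b)  # e.g. lists of dicts

    print(difference([3, 1, 2, 3], [2]))   # {1, 3}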
diff --git a/v1/ansible/runner/lookup_plugins/__init__.py b/v1/ansible/runner/lookup_plugins/__init__.py
deleted file mode 100644
index e69de29bb2..0000000000
--- a/v1/ansible/runner/lookup_plugins/__init__.py
+++ /dev/null
diff --git a/v1/ansible/runner/lookup_plugins/cartesian.py b/v1/ansible/runner/lookup_plugins/cartesian.py
deleted file mode 100644
index ab7bba0f0f..0000000000
--- a/v1/ansible/runner/lookup_plugins/cartesian.py
+++ /dev/null
@@ -1,59 +0,0 @@
-# (c) 2013, Bradley Young <young.bradley@gmail.com>
-#
-# This file is part of Ansible
-#
-# Ansible is free software: you can redistribute it and/or modify
-# it under the terms of the GNU General Public License as published by
-# the Free Software Foundation, either version 3 of the License, or
-# (at your option) any later version.
-#
-# Ansible is distributed in the hope that it will be useful,
-# but WITHOUT ANY WARRANTY; without even the implied warranty of
-# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
-# GNU General Public License for more details.
-#
-# You should have received a copy of the GNU General Public License
-# along with Ansible. If not, see <http://www.gnu.org/licenses/>.
-
-import ansible.utils as utils
-import ansible.errors as errors
-from itertools import product
-
-def flatten(terms):
- ret = []
- for term in terms:
- if isinstance(term, list):
- ret.extend(term)
- elif isinstance(term, tuple):
- ret.extend(term)
- else:
- ret.append(term)
- return ret
-
-class LookupModule(object):
- """
- Create the cartesian product of lists
- [1, 2, 3], [a, b] -> [1, a], [1, b], [2, a], [2, b], [3, a], [3, b]
- """
-
- def __init__(self, basedir=None, **kwargs):
- self.basedir = basedir
-
- def __lookup_injects(self, terms, inject):
- results = []
- for x in terms:
- intermediate = utils.listify_lookup_plugin_terms(x, self.basedir, inject)
- results.append(intermediate)
- return results
-
- def run(self, terms, inject=None, **kwargs):
-
- terms = utils.listify_lookup_plugin_terms(terms, self.basedir, inject)
- terms = self.__lookup_injects(terms, inject)
-
- my_list = terms[:]
- if len(my_list) == 0:
- raise errors.AnsibleError("with_cartesian requires at least one element in each list")
- return [flatten(x) for x in product(*my_list)]
-
-
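Aside: stripped of the inject/listify plumbing, with_cartesian is itertools.product plus the flatten step shown above:

    from itertools import product

    def flatten(terms):
        ret = []
        for term in terms:
            if isinstance(term, (list, tuple)):
                ret.extend(term)
            else:
                ret.append(term)
        return ret

    lists = [[1, 2, 3], ['a', 'b']]
    print([flatten(c) for c in product(*lists)])
    # [[1, 'a'], [1, 'b'], [2, 'a'], [2, 'b'], [3, 'a'], [3, 'b']]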
diff --git a/v1/ansible/runner/lookup_plugins/consul_kv.py b/v1/ansible/runner/lookup_plugins/consul_kv.py
deleted file mode 100755
index 522fa8deb7..0000000000
--- a/v1/ansible/runner/lookup_plugins/consul_kv.py
+++ /dev/null
@@ -1,128 +0,0 @@
-# (c) 2015, Steve Gargan <steve.gargan@gmail.com>
-#
-# This file is part of Ansible
-#
-# Ansible is free software: you can redistribute it and/or modify
-# it under the terms of the GNU General Public License as published by
-# the Free Software Foundation, either version 3 of the License, or
-# (at your option) any later version.
-#
-# Ansible is distributed in the hope that it will be useful,
-# but WITHOUT ANY WARRANTY; without even the implied warranty of
-# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
-# GNU General Public License for more details.
-#
-# You should have received a copy of the GNU General Public License
-# along with Ansible. If not, see <http://www.gnu.org/licenses/>.
-
-'''
-Lookup plugin to grab metadata from a consul key value store.
-============================================================
-
-Plugin will lookup metadata for a playbook from the key value store in a
-consul cluster. Values can be easily set in the kv store with simple rest
-commands e.g.
-
-curl -X PUT -d 'some-value' http://localhost:8500/v1/kv/ansible/somedata
-
-this can then be looked up in a playbook as follows
-
-- debug: msg='key contains {{item}}'
- with_consul_kv:
- - 'key/to/retrieve'
-
-
-Parameters can be provided after the key to be more specific about what to retrieve, e.g.
-
-- debug: msg='key contains {{item}}'
- with_consul_kv:
-  - 'key/to/retrieve recurse=true token=E6C060A9-26FB-407A-B83E-12DDAFCB4D98'
-
-recurse: if true, will retrieve all the values that have the given key as prefix
-index: if the key has a value with the specified index then this is returned
- allowing access to historical values.
-token: acl token to allow access to restricted values.
-
-By default this will look up keys via the consul agent running on http://localhost:8500.
-This can be changed by setting the env variable 'ANSIBLE_CONSUL_URL' to point to the url
-of the kv store you'd like to use.
-
-'''
-
-######################################################################
-
-import os
-import sys
-from urlparse import urlparse
-from ansible import utils, errors
-
-try:
- import json
-except ImportError:
- import simplejson as json
-
-try:
- import consul
-except ImportError, e:
- print "failed=True msg='python-consul required for this module. "\
- "see http://python-consul.readthedocs.org/en/latest/#installation'"
- sys.exit(1)
-
-
-class LookupModule(object):
-
- def __init__(self, basedir=None, **kwargs):
-
- self.basedir = basedir
- self.agent_url = 'http://localhost:8500'
- if os.getenv('ANSIBLE_CONSUL_URL') is not None:
- self.agent_url = os.environ['ANSIBLE_CONSUL_URL']
-
- def run(self, terms, inject=None, **kwargs):
-
- u = urlparse(self.agent_url)
- consul_api = consul.Consul(host=u.hostname, port=u.port)
-
- values = []
- terms = utils.listify_lookup_plugin_terms(terms, self.basedir, inject)
- try:
- for term in terms:
- params = self.parse_params(term)
- results = consul_api.kv.get(params['key'],
- token=params['token'],
- index=params['index'],
- recurse=params['recurse'])
- if results[1]:
- # responds with a single or list of result maps
- if isinstance(results[1], list):
- for r in results[1]:
- values.append(r['Value'])
- else:
- values.append(results[1]['Value'])
- except Exception, e:
- raise errors.AnsibleError(
- "Error locating '%s' in kv store. Error was %s" % (term, e))
-
- return values
-
- def parse_params(self, term):
- params = term.split(' ')
-
- paramvals = {
- 'key': params[0],
- 'token': None,
- 'recurse': False,
- 'index': None
- }
-
- # parameters specified?
- try:
- for param in params[1:]:
- if param and len(param) > 0:
- name, value = param.split('=')
-                    assert name in paramvals, "%s is not a valid consul lookup parameter" % name
- paramvals[name] = value
- except (ValueError, AssertionError), e:
- raise errors.AnsibleError(e)
-
- return paramvals
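Aside: the term format accepted by parse_params above is simply 'key name=value ...'. A dependency-free sketch of that parser, keeping the same defaults as the deleted code:

    def parse_params(term):
        params = term.split(' ')
        paramvals = {'key': params[0], 'token': None,
                     'recurse': False, 'index': None}
        for param in params[1:]:
            if not param:
                continue
            name, value = param.split('=')
            if name not in paramvals:
                raise ValueError('%s is not a valid consul lookup parameter' % name)
            paramvals[name] = value
        return paramvals

    print(parse_params('key/to/retrieve recurse=true'))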
diff --git a/v1/ansible/runner/lookup_plugins/csvfile.py b/v1/ansible/runner/lookup_plugins/csvfile.py
deleted file mode 100644
index a9ea8ed90c..0000000000
--- a/v1/ansible/runner/lookup_plugins/csvfile.py
+++ /dev/null
@@ -1,85 +0,0 @@
-# (c) 2013, Jan-Piet Mens <jpmens(at)gmail.com>
-#
-# This file is part of Ansible
-#
-# Ansible is free software: you can redistribute it and/or modify
-# it under the terms of the GNU General Public License as published by
-# the Free Software Foundation, either version 3 of the License, or
-# (at your option) any later version.
-#
-# Ansible is distributed in the hope that it will be useful,
-# but WITHOUT ANY WARRANTY; without even the implied warranty of
-# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
-# GNU General Public License for more details.
-#
-# You should have received a copy of the GNU General Public License
-# along with Ansible. If not, see <http://www.gnu.org/licenses/>.
-
-from ansible import utils, errors
-import os
-import codecs
-import csv
-
-class LookupModule(object):
-
- def __init__(self, basedir=None, **kwargs):
- self.basedir = basedir
-
- def read_csv(self, filename, key, delimiter, dflt=None, col=1):
-
- try:
- f = codecs.open(filename, 'r', encoding='utf-8')
- creader = csv.reader(f, delimiter=delimiter)
-
- for row in creader:
- if row[0] == key:
- return row[int(col)]
- except Exception, e:
- raise errors.AnsibleError("csvfile: %s" % str(e))
-
- return dflt
-
- def run(self, terms, inject=None, **kwargs):
-
- terms = utils.listify_lookup_plugin_terms(terms, self.basedir, inject)
-
- if isinstance(terms, basestring):
- terms = [ terms ]
-
- ret = []
- for term in terms:
- params = term.split()
- key = params[0]
-
- paramvals = {
- 'file' : 'ansible.csv',
- 'default' : None,
- 'delimiter' : "TAB",
- 'col' : "1", # column to return
- }
-
- # parameters specified?
- try:
- for param in params[1:]:
- name, value = param.split('=')
- assert(name in paramvals)
- if name == 'delimiter':
- paramvals[name] = str(value)
- else:
- paramvals[name] = value
- except (ValueError, AssertionError), e:
- raise errors.AnsibleError(e)
-
- if paramvals['delimiter'] == 'TAB':
- paramvals['delimiter'] = "\t"
-
- path = utils.path_dwim(self.basedir, paramvals['file'])
-
- var = self.read_csv(path, key, paramvals['delimiter'], paramvals['default'], paramvals['col'])
- if var is not None:
- if type(var) is list:
- for v in var:
- ret.append(v)
- else:
- ret.append(var)
- return ret
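Aside: at its core the csvfile lookup scans for the row whose first column equals the key and returns the requested column. A minimal Python 3 sketch with the same defaults (tab delimiter, column 1; the sample file name is illustrative):

    import csv

    def csv_lookup(path, key, delimiter='\t', col=1, default=None):
        with open(path, newline='', encoding='utf-8') as f:
            for row in csv.reader(f, delimiter=delimiter):
                if row and row[0] == key:
                    return row[col]
        return default

    # given ansible.csv containing "web1<TAB>10.0.0.5":
    # csv_lookup('ansible.csv', 'web1') -> '10.0.0.5'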
diff --git a/v1/ansible/runner/lookup_plugins/dict.py b/v1/ansible/runner/lookup_plugins/dict.py
deleted file mode 100644
index cda1546598..0000000000
--- a/v1/ansible/runner/lookup_plugins/dict.py
+++ /dev/null
@@ -1,39 +0,0 @@
-# (c) 2014, Kent R. Spillner <kspillner@acm.org>
-#
-# This file is part of Ansible
-#
-# Ansible is free software: you can redistribute it and/or modify
-# it under the terms of the GNU General Public License as published by
-# the Free Software Foundation, either version 3 of the License, or
-# (at your option) any later version.
-#
-# Ansible is distributed in the hope that it will be useful,
-# but WITHOUT ANY WARRANTY; without even the implied warranty of
-# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
-# GNU General Public License for more details.
-#
-# You should have received a copy of the GNU General Public License
-# along with Ansible. If not, see <http://www.gnu.org/licenses/>.
-
-from ansible.utils import safe_eval
-import ansible.utils as utils
-import ansible.errors as errors
-
-def flatten_hash_to_list(terms):
- ret = []
- for key in terms:
- ret.append({'key': key, 'value': terms[key]})
- return ret
-
-class LookupModule(object):
-
- def __init__(self, basedir=None, **kwargs):
- self.basedir = basedir
-
- def run(self, terms, inject=None, **kwargs):
- terms = utils.listify_lookup_plugin_terms(terms, self.basedir, inject)
-
- if not isinstance(terms, dict):
- raise errors.AnsibleError("with_dict expects a dict")
-
- return flatten_hash_to_list(terms)
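Aside: with_dict, removed above, is no more than turning a mapping into a list of {'key': ..., 'value': ...} items so each pair can be looped over individually:

    def flatten_hash_to_list(terms):
        return [{'key': k, 'value': v} for k, v in terms.items()]

    users = {'alice': {'uid': 1001}, 'bob': {'uid': 1002}}
    for item in flatten_hash_to_list(users):
        print(item['key'], item['value']['uid'])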
diff --git a/v1/ansible/runner/lookup_plugins/dig.py b/v1/ansible/runner/lookup_plugins/dig.py
deleted file mode 100644
index a549a4a157..0000000000
--- a/v1/ansible/runner/lookup_plugins/dig.py
+++ /dev/null
@@ -1,212 +0,0 @@
-# (c) 2015, Jan-Piet Mens <jpmens(at)gmail.com>
-#
-# This file is part of Ansible
-#
-# Ansible is free software: you can redistribute it and/or modify
-# it under the terms of the GNU General Public License as published by
-# the Free Software Foundation, either version 3 of the License, or
-# (at your option) any later version.
-#
-# Ansible is distributed in the hope that it will be useful,
-# but WITHOUT ANY WARRANTY; without even the implied warranty of
-# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
-# GNU General Public License for more details.
-#
-# You should have received a copy of the GNU General Public License
-# along with Ansible. If not, see <http://www.gnu.org/licenses/>.
-
-from ansible import utils, errors
-import socket
-HAVE_DNS=False
-try:
- import dns.resolver
- import dns.reversename
- from dns.rdatatype import *
- from dns.exception import DNSException
- HAVE_DNS=True
-except ImportError:
- pass
-
-def make_rdata_dict(rdata):
- ''' While the 'dig' lookup plugin supports anything which dnspython supports
- out of the box, the following supported_types list describes which
- DNS query types we can convert to a dict.
-
- Note: adding support for RRSIG is hard work. :)
- '''
- supported_types = {
- A : ['address'],
- AAAA : ['address'],
- CNAME : ['target'],
- DNAME : ['target'],
- DLV : ['algorithm', 'digest_type', 'key_tag', 'digest'],
- DNSKEY : ['flags', 'algorithm', 'protocol', 'key'],
- DS : ['algorithm', 'digest_type', 'key_tag', 'digest'],
- HINFO : ['cpu', 'os'],
- LOC : ['latitude', 'longitude', 'altitude', 'size', 'horizontal_precision', 'vertical_precision'],
- MX : ['preference', 'exchange'],
- NAPTR : ['order', 'preference', 'flags', 'service', 'regexp', 'replacement'],
- NS : ['target'],
- NSEC3PARAM : ['algorithm', 'flags', 'iterations', 'salt'],
- PTR : ['target'],
- RP : ['mbox', 'txt'],
- # RRSIG : ['algorithm', 'labels', 'original_ttl', 'expiration', 'inception', 'signature'],
- SOA : ['mname', 'rname', 'serial', 'refresh', 'retry', 'expire', 'minimum'],
- SPF : ['strings'],
- SRV : ['priority', 'weight', 'port', 'target'],
- SSHFP : ['algorithm', 'fp_type', 'fingerprint'],
- TLSA : ['usage', 'selector', 'mtype', 'cert'],
- TXT : ['strings'],
- }
-
- rd = {}
-
- if rdata.rdtype in supported_types:
- fields = supported_types[rdata.rdtype]
- for f in fields:
- val = rdata.__getattribute__(f)
-
- if type(val) == dns.name.Name:
- val = dns.name.Name.to_text(val)
-
- if rdata.rdtype == DLV and f == 'digest':
- val = dns.rdata._hexify(rdata.digest).replace(' ', '')
- if rdata.rdtype == DS and f == 'digest':
- val = dns.rdata._hexify(rdata.digest).replace(' ', '')
- if rdata.rdtype == DNSKEY and f == 'key':
- val = dns.rdata._base64ify(rdata.key).replace(' ', '')
- if rdata.rdtype == NSEC3PARAM and f == 'salt':
- val = dns.rdata._hexify(rdata.salt).replace(' ', '')
- if rdata.rdtype == SSHFP and f == 'fingerprint':
- val = dns.rdata._hexify(rdata.fingerprint).replace(' ', '')
- if rdata.rdtype == TLSA and f == 'cert':
- val = dns.rdata._hexify(rdata.cert).replace(' ', '')
-
-
- rd[f] = val
-
- return rd
-
-# ==============================================================
-# dig: Lookup DNS records
-#
-# --------------------------------------------------------------
-
-class LookupModule(object):
-
- def __init__(self, basedir=None, **kwargs):
- self.basedir = basedir
-
- if HAVE_DNS == False:
- raise errors.AnsibleError("Can't LOOKUP(dig): module dns.resolver is not installed")
-
- def run(self, terms, inject=None, **kwargs):
-
- '''
- terms contains a string with things to `dig' for. We support the
- following formats:
- example.com # A record
- example.com qtype=A # same
- example.com/TXT # specific qtype
- example.com qtype=txt # same
- 192.168.1.2/PTR # reverse PTR
- ^^ shortcut for 2.1.168.192.in-addr.arpa/PTR
- example.net/AAAA @nameserver # query specified server
- ^^^ can be comma-sep list of names/addresses
-
- ... flat=0 # returns a dict; default is 1 == string
- '''
- terms = terms.split()
-
- # Create Resolver object so that we can set NS if necessary
- myres = dns.resolver.Resolver()
- edns_size = 4096
- myres.use_edns(0, ednsflags=dns.flags.DO, payload=edns_size)
-
- domain = None
- qtype = 'A'
- flat = True
-
- for t in terms:
- if t.startswith('@'): # e.g. "@10.0.1.2,192.168.1.1" is ok.
- nsset = t[1:].split(',')
- nameservers = []
- for ns in nsset:
- # Check if we have a valid IP address. If so, use that, otherwise
- # try to resolve name to address using system's resolver. If that
- # fails we bail out.
- try:
- socket.inet_aton(ns)
- nameservers.append(ns)
- except:
- try:
- nsaddr = dns.resolver.query(ns)[0].address
- nameservers.append(nsaddr)
- except Exception, e:
- raise errors.AnsibleError("dns lookup NS: ", str(e))
- myres.nameservers = nameservers
- continue
- if '=' in t:
- try:
- opt, arg = t.split('=')
- except:
- pass
-
- if opt == 'qtype':
- qtype = arg.upper()
- elif opt == 'flat':
- flat = int(arg)
-
- continue
-
- if '/' in t:
- try:
- domain, qtype = t.split('/')
- except:
- domain = t
- else:
- domain = t
-
- # print "--- domain = {0} qtype={1}".format(domain, qtype)
-
- ret = []
-
- if qtype.upper() == 'PTR':
- try:
- n = dns.reversename.from_address(domain)
- domain = n.to_text()
- except dns.exception.SyntaxError:
- pass
- except Exception, e:
- raise errors.AnsibleError("dns.reversename unhandled exception", str(e))
-
- try:
- answers = myres.query(domain, qtype)
- for rdata in answers:
- s = rdata.to_text()
- if qtype.upper() == 'TXT':
- s = s[1:-1] # Strip outside quotes on TXT rdata
-
- if flat:
- ret.append(s)
- else:
- try:
- rd = make_rdata_dict(rdata)
- rd['owner'] = answers.canonical_name.to_text()
- rd['type'] = dns.rdatatype.to_text(rdata.rdtype)
- rd['ttl'] = answers.rrset.ttl
-
- ret.append(rd)
- except Exception, e:
- ret.append(str(e))
-
- except dns.resolver.NXDOMAIN:
- ret.append('NXDOMAIN')
- except dns.resolver.NoAnswer:
- ret.append("")
- except dns.resolver.Timeout:
- ret.append('')
- except dns.exception.DNSException, e:
- raise errors.AnsibleError("dns.resolver unhandled exception", e)
-
- return ret
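Aside: for orientation, the dnspython calls the dig lookup leans on look roughly like this (dnspython 1.x API, as used above; in 2.x, query() was renamed resolve()):

    import dns.resolver
    import dns.reversename

    resolver = dns.resolver.Resolver()
    resolver.nameservers = ['8.8.8.8']        # the '@nameserver' term

    for rdata in resolver.query('example.com', 'MX'):
        print(rdata.preference, rdata.exchange)

    # the '192.168.1.2/PTR' shortcut builds the reverse name first:
    print(dns.reversename.from_address('192.168.1.2'))
    # -> 2.1.168.192.in-addr.arpa.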
diff --git a/v1/ansible/runner/lookup_plugins/dnstxt.py b/v1/ansible/runner/lookup_plugins/dnstxt.py
deleted file mode 100644
index 4fa47bf4ee..0000000000
--- a/v1/ansible/runner/lookup_plugins/dnstxt.py
+++ /dev/null
@@ -1,68 +0,0 @@
-# (c) 2012, Jan-Piet Mens <jpmens(at)gmail.com>
-#
-# This file is part of Ansible
-#
-# Ansible is free software: you can redistribute it and/or modify
-# it under the terms of the GNU General Public License as published by
-# the Free Software Foundation, either version 3 of the License, or
-# (at your option) any later version.
-#
-# Ansible is distributed in the hope that it will be useful,
-# but WITHOUT ANY WARRANTY; without even the implied warranty of
-# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
-# GNU General Public License for more details.
-#
-# You should have received a copy of the GNU General Public License
-# along with Ansible. If not, see <http://www.gnu.org/licenses/>.
-
-from ansible import utils, errors
-import os
-HAVE_DNS=False
-try:
- import dns.resolver
- from dns.exception import DNSException
- HAVE_DNS=True
-except ImportError:
- pass
-
-# ==============================================================
-# DNSTXT: DNS TXT records
-#
-# key=domainname
-# TODO: configurable resolver IPs
-# --------------------------------------------------------------
-
-class LookupModule(object):
-
- def __init__(self, basedir=None, **kwargs):
- self.basedir = basedir
-
- if HAVE_DNS == False:
- raise errors.AnsibleError("Can't LOOKUP(dnstxt): module dns.resolver is not installed")
-
- def run(self, terms, inject=None, **kwargs):
-
- terms = utils.listify_lookup_plugin_terms(terms, self.basedir, inject)
-
- if isinstance(terms, basestring):
- terms = [ terms ]
-
- ret = []
- for term in terms:
- domain = term.split()[0]
- string = []
- try:
- answers = dns.resolver.query(domain, 'TXT')
- for rdata in answers:
- s = rdata.to_text()
- string.append(s[1:-1]) # Strip outside quotes on TXT rdata
-
- except dns.resolver.NXDOMAIN:
- string = 'NXDOMAIN'
- except dns.resolver.Timeout:
- string = ''
- except dns.exception.DNSException, e:
- raise errors.AnsibleError("dns.resolver unhandled exception", e)
-
- ret.append(''.join(string))
- return ret
diff --git a/v1/ansible/runner/lookup_plugins/env.py b/v1/ansible/runner/lookup_plugins/env.py
deleted file mode 100644
index d4f85356ed..0000000000
--- a/v1/ansible/runner/lookup_plugins/env.py
+++ /dev/null
@@ -1,41 +0,0 @@
-# (c) 2012, Jan-Piet Mens <jpmens(at)gmail.com>
-#
-# This file is part of Ansible
-#
-# Ansible is free software: you can redistribute it and/or modify
-# it under the terms of the GNU General Public License as published by
-# the Free Software Foundation, either version 3 of the License, or
-# (at your option) any later version.
-#
-# Ansible is distributed in the hope that it will be useful,
-# but WITHOUT ANY WARRANTY; without even the implied warranty of
-# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
-# GNU General Public License for more details.
-#
-# You should have received a copy of the GNU General Public License
-# along with Ansible. If not, see <http://www.gnu.org/licenses/>.
-
-from ansible import utils, errors
-from ansible.utils import template
-import os
-
-class LookupModule(object):
-
- def __init__(self, basedir=None, **kwargs):
- self.basedir = basedir
-
- def run(self, terms, inject=None, **kwargs):
-
- try:
- terms = template.template(self.basedir, terms, inject)
- except Exception, e:
- pass
-
- if isinstance(terms, basestring):
- terms = [ terms ]
-
- ret = []
- for term in terms:
- var = term.split()[0]
- ret.append(os.getenv(var, ''))
- return ret
diff --git a/v1/ansible/runner/lookup_plugins/etcd.py b/v1/ansible/runner/lookup_plugins/etcd.py
deleted file mode 100644
index a758a2fb0b..0000000000
--- a/v1/ansible/runner/lookup_plugins/etcd.py
+++ /dev/null
@@ -1,78 +0,0 @@
-# (c) 2013, Jan-Piet Mens <jpmens(at)gmail.com>
-#
-# This file is part of Ansible
-#
-# Ansible is free software: you can redistribute it and/or modify
-# it under the terms of the GNU General Public License as published by
-# the Free Software Foundation, either version 3 of the License, or
-# (at your option) any later version.
-#
-# Ansible is distributed in the hope that it will be useful,
-# but WITHOUT ANY WARRANTY; without even the implied warranty of
-# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
-# GNU General Public License for more details.
-#
-# You should have received a copy of the GNU General Public License
-# along with Ansible. If not, see <http://www.gnu.org/licenses/>.
-
-from ansible import utils
-import os
-import urllib2
-try:
- import json
-except ImportError:
- import simplejson as json
-
-# this can be made configurable, but should not use ansible.cfg
-ANSIBLE_ETCD_URL = 'http://127.0.0.1:4001'
-if os.getenv('ANSIBLE_ETCD_URL') is not None:
- ANSIBLE_ETCD_URL = os.environ['ANSIBLE_ETCD_URL']
-
-class etcd():
- def __init__(self, url=ANSIBLE_ETCD_URL):
- self.url = url
- self.baseurl = '%s/v1/keys' % (self.url)
-
- def get(self, key):
- url = "%s/%s" % (self.baseurl, key)
-
- data = None
- value = ""
- try:
- r = urllib2.urlopen(url)
- data = r.read()
- except:
- return value
-
- try:
- # {"action":"get","key":"/name","value":"Jane Jolie","index":5}
- item = json.loads(data)
- if 'value' in item:
- value = item['value']
- if 'errorCode' in item:
- value = "ENOENT"
- except:
- raise
-
- return value
-
-class LookupModule(object):
-
- def __init__(self, basedir=None, **kwargs):
- self.basedir = basedir
- self.etcd = etcd()
-
- def run(self, terms, inject=None, **kwargs):
-
- terms = utils.listify_lookup_plugin_terms(terms, self.basedir, inject)
-
- if isinstance(terms, basestring):
- terms = [ terms ]
-
- ret = []
- for term in terms:
- key = term.split()[0]
- value = self.etcd.get(key)
- ret.append(value)
- return ret
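Aside: the etcd helper above is a thin wrapper over the old v1 HTTP API: GET /v1/keys/<key> returns a JSON document with a value (or errorCode) field. A Python 3 sketch of the same round trip, with urllib.request standing in for urllib2 and the same return-value conventions:

    import json
    from urllib.request import urlopen

    def etcd_get(key, base='http://127.0.0.1:4001/v1/keys'):
        try:
            data = urlopen('%s/%s' % (base, key)).read()
        except OSError:
            return ''
        # e.g. {"action":"get","key":"/name","value":"Jane Jolie","index":5}
        item = json.loads(data)
        if 'errorCode' in item:
            return 'ENOENT'
        return item.get('value', '')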
diff --git a/v1/ansible/runner/lookup_plugins/file.py b/v1/ansible/runner/lookup_plugins/file.py
deleted file mode 100644
index 70bae6653a..0000000000
--- a/v1/ansible/runner/lookup_plugins/file.py
+++ /dev/null
@@ -1,59 +0,0 @@
-# (c) 2012, Daniel Hokka Zakrisson <daniel@hozac.com>
-#
-# This file is part of Ansible
-#
-# Ansible is free software: you can redistribute it and/or modify
-# it under the terms of the GNU General Public License as published by
-# the Free Software Foundation, either version 3 of the License, or
-# (at your option) any later version.
-#
-# Ansible is distributed in the hope that it will be useful,
-# but WITHOUT ANY WARRANTY; without even the implied warranty of
-# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
-# GNU General Public License for more details.
-#
-# You should have received a copy of the GNU General Public License
-# along with Ansible. If not, see <http://www.gnu.org/licenses/>.
-
-from ansible import utils, errors
-import os
-import codecs
-
-class LookupModule(object):
-
- def __init__(self, basedir=None, **kwargs):
- self.basedir = basedir
-
- def run(self, terms, inject=None, **kwargs):
-
- terms = utils.listify_lookup_plugin_terms(terms, self.basedir, inject)
- ret = []
-
- # this can happen if the variable contains a string, strictly not desired for lookup
- # plugins, but users may try it, so make it work.
- if not isinstance(terms, list):
- terms = [ terms ]
-
- for term in terms:
- basedir_path = utils.path_dwim(self.basedir, term)
- relative_path = None
- playbook_path = None
-
- # Special handling of the file lookup, used primarily when the
- # lookup is done from a role. If the file isn't found in the
- # basedir of the current file, use dwim_relative to look in the
- # role/files/ directory, and finally the playbook directory
- # itself (which will be relative to the current working dir)
- if '_original_file' in inject:
- relative_path = utils.path_dwim_relative(inject['_original_file'], 'files', term, self.basedir, check=False)
- if 'playbook_dir' in inject:
- playbook_path = os.path.join(inject['playbook_dir'], term)
-
- for path in (basedir_path, relative_path, playbook_path):
- if path and os.path.exists(path):
- ret.append(codecs.open(path, encoding="utf8").read().rstrip())
- break
- else:
- raise errors.AnsibleError("could not locate file in lookup: %s" % term)
-
- return ret
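Aside: the search order implemented above (basedir of the current file, then the role's files/ directory, then the playbook directory) boils down to returning the first existing candidate. A sketch assuming the three candidate paths are already computed:

    import os

    def first_readable(candidates):
        # candidates: basedir path, role files/ path, playbook path
        # (any entry may be None)
        for path in candidates:
            if path and os.path.exists(path):
                with open(path, encoding='utf-8') as f:
                    return f.read().rstrip()
        raise IOError('could not locate file in lookup: %r' % (candidates,))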
diff --git a/v1/ansible/runner/lookup_plugins/fileglob.py b/v1/ansible/runner/lookup_plugins/fileglob.py
deleted file mode 100644
index 7d3cbb92be..0000000000
--- a/v1/ansible/runner/lookup_plugins/fileglob.py
+++ /dev/null
@@ -1,39 +0,0 @@
-# (c) 2012, Michael DeHaan <michael.dehaan@gmail.com>
-#
-# This file is part of Ansible
-#
-# Ansible is free software: you can redistribute it and/or modify
-# it under the terms of the GNU General Public License as published by
-# the Free Software Foundation, either version 3 of the License, or
-# (at your option) any later version.
-#
-# Ansible is distributed in the hope that it will be useful,
-# but WITHOUT ANY WARRANTY; without even the implied warranty of
-# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
-# GNU General Public License for more details.
-#
-# You should have received a copy of the GNU General Public License
-# along with Ansible. If not, see <http://www.gnu.org/licenses/>.
-
-import os
-import glob
-from ansible import utils
-
-class LookupModule(object):
-
- def __init__(self, basedir=None, **kwargs):
- self.basedir = basedir
-
- def run(self, terms, inject=None, **kwargs):
-
- terms = utils.listify_lookup_plugin_terms(terms, self.basedir, inject)
-
- ret = []
-
- for term in terms:
-
- dwimmed = utils.path_dwim(self.basedir, term)
- globbed = glob.glob(dwimmed)
- ret.extend(g for g in globbed if os.path.isfile(g))
-
- return ret
diff --git a/v1/ansible/runner/lookup_plugins/first_found.py b/v1/ansible/runner/lookup_plugins/first_found.py
deleted file mode 100644
index a48b56a3c2..0000000000
--- a/v1/ansible/runner/lookup_plugins/first_found.py
+++ /dev/null
@@ -1,194 +0,0 @@
-# (c) 2013, seth vidal <skvidal@fedoraproject.org> red hat, inc
-#
-# This file is part of Ansible
-#
-# Ansible is free software: you can redistribute it and/or modify
-# it under the terms of the GNU General Public License as published by
-# the Free Software Foundation, either version 3 of the License, or
-# (at your option) any later version.
-#
-# Ansible is distributed in the hope that it will be useful,
-# but WITHOUT ANY WARRANTY; without even the implied warranty of
-# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
-# GNU General Public License for more details.
-#
-# You should have received a copy of the GNU General Public License
-# along with Ansible. If not, see <http://www.gnu.org/licenses/>.
-
-
-# take a list of files and (optionally) a list of paths
-# return the first existing file found in the paths
-# [file1, file2, file3], [path1, path2, path3]
-# search order is:
-# path1/file1
-# path1/file2
-# path1/file3
-# path2/file1
-# path2/file2
-# path2/file3
-# path3/file1
-# path3/file2
-# path3/file3
-
-# first file found with os.path.exists() is returned
-# no file matches raises ansibleerror
-# EXAMPLES
-# - name: copy first existing file found to /some/file
-# action: copy src=$item dest=/some/file
-# with_first_found:
-# - files: foo ${inventory_hostname} bar
-# paths: /tmp/production /tmp/staging
-
-# that will look for files in this order:
-# /tmp/production/foo
-# ${inventory_hostname}
-# bar
-# /tmp/staging/foo
-# ${inventory_hostname}
-# bar
-
-# - name: copy first existing file found to /some/file
-# action: copy src=$item dest=/some/file
-# with_first_found:
-# - files: /some/place/foo ${inventory_hostname} /some/place/else
-
-# that will look for files in this order:
-# /some/place/foo
-# $relative_path/${inventory_hostname}
-# /some/place/else
-
-# example - including tasks:
-# tasks:
-# - include: $item
-# with_first_found:
-# - files: generic
-# paths: tasks/staging tasks/production
-# this will include the tasks in the file generic where it is found first (staging or production)
-
-# example simple file lists
-#tasks:
-#- name: first found file
-# action: copy src=$item dest=/etc/file.cfg
-# with_first_found:
-# - files: foo.${inventory_hostname} foo
-
-
-# example skipping if no matched files
-# First_found also offers the ability to control whether or not failing
-# to find a file returns an error or not
-#
-#- name: first found file - or skip
-# action: copy src=$item dest=/etc/file.cfg
-# with_first_found:
-# - files: foo.${inventory_hostname}
-# skip: true
-
-# example a role with default configuration and configuration per host
-# you can set multiple terms with their own files and paths to look through.
-# consider a role that sets some configuration per host falling back on a default config.
-#
-#- name: some configuration template
-# template: src={{ item }} dest=/etc/file.cfg mode=0444 owner=root group=root
-# with_first_found:
-# - files:
-# - ${inventory_hostname}/etc/file.cfg
-# paths:
-# - ../../../templates.overwrites
-# - ../../../templates
-# - files:
-# - etc/file.cfg
-# paths:
-# - templates
-
-# the above will return an empty list if the files cannot be found at all.
-# if skip is unspecified or if it is set to false then it will instead return
-# an error, which can be caught by ignore_errors: true for that action.
-
-# finally - if you want, you can use it in place of first_available_file:
-# you simply cannot use the files, paths or skip options. simply replace
-# first_available_file with with_first_found and leave the file listing in place
-#
-#
-# - name: with_first_found like first_available_file
-# action: copy src=$item dest=/tmp/faftest
-# with_first_found:
-# - ../files/foo
-# - ../files/bar
-# - ../files/baz
-# ignore_errors: true
-
-
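Aside: the path-major search order described in the examples above (every file tried under path1 before moving on to path2) is a nested loop at heart; a minimal sketch, ignoring the comma/space splitting and template expansion the real plugin performs:

    import os

    def first_found(files, paths, skip=False):
        # order: path1/file1, path1/file2, ..., path2/file1, ...
        for path in paths or ['']:
            for fn in files:
                candidate = os.path.join(path, fn)
                if os.path.exists(candidate):
                    return [candidate]
        return [] if skip else [None]

    print(first_found(['foo', 'bar'],
                      ['/tmp/production', '/tmp/staging'], skip=True))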
-from ansible import utils, errors
-import os
-
-class LookupModule(object):
-
- def __init__(self, basedir=None, **kwargs):
- self.basedir = basedir
-
- def run(self, terms, inject=None, **kwargs):
-
- terms = utils.listify_lookup_plugin_terms(terms, self.basedir, inject)
-
- result = None
- anydict = False
- skip = False
-
- for term in terms:
- if isinstance(term, dict):
- anydict = True
-
- total_search = []
- if anydict:
- for term in terms:
- if isinstance(term, dict):
- files = term.get('files', [])
- paths = term.get('paths', [])
- skip = utils.boolean(term.get('skip', False))
-
- filelist = files
- if isinstance(files, basestring):
- files = files.replace(',', ' ')
- files = files.replace(';', ' ')
- filelist = files.split(' ')
-
- pathlist = paths
- if paths:
- if isinstance(paths, basestring):
- paths = paths.replace(',', ' ')
- paths = paths.replace(':', ' ')
- paths = paths.replace(';', ' ')
- pathlist = paths.split(' ')
-
- if not pathlist:
- total_search = filelist
- else:
- for path in pathlist:
- for fn in filelist:
- f = os.path.join(path, fn)
- total_search.append(f)
- else:
- total_search.append(term)
- else:
- total_search = terms
-
- for fn in total_search:
- if inject and '_original_file' in inject:
- # check the templates and vars directories too,
- # if they exist
- for roledir in ('templates', 'vars'):
- path = utils.path_dwim(os.path.join(self.basedir, '..', roledir), fn)
- if os.path.exists(path):
- return [path]
- # if none of the above were found, just check the
- # current filename against the basedir (this will already
- # have ../files from runner, if it's a role task
- path = utils.path_dwim(self.basedir, fn)
- if os.path.exists(path):
- return [path]
- else:
- if skip:
- return []
- else:
- return [None]
-
diff --git a/v1/ansible/runner/lookup_plugins/flattened.py b/v1/ansible/runner/lookup_plugins/flattened.py
deleted file mode 100644
index 6d9dd613be..0000000000
--- a/v1/ansible/runner/lookup_plugins/flattened.py
+++ /dev/null
@@ -1,78 +0,0 @@
-# (c) 2013, Serge van Ginderachter <serge@vanginderachter.be>
-#
-# This file is part of Ansible
-#
-# Ansible is free software: you can redistribute it and/or modify
-# it under the terms of the GNU General Public License as published by
-# the Free Software Foundation, either version 3 of the License, or
-# (at your option) any later version.
-#
-# Ansible is distributed in the hope that it will be useful,
-# but WITHOUT ANY WARRANTY; without even the implied warranty of
-# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
-# GNU General Public License for more details.
-#
-# You should have received a copy of the GNU General Public License
-# along with Ansible. If not, see <http://www.gnu.org/licenses/>.
-
-import ansible.utils as utils
-import ansible.errors as errors
-
-
-def check_list_of_one_list(term):
- # make sure term is not a list of one (list of one..) item
- # return the final non list item if so
-
- if isinstance(term,list) and len(term) == 1:
- term = term[0]
- if isinstance(term,list):
- term = check_list_of_one_list(term)
-
- return term
-
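-# for example, check_list_of_one_list unwraps nested single-item lists:
-#   [[['a']]] -> 'a'
-# while ['a', 'b'] is returned unchanged, since only one-item lists are unwrapped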
-
-
-class LookupModule(object):
-
- def __init__(self, basedir=None, **kwargs):
- self.basedir = basedir
-
-
- def flatten(self, terms, inject):
-
- ret = []
- for term in terms:
- term = check_list_of_one_list(term)
-
- if term == 'None' or term == 'null':
- # ignore undefined items
- break
-
- if isinstance(term, basestring):
- # convert a variable to a list
- term2 = utils.listify_lookup_plugin_terms(term, self.basedir, inject)
- # but avoid converting a plain string to a list of one string
- if term2 != [ term ]:
- term = term2
-
- if isinstance(term, list):
- # if it's a list, check recursively for items that are a list
- term = self.flatten(term, inject)
- ret.extend(term)
- else:
- ret.append(term)
-
- return ret
-
-
- def run(self, terms, inject=None, **kwargs):
-
- # see if the string represents a list and convert to list if so
- terms = utils.listify_lookup_plugin_terms(terms, self.basedir, inject)
-
- if not isinstance(terms, list):
- raise errors.AnsibleError("with_flattened expects a list")
-
- ret = self.flatten(terms, inject)
- return ret
-
diff --git a/v1/ansible/runner/lookup_plugins/indexed_items.py b/v1/ansible/runner/lookup_plugins/indexed_items.py
deleted file mode 100644
index c1db1fdee2..0000000000
--- a/v1/ansible/runner/lookup_plugins/indexed_items.py
+++ /dev/null
@@ -1,44 +0,0 @@
-# (c) 2012, Michael DeHaan <michael.dehaan@gmail.com>
-#
-# This file is part of Ansible
-#
-# Ansible is free software: you can redistribute it and/or modify
-# it under the terms of the GNU General Public License as published by
-# the Free Software Foundation, either version 3 of the License, or
-# (at your option) any later version.
-#
-# Ansible is distributed in the hope that it will be useful,
-# but WITHOUT ANY WARRANTY; without even the implied warranty of
-# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
-# GNU General Public License for more details.
-#
-# You should have received a copy of the GNU General Public License
-# along with Ansible. If not, see <http://www.gnu.org/licenses/>.
-
-from ansible.utils import safe_eval
-import ansible.utils as utils
-import ansible.errors as errors
-
-def flatten(terms):
- ret = []
- for term in terms:
- if isinstance(term, list):
- ret.extend(term)
- else:
- ret.append(term)
- return ret
-
-class LookupModule(object):
-
- def __init__(self, basedir=None, **kwargs):
- self.basedir = basedir
-
- def run(self, terms, inject=None, **kwargs):
- terms = utils.listify_lookup_plugin_terms(terms, self.basedir, inject)
-
- if not isinstance(terms, list):
- raise errors.AnsibleError("with_indexed_items expects a list")
-
- items = flatten(terms)
- return zip(range(len(items)), items)
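-# example usage, where item.0 is the index and item.1 the value:
-#   tasks:
-#   - debug: msg="at position {{ item.0 }} there is value {{ item.1 }}"
-#     with_indexed_items: somelist
-# so ['a', 'b', 'c'] yields the pairs (0, 'a'), (1, 'b'), (2, 'c')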
-
diff --git a/v1/ansible/runner/lookup_plugins/inventory_hostnames.py b/v1/ansible/runner/lookup_plugins/inventory_hostnames.py
deleted file mode 100644
index 98523e1398..0000000000
--- a/v1/ansible/runner/lookup_plugins/inventory_hostnames.py
+++ /dev/null
@@ -1,48 +0,0 @@
-# (c) 2012, Michael DeHaan <michael.dehaan@gmail.com>
-# (c) 2013, Steven Dossett <sdossett@panath.com>
-#
-# This file is part of Ansible
-#
-# Ansible is free software: you can redistribute it and/or modify
-# it under the terms of the GNU General Public License as published by
-# the Free Software Foundation, either version 3 of the License, or
-# (at your option) any later version.
-#
-# Ansible is distributed in the hope that it will be useful,
-# but WITHOUT ANY WARRANTY; without even the implied warranty of
-# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
-# GNU General Public License for more details.
-#
-# You should have received a copy of the GNU General Public License
-# along with Ansible. If not, see <http://www.gnu.org/licenses/>.
-
-from ansible.utils import safe_eval
-import ansible.utils as utils
-import ansible.errors as errors
-import ansible.inventory as inventory
-
-def flatten(terms):
- ret = []
- for term in terms:
- if isinstance(term, list):
- ret.extend(term)
- else:
- ret.append(term)
- return ret
-
-class LookupModule(object):
-
- def __init__(self, basedir=None, **kwargs):
- self.basedir = basedir
- if 'runner' in kwargs:
- self.host_list = kwargs['runner'].inventory.host_list
- else:
- raise errors.AnsibleError("inventory_hostnames must be used as a loop. Example: \"with_inventory_hostnames: \'all\'\"")
-
- def run(self, terms, inject=None, **kwargs):
- terms = utils.listify_lookup_plugin_terms(terms, self.basedir, inject)
-
- if not isinstance(terms, list):
- raise errors.AnsibleError("with_inventory_hostnames expects a list")
- return flatten(inventory.Inventory(self.host_list).list_hosts(terms))
-
diff --git a/v1/ansible/runner/lookup_plugins/items.py b/v1/ansible/runner/lookup_plugins/items.py
deleted file mode 100644
index 85e77d5380..0000000000
--- a/v1/ansible/runner/lookup_plugins/items.py
+++ /dev/null
@@ -1,44 +0,0 @@
-# (c) 2012, Michael DeHaan <michael.dehaan@gmail.com>
-#
-# This file is part of Ansible
-#
-# Ansible is free software: you can redistribute it and/or modify
-# it under the terms of the GNU General Public License as published by
-# the Free Software Foundation, either version 3 of the License, or
-# (at your option) any later version.
-#
-# Ansible is distributed in the hope that it will be useful,
-# but WITHOUT ANY WARRANTY; without even the implied warranty of
-# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
-# GNU General Public License for more details.
-#
-# You should have received a copy of the GNU General Public License
-# along with Ansible. If not, see <http://www.gnu.org/licenses/>.
-
-from ansible.utils import safe_eval
-import ansible.utils as utils
-import ansible.errors as errors
-
-def flatten(terms):
- ret = []
- for term in terms:
- if isinstance(term, list):
- ret.extend(term)
- else:
- ret.append(term)
- return ret
-
-class LookupModule(object):
-
- def __init__(self, basedir=None, **kwargs):
- self.basedir = basedir
-
- def run(self, terms, inject=None, **kwargs):
- terms = utils.listify_lookup_plugin_terms(terms, self.basedir, inject)
-
- if not isinstance(terms, list) and not isinstance(terms,set):
- raise errors.AnsibleError("with_items expects a list or a set")
-
- return flatten(terms)
-
-
diff --git a/v1/ansible/runner/lookup_plugins/lines.py b/v1/ansible/runner/lookup_plugins/lines.py
deleted file mode 100644
index 5d4b70a857..0000000000
--- a/v1/ansible/runner/lookup_plugins/lines.py
+++ /dev/null
@@ -1,38 +0,0 @@
-# (c) 2012, Daniel Hokka Zakrisson <daniel@hozac.com>
-#
-# This file is part of Ansible
-#
-# Ansible is free software: you can redistribute it and/or modify
-# it under the terms of the GNU General Public License as published by
-# the Free Software Foundation, either version 3 of the License, or
-# (at your option) any later version.
-#
-# Ansible is distributed in the hope that it will be useful,
-# but WITHOUT ANY WARRANTY; without even the implied warranty of
-# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
-# GNU General Public License for more details.
-#
-# You should have received a copy of the GNU General Public License
-# along with Ansible. If not, see <http://www.gnu.org/licenses/>.
-
-import subprocess
-from ansible import utils, errors
-
-class LookupModule(object):
-
- def __init__(self, basedir=None, **kwargs):
- self.basedir = basedir
-
- def run(self, terms, inject=None, **kwargs):
-
- terms = utils.listify_lookup_plugin_terms(terms, self.basedir, inject)
-
- ret = []
- for term in terms:
- p = subprocess.Popen(term, cwd=self.basedir, shell=True, stdin=subprocess.PIPE, stdout=subprocess.PIPE)
- (stdout, stderr) = p.communicate()
- if p.returncode == 0:
- ret.extend(stdout.splitlines())
- else:
- raise errors.AnsibleError("lookup_plugin.lines(%s) returned %d" % (term, p.returncode))
- return ret
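-# example usage, where every line of the command's output becomes an item:
-#   tasks:
-#   - debug: msg="{{ item }}"
-#     with_lines: cat /etc/motd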
diff --git a/v1/ansible/runner/lookup_plugins/nested.py b/v1/ansible/runner/lookup_plugins/nested.py
deleted file mode 100644
index 29c4a7d21c..0000000000
--- a/v1/ansible/runner/lookup_plugins/nested.py
+++ /dev/null
@@ -1,73 +0,0 @@
-# (c) 2012, Michael DeHaan <michael.dehaan@gmail.com>
-#
-# This file is part of Ansible
-#
-# Ansible is free software: you can redistribute it and/or modify
-# it under the terms of the GNU General Public License as published by
-# the Free Software Foundation, either version 3 of the License, or
-# (at your option) any later version.
-#
-# Ansible is distributed in the hope that it will be useful,
-# but WITHOUT ANY WARRANTY; without even the implied warranty of
-# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
-# GNU General Public License for more details.
-#
-# You should have received a copy of the GNU General Public License
-# along with Ansible. If not, see <http://www.gnu.org/licenses/>.
-
-import ansible.utils as utils
-from ansible.utils import safe_eval
-import ansible.errors as errors
-
-def flatten(terms):
- ret = []
- for term in terms:
- if isinstance(term, list):
- ret.extend(term)
- elif isinstance(term, tuple):
- ret.extend(term)
- else:
- ret.append(term)
- return ret
-
-def combine(a,b):
- results = []
- for x in a:
- for y in b:
- results.append(flatten([x,y]))
- return results
-
-class LookupModule(object):
-
- def __init__(self, basedir=None, **kwargs):
- self.basedir = basedir
-
- def __lookup_injects(self, terms, inject):
- results = []
- for x in terms:
- intermediate = utils.listify_lookup_plugin_terms(x, self.basedir, inject)
- results.append(intermediate)
- return results
-
- def run(self, terms, inject=None, **kwargs):
-
-        # this code is common with 'items.py'; consider moving it to utils if we need it again
-
- terms = utils.listify_lookup_plugin_terms(terms, self.basedir, inject)
- terms = self.__lookup_injects(terms, inject)
-
- my_list = terms[:]
- my_list.reverse()
- result = []
- if len(my_list) == 0:
- raise errors.AnsibleError("with_nested requires at least one element in the nested list")
- result = my_list.pop()
- while len(my_list) > 0:
- result2 = combine(result, my_list.pop())
- result = result2
- new_result = []
- for x in result:
- new_result.append(flatten(x))
- return new_result
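-# example usage, pairing every element of the first list with every
-# element of the second:
-#   with_nested:
-#   - ['alice', 'bob']
-#   - ['clientdb', 'employeedb']
-# yields [alice, clientdb], [alice, employeedb], [bob, clientdb], [bob, employeedb]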
-
-
diff --git a/v1/ansible/runner/lookup_plugins/password.py b/v1/ansible/runner/lookup_plugins/password.py
deleted file mode 100644
index a066887e2c..0000000000
--- a/v1/ansible/runner/lookup_plugins/password.py
+++ /dev/null
@@ -1,129 +0,0 @@
-# (c) 2012, Daniel Hokka Zakrisson <daniel@hozac.com>
-# (c) 2013, Javier Candeira <javier@candeira.com>
-# (c) 2013, Maykel Moya <mmoya@speedyrails.com>
-#
-# This file is part of Ansible
-#
-# Ansible is free software: you can redistribute it and/or modify
-# it under the terms of the GNU General Public License as published by
-# the Free Software Foundation, either version 3 of the License, or
-# (at your option) any later version.
-#
-# Ansible is distributed in the hope that it will be useful,
-# but WITHOUT ANY WARRANTY; without even the implied warranty of
-# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
-# GNU General Public License for more details.
-#
-# You should have received a copy of the GNU General Public License
-# along with Ansible. If not, see <http://www.gnu.org/licenses/>.
-
-from ansible import utils, errors
-import os
-import errno
-from string import ascii_letters, digits
-import string
-import random
-
-
-class LookupModule(object):
-
- LENGTH = 20
-
- def __init__(self, length=None, encrypt=None, basedir=None, **kwargs):
- self.basedir = basedir
-
- def random_salt(self):
- salt_chars = ascii_letters + digits + './'
- return utils.random_password(length=8, chars=salt_chars)
-
- def run(self, terms, inject=None, **kwargs):
-
- terms = utils.listify_lookup_plugin_terms(terms, self.basedir, inject)
-
- ret = []
-
- for term in terms:
-            # you can't have escaped spaces in your pathname
- params = term.split()
- relpath = params[0]
-
- paramvals = {
- 'length': LookupModule.LENGTH,
- 'encrypt': None,
- 'chars': ['ascii_letters','digits',".,:-_"],
- }
-
- # get non-default parameters if specified
- try:
- for param in params[1:]:
- name, value = param.split('=')
- assert(name in paramvals)
- if name == 'length':
- paramvals[name] = int(value)
- elif name == 'chars':
- use_chars=[]
- if ",," in value:
- use_chars.append(',')
- use_chars.extend(value.replace(',,',',').split(','))
- paramvals['chars'] = use_chars
- else:
- paramvals[name] = value
- except (ValueError, AssertionError), e:
- raise errors.AnsibleError(e)
-
- length = paramvals['length']
- encrypt = paramvals['encrypt']
- use_chars = paramvals['chars']
-
- # get password or create it if file doesn't exist
- path = utils.path_dwim(self.basedir, relpath)
- if not os.path.exists(path):
- pathdir = os.path.dirname(path)
- if not os.path.isdir(pathdir):
- try:
- os.makedirs(pathdir, mode=0700)
- except OSError, e:
- raise errors.AnsibleError("cannot create the path for the password lookup: %s (error was %s)" % (pathdir, str(e)))
-
- chars = "".join([getattr(string,c,c) for c in use_chars]).replace('"','').replace("'",'')
- password = ''.join(random.choice(chars) for _ in range(length))
-
- if encrypt is not None:
- salt = self.random_salt()
- content = '%s salt=%s' % (password, salt)
- else:
- content = password
- with open(path, 'w') as f:
- os.chmod(path, 0600)
- f.write(content + '\n')
- else:
- content = open(path).read().rstrip()
- sep = content.find(' ')
-
- if sep >= 0:
- password = content[:sep]
- salt = content[sep+1:].split('=')[1]
- else:
- password = content
- salt = None
-
- # crypt requested, add salt if missing
- if (encrypt is not None and not salt):
- salt = self.random_salt()
- content = '%s salt=%s' % (password, salt)
- with open(path, 'w') as f:
- os.chmod(path, 0600)
- f.write(content + '\n')
- # crypt not requested, remove salt if present
- elif (encrypt is None and salt):
- with open(path, 'w') as f:
- os.chmod(path, 0600)
- f.write(password + '\n')
-
- if encrypt:
- password = utils.do_encrypt(password, encrypt, salt=salt)
-
- ret.append(password)
-
- return ret
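-# example usage; the password file is created (with a generated password)
-# on first use and simply re-read on subsequent runs:
-#   {{ lookup('password', 'credentials/' + inventory_hostname + ' length=15') }}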
-
diff --git a/v1/ansible/runner/lookup_plugins/pipe.py b/v1/ansible/runner/lookup_plugins/pipe.py
deleted file mode 100644
index 0cd9e1cda5..0000000000
--- a/v1/ansible/runner/lookup_plugins/pipe.py
+++ /dev/null
@@ -1,52 +0,0 @@
-# (c) 2012, Daniel Hokka Zakrisson <daniel@hozac.com>
-#
-# This file is part of Ansible
-#
-# Ansible is free software: you can redistribute it and/or modify
-# it under the terms of the GNU General Public License as published by
-# the Free Software Foundation, either version 3 of the License, or
-# (at your option) any later version.
-#
-# Ansible is distributed in the hope that it will be useful,
-# but WITHOUT ANY WARRANTY; without even the implied warranty of
-# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
-# GNU General Public License for more details.
-#
-# You should have received a copy of the GNU General Public License
-# along with Ansible. If not, see <http://www.gnu.org/licenses/>.
-
-import subprocess
-from ansible import utils, errors
-
-class LookupModule(object):
-
- def __init__(self, basedir=None, **kwargs):
- self.basedir = basedir
-
- def run(self, terms, inject=None, **kwargs):
-
- terms = utils.listify_lookup_plugin_terms(terms, self.basedir, inject)
-
- if isinstance(terms, basestring):
- terms = [ terms ]
-
- ret = []
- for term in terms:
- '''
- http://docs.python.org/2/library/subprocess.html#popen-constructor
-
- The shell argument (which defaults to False) specifies whether to use the
- shell as the program to execute. If shell is True, it is recommended to pass
- args as a string rather than as a sequence
-
- https://github.com/ansible/ansible/issues/6550
- '''
- term = str(term)
-
- p = subprocess.Popen(term, cwd=self.basedir, shell=True, stdin=subprocess.PIPE, stdout=subprocess.PIPE)
- (stdout, stderr) = p.communicate()
- if p.returncode == 0:
- ret.append(stdout.decode("utf-8").rstrip())
- else:
- raise errors.AnsibleError("lookup_plugin.pipe(%s) returned %d" % (term, p.returncode))
- return ret
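-# example usage, capturing the output of a command as a variable:
-#   {{ lookup('pipe', 'date') }}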
diff --git a/v1/ansible/runner/lookup_plugins/random_choice.py b/v1/ansible/runner/lookup_plugins/random_choice.py
deleted file mode 100644
index 9b32c2f119..0000000000
--- a/v1/ansible/runner/lookup_plugins/random_choice.py
+++ /dev/null
@@ -1,41 +0,0 @@
-# (c) 2013, Michael DeHaan <michael.dehaan@gmail.com>
-#
-# This file is part of Ansible
-#
-# Ansible is free software: you can redistribute it and/or modify
-# it under the terms of the GNU General Public License as published by
-# the Free Software Foundation, either version 3 of the License, or
-# (at your option) any later version.
-#
-# Ansible is distributed in the hope that it will be useful,
-# but WITHOUT ANY WARRANTY; without even the implied warranty of
-# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
-# GNU General Public License for more details.
-#
-# You should have received a copy of the GNU General Public License
-# along with Ansible. If not, see <http://www.gnu.org/licenses/>.
-
-import random
-from ansible import utils
-
-# useful for introducing chaos ... or just somewhat reasonably fair selection
-# amongst available mirrors
-#
-# tasks:
-# - debug: msg=$item
-#   with_random_choice:
-#   - one
-#   - two
-#   - three
-
-class LookupModule(object):
-
- def __init__(self, basedir=None, **kwargs):
- self.basedir = basedir
-
- def run(self, terms, inject=None, **kwargs):
-
- terms = utils.listify_lookup_plugin_terms(terms, self.basedir, inject)
-
- return [ random.choice(terms) ]
-
diff --git a/v1/ansible/runner/lookup_plugins/redis_kv.py b/v1/ansible/runner/lookup_plugins/redis_kv.py
deleted file mode 100644
index 22c5c3754f..0000000000
--- a/v1/ansible/runner/lookup_plugins/redis_kv.py
+++ /dev/null
@@ -1,72 +0,0 @@
-# (c) 2012, Jan-Piet Mens <jpmens(at)gmail.com>
-#
-# This file is part of Ansible
-#
-# Ansible is free software: you can redistribute it and/or modify
-# it under the terms of the GNU General Public License as published by
-# the Free Software Foundation, either version 3 of the License, or
-# (at your option) any later version.
-#
-# Ansible is distributed in the hope that it will be useful,
-# but WITHOUT ANY WARRANTY; without even the implied warranty of
-# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
-# GNU General Public License for more details.
-#
-# You should have received a copy of the GNU General Public License
-# along with Ansible. If not, see <http://www.gnu.org/licenses/>.
-
-from ansible import utils, errors
-import os
-HAVE_REDIS=False
-try:
- import redis # https://github.com/andymccurdy/redis-py/
- HAVE_REDIS=True
-except ImportError:
- pass
-import re
-
-# ==============================================================
-# REDISGET: Obtain value from a GET on a Redis key. Terms
-# expected: 0 = URL, 1 = Key
-# URL may be empty, in which case redis://localhost:6379 assumed
-# --------------------------------------------------------------
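-#
-# example usage (an empty URL falls back to the default shown above):
-#   {{ lookup('redis_kv', 'redis://localhost:6379,somekey') }}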
-
-class LookupModule(object):
-
- def __init__(self, basedir=None, **kwargs):
- self.basedir = basedir
-
- if HAVE_REDIS == False:
- raise errors.AnsibleError("Can't LOOKUP(redis_kv): module redis is not installed")
-
- def run(self, terms, inject=None, **kwargs):
-
- terms = utils.listify_lookup_plugin_terms(terms, self.basedir, inject)
-
- ret = []
- for term in terms:
- (url,key) = term.split(',')
- if url == "":
- url = 'redis://localhost:6379'
-
- # urlsplit on Python 2.6.1 is broken. Hmm. Probably also the reason
- # Redis' from_url() doesn't work here.
-
- p = '(?P<scheme>[^:]+)://?(?P<host>[^:/ ]+).?(?P<port>[0-9]*).*'
-
- try:
- m = re.search(p, url)
- host = m.group('host')
- port = int(m.group('port'))
- except AttributeError:
- raise errors.AnsibleError("Bad URI in redis lookup")
-
- try:
- conn = redis.Redis(host=host, port=port)
- res = conn.get(key)
- if res is None:
- res = ""
- ret.append(res)
- except:
- ret.append("") # connection failed or key not found
- return ret
diff --git a/v1/ansible/runner/lookup_plugins/sequence.py b/v1/ansible/runner/lookup_plugins/sequence.py
deleted file mode 100644
index 68b0bbec90..0000000000
--- a/v1/ansible/runner/lookup_plugins/sequence.py
+++ /dev/null
@@ -1,216 +0,0 @@
-# (c) 2013, Jayson Vantuyl <jayson@aggressive.ly>
-#
-# This file is part of Ansible
-#
-# Ansible is free software: you can redistribute it and/or modify
-# it under the terms of the GNU General Public License as published by
-# the Free Software Foundation, either version 3 of the License, or
-# (at your option) any later version.
-#
-# Ansible is distributed in the hope that it will be useful,
-# but WITHOUT ANY WARRANTY; without even the implied warranty of
-# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
-# GNU General Public License for more details.
-#
-# You should have received a copy of the GNU General Public License
-# along with Ansible. If not, see <http://www.gnu.org/licenses/>.
-
-from ansible.errors import AnsibleError
-import ansible.utils as utils
-from re import compile as re_compile, IGNORECASE
-
-# shortcut format
-NUM = "(0?x?[0-9a-f]+)"
-SHORTCUT = re_compile(
- "^(" + # Group 0
- NUM + # Group 1: Start
- "-)?" +
- NUM + # Group 2: End
- "(/" + # Group 3
- NUM + # Group 4: Stride
- ")?" +
- "(:(.+))?$", # Group 5, Group 6: Format String
- IGNORECASE
-)
-
-
-class LookupModule(object):
- """
- sequence lookup module
-
- Used to generate some sequence of items. Takes arguments in two forms.
-
- The simple / shortcut form is:
-
- [start-]end[/stride][:format]
-
- As indicated by the brackets: start, stride, and format string are all
- optional. The format string is in the style of printf. This can be used
- to pad with zeros, format in hexadecimal, etc. All of the numerical values
- can be specified in octal (i.e. 0664) or hexadecimal (i.e. 0x3f8).
- Negative numbers are not supported.
-
- Some examples:
-
- 5 -> ["1","2","3","4","5"]
- 5-8 -> ["5", "6", "7", "8"]
- 2-10/2 -> ["2", "4", "6", "8", "10"]
- 4:host%02d -> ["host01","host02","host03","host04"]
-
- The standard Ansible key-value form is accepted as well. For example:
-
-    start=5 end=11 stride=2 format=0x%02x -> ["0x05","0x07","0x09","0x0b"]
-
- This format takes an alternate form of "end" called "count", which counts
- some number from the starting value. For example:
-
- count=5 -> ["1", "2", "3", "4", "5"]
- start=0x0f00 count=4 format=%04x -> ["0f00", "0f01", "0f02", "0f03"]
- start=0 count=5 stride=2 -> ["0", "2", "4", "6", "8"]
- start=1 count=5 stride=2 -> ["1", "3", "5", "7", "9"]
-
- The count option is mostly useful for avoiding off-by-one errors and errors
- calculating the number of entries in a sequence when a stride is specified.
- """
-
- def __init__(self, basedir, **kwargs):
- """absorb any keyword args"""
- self.basedir = basedir
-
- def reset(self):
- """set sensible defaults"""
- self.start = 1
- self.count = None
- self.end = None
- self.stride = 1
- self.format = "%d"
-
- def parse_kv_args(self, args):
- """parse key-value style arguments"""
- for arg in ["start", "end", "count", "stride"]:
- try:
- arg_raw = args.pop(arg, None)
- if arg_raw is None:
- continue
- arg_cooked = int(arg_raw, 0)
- setattr(self, arg, arg_cooked)
- except ValueError:
- raise AnsibleError(
- "can't parse arg %s=%r as integer"
- % (arg, arg_raw)
- )
- if 'format' in args:
- self.format = args.pop("format")
- if args:
- raise AnsibleError(
- "unrecognized arguments to with_sequence: %r"
- % args.keys()
- )
-
- def parse_simple_args(self, term):
- """parse the shortcut forms, return True/False"""
- match = SHORTCUT.match(term)
- if not match:
- return False
-
- _, start, end, _, stride, _, format = match.groups()
-
- if start is not None:
- try:
- start = int(start, 0)
- except ValueError:
- raise AnsibleError("can't parse start=%s as integer" % start)
- if end is not None:
- try:
- end = int(end, 0)
- except ValueError:
- raise AnsibleError("can't parse end=%s as integer" % end)
- if stride is not None:
- try:
- stride = int(stride, 0)
- except ValueError:
- raise AnsibleError("can't parse stride=%s as integer" % stride)
-
- if start is not None:
- self.start = start
- if end is not None:
- self.end = end
- if stride is not None:
- self.stride = stride
- if format is not None:
- self.format = format
-
- def sanity_check(self):
- if self.count is None and self.end is None:
- raise AnsibleError(
- "must specify count or end in with_sequence"
- )
- elif self.count is not None and self.end is not None:
- raise AnsibleError(
- "can't specify both count and end in with_sequence"
- )
- elif self.count is not None:
- # convert count to end
- if self.count != 0:
- self.end = self.start + self.count * self.stride - 1
- else:
- self.start = 0
- self.end = 0
- self.stride = 0
- del self.count
- if self.stride > 0 and self.end < self.start:
- raise AnsibleError("to count backwards make stride negative")
- if self.stride < 0 and self.end > self.start:
- raise AnsibleError("to count forward don't make stride negative")
- if self.format.count('%') != 1:
- raise AnsibleError("bad formatting string: %s" % self.format)
-
- def generate_sequence(self):
- if self.stride > 0:
- adjust = 1
- else:
- adjust = -1
- numbers = xrange(self.start, self.end + adjust, self.stride)
-
- for i in numbers:
- try:
- formatted = self.format % i
- yield formatted
- except (ValueError, TypeError):
- raise AnsibleError(
-                    "problem formatting %r with %r" % (i, self.format)
- )
-
- def run(self, terms, inject=None, **kwargs):
- results = []
-
- terms = utils.listify_lookup_plugin_terms(terms, self.basedir, inject)
-
- if isinstance(terms, basestring):
- terms = [ terms ]
-
- for term in terms:
- try:
- self.reset() # clear out things for this iteration
-
- try:
- if not self.parse_simple_args(term):
- self.parse_kv_args(utils.parse_kv(term))
- except Exception:
- raise AnsibleError(
- "unknown error parsing with_sequence arguments: %r"
- % term
- )
-
- self.sanity_check()
-
- if self.start != self.end:
- results.extend(self.generate_sequence())
- except AnsibleError:
- raise
- except Exception, e:
- raise AnsibleError(
- "unknown error generating sequence: %s" % str(e)
- )
-
- return results
diff --git a/v1/ansible/runner/lookup_plugins/subelements.py b/v1/ansible/runner/lookup_plugins/subelements.py
deleted file mode 100644
index f33aae717d..0000000000
--- a/v1/ansible/runner/lookup_plugins/subelements.py
+++ /dev/null
@@ -1,67 +0,0 @@
-# (c) 2013, Serge van Ginderachter <serge@vanginderachter.be>
-#
-# This file is part of Ansible
-#
-# Ansible is free software: you can redistribute it and/or modify
-# it under the terms of the GNU General Public License as published by
-# the Free Software Foundation, either version 3 of the License, or
-# (at your option) any later version.
-#
-# Ansible is distributed in the hope that it will be useful,
-# but WITHOUT ANY WARRANTY; without even the implied warranty of
-# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
-# GNU General Public License for more details.
-#
-# You should have received a copy of the GNU General Public License
-# along with Ansible. If not, see <http://www.gnu.org/licenses/>.
-
-import ansible.utils as utils
-import ansible.errors as errors
-
-
-class LookupModule(object):
-
- def __init__(self, basedir=None, **kwargs):
- self.basedir = basedir
-
-
- def run(self, terms, inject=None, **kwargs):
- terms = utils.listify_lookup_plugin_terms(terms, self.basedir, inject)
-
- if not isinstance(terms, list) or not len(terms) == 2:
- raise errors.AnsibleError(
- "subelements lookup expects a list of two items, first a dict or a list, and second a string")
- terms[0] = utils.listify_lookup_plugin_terms(terms[0], self.basedir, inject)
- if not isinstance(terms[0], (list, dict)) or not isinstance(terms[1], basestring):
- raise errors.AnsibleError(
- "subelements lookup expects a list of two items, first a dict or a list, and second a string")
-
- if isinstance(terms[0], dict): # convert to list:
- if terms[0].get('skipped',False) != False:
- # the registered result was completely skipped
- return []
- elementlist = []
- for key in terms[0].iterkeys():
- elementlist.append(terms[0][key])
- else:
- elementlist = terms[0]
- subelement = terms[1]
-
- ret = []
- for item0 in elementlist:
- if not isinstance(item0, dict):
- raise errors.AnsibleError("subelements lookup expects a dictionary, got '%s'" %item0)
- if item0.get('skipped',False) != False:
- # this particular item is to be skipped
- continue
- if not subelement in item0:
- raise errors.AnsibleError("could not find '%s' key in iterated item '%s'" % (subelement, item0))
- if not isinstance(item0[subelement], list):
- raise errors.AnsibleError("the key %s should point to a list, got '%s'" % (subelement, item0[subelement]))
- sublist = item0.pop(subelement, [])
- for item1 in sublist:
- ret.append((item0, item1))
-
- return ret
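-# for example, given a variable like:
-#   users:
-#   - name: alice
-#     authorized: ['/tmp/alice/onekey.pub', '/tmp/alice/twokey.pub']
-#   - name: bob
-#     authorized: ['/tmp/bob/id_rsa.pub']
-# with_subelements: [users, authorized] yields one (user, key) pair per key,
-# so item.0 is the user dict and item.1 a single authorized key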
-
diff --git a/v1/ansible/runner/lookup_plugins/template.py b/v1/ansible/runner/lookup_plugins/template.py
deleted file mode 100644
index e009b6b76b..0000000000
--- a/v1/ansible/runner/lookup_plugins/template.py
+++ /dev/null
@@ -1,33 +0,0 @@
-# (c) 2012, Michael DeHaan <michael.dehaan@gmail.com>
-#
-# This file is part of Ansible
-#
-# Ansible is free software: you can redistribute it and/or modify
-# it under the terms of the GNU General Public License as published by
-# the Free Software Foundation, either version 3 of the License, or
-# (at your option) any later version.
-#
-# Ansible is distributed in the hope that it will be useful,
-# but WITHOUT ANY WARRANTY; without even the implied warranty of
-# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
-# GNU General Public License for more details.
-#
-# You should have received a copy of the GNU General Public License
-# along with Ansible. If not, see <http://www.gnu.org/licenses/>.
-
-from ansible.utils import template
-import ansible.utils as utils
-
-class LookupModule(object):
-
- def __init__(self, basedir=None, **kwargs):
- self.basedir = basedir
-
- def run(self, terms, inject=None, **kwargs):
-
- terms = utils.listify_lookup_plugin_terms(terms, self.basedir, inject)
-
- ret = []
- for term in terms:
- ret.append(template.template_from_file(self.basedir, term, inject))
- return ret
diff --git a/v1/ansible/runner/lookup_plugins/together.py b/v1/ansible/runner/lookup_plugins/together.py
deleted file mode 100644
index 07332c9fb9..0000000000
--- a/v1/ansible/runner/lookup_plugins/together.py
+++ /dev/null
@@ -1,64 +0,0 @@
-# (c) 2013, Bradley Young <young.bradley@gmail.com>
-#
-# This file is part of Ansible
-#
-# Ansible is free software: you can redistribute it and/or modify
-# it under the terms of the GNU General Public License as published by
-# the Free Software Foundation, either version 3 of the License, or
-# (at your option) any later version.
-#
-# Ansible is distributed in the hope that it will be useful,
-# but WITHOUT ANY WARRANTY; without even the implied warranty of
-# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
-# GNU General Public License for more details.
-#
-# You should have received a copy of the GNU General Public License
-# along with Ansible. If not, see <http://www.gnu.org/licenses/>.
-
-import ansible.utils as utils
-from ansible.utils import safe_eval
-import ansible.errors as errors
-from itertools import izip_longest
-
-def flatten(terms):
- ret = []
- for term in terms:
- if isinstance(term, list):
- ret.extend(term)
- elif isinstance(term, tuple):
- ret.extend(term)
- else:
- ret.append(term)
- return ret
-
-class LookupModule(object):
- """
- Transpose a list of arrays:
- [1, 2, 3], [4, 5, 6] -> [1, 4], [2, 5], [3, 6]
- Replace any empty spots in 2nd array with None:
- [1, 2], [3] -> [1, 3], [2, None]
- """
-
- def __init__(self, basedir=None, **kwargs):
- self.basedir = basedir
-
- def __lookup_injects(self, terms, inject):
- results = []
- for x in terms:
- intermediate = utils.listify_lookup_plugin_terms(x, self.basedir, inject)
- results.append(intermediate)
- return results
-
- def run(self, terms, inject=None, **kwargs):
-
-        # this code is common with 'items.py'; consider moving it to utils if we need it again
-
- terms = utils.listify_lookup_plugin_terms(terms, self.basedir, inject)
- terms = self.__lookup_injects(terms, inject)
-
- my_list = terms[:]
- if len(my_list) == 0:
- raise errors.AnsibleError("with_together requires at least one element in each list")
- return [flatten(x) for x in izip_longest(*my_list, fillvalue=None)]
-
-
diff --git a/v1/ansible/runner/lookup_plugins/url.py b/v1/ansible/runner/lookup_plugins/url.py
deleted file mode 100644
index b42b3b14da..0000000000
--- a/v1/ansible/runner/lookup_plugins/url.py
+++ /dev/null
@@ -1,48 +0,0 @@
-# (c) 2015, Brian Coca <bcoca@ansible.com>
-#
-# This file is part of Ansible
-#
-# Ansible is free software: you can redistribute it and/or modify
-# it under the terms of the GNU General Public License as published by
-# the Free Software Foundation, either version 3 of the License, or
-# (at your option) any later version.
-#
-# Ansible is distributed in the hope that it will be useful,
-# but WITHOUT ANY WARRANTY; without even the implied warranty of
-# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
-# GNU General Public License for more details.
-#
-# You should have received a copy of the GNU General Public License
-# along with Ansible. If not, see <http://www.gnu.org/licenses/>.
-
-from ansible import utils
-import urllib2
-
-class LookupModule(object):
-
- def __init__(self, basedir=None, **kwargs):
- self.basedir = basedir
-
- def run(self, terms, inject=None, **kwargs):
-
- terms = utils.listify_lookup_plugin_terms(terms, self.basedir, inject)
-
- if isinstance(terms, basestring):
- terms = [ terms ]
-
- ret = []
- for term in terms:
-            try:
-                r = urllib2.Request(term)
-                response = urllib2.urlopen(r)
-            except urllib2.HTTPError, e:  # HTTPError is a subclass of URLError, so catch it first
-                utils.warning("Received HTTP error for %s : %s" % (term, str(e)))
-                continue
-            except urllib2.URLError, e:
-                utils.warning("Failed lookup url for %s : %s" % (term, str(e)))
-                continue
-
- for line in response.read().splitlines():
- ret.append(line)
-
- return ret
diff --git a/v1/ansible/runner/poller.py b/v1/ansible/runner/poller.py
deleted file mode 100644
index 0218481415..0000000000
--- a/v1/ansible/runner/poller.py
+++ /dev/null
@@ -1,115 +0,0 @@
-# (c) 2012-2014, Michael DeHaan <michael.dehaan@gmail.com>
-#
-# This file is part of Ansible
-#
-# Ansible is free software: you can redistribute it and/or modify
-# it under the terms of the GNU General Public License as published by
-# the Free Software Foundation, either version 3 of the License, or
-# (at your option) any later version.
-#
-# Ansible is distributed in the hope that it will be useful,
-# but WITHOUT ANY WARRANTY; without even the implied warranty of
-# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
-# GNU General Public License for more details.
-#
-# You should have received a copy of the GNU General Public License
-# along with Ansible. If not, see <http://www.gnu.org/licenses/>.
-#
-
-import time
-
-from ansible import errors
-
-class AsyncPoller(object):
- """ Manage asynchronous jobs. """
-
- def __init__(self, results, runner):
- self.runner = runner
-
- self.results = { 'contacted': {}, 'dark': {}}
- self.hosts_to_poll = []
- self.completed = False
-
- # flag to determine if at least one host was contacted
- self.active = False
- # True to work with the `and` below
- skipped = True
- jid = None
- for (host, res) in results['contacted'].iteritems():
- if res.get('started', False):
- self.hosts_to_poll.append(host)
- jid = res.get('ansible_job_id', None)
- self.runner.vars_cache[host]['ansible_job_id'] = jid
- self.active = True
- else:
- skipped = skipped and res.get('skipped', False)
- self.runner.vars_cache[host]['ansible_job_id'] = ''
- self.results['contacted'][host] = res
- for (host, res) in results['dark'].iteritems():
- self.runner.vars_cache[host]['ansible_job_id'] = ''
- self.results['dark'][host] = res
-
- if not skipped:
- if jid is None:
- raise errors.AnsibleError("unexpected error: unable to determine jid")
- if len(self.hosts_to_poll)==0:
- raise errors.AnsibleError("unexpected error: no hosts to poll")
-
- def poll(self):
- """ Poll the job status.
-
- Returns the changes in this iteration."""
- self.runner.module_name = 'async_status'
- self.runner.module_args = "jid={{ansible_job_id}}"
- self.runner.pattern = "*"
- self.runner.background = 0
- self.runner.complex_args = None
-
- self.runner.inventory.restrict_to(self.hosts_to_poll)
- results = self.runner.run()
- self.runner.inventory.lift_restriction()
-
- hosts = []
- poll_results = { 'contacted': {}, 'dark': {}, 'polled': {}}
- for (host, res) in results['contacted'].iteritems():
- if res.get('started',False):
- hosts.append(host)
- poll_results['polled'][host] = res
- else:
- self.results['contacted'][host] = res
- poll_results['contacted'][host] = res
- if res.get('failed', False) or res.get('rc', 0) != 0:
- self.runner.callbacks.on_async_failed(host, res, self.runner.vars_cache[host]['ansible_job_id'])
- else:
- self.runner.callbacks.on_async_ok(host, res, self.runner.vars_cache[host]['ansible_job_id'])
- for (host, res) in results['dark'].iteritems():
- self.results['dark'][host] = res
- poll_results['dark'][host] = res
- if host in self.hosts_to_poll:
- self.runner.callbacks.on_async_failed(host, res, self.runner.vars_cache[host].get('ansible_job_id','XX'))
-
- self.hosts_to_poll = hosts
- if len(hosts)==0:
- self.completed = True
-
- return poll_results
-
- def wait(self, seconds, poll_interval):
- """ Wait a certain time for job completion, check status every poll_interval. """
- # jid is None when all hosts were skipped
- if not self.active:
- return self.results
-
- clock = seconds - poll_interval
- while (clock >= 0 and not self.completed):
- time.sleep(poll_interval)
-
- poll_results = self.poll()
-
- for (host, res) in poll_results['polled'].iteritems():
- if res.get('started'):
- self.runner.callbacks.on_async_poll(host, res, self.runner.vars_cache[host]['ansible_job_id'], clock)
-
- clock = clock - poll_interval
-
- return self.results
diff --git a/v1/ansible/runner/return_data.py b/v1/ansible/runner/return_data.py
deleted file mode 100644
index 8cee506fde..0000000000
--- a/v1/ansible/runner/return_data.py
+++ /dev/null
@@ -1,58 +0,0 @@
-# (c) 2012-2014, Michael DeHaan <michael.dehaan@gmail.com>
-#
-# This file is part of Ansible
-#
-# Ansible is free software: you can redistribute it and/or modify
-# it under the terms of the GNU General Public License as published by
-# the Free Software Foundation, either version 3 of the License, or
-# (at your option) any later version.
-#
-# Ansible is distributed in the hope that it will be useful,
-# but WITHOUT ANY WARRANTY; without even the implied warranty of
-# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
-# GNU General Public License for more details.
-#
-# You should have received a copy of the GNU General Public License
-# along with Ansible. If not, see <http://www.gnu.org/licenses/>.
-
-from ansible import utils
-
-class ReturnData(object):
- ''' internal return class for runner execute methods, not part of public API signature '''
-
- __slots__ = [ 'result', 'comm_ok', 'host', 'diff' ]
-
- def __init__(self, conn=None, host=None, result=None,
- comm_ok=True, diff=dict()):
-
- # which host is this ReturnData about?
- if conn is not None:
- self.host = conn.host
- delegate = getattr(conn, 'delegate', None)
- if delegate is not None:
- self.host = delegate
-
- else:
- self.host = host
-
- self.result = result
- self.comm_ok = comm_ok
-
- # if these values are set and used with --diff we can show
- # changes made to particular files
- self.diff = diff
-
- if type(self.result) in [ str, unicode ]:
- self.result = utils.parse_json(self.result, from_remote=True, no_exceptions=True)
-
- if self.host is None:
- raise Exception("host not set")
- if type(self.result) != dict:
- raise Exception("dictionary result expected")
-
- def communicated_ok(self):
- return self.comm_ok
-
- def is_successful(self):
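-        # a result counts as successful when communication worked, the module
-        # did not report failed, and either failed_when_result evaluated false
-        # (when it is present) or the return code was zero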
- return self.comm_ok and (self.result.get('failed', False) == False) and ('failed_when_result' in self.result and [not self.result['failed_when_result']] or [self.result.get('rc',0) == 0])[0]
-
diff --git a/v1/ansible/runner/shell_plugins/__init__.py b/v1/ansible/runner/shell_plugins/__init__.py
deleted file mode 100644
index e69de29bb2..0000000000
--- a/v1/ansible/runner/shell_plugins/__init__.py
+++ /dev/null
diff --git a/v1/ansible/runner/shell_plugins/csh.py b/v1/ansible/runner/shell_plugins/csh.py
deleted file mode 100644
index 4e9f8c8af7..0000000000
--- a/v1/ansible/runner/shell_plugins/csh.py
+++ /dev/null
@@ -1,26 +0,0 @@
-# (c) 2014, Chris Church <chris@ninemoreminutes.com>
-#
-# This file is part of Ansible.
-#
-# Ansible is free software: you can redistribute it and/or modify
-# it under the terms of the GNU General Public License as published by
-# the Free Software Foundation, either version 3 of the License, or
-# (at your option) any later version.
-#
-# Ansible is distributed in the hope that it will be useful,
-# but WITHOUT ANY WARRANTY; without even the implied warranty of
-# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
-# GNU General Public License for more details.
-#
-# You should have received a copy of the GNU General Public License
-# along with Ansible. If not, see <http://www.gnu.org/licenses/>.
-
-from ansible.runner.shell_plugins.sh import ShellModule as ShModule
-
-class ShellModule(ShModule):
-
- # How to end lines in a python script one-liner
- _SHELL_EMBEDDED_PY_EOL = '\\\n'
-
- def env_prefix(self, **kwargs):
- return 'env %s' % super(ShellModule, self).env_prefix(**kwargs)
diff --git a/v1/ansible/runner/shell_plugins/powershell.py b/v1/ansible/runner/shell_plugins/powershell.py
deleted file mode 100644
index 850b380edd..0000000000
--- a/v1/ansible/runner/shell_plugins/powershell.py
+++ /dev/null
@@ -1,131 +0,0 @@
-# (c) 2014, Chris Church <chris@ninemoreminutes.com>
-#
-# This file is part of Ansible.
-#
-# Ansible is free software: you can redistribute it and/or modify
-# it under the terms of the GNU General Public License as published by
-# the Free Software Foundation, either version 3 of the License, or
-# (at your option) any later version.
-#
-# Ansible is distributed in the hope that it will be useful,
-# but WITHOUT ANY WARRANTY; without even the implied warranty of
-# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
-# GNU General Public License for more details.
-#
-# You should have received a copy of the GNU General Public License
-# along with Ansible. If not, see <http://www.gnu.org/licenses/>.
-
-import base64
-import os
-import re
-import random
-import shlex
-import time
-
-_common_args = ['PowerShell', '-NoProfile', '-NonInteractive']
-
-# Primarily for testing, allow explicitly specifying PowerShell version via
-# an environment variable.
-_powershell_version = os.environ.get('POWERSHELL_VERSION', None)
-if _powershell_version:
- _common_args = ['PowerShell', '-Version', _powershell_version] + _common_args[1:]
-
-def _escape(value, include_vars=False):
- '''Return value escaped for use in PowerShell command.'''
- # http://www.techotopia.com/index.php/Windows_PowerShell_1.0_String_Quoting_and_Escape_Sequences
- # http://stackoverflow.com/questions/764360/a-list-of-string-replacements-in-python
- subs = [('\n', '`n'), ('\r', '`r'), ('\t', '`t'), ('\a', '`a'),
- ('\b', '`b'), ('\f', '`f'), ('\v', '`v'), ('"', '`"'),
- ('\'', '`\''), ('`', '``'), ('\x00', '`0')]
- if include_vars:
- subs.append(('$', '`$'))
- pattern = '|'.join('(%s)' % re.escape(p) for p, s in subs)
- substs = [s for p, s in subs]
- replace = lambda m: substs[m.lastindex - 1]
- return re.sub(pattern, replace, value)
-
-def _encode_script(script, as_list=False):
- '''Convert a PowerShell script to a single base64-encoded command.'''
- script = '\n'.join([x.strip() for x in script.splitlines() if x.strip()])
- encoded_script = base64.b64encode(script.encode('utf-16-le'))
- cmd_parts = _common_args + ['-EncodedCommand', encoded_script]
- if as_list:
- return cmd_parts
- return ' '.join(cmd_parts)
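-
-# for example, with the default _common_args, _encode_script('Write-Host hi')
-# yields a command of the form:
-#   PowerShell -NoProfile -NonInteractive -EncodedCommand <base64 of the UTF-16-LE script>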
-
-def _build_file_cmd(cmd_parts, quote_args=True):
- '''Build command line to run a file, given list of file name plus args.'''
- if quote_args:
- cmd_parts = ['"%s"' % x for x in cmd_parts]
- return ' '.join(_common_args + ['-ExecutionPolicy', 'Unrestricted', '-File'] + cmd_parts)
-
-class ShellModule(object):
-
- def env_prefix(self, **kwargs):
- return ''
-
- def join_path(self, *args):
- return os.path.join(*args).replace('/', '\\')
-
- def path_has_trailing_slash(self, path):
- # Allow Windows paths to be specified using either slash.
- return path.endswith('/') or path.endswith('\\')
-
- def chmod(self, mode, path):
- return ''
-
- def remove(self, path, recurse=False):
- path = _escape(path)
- if recurse:
- return _encode_script('''Remove-Item "%s" -Force -Recurse;''' % path)
- else:
- return _encode_script('''Remove-Item "%s" -Force;''' % path)
-
- def mkdtemp(self, basefile, system=False, mode=None):
- basefile = _escape(basefile)
- # FIXME: Support system temp path!
- return _encode_script('''(New-Item -Type Directory -Path $env:temp -Name "%s").FullName | Write-Host -Separator '';''' % basefile)
-
- def expand_user(self, user_home_path):
- # PowerShell only supports "~" (not "~username"). Resolve-Path ~ does
- # not seem to work remotely, though by default we are always starting
- # in the user's home directory.
- if user_home_path == '~':
- script = 'Write-Host (Get-Location).Path'
- elif user_home_path.startswith('~\\'):
- script = 'Write-Host ((Get-Location).Path + "%s")' % _escape(user_home_path[1:])
- else:
- script = 'Write-Host "%s"' % _escape(user_home_path)
- return _encode_script(script)
-
- def checksum(self, path, python_interp):
- path = _escape(path)
- script = '''
- If (Test-Path -PathType Leaf "%(path)s")
- {
- $sp = new-object -TypeName System.Security.Cryptography.SHA1CryptoServiceProvider;
- $fp = [System.IO.File]::Open("%(path)s", [System.IO.Filemode]::Open, [System.IO.FileAccess]::Read);
- [System.BitConverter]::ToString($sp.ComputeHash($fp)).Replace("-", "").ToLower();
- $fp.Dispose();
- }
- ElseIf (Test-Path -PathType Container "%(path)s")
- {
- Write-Host "3";
- }
- Else
- {
- Write-Host "1";
- }
- ''' % dict(path=path)
- return _encode_script(script)
-
- def build_module_command(self, env_string, shebang, cmd, rm_tmp=None):
- cmd = cmd.encode('utf-8')
- cmd_parts = shlex.split(cmd, posix=False)
- if not cmd_parts[0].lower().endswith('.ps1'):
- cmd_parts[0] = '%s.ps1' % cmd_parts[0]
- script = _build_file_cmd(cmd_parts, quote_args=False)
- if rm_tmp:
- rm_tmp = _escape(rm_tmp)
- script = '%s; Remove-Item "%s" -Force -Recurse;' % (script, rm_tmp)
- return _encode_script(script)
diff --git a/v1/ansible/runner/shell_plugins/sh.py b/v1/ansible/runner/shell_plugins/sh.py
deleted file mode 100644
index 81810bcf8f..0000000000
--- a/v1/ansible/runner/shell_plugins/sh.py
+++ /dev/null
@@ -1,130 +0,0 @@
-# (c) 2014, Chris Church <chris@ninemoreminutes.com>
-#
-# This file is part of Ansible.
-#
-# Ansible is free software: you can redistribute it and/or modify
-# it under the terms of the GNU General Public License as published by
-# the Free Software Foundation, either version 3 of the License, or
-# (at your option) any later version.
-#
-# Ansible is distributed in the hope that it will be useful,
-# but WITHOUT ANY WARRANTY; without even the implied warranty of
-# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
-# GNU General Public License for more details.
-#
-# You should have received a copy of the GNU General Public License
-# along with Ansible. If not, see <http://www.gnu.org/licenses/>.
-
-import os
-import pipes
-import random  # used by mkdtemp below
-import re
-import time    # used by mkdtemp below
-import ansible.constants as C
-
-_USER_HOME_PATH_RE = re.compile(r'^~[_.A-Za-z0-9][-_.A-Za-z0-9]*$')
-
-class ShellModule(object):
-
- # How to end lines in a python script one-liner
- _SHELL_EMBEDDED_PY_EOL = '\n'
-
- def env_prefix(self, **kwargs):
- '''Build command prefix with environment variables.'''
- env = dict(
- LANG = C.DEFAULT_MODULE_LANG,
- LC_CTYPE = C.DEFAULT_MODULE_LANG,
- LC_MESSAGES = C.DEFAULT_MODULE_LANG,
- )
- env.update(kwargs)
- return ' '.join(['%s=%s' % (k, pipes.quote(unicode(v))) for k,v in env.items()])
-
- def join_path(self, *args):
- return os.path.join(*args)
-
- def path_has_trailing_slash(self, path):
- return path.endswith('/')
-
- def chmod(self, mode, path):
- path = pipes.quote(path)
- return 'chmod %s %s' % (mode, path)
-
- def remove(self, path, recurse=False):
- path = pipes.quote(path)
- if recurse:
- return "rm -rf %s >/dev/null 2>&1" % path
- else:
- return "rm -f %s >/dev/null 2>&1" % path
-
- def mkdtemp(self, basefile=None, system=False, mode=None):
- if not basefile:
- basefile = 'ansible-tmp-%s-%s' % (time.time(), random.randint(0, 2**48))
- basetmp = self.join_path(C.DEFAULT_REMOTE_TMP, basefile)
- if system and basetmp.startswith('$HOME'):
- basetmp = self.join_path('/tmp', basefile)
- cmd = 'mkdir -p %s' % basetmp
- if mode:
- cmd += ' && chmod %s %s' % (mode, basetmp)
- cmd += ' && echo %s' % basetmp
- return cmd
-
- def expand_user(self, user_home_path):
- ''' Return a command to expand tildes in a path
-
- It can be either "~" or "~username". We use the POSIX definition of
- a username:
- http://pubs.opengroup.org/onlinepubs/000095399/basedefs/xbd_chap03.html#tag_03_426
- http://pubs.opengroup.org/onlinepubs/000095399/basedefs/xbd_chap03.html#tag_03_276
- '''
-
- # Check that the user_path to expand is safe
- if user_home_path != '~':
- if not _USER_HOME_PATH_RE.match(user_home_path):
- # pipes.quote will make the shell return the string verbatim
- user_home_path = pipes.quote(user_home_path)
- return 'echo %s' % user_home_path
-
- def checksum(self, path, python_interp):
- # The following test needs to be SH-compliant. BASH-isms will
- # not work if /bin/sh points to a non-BASH shell.
- #
- # In the following test, each condition is a check and logical
- # comparison (|| or &&) that sets the rc value. Every check is run so
- # the last check in the series to fail will be the rc that is
- # returned.
- #
- # If a check fails we error before invoking the hash functions because
- # hash functions may successfully take the hash of a directory on BSDs
- # (UFS filesystem?) which is not what the rest of the ansible code
- # expects
- #
- # If all of the available hashing methods fail we fail with an rc of
- # 0. This logic is added to the end of the cmd at the bottom of this
- # function.
-
- # Return codes:
- # checksum: success!
- # 0: Unknown error
- # 1: Remote file does not exist
- # 2: No read permissions on the file
- # 3: File is a directory
- # 4: No python interpreter
-
- # Quoting gets complex here. We're writing a python string that's
- # used by a variety of shells on the remote host to invoke a python
- # "one-liner".
- shell_escaped_path = pipes.quote(path)
- test = "rc=flag; [ -r %(p)s ] || rc=2; [ -f %(p)s ] || rc=1; [ -d %(p)s ] && rc=3; %(i)s -V 2>/dev/null || rc=4; [ x\"$rc\" != \"xflag\" ] && echo \"${rc} \"%(p)s && exit 0" % dict(p=shell_escaped_path, i=python_interp)
- csums = [
- "({0} -c 'import hashlib; BLOCKSIZE = 65536; hasher = hashlib.sha1();{2}afile = open(\"'{1}'\", \"rb\"){2}buf = afile.read(BLOCKSIZE){2}while len(buf) > 0:{2}\thasher.update(buf){2}\tbuf = afile.read(BLOCKSIZE){2}afile.close(){2}print(hasher.hexdigest())' 2>/dev/null)".format(python_interp, shell_escaped_path, self._SHELL_EMBEDDED_PY_EOL), # Python > 2.4 (including python3)
- "({0} -c 'import sha; BLOCKSIZE = 65536; hasher = sha.sha();{2}afile = open(\"'{1}'\", \"rb\"){2}buf = afile.read(BLOCKSIZE){2}while len(buf) > 0:{2}\thasher.update(buf){2}\tbuf = afile.read(BLOCKSIZE){2}afile.close(){2}print(hasher.hexdigest())' 2>/dev/null)".format(python_interp, shell_escaped_path, self._SHELL_EMBEDDED_PY_EOL), # Python == 2.4
- ]
-
- cmd = " || ".join(csums)
- cmd = "%s; %s || (echo \'0 \'%s)" % (test, cmd, shell_escaped_path)
- return cmd
-
- def build_module_command(self, env_string, shebang, cmd, rm_tmp=None):
- cmd_parts = [env_string.strip(), shebang.replace("#!", "").strip(), cmd]
- new_cmd = " ".join(cmd_parts)
- if rm_tmp:
- new_cmd = '%s; rm -rf %s >/dev/null 2>&1' % (new_cmd, rm_tmp)
- return new_cmd
diff --git a/v1/ansible/utils/__init__.py b/v1/ansible/utils/__init__.py
deleted file mode 100644
index eb6fa2a712..0000000000
--- a/v1/ansible/utils/__init__.py
+++ /dev/null
@@ -1,1662 +0,0 @@
-# (c) 2012-2014, Michael DeHaan <michael.dehaan@gmail.com>
-#
-# This file is part of Ansible
-#
-# Ansible is free software: you can redistribute it and/or modify
-# it under the terms of the GNU General Public License as published by
-# the Free Software Foundation, either version 3 of the License, or
-# (at your option) any later version.
-#
-# Ansible is distributed in the hope that it will be useful,
-# but WITHOUT ANY WARRANTY; without even the implied warranty of
-# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
-# GNU General Public License for more details.
-#
-# You should have received a copy of the GNU General Public License
-# along with Ansible. If not, see <http://www.gnu.org/licenses/>.
-
-import errno
-import sys
-import re
-import os
-import shlex
-import yaml
-import copy
-import optparse
-import operator
-from ansible import errors
-from ansible import __version__
-from ansible.utils.display_functions import *
-from ansible.utils.plugins import *
-from ansible.utils.su_prompts import *
-from ansible.utils.hashing import secure_hash, secure_hash_s, checksum, checksum_s, md5, md5s
-from ansible.callbacks import display
-from ansible.module_utils.splitter import split_args, unquote
-from ansible.module_utils.basic import heuristic_log_sanitize
-from ansible.utils.unicode import to_bytes, to_unicode
-import ansible.constants as C
-import ast
-import time
-import StringIO
-import stat
-import termios
-import tty
-import pipes
-import random
-import difflib
-import warnings
-import traceback
-import getpass
-import sys
-import subprocess
-import contextlib
-
-from vault import VaultLib
-
-VERBOSITY=0
-
-MAX_FILE_SIZE_FOR_DIFF=1*1024*1024
-
-# caching the compilation of the regex used
-# to check for lookup calls within data
-LOOKUP_REGEX = re.compile(r'lookup\s*\(')
-PRINT_CODE_REGEX = re.compile(r'(?:{[{%]|[%}]})')
-CODE_REGEX = re.compile(r'(?:{%|%})')
-
-
-try:
- # simplejson can be much faster if it's available
- import simplejson as json
-except ImportError:
- import json
-
-try:
- from yaml import CSafeLoader as Loader
-except ImportError:
- from yaml import SafeLoader as Loader
-
-PASSLIB_AVAILABLE = False
-try:
- import passlib.hash
- PASSLIB_AVAILABLE = True
-except:
- pass
-
-try:
-    import builtins as builtin   # Python 3 name; no module is named plain 'builtin'
-except ImportError:
- import __builtin__ as builtin
-
-KEYCZAR_AVAILABLE=False
-try:
- try:
- # some versions of pycrypto may not have this?
- from Crypto.pct_warnings import PowmInsecureWarning
- except ImportError:
- PowmInsecureWarning = RuntimeWarning
-
- with warnings.catch_warnings(record=True) as warning_handler:
- warnings.simplefilter("error", PowmInsecureWarning)
- try:
- import keyczar.errors as key_errors
- from keyczar.keys import AesKey
- except PowmInsecureWarning:
- system_warning(
- "The version of gmp you have installed has a known issue regarding " + \
- "timing vulnerabilities when used with pycrypto. " + \
- "If possible, you should update it (i.e. yum update gmp)."
- )
- warnings.resetwarnings()
- warnings.simplefilter("ignore")
- import keyczar.errors as key_errors
- from keyczar.keys import AesKey
- KEYCZAR_AVAILABLE=True
-except ImportError:
- pass
-
-
-###############################################################
-# Abstractions around keyczar
-###############################################################
-
-def key_for_hostname(hostname):
- # fireball mode is an implementation of ansible firing up zeromq via SSH
- # to use no persistent daemons or key management
-
- if not KEYCZAR_AVAILABLE:
- raise errors.AnsibleError("python-keyczar must be installed on the control machine to use accelerated modes")
-
- key_path = os.path.expanduser(C.ACCELERATE_KEYS_DIR)
- if not os.path.exists(key_path):
- os.makedirs(key_path, mode=0700)
- os.chmod(key_path, int(C.ACCELERATE_KEYS_DIR_PERMS, 8))
- elif not os.path.isdir(key_path):
- raise errors.AnsibleError('ACCELERATE_KEYS_DIR is not a directory.')
-
- if stat.S_IMODE(os.stat(key_path).st_mode) != int(C.ACCELERATE_KEYS_DIR_PERMS, 8):
- raise errors.AnsibleError('Incorrect permissions on the private key directory. Use `chmod 0%o %s` to correct this issue, and make sure any of the keys files contained within that directory are set to 0%o' % (int(C.ACCELERATE_KEYS_DIR_PERMS, 8), C.ACCELERATE_KEYS_DIR, int(C.ACCELERATE_KEYS_FILE_PERMS, 8)))
-
- key_path = os.path.join(key_path, hostname)
-
- # use new AES keys every 2 hours, which means fireball must not allow running for longer either
- if not os.path.exists(key_path) or (time.time() - os.path.getmtime(key_path) > 60*60*2):
- key = AesKey.Generate()
- fd = os.open(key_path, os.O_WRONLY | os.O_CREAT, int(C.ACCELERATE_KEYS_FILE_PERMS, 8))
- fh = os.fdopen(fd, 'w')
- fh.write(str(key))
- fh.close()
- return key
- else:
- if stat.S_IMODE(os.stat(key_path).st_mode) != int(C.ACCELERATE_KEYS_FILE_PERMS, 8):
- raise errors.AnsibleError('Incorrect permissions on the key file for this host. Use `chmod 0%o %s` to correct this issue.' % (int(C.ACCELERATE_KEYS_FILE_PERMS, 8), key_path))
- fh = open(key_path)
- key = AesKey.Read(fh.read())
- fh.close()
- return key
-
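The two-hour key rotation above boils down to a small predicate; a self-contained sketch (key_is_stale is a hypothetical helper name, not from the code):

    import os
    import time

    def key_is_stale(key_path, max_age=60 * 60 * 2):
        # regenerate when the key file is missing or older than two hours
        return (not os.path.exists(key_path)
                or time.time() - os.path.getmtime(key_path) > max_age)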
-def encrypt(key, msg):
- return key.Encrypt(msg)
-
-def decrypt(key, msg):
- try:
- return key.Decrypt(msg)
- except key_errors.InvalidSignatureError:
- raise errors.AnsibleError("decryption failed")
-
-###############################################################
-# UTILITY FUNCTIONS FOR COMMAND LINE TOOLS
-###############################################################
-
-def read_vault_file(vault_password_file):
- """Read a vault password from a file or if executable, execute the script and
- retrieve password from STDOUT
- """
- if vault_password_file:
- this_path = os.path.realpath(os.path.expanduser(vault_password_file))
- if is_executable(this_path):
- try:
- # STDERR not captured to make it easier for users to prompt for input in their scripts
- p = subprocess.Popen(this_path, stdout=subprocess.PIPE)
- except OSError, e:
-                raise errors.AnsibleError("problem running %s (%s)" % (this_path, e))
- stdout, stderr = p.communicate()
- vault_pass = stdout.strip('\r\n')
- else:
- try:
- f = open(this_path, "rb")
- vault_pass=f.read().strip()
- f.close()
- except (OSError, IOError), e:
- raise errors.AnsibleError("Could not read %s: %s" % (this_path, e))
-
- return vault_pass
- else:
- return None
-
-def err(msg):
- ''' print an error message to stderr '''
-
- print >> sys.stderr, msg
-
-def exit(msg, rc=1):
- ''' quit with an error to stdout and a failure code '''
-
- err(msg)
- sys.exit(rc)
-
-def jsonify(result, format=False):
-    ''' format JSON output (compressed or uncompressed) '''
-
- if result is None:
- return "{}"
- result2 = result.copy()
- for key, value in result2.items():
- if type(value) is str:
- result2[key] = value.decode('utf-8', 'ignore')
-
- indent = None
- if format:
- indent = 4
-
- try:
- return json.dumps(result2, sort_keys=True, indent=indent, ensure_ascii=False)
- except UnicodeDecodeError:
- return json.dumps(result2, sort_keys=True, indent=indent)
-
-def write_tree_file(tree, hostname, buf):
- ''' write something into treedir/hostname '''
-
- # TODO: might be nice to append playbook runs per host in a similar way
- # in which case, we'd want append mode.
- path = os.path.join(tree, hostname)
- fd = open(path, "w+")
- fd.write(buf)
- fd.close()
-
-def is_failed(result):
- ''' is a given JSON result a failed result? '''
-
- return ((result.get('rc', 0) != 0) or (result.get('failed', False) in [ True, 'True', 'true']))
-
-def is_changed(result):
- ''' is a given JSON result a changed result? '''
-
- return (result.get('changed', False) in [ True, 'True', 'true'])
-
-def check_conditional(conditional, basedir, inject, fail_on_undefined=False):
- from ansible.utils import template
-
- if conditional is None or conditional == '':
- return True
-
- if isinstance(conditional, list):
- for x in conditional:
- if not check_conditional(x, basedir, inject, fail_on_undefined=fail_on_undefined):
- return False
- return True
-
- if not isinstance(conditional, basestring):
- return conditional
-
- conditional = conditional.replace("jinja2_compare ","")
- # allow variable names
- if conditional in inject and '-' not in to_unicode(inject[conditional], nonstring='simplerepr'):
- conditional = to_unicode(inject[conditional], nonstring='simplerepr')
- conditional = template.template(basedir, conditional, inject, fail_on_undefined=fail_on_undefined)
- original = to_unicode(conditional, nonstring='simplerepr').replace("jinja2_compare ","")
- # a Jinja2 evaluation that results in something Python can eval!
- presented = "{%% if %s %%} True {%% else %%} False {%% endif %%}" % conditional
- conditional = template.template(basedir, presented, inject)
- val = conditional.strip()
- if val == presented:
- # the templating failed, meaning most likely a
- # variable was undefined. If we happened to be
- # looking for an undefined variable, return True,
- # otherwise fail
- if "is undefined" in conditional:
- return True
- elif "is defined" in conditional:
- return False
- else:
- raise errors.AnsibleError("error while evaluating conditional: %s" % original)
- elif val == "True":
- return True
- elif val == "False":
- return False
- else:
- raise errors.AnsibleError("unable to evaluate conditional: %s" % original)
-
-def is_executable(path):
- '''is the given path executable?'''
- return (stat.S_IXUSR & os.stat(path)[stat.ST_MODE]
- or stat.S_IXGRP & os.stat(path)[stat.ST_MODE]
- or stat.S_IXOTH & os.stat(path)[stat.ST_MODE])
-
-def unfrackpath(path):
- '''
- returns a path that is free of symlinks, environment
- variables, relative path traversals and symbols (~)
- example:
- '$HOME/../../var/mail' becomes '/var/spool/mail'
- '''
- return os.path.normpath(os.path.realpath(os.path.expandvars(os.path.expanduser(path))))
-
-def prepare_writeable_dir(tree,mode=0777):
- ''' make sure a directory exists and is writeable '''
-
- # modify the mode to ensure the owner at least
- # has read/write access to this directory
- mode |= 0700
-
- # make sure the tree path is always expanded
- # and normalized and free of symlinks
- tree = unfrackpath(tree)
-
- if not os.path.exists(tree):
- try:
- os.makedirs(tree, mode)
- except (IOError, OSError), e:
- raise errors.AnsibleError("Could not make dir %s: %s" % (tree, e))
- if not os.access(tree, os.W_OK):
- raise errors.AnsibleError("Cannot write to path %s" % tree)
- return tree
-
-def path_dwim(basedir, given):
- '''
- make relative paths work like folks expect.
- '''
-
- if given.startswith("'"):
- given = given[1:-1]
-
- if given.startswith("/"):
- return os.path.abspath(given)
- elif given.startswith("~"):
- return os.path.abspath(os.path.expanduser(given))
- else:
- if basedir is None:
- basedir = "."
- return os.path.abspath(os.path.join(basedir, given))
-
-def path_dwim_relative(original, dirname, source, playbook_base, check=True):
-    ''' find a file in a directory named dirname, one level up from the given original file '''
- # (used by roles code)
-
- from ansible.utils import template
-
-
- basedir = os.path.dirname(original)
- if os.path.islink(basedir):
- basedir = unfrackpath(basedir)
- template2 = os.path.join(basedir, dirname, source)
- else:
- template2 = os.path.join(basedir, '..', dirname, source)
- source2 = path_dwim(basedir, template2)
- if os.path.exists(source2):
- return source2
- obvious_local_path = path_dwim(playbook_base, source)
- if os.path.exists(obvious_local_path):
- return obvious_local_path
- if check:
- raise errors.AnsibleError("input file not found at %s or %s" % (source2, obvious_local_path))
- return source2 # which does not exist
-
-def repo_url_to_role_name(repo_url):
- # gets the role name out of a repo like
-    # "http://git.example.com/repos/repo.git" => "repo"
-
- if '://' not in repo_url and '@' not in repo_url:
- return repo_url
- trailing_path = repo_url.split('/')[-1]
- if trailing_path.endswith('.git'):
- trailing_path = trailing_path[:-4]
- if trailing_path.endswith('.tar.gz'):
- trailing_path = trailing_path[:-7]
- if ',' in trailing_path:
- trailing_path = trailing_path.split(',')[0]
- return trailing_path
-
-
-def role_spec_parse(role_spec):
- # takes a repo and a version like
- # git+http://git.example.com/repos/repo.git,v1.0
-    # and returns a dict of properties such as:
- # {
- # 'scm': 'git',
- # 'src': 'http://git.example.com/repos/repo.git',
- # 'version': 'v1.0',
- # 'name': 'repo'
- # }
-
- role_spec = role_spec.strip()
- role_version = ''
- default_role_versions = dict(git='master', hg='tip')
- if role_spec == "" or role_spec.startswith("#"):
- return (None, None, None, None)
-
- tokens = [s.strip() for s in role_spec.split(',')]
-
- # assume https://github.com URLs are git+https:// URLs and not
- # tarballs unless they end in '.zip'
- if 'github.com/' in tokens[0] and not tokens[0].startswith("git+") and not tokens[0].endswith('.tar.gz'):
- tokens[0] = 'git+' + tokens[0]
-
- if '+' in tokens[0]:
- (scm, role_url) = tokens[0].split('+')
- else:
- scm = None
- role_url = tokens[0]
- if len(tokens) >= 2:
- role_version = tokens[1]
- if len(tokens) == 3:
- role_name = tokens[2]
- else:
- role_name = repo_url_to_role_name(tokens[0])
- if scm and not role_version:
- role_version = default_role_versions.get(scm, '')
- return dict(scm=scm, src=role_url, version=role_version, name=role_name)
-
-
-def role_yaml_parse(role):
- if 'role' in role:
- # Old style: {role: "galaxy.role,version,name", other_vars: "here" }
- role_info = role_spec_parse(role['role'])
- if isinstance(role_info, dict):
- # Warning: Slight change in behaviour here. name may be being
- # overloaded. Previously, name was only a parameter to the role.
- # Now it is both a parameter to the role and the name that
- # ansible-galaxy will install under on the local system.
- if 'name' in role and 'name' in role_info:
- del role_info['name']
- role.update(role_info)
- else:
- # New style: { src: 'galaxy.role,version,name', other_vars: "here" }
- if 'github.com' in role["src"] and 'http' in role["src"] and '+' not in role["src"] and not role["src"].endswith('.tar.gz'):
- role["src"] = "git+" + role["src"]
-
- if '+' in role["src"]:
- (scm, src) = role["src"].split('+')
- role["scm"] = scm
- role["src"] = src
-
- if 'name' not in role:
- role["name"] = repo_url_to_role_name(role["src"])
-
- if 'version' not in role:
- role['version'] = ''
-
- if 'scm' not in role:
- role['scm'] = None
-
- return role
-
-
-def json_loads(data):
- ''' parse a JSON string and return a data structure '''
- try:
- loaded = json.loads(data)
- except ValueError,e:
- raise errors.AnsibleError("Unable to read provided data as JSON: %s" % str(e))
-
- return loaded
-
-def _clean_data(orig_data, from_remote=False, from_inventory=False):
- ''' remove jinja2 template tags from a string '''
-
- if not isinstance(orig_data, basestring):
- return orig_data
-
- # when the data is marked as having come from a remote, we always
- # replace any print blocks (ie. {{var}}), however when marked as coming
- # from inventory we only replace print blocks that contain a call to
-    # a lookup plugin (ie. {{lookup('foo','bar')}})
- replace_prints = from_remote or (from_inventory and '{{' in orig_data and LOOKUP_REGEX.search(orig_data) is not None)
-
- regex = PRINT_CODE_REGEX if replace_prints else CODE_REGEX
-
- with contextlib.closing(StringIO.StringIO(orig_data)) as data:
- # these variables keep track of opening block locations, as we only
- # want to replace matched pairs of print/block tags
- print_openings = []
- block_openings = []
- for mo in regex.finditer(orig_data):
- token = mo.group(0)
- token_start = mo.start(0)
-
- if token[0] == '{':
- if token == '{%':
- block_openings.append(token_start)
- elif token == '{{':
- print_openings.append(token_start)
-
- elif token[1] == '}':
- prev_idx = None
- if token == '%}' and block_openings:
- prev_idx = block_openings.pop()
- elif token == '}}' and print_openings:
- prev_idx = print_openings.pop()
-
- if prev_idx is not None:
- # replace the opening
- data.seek(prev_idx, os.SEEK_SET)
- data.write('{#')
- # replace the closing
- data.seek(token_start, os.SEEK_SET)
- data.write('#}')
-
- else:
- assert False, 'Unhandled regex match'
-
- return data.getvalue()
-
-def _clean_data_struct(orig_data, from_remote=False, from_inventory=False):
- '''
- walk a complex data structure, and use _clean_data() to
- remove any template tags that may exist
- '''
- if not from_remote and not from_inventory:
-        raise errors.AnsibleError("when cleaning data, you must specify either from_remote or from_inventory")
- if isinstance(orig_data, dict):
- data = orig_data.copy()
- for key in data:
- new_key = _clean_data_struct(key, from_remote, from_inventory)
- new_val = _clean_data_struct(data[key], from_remote, from_inventory)
- if key != new_key:
- del data[key]
- data[new_key] = new_val
- elif isinstance(orig_data, list):
- data = orig_data[:]
- for i in range(0, len(data)):
- data[i] = _clean_data_struct(data[i], from_remote, from_inventory)
- elif isinstance(orig_data, basestring):
- data = _clean_data(orig_data, from_remote, from_inventory)
- else:
- data = orig_data
- return data
-
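A rough standalone illustration of the neutralization performed by _clean_data (simplified to a single regex substitution on print blocks; the deleted code instead pairs opening and closing tags through a StringIO buffer):

    import re

    def neutralize_prints(s):
        # turn {{ ... }} print blocks into inert {# ... #} comments
        return re.sub(r'\{\{(.*?)\}\}', r'{#\1#}', s)

    print(neutralize_prints("msg is {{ lookup('pipe', 'id') }}"))
    # msg is {# lookup('pipe', 'id') #}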
-def parse_json(raw_data, from_remote=False, from_inventory=False, no_exceptions=False):
- ''' this version for module return data only '''
-
- orig_data = raw_data
-
- # ignore stuff like tcgetattr spewage or other warnings
- data = filter_leading_non_json_lines(raw_data)
-
- try:
- results = json.loads(data)
- except:
- if no_exceptions:
- return dict(failed=True, parsed=False, msg=raw_data)
- else:
- raise
-
- if from_remote:
- results = _clean_data_struct(results, from_remote, from_inventory)
-
- return results
-
-def serialize_args(args):
- '''
- Flattens a dictionary args to a k=v string
- '''
- module_args = ""
- for (k,v) in args.iteritems():
- if isinstance(v, basestring):
- module_args = "%s=%s %s" % (k, pipes.quote(v), module_args)
- elif isinstance(v, bool):
- module_args = "%s=%s %s" % (k, str(v), module_args)
- return module_args.strip()
-
-def merge_module_args(current_args, new_args):
- '''
- merges either a dictionary or string of k=v pairs with another string of k=v pairs,
- and returns a new k=v string without duplicates.
- '''
- if not isinstance(current_args, basestring):
- raise errors.AnsibleError("expected current_args to be a basestring")
- # we use parse_kv to split up the current args into a dictionary
- final_args = parse_kv(current_args)
- if isinstance(new_args, dict):
- final_args.update(new_args)
- elif isinstance(new_args, basestring):
- new_args_kv = parse_kv(new_args)
- final_args.update(new_args_kv)
- return serialize_args(final_args)
-
-def parse_yaml(data, path_hint=None):
- ''' convert a yaml string to a data structure. Also supports JSON, ssssssh!!!'''
-
- stripped_data = data.lstrip()
- loaded = None
- if stripped_data.startswith("{") or stripped_data.startswith("["):
- # since the line starts with { or [ we can infer this is a JSON document.
- try:
- loaded = json.loads(data)
- except ValueError, ve:
- if path_hint:
- raise errors.AnsibleError(path_hint + ": " + str(ve))
- else:
- raise errors.AnsibleError(str(ve))
- else:
- # else this is pretty sure to be a YAML document
- loaded = yaml.load(data, Loader=Loader)
-
- return loaded
-
-def process_common_errors(msg, probline, column):
- replaced = probline.replace(" ","")
-
- if ":{{" in replaced and "}}" in replaced:
- msg = msg + """
-This one looks easy to fix. YAML thought it was looking for the start of a
-hash/dictionary and was confused to see a second "{". Most likely this was
-meant to be an ansible template evaluation instead, so we have to give the
-parser a small hint that we wanted a string instead. The solution here is to
-just quote the entire value.
-
-For instance, if the original line was:
-
- app_path: {{ base_path }}/foo
-
-It should be written as:
-
- app_path: "{{ base_path }}/foo"
-"""
- return msg
-
-    elif len(probline) > 1 and len(probline) > column and probline[column] == ":" and probline.count(':') > 1:
- msg = msg + """
-This one looks easy to fix. There seems to be an extra unquoted colon in the line
-and this is confusing the parser. It was only expecting to find one free
-colon. The solution is to add some quotes around the colon, or to quote the
-entire line after the first colon.
-
-For instance, if the original line was:
-
- copy: src=file.txt dest=/path/filename:with_colon.txt
-
-It can be written as:
-
- copy: src=file.txt dest='/path/filename:with_colon.txt'
-
-Or:
-
- copy: 'src=file.txt dest=/path/filename:with_colon.txt'
-
-
-"""
- return msg
- else:
- parts = probline.split(":")
- if len(parts) > 1:
- middle = parts[1].strip()
- match = False
- unbalanced = False
- if middle.startswith("'") and not middle.endswith("'"):
- match = True
- elif middle.startswith('"') and not middle.endswith('"'):
- match = True
-            if len(middle) > 0 and middle[0] in [ '"', "'" ] and middle[-1] in [ '"', "'" ] and (probline.count("'") > 2 or probline.count('"') > 2):
- unbalanced = True
- if match:
- msg = msg + """
-This one looks easy to fix. It seems that there is a value started
-with a quote, and the YAML parser is expecting to see the line ended
-with the same kind of quote. For instance:
-
- when: "ok" in result.stdout
-
-Could be written as:
-
- when: '"ok" in result.stdout'
-
-or equivalently:
-
- when: "'ok' in result.stdout"
-
-"""
- return msg
-
- if unbalanced:
- msg = msg + """
-We could be wrong, but this one looks like it might be an issue with
-unbalanced quotes. If starting a value with a quote, make sure the
-line ends with the same set of quotes. For instance this arbitrary
-example:
-
- foo: "bad" "wolf"
-
-Could be written as:
-
- foo: '"bad" "wolf"'
-
-"""
- return msg
-
- return msg
-
-def process_yaml_error(exc, data, path=None, show_content=True):
- if hasattr(exc, 'problem_mark'):
- mark = exc.problem_mark
- if show_content:
- if mark.line -1 >= 0:
- before_probline = data.split("\n")[mark.line-1]
- else:
- before_probline = ''
- probline = data.split("\n")[mark.line]
- arrow = " " * mark.column + "^"
- msg = """Syntax Error while loading YAML script, %s
-Note: The error may actually appear before this position: line %s, column %s
-
-%s
-%s
-%s""" % (path, mark.line + 1, mark.column + 1, before_probline, probline, arrow)
-
- unquoted_var = None
- if '{{' in probline and '}}' in probline:
-                if '"{{' not in probline and "'{{" not in probline:
- unquoted_var = True
-
- if not unquoted_var:
- msg = process_common_errors(msg, probline, mark.column)
- else:
- msg = msg + """
-We could be wrong, but this one looks like it might be an issue with
-missing quotes. Always quote template expression brackets when they
-start a value. For instance:
-
- with_items:
- - {{ foo }}
-
-Should be written as:
-
- with_items:
- - "{{ foo }}"
-
-"""
- else:
- # most likely displaying a file with sensitive content,
- # so don't show any of the actual lines of yaml just the
- # line number itself
- msg = """Syntax error while loading YAML script, %s
-The error appears to have been on line %s, column %s, but may actually
-be before there depending on the exact syntax problem.
-""" % (path, mark.line + 1, mark.column + 1)
-
- else:
- # No problem markers means we have to throw a generic
- # "stuff messed up" type message. Sry bud.
- if path:
- msg = "Could not parse YAML. Check over %s again." % path
- else:
- msg = "Could not parse YAML."
- raise errors.AnsibleYAMLValidationFailed(msg)
-
-
-def parse_yaml_from_file(path, vault_password=None):
- ''' convert a yaml file to a data structure '''
-
- data = None
- show_content = True
-
- try:
- data = open(path).read()
- except IOError:
-        raise errors.AnsibleError("could not read file: %s" % path)
-
- vault = VaultLib(password=vault_password)
- if vault.is_encrypted(data):
- # if the file is encrypted and no password was specified,
- # the decrypt call would throw an error, but we check first
- # since the decrypt function doesn't know the file name
- if vault_password is None:
- raise errors.AnsibleError("A vault password must be specified to decrypt %s" % path)
- data = vault.decrypt(data)
- show_content = False
-
- try:
- return parse_yaml(data, path_hint=path)
- except yaml.YAMLError, exc:
- process_yaml_error(exc, data, path, show_content)
-
-def parse_kv(args):
- ''' convert a string of key/value items to a dict '''
- options = {}
- if args is not None:
- try:
- vargs = split_args(args)
- except ValueError, ve:
- if 'no closing quotation' in str(ve).lower():
- raise errors.AnsibleError("error parsing argument string, try quoting the entire line.")
- else:
- raise
- for x in vargs:
- if "=" in x:
- k, v = x.split("=",1)
- options[k.strip()] = unquote(v.strip())
- return options
-
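Approximate behaviour of parse_kv, using shlex in place of ansible's split_args/unquote (so some quoting edge cases differ slightly):

    import shlex

    def parse_kv_sketch(args):
        options = {}
        for x in shlex.split(args):
            if "=" in x:
                k, v = x.split("=", 1)
                options[k.strip()] = v.strip()
        return options

    print(parse_kv_sketch('src=file.txt dest="/tmp/dest dir" mode=0644'))
    # {'src': 'file.txt', 'dest': '/tmp/dest dir', 'mode': '0644'}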
-def _validate_both_dicts(a, b):
-
- if not (isinstance(a, dict) and isinstance(b, dict)):
- raise errors.AnsibleError(
- "failed to combine variables, expected dicts but got a '%s' and a '%s'" % (type(a).__name__, type(b).__name__)
- )
-
-def merge_hash(a, b):
- ''' recursively merges hash b into a
- keys from b take precedence over keys from a '''
-
- result = {}
-
- # we check here as well as in combine_vars() since this
- # function can work recursively with nested dicts
- _validate_both_dicts(a, b)
-
- for dicts in a, b:
- # next, iterate over b keys and values
- for k, v in dicts.iteritems():
- # if there's already such key in a
- # and that key contains dict
- if k in result and isinstance(result[k], dict):
- # merge those dicts recursively
- result[k] = merge_hash(a[k], v)
- else:
- # otherwise, just copy a value from b to a
- result[k] = v
-
- return result
-
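The precedence rule is easiest to see with an example; a Python-3-compatible sketch of the same recursion:

    def merge_hash_sketch(a, b):
        # keys from b win; nested dicts merge instead of being replaced
        result = dict(a)
        for k, v in b.items():
            if k in result and isinstance(result[k], dict) and isinstance(v, dict):
                result[k] = merge_hash_sketch(result[k], v)
            else:
                result[k] = v
        return result

    a = {'pkg': {'state': 'present', 'name': 'nginx'}, 'port': 80}
    b = {'pkg': {'state': 'latest'}, 'port': 8080}
    print(merge_hash_sketch(a, b))
    # {'pkg': {'state': 'latest', 'name': 'nginx'}, 'port': 8080}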
-def default(value, function):
- ''' syntactic sugar around lazy evaluation of defaults '''
- if value is None:
- return function()
- return value
-
-
-def _git_repo_info(repo_path):
- ''' returns a string containing git branch, commit id and commit date '''
- result = None
- if os.path.exists(repo_path):
- # Check if the .git is a file. If it is a file, it means that we are in a submodule structure.
- if os.path.isfile(repo_path):
- try:
- gitdir = yaml.safe_load(open(repo_path)).get('gitdir')
-                # the gitdir entry in the .git file may be an absolute path
- if os.path.isabs(gitdir):
- repo_path = gitdir
- else:
- repo_path = os.path.join(repo_path[:-4], gitdir)
- except (IOError, AttributeError):
- return ''
- f = open(os.path.join(repo_path, "HEAD"))
- branch = f.readline().split('/')[-1].rstrip("\n")
- f.close()
- branch_path = os.path.join(repo_path, "refs", "heads", branch)
- if os.path.exists(branch_path):
- f = open(branch_path)
- commit = f.readline()[:10]
- f.close()
- else:
- # detached HEAD
- commit = branch[:10]
- branch = 'detached HEAD'
- branch_path = os.path.join(repo_path, "HEAD")
-
- date = time.localtime(os.stat(branch_path).st_mtime)
- if time.daylight == 0:
- offset = time.timezone
- else:
- offset = time.altzone
- result = "({0} {1}) last updated {2} (GMT {3:+04d})".format(branch, commit,
- time.strftime("%Y/%m/%d %H:%M:%S", date), offset / -36)
- else:
- result = ''
- return result
-
-
-def _gitinfo():
- basedir = os.path.join(os.path.dirname(__file__), '..', '..', '..')
- repo_path = os.path.join(basedir, '.git')
- result = _git_repo_info(repo_path)
- submodules = os.path.join(basedir, '.gitmodules')
- if not os.path.exists(submodules):
- return result
- f = open(submodules)
- for line in f:
- tokens = line.strip().split(' ')
- if tokens[0] == 'path':
- submodule_path = tokens[2]
-            submodule_info = _git_repo_info(os.path.join(basedir, submodule_path, '.git'))
- if not submodule_info:
- submodule_info = ' not found - use git submodule update --init ' + submodule_path
- result += "\n {0}: {1}".format(submodule_path, submodule_info)
- f.close()
- return result
-
-
-def version(prog):
- result = "{0} {1}".format(prog, __version__)
- gitinfo = _gitinfo()
- if gitinfo:
- result = result + " {0}".format(gitinfo)
- result = result + "\n configured module search path = %s" % C.DEFAULT_MODULE_PATH
- return result
-
-def version_info(gitinfo=False):
- if gitinfo:
-        # expensive call, use with care
- ansible_version_string = version('')
- else:
- ansible_version_string = __version__
- ansible_version = ansible_version_string.split()[0]
- ansible_versions = ansible_version.split('.')
- for counter in range(len(ansible_versions)):
- if ansible_versions[counter] == "":
- ansible_versions[counter] = 0
- try:
- ansible_versions[counter] = int(ansible_versions[counter])
- except:
- pass
- if len(ansible_versions) < 3:
- for counter in range(len(ansible_versions), 3):
- ansible_versions.append(0)
- return {'string': ansible_version_string.strip(),
- 'full': ansible_version,
- 'major': ansible_versions[0],
- 'minor': ansible_versions[1],
- 'revision': ansible_versions[2]}
-
-def getch():
- ''' read in a single character '''
- fd = sys.stdin.fileno()
- old_settings = termios.tcgetattr(fd)
- try:
- tty.setraw(sys.stdin.fileno())
- ch = sys.stdin.read(1)
- finally:
- termios.tcsetattr(fd, termios.TCSADRAIN, old_settings)
- return ch
-
-def sanitize_output(arg_string):
- ''' strips private info out of a string '''
-
- private_keys = ('password', 'login_password')
-
- output = []
- for part in arg_string.split():
- try:
- (k, v) = part.split('=', 1)
- except ValueError:
- v = heuristic_log_sanitize(part)
- output.append(v)
- continue
-
- if k in private_keys:
- v = 'VALUE_HIDDEN'
- else:
- v = heuristic_log_sanitize(v)
- output.append('%s=%s' % (k, v))
-
- output = ' '.join(output)
- return output
-
-
-####################################################################
-# option handling code for /usr/bin/ansible and ansible-playbook
-# below this line
-
-class SortedOptParser(optparse.OptionParser):
- '''Optparser which sorts the options by opt before outputting --help'''
-
- def format_help(self, formatter=None):
- self.option_list.sort(key=operator.methodcaller('get_opt_string'))
- return optparse.OptionParser.format_help(self, formatter=None)
-
-def increment_debug(option, opt, value, parser):
- global VERBOSITY
- VERBOSITY += 1
-
-def base_parser(constants=C, usage="", output_opts=False, runas_opts=False,
- async_opts=False, connect_opts=False, subset_opts=False, check_opts=False, diff_opts=False):
- ''' create an options parser for any ansible script '''
-
- parser = SortedOptParser(usage, version=version("%prog"))
- parser.add_option('-v','--verbose', default=False, action="callback",
- callback=increment_debug, help="verbose mode (-vvv for more, -vvvv to enable connection debugging)")
-
- parser.add_option('-f','--forks', dest='forks', default=constants.DEFAULT_FORKS, type='int',
- help="specify number of parallel processes to use (default=%s)" % constants.DEFAULT_FORKS)
- parser.add_option('-i', '--inventory-file', dest='inventory',
- help="specify inventory host file (default=%s)" % constants.DEFAULT_HOST_LIST,
- default=constants.DEFAULT_HOST_LIST)
- parser.add_option('-e', '--extra-vars', dest="extra_vars", action="append",
- help="set additional variables as key=value or YAML/JSON", default=[])
- parser.add_option('-u', '--user', default=constants.DEFAULT_REMOTE_USER, dest='remote_user',
- help='connect as this user (default=%s)' % constants.DEFAULT_REMOTE_USER)
- parser.add_option('-k', '--ask-pass', default=False, dest='ask_pass', action='store_true',
- help='ask for SSH password')
- parser.add_option('--private-key', default=constants.DEFAULT_PRIVATE_KEY_FILE, dest='private_key_file',
- help='use this file to authenticate the connection')
- parser.add_option('--ask-vault-pass', default=False, dest='ask_vault_pass', action='store_true',
- help='ask for vault password')
- parser.add_option('--vault-password-file', default=constants.DEFAULT_VAULT_PASSWORD_FILE,
- dest='vault_password_file', help="vault password file")
- parser.add_option('--list-hosts', dest='listhosts', action='store_true',
- help='outputs a list of matching hosts; does not execute anything else')
- parser.add_option('-M', '--module-path', dest='module_path',
- help="specify path(s) to module library (default=%s)" % constants.DEFAULT_MODULE_PATH,
- default=None)
-
- if subset_opts:
- parser.add_option('-l', '--limit', default=constants.DEFAULT_SUBSET, dest='subset',
- help='further limit selected hosts to an additional pattern')
-
- parser.add_option('-T', '--timeout', default=constants.DEFAULT_TIMEOUT, type='int',
- dest='timeout',
- help="override the SSH timeout in seconds (default=%s)" % constants.DEFAULT_TIMEOUT)
-
- if output_opts:
- parser.add_option('-o', '--one-line', dest='one_line', action='store_true',
- help='condense output')
- parser.add_option('-t', '--tree', dest='tree', default=None,
- help='log output to this directory')
-
- if runas_opts:
- # priv user defaults to root later on to enable detecting when this option was given here
- parser.add_option('-K', '--ask-sudo-pass', default=constants.DEFAULT_ASK_SUDO_PASS, dest='ask_sudo_pass', action='store_true',
- help='ask for sudo password (deprecated, use become)')
- parser.add_option('--ask-su-pass', default=constants.DEFAULT_ASK_SU_PASS, dest='ask_su_pass', action='store_true',
- help='ask for su password (deprecated, use become)')
- parser.add_option("-s", "--sudo", default=constants.DEFAULT_SUDO, action="store_true", dest='sudo',
- help="run operations with sudo (nopasswd) (deprecated, use become)")
- parser.add_option('-U', '--sudo-user', dest='sudo_user', default=None,
- help='desired sudo user (default=root) (deprecated, use become)')
- parser.add_option('-S', '--su', default=constants.DEFAULT_SU, action='store_true',
- help='run operations with su (deprecated, use become)')
- parser.add_option('-R', '--su-user', default=None,
- help='run operations with su as this user (default=%s) (deprecated, use become)' % constants.DEFAULT_SU_USER)
-
- # consolidated privilege escalation (become)
- parser.add_option("-b", "--become", default=constants.DEFAULT_BECOME, action="store_true", dest='become',
- help="run operations with become (nopasswd implied)")
- parser.add_option('--become-method', dest='become_method', default=constants.DEFAULT_BECOME_METHOD, type='string',
- help="privilege escalation method to use (default=%s), valid choices: [ %s ]" % (constants.DEFAULT_BECOME_METHOD, ' | '.join(constants.BECOME_METHODS)))
- parser.add_option('--become-user', default=None, dest='become_user', type='string',
- help='run operations as this user (default=%s)' % constants.DEFAULT_BECOME_USER)
- parser.add_option('--ask-become-pass', default=False, dest='become_ask_pass', action='store_true',
- help='ask for privilege escalation password')
-
-
- if connect_opts:
- parser.add_option('-c', '--connection', dest='connection',
- default=constants.DEFAULT_TRANSPORT,
- help="connection type to use (default=%s)" % constants.DEFAULT_TRANSPORT)
-
- if async_opts:
- parser.add_option('-P', '--poll', default=constants.DEFAULT_POLL_INTERVAL, type='int',
- dest='poll_interval',
- help="set the poll interval if using -B (default=%s)" % constants.DEFAULT_POLL_INTERVAL)
- parser.add_option('-B', '--background', dest='seconds', type='int', default=0,
- help='run asynchronously, failing after X seconds (default=N/A)')
-
- if check_opts:
- parser.add_option("-C", "--check", default=False, dest='check', action='store_true',
- help="don't make any changes; instead, try to predict some of the changes that may occur"
- )
-
- if diff_opts:
- parser.add_option("-D", "--diff", default=False, dest='diff', action='store_true',
- help="when changing (small) files and templates, show the differences in those files; works great with --check"
- )
-
- return parser
-
-def parse_extra_vars(extra_vars_opts, vault_pass):
- extra_vars = {}
- for extra_vars_opt in extra_vars_opts:
- extra_vars_opt = to_unicode(extra_vars_opt)
- if extra_vars_opt.startswith(u"@"):
- # Argument is a YAML file (JSON is a subset of YAML)
- extra_vars = combine_vars(extra_vars, parse_yaml_from_file(extra_vars_opt[1:], vault_password=vault_pass))
- elif extra_vars_opt and extra_vars_opt[0] in u'[{':
- # Arguments as YAML
- extra_vars = combine_vars(extra_vars, parse_yaml(extra_vars_opt))
- else:
- # Arguments as Key-value
- extra_vars = combine_vars(extra_vars, parse_kv(extra_vars_opt))
- return extra_vars
-
-def ask_vault_passwords(ask_vault_pass=False, ask_new_vault_pass=False, confirm_vault=False, confirm_new=False):
-
- vault_pass = None
- new_vault_pass = None
-
- if ask_vault_pass:
- vault_pass = getpass.getpass(prompt="Vault password: ")
-
- if ask_vault_pass and confirm_vault:
- vault_pass2 = getpass.getpass(prompt="Confirm Vault password: ")
- if vault_pass != vault_pass2:
- raise errors.AnsibleError("Passwords do not match")
-
- if ask_new_vault_pass:
- new_vault_pass = getpass.getpass(prompt="New Vault password: ")
-
- if ask_new_vault_pass and confirm_new:
- new_vault_pass2 = getpass.getpass(prompt="Confirm New Vault password: ")
- if new_vault_pass != new_vault_pass2:
- raise errors.AnsibleError("Passwords do not match")
-
- # enforce no newline chars at the end of passwords
- if vault_pass:
- vault_pass = to_bytes(vault_pass, errors='strict', nonstring='simplerepr').strip()
- if new_vault_pass:
- new_vault_pass = to_bytes(new_vault_pass, errors='strict', nonstring='simplerepr').strip()
-
- return vault_pass, new_vault_pass
-
-def ask_passwords(ask_pass=False, become_ask_pass=False, ask_vault_pass=False, become_method=C.DEFAULT_BECOME_METHOD):
- sshpass = None
- becomepass = None
- vaultpass = None
- become_prompt = ''
-
- if ask_pass:
- sshpass = getpass.getpass(prompt="SSH password: ")
- become_prompt = "%s password[defaults to SSH password]: " % become_method.upper()
- if sshpass:
- sshpass = to_bytes(sshpass, errors='strict', nonstring='simplerepr')
- else:
- become_prompt = "%s password: " % become_method.upper()
-
- if become_ask_pass:
- becomepass = getpass.getpass(prompt=become_prompt)
- if ask_pass and becomepass == '':
- becomepass = sshpass
- if becomepass:
- becomepass = to_bytes(becomepass)
-
- if ask_vault_pass:
- vaultpass = getpass.getpass(prompt="Vault password: ")
- if vaultpass:
- vaultpass = to_bytes(vaultpass, errors='strict', nonstring='simplerepr').strip()
-
- return (sshpass, becomepass, vaultpass)
-
-
-def choose_pass_prompt(options):
-
- if options.ask_su_pass:
- return 'su'
- elif options.ask_sudo_pass:
- return 'sudo'
-
- return options.become_method
-
-def normalize_become_options(options):
-
- options.become_ask_pass = options.become_ask_pass or options.ask_sudo_pass or options.ask_su_pass or C.DEFAULT_BECOME_ASK_PASS
- options.become_user = options.become_user or options.sudo_user or options.su_user or C.DEFAULT_BECOME_USER
-
- if options.become:
- pass
- elif options.sudo:
- options.become = True
- options.become_method = 'sudo'
- elif options.su:
- options.become = True
- options.become_method = 'su'
-
-
-def do_encrypt(result, encrypt, salt_size=None, salt=None):
- if PASSLIB_AVAILABLE:
- try:
- crypt = getattr(passlib.hash, encrypt)
- except:
- raise errors.AnsibleError("passlib does not support '%s' algorithm" % encrypt)
-
- if salt_size:
- result = crypt.encrypt(result, salt_size=salt_size)
- elif salt:
- result = crypt.encrypt(result, salt=salt)
- else:
- result = crypt.encrypt(result)
- else:
- raise errors.AnsibleError("passlib must be installed to encrypt vars_prompt values")
-
- return result
-
-def last_non_blank_line(buf):
-
- all_lines = buf.splitlines()
- all_lines.reverse()
- for line in all_lines:
- if (len(line) > 0):
- return line
- # shouldn't occur unless there's no output
- return ""
-
-def filter_leading_non_json_lines(buf):
- '''
- used to avoid random output from SSH at the top of JSON output, like messages from
-    tcgetattr, or where dropbear spews MOTD on every single command (which is nuts).
-
-    need to filter out any leading line that does not start with '{' or '['.
-    filter only leading lines, since multiline JSON is valid.
- '''
-
- filtered_lines = StringIO.StringIO()
- stop_filtering = False
- for line in buf.splitlines():
- if stop_filtering or line.startswith('{') or line.startswith('['):
- stop_filtering = True
- filtered_lines.write(line + '\n')
- return filtered_lines.getvalue()
-
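An equivalent list-based sketch of the filter above, with a usage example:

    def filter_leading_non_json(buf):
        kept, started = [], False
        for line in buf.splitlines():
            if started or line.startswith('{') or line.startswith('['):
                started = True
                kept.append(line)
        return "\n".join(kept)

    print(filter_leading_non_json('Warning: host key added\n{"changed": false}'))
    # {"changed": false}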
-def boolean(value):
- val = str(value)
- if val.lower() in [ "true", "t", "y", "1", "yes" ]:
- return True
- else:
- return False
-
-def make_become_cmd(cmd, user, shell, method, flags=None, exe=None):
- """
- helper function for connection plugins to create privilege escalation commands
- """
-
- randbits = ''.join(chr(random.randint(ord('a'), ord('z'))) for x in xrange(32))
- success_key = 'BECOME-SUCCESS-%s' % randbits
- prompt = None
- becomecmd = None
-
- shell = shell or '$SHELL'
-
- if method == 'sudo':
- # Rather than detect if sudo wants a password this time, -k makes sudo always ask for
- # a password if one is required. Passing a quoted compound command to sudo (or sudo -s)
- # directly doesn't work, so we shellquote it with pipes.quote() and pass the quoted
- # string to the user's shell. We loop reading output until we see the randomly-generated
- # sudo prompt set with the -p option.
- prompt = '[sudo via ansible, key=%s] password: ' % randbits
- exe = exe or C.DEFAULT_SUDO_EXE
- becomecmd = '%s -k && %s %s -S -p "%s" -u %s %s -c %s' % \
- (exe, exe, flags or C.DEFAULT_SUDO_FLAGS, prompt, user, shell, pipes.quote('echo %s; %s' % (success_key, cmd)))
-
- elif method == 'su':
- exe = exe or C.DEFAULT_SU_EXE
- flags = flags or C.DEFAULT_SU_FLAGS
- becomecmd = '%s %s %s -c "%s -c %s"' % (exe, flags, user, shell, pipes.quote('echo %s; %s' % (success_key, cmd)))
-
- elif method == 'pbrun':
- prompt = 'assword:'
- exe = exe or 'pbrun'
- flags = flags or ''
- becomecmd = '%s -b -l %s -u %s "%s"' % (exe, flags, user, pipes.quote('echo %s; %s' % (success_key,cmd)))
-
- elif method == 'pfexec':
- exe = exe or 'pfexec'
- flags = flags or ''
-        # No user as it uses its own exec_attr to figure it out
- becomecmd = '%s %s "%s"' % (exe, flags, pipes.quote('echo %s; %s' % (success_key,cmd)))
-
- if becomecmd is None:
- raise errors.AnsibleError("Privilege escalation method not found: %s" % method)
-
- return (('%s -c ' % shell) + pipes.quote(becomecmd), prompt, success_key)
-
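The double layer of quoting is the subtle part: the inner echo-plus-command string is quoted once for the target user's shell, and the whole become command is quoted again for the connecting shell. A minimal sketch (the fixed success key, flags, and module path are stand-ins for the random and configured values above):

    try:
        from shlex import quote       # Python 3
    except ImportError:
        from pipes import quote       # Python 2

    success_key = 'BECOME-SUCCESS-abcdefgh'     # random at runtime
    cmd = '/usr/bin/python /tmp/module.py'
    inner = quote('echo %s; %s' % (success_key, cmd))
    becomecmd = 'sudo -k && sudo -S -p "[sudo via ansible] password: " -u root /bin/sh -c %s' % inner
    print('/bin/sh -c ' + quote(becomecmd))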
-
-def make_sudo_cmd(sudo_exe, sudo_user, executable, cmd):
- """
- helper function for connection plugins to create sudo commands
- """
- return make_become_cmd(cmd, sudo_user, executable, 'sudo', C.DEFAULT_SUDO_FLAGS, sudo_exe)
-
-
-def make_su_cmd(su_user, executable, cmd):
- """
- Helper function for connection plugins to create direct su commands
- """
- return make_become_cmd(cmd, su_user, executable, 'su', C.DEFAULT_SU_FLAGS, C.DEFAULT_SU_EXE)
-
-def get_diff(diff):
- # called by --diff usage in playbook and runner via callbacks
- # include names in diffs 'before' and 'after' and do diff -U 10
-
- try:
- with warnings.catch_warnings():
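The "{% if %} True {% else %} False {% endif %}" trick used by check_conditional can be reproduced in isolation; a sketch using plain Jinja2 (no ansible templating, so filters and lookups are unavailable):

    import jinja2

    def eval_conditional(cond, variables):
        # render the conditional inside an if/else and compare the output
        presented = "{%% if %s %%} True {%% else %%} False {%% endif %%}" % cond
        return jinja2.Template(presented).render(**variables).strip() == "True"

    print(eval_conditional("x > 3 and y == 'up'", {"x": 5, "y": "up"}))   # True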
- warnings.simplefilter('ignore')
- ret = []
- if 'dst_binary' in diff:
- ret.append("diff skipped: destination file appears to be binary\n")
- if 'src_binary' in diff:
- ret.append("diff skipped: source file appears to be binary\n")
- if 'dst_larger' in diff:
- ret.append("diff skipped: destination file size is greater than %d\n" % diff['dst_larger'])
- if 'src_larger' in diff:
- ret.append("diff skipped: source file size is greater than %d\n" % diff['src_larger'])
- if 'before' in diff and 'after' in diff:
- if 'before_header' in diff:
- before_header = "before: %s" % diff['before_header']
- else:
- before_header = 'before'
- if 'after_header' in diff:
- after_header = "after: %s" % diff['after_header']
- else:
- after_header = 'after'
- differ = difflib.unified_diff(to_unicode(diff['before']).splitlines(True), to_unicode(diff['after']).splitlines(True), before_header, after_header, '', '', 10)
- for line in list(differ):
- ret.append(line)
- return u"".join(ret)
- except UnicodeDecodeError:
- return ">> the files are different, but the diff library cannot compare unicode strings"
-
-def is_list_of_strings(items):
- for x in items:
- if not isinstance(x, basestring):
- return False
- return True
-
-def list_union(a, b):
- result = []
- for x in a:
- if x not in result:
- result.append(x)
- for x in b:
- if x not in result:
- result.append(x)
- return result
-
-def list_intersection(a, b):
- result = []
- for x in a:
- if x in b and x not in result:
- result.append(x)
- return result
-
-def list_difference(a, b):
- result = []
- for x in a:
- if x not in b and x not in result:
- result.append(x)
- for x in b:
- if x not in a and x not in result:
- result.append(x)
- return result
-
-def contains_vars(data):
- '''
- returns True if the data contains a variable pattern
- '''
- return "$" in data or "{{" in data
-
-def safe_eval(expr, locals={}, include_exceptions=False):
- '''
- This is intended for allowing things like:
- with_items: a_list_variable
-
- Where Jinja2 would return a string but we do not want to allow it to
- call functions (outside of Jinja2, where the env is constrained). If
- the input data to this function came from an untrusted (remote) source,
- it should first be run through _clean_data_struct() to ensure the data
- is further sanitized prior to evaluation.
-
- Based on:
- http://stackoverflow.com/questions/12523516/using-ast-and-whitelists-to-make-pythons-eval-safe
- '''
-
- # this is the whitelist of AST nodes we are going to
- # allow in the evaluation. Any node type other than
- # those listed here will raise an exception in our custom
- # visitor class defined below.
- SAFE_NODES = set(
- (
- ast.Add,
- ast.BinOp,
- ast.Call,
- ast.Compare,
- ast.Dict,
- ast.Div,
- ast.Expression,
- ast.List,
- ast.Load,
- ast.Mult,
- ast.Num,
- ast.Name,
- ast.Str,
- ast.Sub,
- ast.Tuple,
- ast.UnaryOp,
- )
- )
-
- # AST node types were expanded after 2.6
- if not sys.version.startswith('2.6'):
-        # set.union() returns a new set, so the result must be assigned back
-        SAFE_NODES = SAFE_NODES.union(
-            set(
-                (ast.Set,)
-            )
-        )
-
- filter_list = []
- for filter in filter_loader.all():
- filter_list.extend(filter.filters().keys())
-
- CALL_WHITELIST = C.DEFAULT_CALLABLE_WHITELIST + filter_list
-
- class CleansingNodeVisitor(ast.NodeVisitor):
- def generic_visit(self, node, inside_call=False):
- if type(node) not in SAFE_NODES:
- raise Exception("invalid expression (%s)" % expr)
- elif isinstance(node, ast.Call):
- inside_call = True
- elif isinstance(node, ast.Name) and inside_call:
- if hasattr(builtin, node.id) and node.id not in CALL_WHITELIST:
- raise Exception("invalid function: %s" % node.id)
- # iterate over all child nodes
- for child_node in ast.iter_child_nodes(node):
- self.generic_visit(child_node, inside_call)
-
- if not isinstance(expr, basestring):
- # already templated to a datastructure, perhaps?
- if include_exceptions:
- return (expr, None)
- return expr
-
- cnv = CleansingNodeVisitor()
- try:
- parsed_tree = ast.parse(expr, mode='eval')
- cnv.visit(parsed_tree)
- compiled = compile(parsed_tree, expr, 'eval')
- result = eval(compiled, {}, locals)
-
- if include_exceptions:
- return (result, None)
- else:
- return result
- except SyntaxError, e:
- # special handling for syntax errors, we just return
- # the expression string back as-is
- if include_exceptions:
- return (expr, None)
- return expr
- except Exception, e:
- if include_exceptions:
- return (expr, e)
- return expr
-
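A compressed, self-contained variant of the same whitelist idea (modern ast.Constant in place of the 2.x node names, and no call whitelist; literal_safe_eval is a hypothetical name):

    import ast

    SAFE = (ast.Expression, ast.BinOp, ast.UnaryOp, ast.Add, ast.Sub, ast.Mult,
            ast.Div, ast.USub, ast.List, ast.Tuple, ast.Dict, ast.Load, ast.Constant)

    def literal_safe_eval(expr):
        # walk the AST; refuse anything outside the whitelist by
        # returning the string unevaluated, like safe_eval above
        tree = ast.parse(expr, mode='eval')
        for node in ast.walk(tree):
            if not isinstance(node, SAFE):
                return expr
        return eval(compile(tree, expr, 'eval'), {}, {})

    print(literal_safe_eval("[1, 2] + [3]"))                   # [1, 2, 3]
    print(literal_safe_eval("__import__('os').system('id')"))  # returned as a string, never run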
-
-def listify_lookup_plugin_terms(terms, basedir, inject):
-
- from ansible.utils import template
-
- if isinstance(terms, basestring):
- # someone did:
- # with_items: alist
- # OR
- # with_items: {{ alist }}
-
- stripped = terms.strip()
- if not (stripped.startswith('{') or stripped.startswith('[')) and \
- not stripped.startswith("/") and \
- not stripped.startswith('set([') and \
- not LOOKUP_REGEX.search(terms):
- # if not already a list, get ready to evaluate with Jinja2
- # not sure why the "/" is in above code :)
- try:
- new_terms = template.template(basedir, "{{ %s }}" % terms, inject)
- if isinstance(new_terms, basestring) and "{{" in new_terms:
- pass
- else:
- terms = new_terms
- except:
- pass
-
- if '{' in terms or '[' in terms:
- # Jinja2 already evaluated a variable to a list.
- # Jinja2-ified list needs to be converted back to a real type
- # TODO: something a bit less heavy than eval
- return safe_eval(terms)
-
- if isinstance(terms, basestring):
- terms = [ terms ]
-
- return terms
-
-def combine_vars(a, b):
-
- _validate_both_dicts(a, b)
-
- if C.DEFAULT_HASH_BEHAVIOUR == "merge":
- return merge_hash(a, b)
- else:
- return dict(a.items() + b.items())
-
-def random_password(length=20, chars=C.DEFAULT_PASSWORD_CHARS):
- '''Return a random password string of length containing only chars.'''
-
- password = []
- while len(password) < length:
- new_char = os.urandom(1)
- if new_char in chars:
- password.append(new_char)
-
- return ''.join(password)
-
-def before_comment(msg):
- ''' what's the part of a string before a comment? '''
- msg = msg.replace("\#","**NOT_A_COMMENT**")
- msg = msg.split("#")[0]
- msg = msg.replace("**NOT_A_COMMENT**","#")
- return msg
-
-def load_vars(basepath, results, vault_password=None):
- """
- Load variables from any potential yaml filename combinations of basepath,
- returning result.
- """
-
- paths_to_check = [ "".join([basepath, ext])
- for ext in C.YAML_FILENAME_EXTENSIONS ]
-
- found_paths = []
-
- for path in paths_to_check:
- found, results = _load_vars_from_path(path, results, vault_password=vault_password)
- if found:
- found_paths.append(path)
-
-
- # disallow the potentially confusing situation that there are multiple
- # variable files for the same name. For example if both group_vars/all.yml
- # and group_vars/all.yaml
- if len(found_paths) > 1:
- raise errors.AnsibleError("Multiple variable files found. "
- "There should only be one. %s" % ( found_paths, ))
-
- return results
-
-## load variables from yaml files/dirs
-# e.g. host/group_vars
-#
-def _load_vars_from_path(path, results, vault_password=None):
- """
- Robustly access the file at path and load variables, carefully reporting
- errors in a friendly/informative way.
-
- Return the tuple (found, new_results, )
- """
-
- try:
- # in the case of a symbolic link, we want the stat of the link itself,
- # not its target
- pathstat = os.lstat(path)
- except os.error, err:
- # most common case is that nothing exists at that path.
- if err.errno == errno.ENOENT:
- return False, results
- # otherwise this is a condition we should report to the user
- raise errors.AnsibleError(
- "%s is not accessible: %s."
- " Please check its permissions." % ( path, err.strerror))
-
- # symbolic link
- if stat.S_ISLNK(pathstat.st_mode):
- try:
- target = os.path.realpath(path)
- except os.error, err2:
- raise errors.AnsibleError("The symbolic link at %s "
- "is not readable: %s. Please check its permissions."
- % (path, err2.strerror, ))
- # follow symbolic link chains by recursing, so we repeat the same
- # permissions checks above and provide useful errors.
- return _load_vars_from_path(target, results, vault_password)
-
- # directory
- if stat.S_ISDIR(pathstat.st_mode):
-
- # support organizing variables across multiple files in a directory
- return True, _load_vars_from_folder(path, results, vault_password=vault_password)
-
- # regular file
- elif stat.S_ISREG(pathstat.st_mode):
- data = parse_yaml_from_file(path, vault_password=vault_password)
- if data and type(data) != dict:
- raise errors.AnsibleError(
- "%s must be stored as a dictionary/hash" % path)
- elif data is None:
- data = {}
-
- # combine vars overrides by default but can be configured to do a
- # hash merge in settings
- results = combine_vars(results, data)
- return True, results
-
- # something else? could be a fifo, socket, device, etc.
- else:
- raise errors.AnsibleError("Expected a variable file or directory "
- "but found a non-file object at path %s" % (path, ))
-
-def _load_vars_from_folder(folder_path, results, vault_password=None):
- """
- Load all variables within a folder recursively.
- """
-
- # this function and _load_vars_from_path are mutually recursive
-
- try:
- names = os.listdir(folder_path)
- except os.error, err:
- raise errors.AnsibleError(
- "This folder cannot be listed: %s: %s."
- % ( folder_path, err.strerror))
-
- # evaluate files in a stable order rather than whatever order the
- # filesystem lists them.
- names.sort()
-
- # do not parse hidden files or dirs, e.g. .svn/
- paths = [os.path.join(folder_path, name) for name in names
- if not name.startswith('.')
- and os.path.splitext(name)[1] in C.YAML_FILENAME_EXTENSIONS]
- for path in paths:
- _found, results = _load_vars_from_path(path, results, vault_password=vault_password)
- return results
-
-def update_hash(hash, key, new_value):
- ''' used to avoid nested .update calls on the parent '''
-
- value = hash.get(key, {})
- value.update(new_value)
- hash[key] = value
-
-def censor_unlogged_data(data):
- '''
- used when the no_log: True attribute is passed to a task to keep data from a callback.
- NOT intended to prevent variable registration, but only things from showing up on
- screen
- '''
- new_data = {}
- for (x,y) in data.iteritems():
- if x in [ 'skipped', 'changed', 'failed', 'rc' ]:
- new_data[x] = y
- new_data['censored'] = 'results hidden due to no_log parameter'
- return new_data
-
-def check_mutually_exclusive_privilege(options, parser):
-
- # privilege escalation command line arguments need to be mutually exclusive
- if (options.su or options.su_user or options.ask_su_pass) and \
- (options.sudo or options.sudo_user or options.ask_sudo_pass) or \
- (options.su or options.su_user or options.ask_su_pass) and \
- (options.become or options.become_user or options.become_ask_pass) or \
- (options.sudo or options.sudo_user or options.ask_sudo_pass) and \
- (options.become or options.become_user or options.become_ask_pass):
-
- parser.error("Sudo arguments ('--sudo', '--sudo-user', and '--ask-sudo-pass') "
-                     "and su arguments ('--su', '--su-user', and '--ask-su-pass') "
- "and become arguments ('--become', '--become-user', and '--ask-become-pass')"
- " are exclusive of each other")
-
-
diff --git a/v1/ansible/utils/cmd_functions.py b/v1/ansible/utils/cmd_functions.py
deleted file mode 100644
index 6525260f10..0000000000
--- a/v1/ansible/utils/cmd_functions.py
+++ /dev/null
@@ -1,59 +0,0 @@
-# (c) 2012, Michael DeHaan <michael.dehaan@gmail.com>
-#
-# This file is part of Ansible
-#
-# Ansible is free software: you can redistribute it and/or modify
-# it under the terms of the GNU General Public License as published by
-# the Free Software Foundation, either version 3 of the License, or
-# (at your option) any later version.
-#
-# Ansible is distributed in the hope that it will be useful,
-# but WITHOUT ANY WARRANTY; without even the implied warranty of
-# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
-# GNU General Public License for more details.
-#
-# You should have received a copy of the GNU General Public License
-# along with Ansible. If not, see <http://www.gnu.org/licenses/>.
-
-import os
-import sys
-import shlex
-import subprocess
-import select
-
-def run_cmd(cmd, live=False, readsize=10):
-
- cmdargs = shlex.split(cmd)
- p = subprocess.Popen(cmdargs, stdout=subprocess.PIPE, stderr=subprocess.PIPE)
-
- stdout = ''
- stderr = ''
- rpipes = [p.stdout, p.stderr]
- while True:
- rfd, wfd, efd = select.select(rpipes, [], rpipes, 1)
-
- if p.stdout in rfd:
- dat = os.read(p.stdout.fileno(), readsize)
- if live:
- sys.stdout.write(dat)
- stdout += dat
- if dat == '':
- rpipes.remove(p.stdout)
- if p.stderr in rfd:
- dat = os.read(p.stderr.fileno(), readsize)
- stderr += dat
- if live:
- sys.stdout.write(dat)
- if dat == '':
- rpipes.remove(p.stderr)
- # only break out if we've emptied the pipes, or there is nothing to
- # read from and the process has finished.
- if (not rpipes or not rfd) and p.poll() is not None:
- break
- # Calling wait while there are still pipes to read can cause a lock
-        elif not rpipes and p.poll() is None:
- p.wait()
-
- return p.returncode, stdout, stderr
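Typical usage on a POSIX host, assuming the v1 module above is importable:

    from ansible.utils.cmd_functions import run_cmd

    rc, out, err = run_cmd('echo hello')
    print(rc, out.strip())        # 0 hello

    # live=True echoes stdout as it arrives while still capturing it
    rc, out, err = run_cmd('uname -a', live=True)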
diff --git a/v1/ansible/utils/display_functions.py b/v1/ansible/utils/display_functions.py
deleted file mode 100644
index 2233c81657..0000000000
--- a/v1/ansible/utils/display_functions.py
+++ /dev/null
@@ -1,63 +0,0 @@
-# (c) 2014, Michael DeHaan <michael.dehaan@gmail.com>
-#
-# This file is part of Ansible
-#
-# Ansible is free software: you can redistribute it and/or modify
-# it under the terms of the GNU General Public License as published by
-# the Free Software Foundation, either version 3 of the License, or
-# (at your option) any later version.
-#
-# Ansible is distributed in the hope that it will be useful,
-# but WITHOUT ANY WARRANTY; without even the implied warranty of
-# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
-# GNU General Public License for more details.
-#
-# You should have received a copy of the GNU General Public License
-# along with Ansible. If not, see <http://www.gnu.org/licenses/>.
-
-import textwrap
-
-from ansible import constants as C
-from ansible import errors
-from ansible.callbacks import display
-
-__all__ = ['deprecated', 'warning', 'system_warning']
-
-# list of all deprecation messages to prevent duplicate display
-deprecations = {}
-warns = {}
-
-def deprecated(msg, version, removed=False):
- ''' used to print out a deprecation message.'''
-
- if not removed and not C.DEPRECATION_WARNINGS:
- return
-
- if not removed:
- if version:
- new_msg = "\n[DEPRECATION WARNING]: %s. This feature will be removed in version %s." % (msg, version)
- else:
- new_msg = "\n[DEPRECATION WARNING]: %s. This feature will be removed in a future release." % (msg)
- new_msg = new_msg + " Deprecation warnings can be disabled by setting deprecation_warnings=False in ansible.cfg.\n\n"
- else:
- raise errors.AnsibleError("[DEPRECATED]: %s. Please update your playbooks." % msg)
-
- wrapped = textwrap.wrap(new_msg, 79)
- new_msg = "\n".join(wrapped) + "\n"
-
- if new_msg not in deprecations:
- display(new_msg, color='purple', stderr=True)
- deprecations[new_msg] = 1
-
-def warning(msg):
- new_msg = "\n[WARNING]: %s" % msg
- wrapped = textwrap.wrap(new_msg, 79)
- new_msg = "\n".join(wrapped) + "\n"
- if new_msg not in warns:
- display(new_msg, color='bright purple', stderr=True)
- warns[new_msg] = 1
-
-def system_warning(msg):
- if C.SYSTEM_WARNINGS:
- warning(msg)
-
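
A brief usage sketch (hypothetical import path, assuming the module above is importable): every message is wrapped at 79 columns and deduplicated, so repeated calls print only once per run.

    from ansible.utils.display_functions import deprecated, warning, system_warning

    deprecated("'foo' is deprecated, use 'bar' instead", version='1.9')
    warning("host key checking is disabled")      # printed once
    warning("host key checking is disabled")      # suppressed on repeat
    system_warning("world-writable cwd ignored")  # shown only if system_warnings is enabled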
diff --git a/v1/ansible/utils/hashing.py b/v1/ansible/utils/hashing.py
deleted file mode 100644
index a7d142e5bd..0000000000
--- a/v1/ansible/utils/hashing.py
+++ /dev/null
@@ -1,91 +0,0 @@
-# (c) 2012-2014, Michael DeHaan <michael.dehaan@gmail.com>
-#
-# This file is part of Ansible
-#
-# Ansible is free software: you can redistribute it and/or modify
-# it under the terms of the GNU General Public License as published by
-# the Free Software Foundation, either version 3 of the License, or
-# (at your option) any later version.
-#
-# Ansible is distributed in the hope that it will be useful,
-# but WITHOUT ANY WARRANTY; without even the implied warranty of
-# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
-# GNU General Public License for more details.
-#
-# You should have received a copy of the GNU General Public License
-# along with Ansible. If not, see <http://www.gnu.org/licenses/>.
-
-# Make coding more python3-ish
-from __future__ import (absolute_import, division, print_function)
-__metaclass__ = type
-
-import os
-
-from ansible import errors
-
-# Note, sha1 is the only hash algorithm compatible with python2.4 and with
-# FIPS-140 mode (as of 11-2014)
-try:
- from hashlib import sha1 as sha1
-except ImportError:
- from sha import sha as sha1
-
-# Backwards compat only
-try:
- from hashlib import md5 as _md5
-except ImportError:
- try:
- from md5 import md5 as _md5
- except ImportError:
- # Assume we're running in FIPS mode here
- _md5 = None
-
-def secure_hash_s(data, hash_func=sha1):
- ''' Return a secure hash hex digest of data. '''
-
- digest = hash_func()
- try:
- digest.update(data)
- except UnicodeEncodeError:
- digest.update(data.encode('utf-8'))
- return digest.hexdigest()
-
-def secure_hash(filename, hash_func=sha1):
- ''' Return a secure hash hex digest of local file, None if file is not present or a directory. '''
-
- if not os.path.exists(filename) or os.path.isdir(filename):
- return None
- digest = hash_func()
- blocksize = 64 * 1024
- try:
- infile = open(filename, 'rb')
- block = infile.read(blocksize)
- while block:
- digest.update(block)
- block = infile.read(blocksize)
- infile.close()
- except IOError, e:
- raise errors.AnsibleError("error while accessing the file %s, error was: %s" % (filename, e))
- return digest.hexdigest()
-
-# The checksum algorithm must match with the algorithm in ShellModule.checksum() method
-checksum = secure_hash
-checksum_s = secure_hash_s
-
-# Backwards compat functions. Some modules include md5s in their return values
-# Continue to support that for now. As of ansible-1.8, all of those modules
-# should also return "checksum" (sha1 for now)
-# Do not use md5 unless it is needed for:
-# 1) Optional backwards compatibility
-# 2) Compliance with a third party protocol
-#
-# MD5 will not work on systems which are FIPS-140-2 compliant.
-
-def md5s(data):
- if not _md5:
- raise ValueError('MD5 not available. Possibly running in FIPS mode')
- return secure_hash_s(data, _md5)
-
-def md5(filename):
- if not _md5:
- raise ValueError('MD5 not available. Possibly running in FIPS mode')
- return secure_hash(filename, _md5)
-
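
A usage sketch (hypothetical file path): checksum/checksum_s are the FIPS-safe entry points, while the md5 helpers exist only for backwards compatibility and raise ValueError when MD5 is unavailable.

    data_digest = checksum_s("some inline data")   # sha1 hex digest of a string
    file_digest = checksum("/etc/hostname")        # None if missing or a directory

    try:
        legacy = md5s("some inline data")
    except ValueError:
        legacy = None   # running in FIPS mode, MD5 unavailable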
diff --git a/v1/ansible/utils/module_docs.py b/v1/ansible/utils/module_docs.py
deleted file mode 100644
index c692057172..0000000000
--- a/v1/ansible/utils/module_docs.py
+++ /dev/null
@@ -1,111 +0,0 @@
-#!/usr/bin/env python
-# (c) 2012, Jan-Piet Mens <jpmens () gmail.com>
-#
-# This file is part of Ansible
-#
-# Ansible is free software: you can redistribute it and/or modify
-# it under the terms of the GNU General Public License as published by
-# the Free Software Foundation, either version 3 of the License, or
-# (at your option) any later version.
-#
-# Ansible is distributed in the hope that it will be useful,
-# but WITHOUT ANY WARRANTY; without even the implied warranty of
-# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
-# GNU General Public License for more details.
-#
-# You should have received a copy of the GNU General Public License
-# along with Ansible. If not, see <http://www.gnu.org/licenses/>.
-#
-
-import os
-import sys
-import ast
-import yaml
-import traceback
-
-from collections import MutableMapping, MutableSet, MutableSequence
-
-from ansible import utils
-
-# modules that are ok that they do not have documentation strings
-BLACKLIST_MODULES = [
- 'async_wrapper', 'accelerate', 'async_status'
-]
-
-def get_docstring(filename, verbose=False):
- """
- Search for assignment of the DOCUMENTATION and EXAMPLES variables
- in the given file.
- Parse DOCUMENTATION from YAML and return the YAML doc or None
- together with EXAMPLES, as plain text.
-
- DOCUMENTATION can be extended using documentation fragments
- loaded by the PluginLoader from the module_docs_fragments
- directory.
- """
-
- doc = None
- plainexamples = None
- returndocs = None
-
- try:
- # Thank you, Habbie, for this bit of code :-)
- M = ast.parse(''.join(open(filename)))
- for child in M.body:
- if isinstance(child, ast.Assign):
- if 'DOCUMENTATION' in (t.id for t in child.targets):
- doc = yaml.safe_load(child.value.s)
- fragment_slug = doc.get('extends_documentation_fragment',
- 'doesnotexist').lower()
-
- # Allow the module to specify a var other than DOCUMENTATION
- # to pull the fragment from, using dot notation as a separator
- if '.' in fragment_slug:
- fragment_name, fragment_var = fragment_slug.split('.', 1)
- fragment_var = fragment_var.upper()
- else:
- fragment_name, fragment_var = fragment_slug, 'DOCUMENTATION'
-
-
- if fragment_slug != 'doesnotexist':
- fragment_class = utils.plugins.fragment_loader.get(fragment_name)
- assert fragment_class is not None
-
- fragment_yaml = getattr(fragment_class, fragment_var, '{}')
- fragment = yaml.safe_load(fragment_yaml)
-
- if fragment.has_key('notes'):
- notes = fragment.pop('notes')
- if notes:
- if not doc.has_key('notes'):
- doc['notes'] = []
- doc['notes'].extend(notes)
-
- if 'options' not in fragment.keys():
- raise Exception("missing options in fragment, possibly misformatted?")
-
- for key, value in fragment.items():
- if not doc.has_key(key):
- doc[key] = value
- else:
- if isinstance(doc[key], MutableMapping):
- doc[key].update(value)
- elif isinstance(doc[key], MutableSet):
- doc[key].add(value)
- elif isinstance(doc[key], MutableSequence):
- doc[key] = sorted(frozenset(doc[key] + value))
- else:
- raise Exception("Attempt to extend a documentation fragment of unknown type")
-
- if 'EXAMPLES' in (t.id for t in child.targets):
- plainexamples = child.value.s[1:] # Skip first empty line
-
- if 'RETURN' in (t.id for t in child.targets):
- returndocs = child.value.s[1:]
- except:
- if verbose:
- traceback.print_exc()
- print "unable to parse %s" % filename
- return doc, plainexamples, returndocs
-
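
A sketch of what get_docstring returns for a hypothetical module file: the DOCUMENTATION assignment is parsed as YAML, while EXAMPLES and RETURN come back as plain text with the leading newline stripped.

    # library/ping_clone (hypothetical) contains:
    #   DOCUMENTATION = '''
    #   module: ping_clone
    #   short_description: trivial test module
    #   options: {}
    #   '''
    #   EXAMPLES = '''
    #   - action: ping_clone
    #   '''

    doc, examples, returndocs = get_docstring('library/ping_clone')
    # doc        -> {'module': 'ping_clone', 'short_description': 'trivial test module', 'options': {}}
    # examples   -> '- action: ping_clone\n'
    # returndocs -> None (no RETURN variable in the file)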
diff --git a/v1/ansible/utils/module_docs_fragments b/v1/ansible/utils/module_docs_fragments
deleted file mode 120000
index 83aef9ec19..0000000000
--- a/v1/ansible/utils/module_docs_fragments
+++ /dev/null
@@ -1 +0,0 @@
-../../../lib/ansible/utils/module_docs_fragments \ No newline at end of file
diff --git a/v1/ansible/utils/plugins.py b/v1/ansible/utils/plugins.py
deleted file mode 100644
index 14953d8f44..0000000000
--- a/v1/ansible/utils/plugins.py
+++ /dev/null
@@ -1,304 +0,0 @@
-# (c) 2012, Daniel Hokka Zakrisson <daniel@hozac.com>
-#
-# This file is part of Ansible
-#
-# Ansible is free software: you can redistribute it and/or modify
-# it under the terms of the GNU General Public License as published by
-# the Free Software Foundation, either version 3 of the License, or
-# (at your option) any later version.
-#
-# Ansible is distributed in the hope that it will be useful,
-# but WITHOUT ANY WARRANTY; without even the implied warranty of
-# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
-# GNU General Public License for more details.
-#
-# You should have received a copy of the GNU General Public License
-# along with Ansible. If not, see <http://www.gnu.org/licenses/>.
-
-import os
-import os.path
-import sys
-import glob
-import imp
-from ansible import constants as C
-from ansible import errors
-
-MODULE_CACHE = {}
-PATH_CACHE = {}
-PLUGIN_PATH_CACHE = {}
-_basedirs = []
-
-def push_basedir(basedir):
- # avoid pushing the same absolute dir more than once
- basedir = os.path.realpath(basedir)
- if basedir not in _basedirs:
- _basedirs.insert(0, basedir)
-
-class PluginLoader(object):
-
- '''
- PluginLoader loads plugins from the configured plugin directories.
-
- It searches for plugins by iterating through the combined list of
- play basedirs, configured paths, and the python path.
- The first match is used.
- '''
-
- def __init__(self, class_name, package, config, subdir, aliases={}):
-
- self.class_name = class_name
- self.package = package
- self.config = config
- self.subdir = subdir
- self.aliases = aliases
-
- if class_name not in MODULE_CACHE:
- MODULE_CACHE[class_name] = {}
- if class_name not in PATH_CACHE:
- PATH_CACHE[class_name] = None
- if class_name not in PLUGIN_PATH_CACHE:
- PLUGIN_PATH_CACHE[class_name] = {}
-
- self._module_cache = MODULE_CACHE[class_name]
- self._paths = PATH_CACHE[class_name]
- self._plugin_path_cache = PLUGIN_PATH_CACHE[class_name]
-
- self._extra_dirs = []
- self._searched_paths = set()
-
- def print_paths(self):
- ''' Returns a string suitable for printing of the search path '''
-
- # Uses a list to get the order right
- ret = []
- for i in self._get_paths():
- if i not in ret:
- ret.append(i)
- return os.pathsep.join(ret)
-
- def _all_directories(self, dir):
- results = []
- results.append(dir)
- for root, subdirs, files in os.walk(dir):
- if '__init__.py' in files:
- for x in subdirs:
- results.append(os.path.join(root,x))
- return results
-
- def _get_package_paths(self):
- ''' Gets the paths of a Python package '''
-
- paths = []
- if not self.package:
- return []
- if not hasattr(self, 'package_path'):
- m = __import__(self.package)
- parts = self.package.split('.')[1:]
- self.package_path = os.path.join(os.path.dirname(m.__file__), *parts)
- paths.extend(self._all_directories(self.package_path))
- return paths
-
- def _get_paths(self):
- ''' Return a list of paths to search for plugins in '''
-
- if self._paths is not None:
- return self._paths
-
- ret = self._extra_dirs[:]
- for basedir in _basedirs:
- fullpath = os.path.realpath(os.path.join(basedir, self.subdir))
- if os.path.isdir(fullpath):
-
- files = glob.glob("%s/*" % fullpath)
-
- # allow directories to be two levels deep
- files2 = glob.glob("%s/*/*" % fullpath)
-
- if files2 is not None:
- files.extend(files2)
-
- for file in files:
- if os.path.isdir(file) and file not in ret:
- ret.append(file)
- if fullpath not in ret:
- ret.append(fullpath)
-
- # look in any configured plugin paths, allow one level deep for subcategories
- if self.config is not None:
- configured_paths = self.config.split(os.pathsep)
- for path in configured_paths:
- path = os.path.realpath(os.path.expanduser(path))
- contents = glob.glob("%s/*" % path) + glob.glob("%s/*/*" % path)
- for c in contents:
- if os.path.isdir(c) and c not in ret:
- ret.append(c)
- if path not in ret:
- ret.append(path)
-
- # look for any plugins installed in the package subtree
- ret.extend(self._get_package_paths())
-
- # cache and return the result
- self._paths = ret
- return ret
-
-
- def add_directory(self, directory, with_subdir=False):
- ''' Adds an additional directory to the search path '''
-
- directory = os.path.realpath(directory)
-
- if directory is not None:
- if with_subdir:
- directory = os.path.join(directory, self.subdir)
- if directory not in self._extra_dirs:
- # append the directory and invalidate the path cache
- self._extra_dirs.append(directory)
- self._paths = None
-
- def find_plugin(self, name, suffixes=None):
- ''' Find a plugin named name '''
-
- if not suffixes:
- if self.class_name:
- suffixes = ['.py']
- else:
- suffixes = ['.py', '']
-
- potential_names = frozenset('%s%s' % (name, s) for s in suffixes)
- for full_name in potential_names:
- if full_name in self._plugin_path_cache:
- return self._plugin_path_cache[full_name]
-
- found = None
- for path in [p for p in self._get_paths() if p not in self._searched_paths]:
- if os.path.isdir(path):
- full_paths = (os.path.join(path, f) for f in os.listdir(path))
- for full_path in (f for f in full_paths if os.path.isfile(f)):
- for suffix in suffixes:
- if full_path.endswith(suffix):
- full_name = os.path.basename(full_path)
- break
- else: # Yes, this is a for-else: http://bit.ly/1ElPkyg
- continue
-
- if full_name not in self._plugin_path_cache:
- self._plugin_path_cache[full_name] = full_path
-
- self._searched_paths.add(path)
- for full_name in potential_names:
- if full_name in self._plugin_path_cache:
- return self._plugin_path_cache[full_name]
-
- # if nothing is found, try finding alias/deprecated
- if not name.startswith('_'):
- for alias_name in ('_%s' % n for n in potential_names):
- # We've already cached all the paths at this point
- if alias_name in self._plugin_path_cache:
- return self._plugin_path_cache[alias_name]
-
- return None
-
- def has_plugin(self, name):
- ''' Checks if a plugin named name exists '''
-
- return self.find_plugin(name) is not None
-
- __contains__ = has_plugin
-
- def get(self, name, *args, **kwargs):
- ''' instantiates a plugin of the given name using arguments '''
-
- if name in self.aliases:
- name = self.aliases[name]
- path = self.find_plugin(name)
- if path is None:
- return None
- if path not in self._module_cache:
- self._module_cache[path] = imp.load_source('.'.join([self.package, name]), path)
- return getattr(self._module_cache[path], self.class_name)(*args, **kwargs)
-
- def all(self, *args, **kwargs):
- ''' instantiates all plugins with the same arguments '''
-
- for i in self._get_paths():
- matches = glob.glob(os.path.join(i, "*.py"))
- matches.sort()
- for path in matches:
- name, ext = os.path.splitext(os.path.basename(path))
- if name.startswith("_"):
- continue
- if path not in self._module_cache:
- self._module_cache[path] = imp.load_source('.'.join([self.package, name]), path)
- yield getattr(self._module_cache[path], self.class_name)(*args, **kwargs)
-
-action_loader = PluginLoader(
- 'ActionModule',
- 'ansible.runner.action_plugins',
- C.DEFAULT_ACTION_PLUGIN_PATH,
- 'action_plugins'
-)
-
-cache_loader = PluginLoader(
- 'CacheModule',
- 'ansible.cache',
- C.DEFAULT_CACHE_PLUGIN_PATH,
- 'cache_plugins'
-)
-
-callback_loader = PluginLoader(
- 'CallbackModule',
- 'ansible.callback_plugins',
- C.DEFAULT_CALLBACK_PLUGIN_PATH,
- 'callback_plugins'
-)
-
-connection_loader = PluginLoader(
- 'Connection',
- 'ansible.runner.connection_plugins',
- C.DEFAULT_CONNECTION_PLUGIN_PATH,
- 'connection_plugins',
- aliases={'paramiko': 'paramiko_ssh'}
-)
-
-shell_loader = PluginLoader(
- 'ShellModule',
- 'ansible.runner.shell_plugins',
- 'shell_plugins',
- 'shell_plugins',
-)
-
-module_finder = PluginLoader(
- '',
- 'ansible.modules',
- C.DEFAULT_MODULE_PATH,
- 'library'
-)
-
-lookup_loader = PluginLoader(
- 'LookupModule',
- 'ansible.runner.lookup_plugins',
- C.DEFAULT_LOOKUP_PLUGIN_PATH,
- 'lookup_plugins'
-)
-
-vars_loader = PluginLoader(
- 'VarsModule',
- 'ansible.inventory.vars_plugins',
- C.DEFAULT_VARS_PLUGIN_PATH,
- 'vars_plugins'
-)
-
-filter_loader = PluginLoader(
- 'FilterModule',
- 'ansible.runner.filter_plugins',
- C.DEFAULT_FILTER_PLUGIN_PATH,
- 'filter_plugins'
-)
-
-fragment_loader = PluginLoader(
- 'ModuleDocFragment',
- 'ansible.utils.module_docs_fragments',
- os.path.join(os.path.dirname(__file__), 'module_docs_fragments'),
- '',
-)
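
A usage sketch for the loaders defined above (plugin names are illustrative): find_plugin resolves a name to a file path, get instantiates one plugin, and all iterates over every plugin on the search path.

    # Where would the 'ssh' connection plugin be loaded from?
    path = connection_loader.find_plugin('ssh')   # full path, or None

    # Instantiate a single lookup plugin; extra kwargs go to its constructor.
    pipe_lookup = lookup_loader.get('pipe', basedir='.')

    # Merge the filter tables of every filter plugin found.
    filters = {}
    for fp in filter_loader.all():
        filters.update(fp.filters())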
diff --git a/v1/ansible/utils/string_functions.py b/v1/ansible/utils/string_functions.py
deleted file mode 100644
index 3b452718f7..0000000000
--- a/v1/ansible/utils/string_functions.py
+++ /dev/null
@@ -1,18 +0,0 @@
-def isprintable(instring):
- if isinstance(instring, str):
- # http://stackoverflow.com/a/3637294
- import string
- return set(instring).issubset(set(string.printable))
- else:
- return True
-
-def count_newlines_from_end(instring):
- i = len(instring)
- while i > 0:
- if instring[i-1] != '\n':
- break
- i -= 1
- return len(instring) - i
-
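
Two quick checks pin down the helpers above (a minimal sketch):

    assert isprintable("plain ascii")
    assert isprintable("tab\tand newline\n")        # both are in string.printable
    assert count_newlines_from_end("abc\n\n") == 2
    assert count_newlines_from_end("abc") == 0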
diff --git a/v1/ansible/utils/su_prompts.py b/v1/ansible/utils/su_prompts.py
deleted file mode 100644
index 04e98e1c45..0000000000
--- a/v1/ansible/utils/su_prompts.py
+++ /dev/null
@@ -1,60 +0,0 @@
-# -*- coding: utf-8 -*-
-
-# (c) 2012-2014, Michael DeHaan <michael.dehaan@gmail.com>
-#
-# This file is part of Ansible
-#
-# Ansible is free software: you can redistribute it and/or modify
-# it under the terms of the GNU General Public License as published by
-# the Free Software Foundation, either version 3 of the License, or
-# (at your option) any later version.
-#
-# Ansible is distributed in the hope that it will be useful,
-# but WITHOUT ANY WARRANTY; without even the implied warranty of
-# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
-# GNU General Public License for more details.
-#
-# You should have received a copy of the GNU General Public License
-# along with Ansible. If not, see <http://www.gnu.org/licenses/>.
-
-import re
-
-SU_PROMPT_LOCALIZATIONS = [
- 'Password',
- '암호',
- 'パスワード',
- 'Adgangskode',
- 'Contraseña',
- 'Contrasenya',
- 'Hasło',
- 'Heslo',
- 'Jelszó',
- 'Lösenord',
- 'Mật khẩu',
- 'Mot de passe',
- 'Parola',
- 'Parool',
- 'Pasahitza',
- 'Passord',
- 'Passwort',
- 'Salasana',
- 'Sandi',
- 'Senha',
- 'Wachtwoord',
- 'ססמה',
- 'Лозинка',
- 'Парола',
- 'Пароль',
- 'गुप्तशब्द',
- 'शब्दकूट',
- 'సంకేతపదము',
- 'හස්පදය',
- '密码',
- '密碼',
-]
-
-SU_PROMPT_LOCALIZATIONS_RE = re.compile("|".join(['(\w+\'s )?' + x + ' ?: ?' for x in SU_PROMPT_LOCALIZATIONS]), flags=re.IGNORECASE)
-
-def check_su_prompt(data):
- return bool(SU_PROMPT_LOCALIZATIONS_RE.match(data))
-
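
A sketch of the matcher in action: the combined regex is anchored at the start of the output and tolerates an optional "<user>'s " prefix before any of the localized prompts.

    assert check_su_prompt("Password: ")
    assert check_su_prompt("root's Password:")
    assert check_su_prompt("Mot de passe : ")
    assert not check_su_prompt("Last login: Mon Jun  1 on tty1")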
diff --git a/v1/ansible/utils/template.py b/v1/ansible/utils/template.py
deleted file mode 100644
index 368b2067c3..0000000000
--- a/v1/ansible/utils/template.py
+++ /dev/null
@@ -1,405 +0,0 @@
-# (c) 2012, Michael DeHaan <michael.dehaan@gmail.com>
-#
-# This file is part of Ansible
-#
-# Ansible is free software: you can redistribute it and/or modify
-# it under the terms of the GNU General Public License as published by
-# the Free Software Foundation, either version 3 of the License, or
-# (at your option) any later version.
-#
-# Ansible is distributed in the hope that it will be useful,
-# but WITHOUT ANY WARRANTY; without even the implied warranty of
-# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
-# GNU General Public License for more details.
-#
-# You should have received a copy of the GNU General Public License
-# along with Ansible. If not, see <http://www.gnu.org/licenses/>.
-
-import os
-import re
-import codecs
-import jinja2
-from jinja2.runtime import StrictUndefined
-from jinja2.exceptions import TemplateSyntaxError
-import yaml
-import json
-from ansible import errors
-import ansible.constants as C
-import time
-import subprocess
-import datetime
-import pwd
-import ast
-import traceback
-from numbers import Number
-from types import NoneType
-
-from ansible.utils.string_functions import count_newlines_from_end
-from ansible.utils import to_bytes, to_unicode
-
-class Globals(object):
-
- FILTERS = None
-
- def __init__(self):
- pass
-
-def _get_filters():
- ''' return filter plugin instances '''
-
- if Globals.FILTERS is not None:
- return Globals.FILTERS
-
- from ansible import utils
- plugins = [ x for x in utils.plugins.filter_loader.all()]
- filters = {}
- for fp in plugins:
- filters.update(fp.filters())
- Globals.FILTERS = filters
-
- return Globals.FILTERS
-
-def _get_extensions():
- ''' return jinja2 extensions to load '''
-
- # if some extensions are set via jinja_extensions in ansible.cfg, we try
- # to load them with the jinja environment
- jinja_exts = []
- if C.DEFAULT_JINJA2_EXTENSIONS:
- # make sure the configuration directive doesn't contain spaces,
- # and split the extensions into a list
- jinja_exts = C.DEFAULT_JINJA2_EXTENSIONS.replace(" ", "").split(',')
-
- return jinja_exts
-
-class Flags:
- LEGACY_TEMPLATE_WARNING = False
-
-# TODO: refactor this file
-
-FILTER_PLUGINS = None
-_LISTRE = re.compile(r"(\w+)\[(\d+)\]")
-
-# A regex for checking to see if a variable we're trying to
-# expand is just a single variable name.
-SINGLE_VAR = re.compile(r"^{{\s*(\w*)\s*}}$")
-
-JINJA2_OVERRIDE = '#jinja2:'
-JINJA2_ALLOWED_OVERRIDES = ['trim_blocks', 'lstrip_blocks', 'newline_sequence', 'keep_trailing_newline']
-
-def lookup(name, *args, **kwargs):
- from ansible import utils
- instance = utils.plugins.lookup_loader.get(name.lower(), basedir=kwargs.get('basedir',None))
- tvars = kwargs.get('vars', None)
-
- wantlist = kwargs.pop('wantlist', False)
-
- if instance is not None:
- try:
- ran = instance.run(*args, inject=tvars, **kwargs)
- except errors.AnsibleError:
- raise
- except jinja2.exceptions.UndefinedError, e:
- raise errors.AnsibleUndefinedVariable("One or more undefined variables: %s" % str(e))
- except Exception, e:
- raise errors.AnsibleError('Unexpected error during lookup: %s' % e)
- if ran and not wantlist:
- ran = ",".join(ran)
- return ran
- else:
- raise errors.AnsibleError("lookup plugin (%s) not found" % name)
-
-def template(basedir, varname, templatevars, lookup_fatal=True, depth=0, expand_lists=True, convert_bare=False, fail_on_undefined=False, filter_fatal=True):
- ''' templates a data structure by traversing it and substituting for other data structures '''
- from ansible import utils
- try:
- if convert_bare and isinstance(varname, basestring):
- first_part = varname.split(".")[0].split("[")[0]
- if first_part in templatevars and '{{' not in varname and '$' not in varname:
- varname = "{{%s}}" % varname
-
- if isinstance(varname, basestring):
- if '{{' in varname or '{%' in varname:
- try:
- varname = template_from_string(basedir, varname, templatevars, fail_on_undefined)
- except errors.AnsibleError, e:
- raise errors.AnsibleError("Failed to template %s: %s" % (varname, str(e)))
-
- # template_from_string may return non-strings when the var is just
- # a reference to a single variable, so we should re-check before we do further evals
- if isinstance(varname, basestring):
- if (varname.startswith("{") and not varname.startswith("{{")) or varname.startswith("["):
- eval_results = utils.safe_eval(varname, locals=templatevars, include_exceptions=True)
- if eval_results[1] is None:
- varname = eval_results[0]
-
- return varname
-
- elif isinstance(varname, (list, tuple)):
- return [template(basedir, v, templatevars, lookup_fatal, depth, expand_lists, convert_bare, fail_on_undefined, filter_fatal) for v in varname]
- elif isinstance(varname, dict):
- d = {}
- for (k, v) in varname.iteritems():
- d[k] = template(basedir, v, templatevars, lookup_fatal, depth, expand_lists, convert_bare, fail_on_undefined, filter_fatal)
- return d
- else:
- return varname
- except errors.AnsibleFilterError:
- if filter_fatal:
- raise
- else:
- return varname
-
-
-class _jinja2_vars(object):
- '''
- Helper class to template all variable content before jinja2 sees it.
- This is done by hijacking the variable storage that jinja2 uses, and
- overriding __contains__ and __getitem__ to look like a dict. An added bonus
- is avoiding duplication of the large hashes that inject tends to be.
- To facilitate using builtin jinja2 things like range, globals are handled
- here.
- extras is a list of locals to also search for variables.
- '''
-
- def __init__(self, basedir, vars, globals, fail_on_undefined, *extras):
- self.basedir = basedir
- self.vars = vars
- self.globals = globals
- self.fail_on_undefined = fail_on_undefined
- self.extras = extras
-
- def __contains__(self, k):
- if k in self.vars:
- return True
- for i in self.extras:
- if k in i:
- return True
- if k in self.globals:
- return True
- return False
-
- def __getitem__(self, varname):
- from ansible.runner import HostVars
- if varname not in self.vars:
- for i in self.extras:
- if varname in i:
- return i[varname]
- if varname in self.globals:
- return self.globals[varname]
- else:
- raise KeyError("undefined variable: %s" % varname)
- var = self.vars[varname]
- # HostVars is special, return it as-is, as is the special variable
- # 'vars', which contains the vars structure
- var = to_unicode(var, nonstring="passthru")
- if (isinstance(var, dict) and varname == "vars") or isinstance(var, HostVars):
- return var
- else:
- return template(self.basedir, var, self.vars, fail_on_undefined=self.fail_on_undefined)
-
- def add_locals(self, locals):
- '''
- If locals are provided, create a copy of self containing those
- locals in addition to what is already in this variable proxy.
- '''
- if locals is None:
- return self
- return _jinja2_vars(self.basedir, self.vars, self.globals, self.fail_on_undefined, locals, *self.extras)
-
-class J2Template(jinja2.environment.Template):
- '''
- This class prevents Jinja2 from running _jinja2_vars through dict()
- Without this, {% include %} and similar will create new contexts unlike
- the special one created in template_from_file. This ensures they are all
- alike, except for potential locals.
- '''
- def new_context(self, vars=None, shared=False, locals=None):
- return jinja2.runtime.Context(self.environment, vars.add_locals(locals), self.name, self.blocks)
-
-def template_from_file(basedir, path, vars, vault_password=None):
- ''' run a file through the templating engine '''
-
- fail_on_undefined = C.DEFAULT_UNDEFINED_VAR_BEHAVIOR
-
- from ansible import utils
- realpath = utils.path_dwim(basedir, path)
- loader=jinja2.FileSystemLoader([basedir,os.path.dirname(realpath)])
-
- def my_lookup(*args, **kwargs):
- kwargs['vars'] = vars
- return lookup(*args, basedir=basedir, **kwargs)
- def my_finalize(thing):
- return thing if thing is not None else ''
-
- environment = jinja2.Environment(loader=loader, trim_blocks=True, extensions=_get_extensions())
- environment.filters.update(_get_filters())
- environment.globals['lookup'] = my_lookup
- environment.globals['finalize'] = my_finalize
- if fail_on_undefined:
- environment.undefined = StrictUndefined
-
- try:
- data = codecs.open(realpath, encoding="utf8").read()
- except UnicodeDecodeError:
- raise errors.AnsibleError("unable to process as utf-8: %s" % realpath)
- except:
- raise errors.AnsibleError("unable to read %s" % realpath)
-
- # Get jinja env overrides from template
- if data.startswith(JINJA2_OVERRIDE):
- eol = data.find('\n')
- line = data[len(JINJA2_OVERRIDE):eol]
- data = data[eol+1:]
- for pair in line.split(','):
- (key,val) = pair.split(':')
- key = key.strip()
- if key in JINJA2_ALLOWED_OVERRIDES:
- setattr(environment, key, ast.literal_eval(val.strip()))
-
-
- environment.template_class = J2Template
- try:
- t = environment.from_string(data)
- except TemplateSyntaxError, e:
- # Throw an exception which includes a more user friendly error message
- values = {'name': realpath, 'lineno': e.lineno, 'error': str(e)}
- msg = 'file: %(name)s, line number: %(lineno)s, error: %(error)s' % \
- values
- error = errors.AnsibleError(msg)
- raise error
- vars = vars.copy()
- try:
- template_uid = pwd.getpwuid(os.stat(realpath).st_uid).pw_name
- except:
- template_uid = os.stat(realpath).st_uid
- vars['template_host'] = os.uname()[1]
- vars['template_path'] = realpath
- vars['template_mtime'] = datetime.datetime.fromtimestamp(os.path.getmtime(realpath))
- vars['template_uid'] = template_uid
- vars['template_fullpath'] = os.path.abspath(realpath)
- vars['template_run_date'] = datetime.datetime.now()
-
- managed_default = C.DEFAULT_MANAGED_STR
- managed_str = managed_default.format(
- host = vars['template_host'],
- uid = vars['template_uid'],
- file = to_bytes(vars['template_path'])
- )
- vars['ansible_managed'] = time.strftime(
- managed_str,
- time.localtime(os.path.getmtime(realpath))
- )
-
- # This line performs deep Jinja2 magic that uses the _jinja2_vars object for vars
- # Ideally, this could use some API where setting shared=True and the object won't get
- # passed through dict(o), but I have not found that yet.
- try:
- res = jinja2.utils.concat(t.root_render_func(t.new_context(_jinja2_vars(basedir, vars, t.globals, fail_on_undefined), shared=True)))
- except jinja2.exceptions.UndefinedError, e:
- raise errors.AnsibleUndefinedVariable("One or more undefined variables: %s" % str(e))
- except jinja2.exceptions.TemplateNotFound, e:
- # Throw an exception which includes a more user friendly error message
- # This will likely happen for an included sub-template. Note that besides
- # a pure "file not found", it may also happen due to Jinja2's "security"
- # checks on the path.
- values = {'name': realpath, 'subname': str(e)}
- msg = 'file: %(name)s, error: Cannot find/not allowed to load (include) template %(subname)s' % \
- values
- error = errors.AnsibleError(msg)
- raise error
-
- # The low level calls above do not preserve the newline
- # characters at the end of the input data, so we calculate the
- # difference in newlines and append them to the resulting output
- # for parity
- res_newlines = count_newlines_from_end(res)
- data_newlines = count_newlines_from_end(data)
- if data_newlines > res_newlines:
- res += '\n' * (data_newlines - res_newlines)
-
- if isinstance(res, unicode):
- # do not try to re-template a unicode string
- result = res
- else:
- result = template(basedir, res, vars)
-
- return result
-
-def template_from_string(basedir, data, vars, fail_on_undefined=False):
- ''' run a string through the (Jinja2) templating engine '''
- try:
- if type(data) == str:
- data = unicode(data, 'utf-8')
-
- # Check to see if the string we are trying to render is just referencing a single
- # var. In this case we don't want to accidentally change the type of the variable
- # to a string by using the jinja template renderer. We just want to pass it.
- only_one = SINGLE_VAR.match(data)
- if only_one:
- var_name = only_one.group(1)
- if var_name in vars:
- resolved_val = vars[var_name]
- if isinstance(resolved_val, (bool, Number, NoneType)):
- return resolved_val
-
- def my_finalize(thing):
- return thing if thing is not None else ''
-
- environment = jinja2.Environment(trim_blocks=True, undefined=StrictUndefined, extensions=_get_extensions(), finalize=my_finalize)
- environment.filters.update(_get_filters())
- environment.template_class = J2Template
-
- if '_original_file' in vars:
- basedir = os.path.dirname(vars['_original_file'])
- filesdir = os.path.abspath(os.path.join(basedir, '..', 'files'))
- if os.path.exists(filesdir):
- basedir = filesdir
-
- # 6227
- if isinstance(data, unicode):
- try:
- data = data.decode('utf-8')
- except UnicodeEncodeError, e:
- pass
-
- try:
- t = environment.from_string(data)
- except TemplateSyntaxError, e:
- raise errors.AnsibleError("template error while templating string: %s" % str(e))
- except Exception, e:
- if 'recursion' in str(e):
- raise errors.AnsibleError("recursive loop detected in template string: %s" % data)
- else:
- return data
-
- def my_lookup(*args, **kwargs):
- kwargs['vars'] = vars
- return lookup(*args, basedir=basedir, **kwargs)
-
- t.globals['lookup'] = my_lookup
- t.globals['finalize'] = my_finalize
- jvars = _jinja2_vars(basedir, vars, t.globals, fail_on_undefined)
- new_context = t.new_context(jvars, shared=True)
- rf = t.root_render_func(new_context)
- try:
- res = jinja2.utils.concat(rf)
- except TypeError, te:
- if 'StrictUndefined' in str(te):
- raise errors.AnsibleUndefinedVariable(
- "Unable to look up a name or access an attribute in template string. " + \
- "Make sure your variable name does not contain invalid characters like '-'."
- )
- else:
- raise errors.AnsibleError("an unexpected type error occurred. Error was %s" % te)
- return res
- except (jinja2.exceptions.UndefinedError, errors.AnsibleUndefinedVariable):
- if fail_on_undefined:
- raise
- else:
- return data
-
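
A usage sketch for the string templater above (hypothetical variables): ordinary Jinja2 expressions render to unicode, while a string that is exactly one bare variable preserves the variable's original type.

    play_vars = {'name': 'web01', 'count': 3}

    template_from_string('.', 'host is {{ name }}', play_vars)
    # -> u'host is web01'

    template_from_string('.', '{{ count }}', play_vars)
    # -> 3  (SINGLE_VAR match returns the value itself, not a string)

    template_from_string('.', '{{ missing }}', play_vars, fail_on_undefined=False)
    # -> '{{ missing }}'  (returned untouched instead of raising)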
diff --git a/v1/ansible/utils/unicode.py b/v1/ansible/utils/unicode.py
deleted file mode 100644
index 7bd035c007..0000000000
--- a/v1/ansible/utils/unicode.py
+++ /dev/null
@@ -1,248 +0,0 @@
-# (c) 2012-2014, Toshio Kuratomi <a.badger@gmail.com>
-#
-# This file is part of Ansible
-#
-# Ansible is free software: you can redistribute it and/or modify
-# it under the terms of the GNU General Public License as published by
-# the Free Software Foundation, either version 3 of the License, or
-# (at your option) any later version.
-#
-# Ansible is distributed in the hope that it will be useful,
-# but WITHOUT ANY WARRANTY; without even the implied warranty of
-# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
-# GNU General Public License for more details.
-#
-# You should have received a copy of the GNU General Public License
-# along with Ansible. If not, see <http://www.gnu.org/licenses/>.
-
-# Make coding more python3-ish
-from __future__ import (absolute_import, division, print_function)
-__metaclass__ = type
-
-# to_bytes and to_unicode were written by Toshio Kuratomi for the
-# python-kitchen library https://pypi.python.org/pypi/kitchen
-# They are licensed in kitchen under the terms of the GPLv2+
-# They were copied and modified for use in ansible by Toshio in Jan 2015
-# (simply removing the deprecated features)
-
-#: Aliases for the utf-8 codec
-_UTF8_ALIASES = frozenset(('utf-8', 'UTF-8', 'utf8', 'UTF8', 'utf_8', 'UTF_8',
- 'utf', 'UTF', 'u8', 'U8'))
-#: Aliases for the latin-1 codec
-_LATIN1_ALIASES = frozenset(('latin-1', 'LATIN-1', 'latin1', 'LATIN1',
- 'latin', 'LATIN', 'l1', 'L1', 'cp819', 'CP819', '8859', 'iso8859-1',
- 'ISO8859-1', 'iso-8859-1', 'ISO-8859-1'))
-
-# EXCEPTION_CONVERTERS is defined below due to using to_unicode
-
-def to_unicode(obj, encoding='utf-8', errors='replace', nonstring=None):
- '''Convert an object into a :class:`unicode` string
-
- :arg obj: Object to convert to a :class:`unicode` string. This should
- normally be a byte :class:`str`
- :kwarg encoding: What encoding to try converting the byte :class:`str` as.
- Defaults to :term:`utf-8`
- :kwarg errors: If errors are found while decoding, perform this action.
- Defaults to ``replace`` which replaces the invalid bytes with
- a character that means the bytes were unable to be decoded. Other
- values are the same as the error handling schemes in the `codec base
- classes
- <http://docs.python.org/library/codecs.html#codec-base-classes>`_.
- For instance ``strict`` which raises an exception and ``ignore`` which
- simply omits the non-decodable characters.
- :kwarg nonstring: How to treat nonstring values. Possible values are:
-
- :simplerepr: Attempt to call the object's "simple representation"
- method and return that value. Python-2.3+ has two methods that
- try to return a simple representation: :meth:`object.__unicode__`
- and :meth:`object.__str__`. We first try to get a usable value
- from :meth:`object.__unicode__`. If that fails we try the same
- with :meth:`object.__str__`.
- :empty: Return an empty :class:`unicode` string
- :strict: Raise a :exc:`TypeError`
- :passthru: Return the object unchanged
- :repr: Attempt to return a :class:`unicode` string of the repr of the
- object
-
- Default is ``simplerepr``
-
- :raises TypeError: if :attr:`nonstring` is ``strict`` and
- a non-:class:`basestring` object is passed in or if :attr:`nonstring`
- is set to an unknown value
- :raises UnicodeDecodeError: if :attr:`errors` is ``strict`` and
- :attr:`obj` is not decodable using the given encoding
- :returns: :class:`unicode` string or the original object depending on the
- value of :attr:`nonstring`.
-
- Usually this should be used on a byte :class:`str` but it can take both
- byte :class:`str` and :class:`unicode` strings intelligently. Nonstring
- objects are handled in different ways depending on the setting of the
- :attr:`nonstring` parameter.
-
- The default values of this function are set so as to always return
- a :class:`unicode` string and never raise an error when converting from
- a byte :class:`str` to a :class:`unicode` string. However, when you do
- not pass validly encoded text (or a nonstring object), you may end up with
- output that you don't expect. Be sure you understand the requirements of
- your data, not just ignore errors by passing it through this function.
- '''
- # Could use isbasestring/isunicode here but we want this code to be as
- # fast as possible
- if isinstance(obj, basestring):
- if isinstance(obj, unicode):
- return obj
- if encoding in _UTF8_ALIASES:
- return unicode(obj, 'utf-8', errors)
- if encoding in _LATIN1_ALIASES:
- return unicode(obj, 'latin-1', errors)
- return obj.decode(encoding, errors)
-
- if not nonstring:
- nonstring = 'simplerepr'
- if nonstring == 'empty':
- return u''
- elif nonstring == 'passthru':
- return obj
- elif nonstring == 'simplerepr':
- try:
- simple = obj.__unicode__()
- except (AttributeError, UnicodeError):
- simple = None
- if not simple:
- try:
- simple = str(obj)
- except UnicodeError:
- try:
- simple = obj.__str__()
- except (UnicodeError, AttributeError):
- simple = u''
- if isinstance(simple, str):
- return unicode(simple, encoding, errors)
- return simple
- elif nonstring in ('repr', 'strict'):
- obj_repr = repr(obj)
- if isinstance(obj_repr, str):
- obj_repr = unicode(obj_repr, encoding, errors)
- if nonstring == 'repr':
- return obj_repr
- raise TypeError('to_unicode was given "%(obj)s" which is neither'
- ' a byte string (str) nor a unicode string' %
- {'obj': obj_repr.encode(encoding, 'replace')})
-
- raise TypeError('nonstring value, %(param)s, is not set to a valid'
- ' action' % {'param': nonstring})
-
-def to_bytes(obj, encoding='utf-8', errors='replace', nonstring=None):
- '''Convert an object into a byte :class:`str`
-
- :arg obj: Object to convert to a byte :class:`str`. This should normally
- be a :class:`unicode` string.
- :kwarg encoding: Encoding to use to convert the :class:`unicode` string
- into a byte :class:`str`. Defaults to :term:`utf-8`.
- :kwarg errors: If errors are found while encoding, perform this action.
- Defaults to ``replace`` which replaces the invalid bytes with
- a character that means the bytes were unable to be encoded. Other
- values are the same as the error handling schemes in the `codec base
- classes
- <http://docs.python.org/library/codecs.html#codec-base-classes>`_.
- For instance ``strict`` which raises an exception and ``ignore`` which
- simply omits the non-encodable characters.
- :kwarg nonstring: How to treat nonstring values. Possible values are:
-
- :simplerepr: Attempt to call the object's "simple representation"
- method and return that value. Python-2.3+ has two methods that
- try to return a simple representation: :meth:`object.__unicode__`
- and :meth:`object.__str__`. We first try to get a usable value
- from :meth:`object.__str__`. If that fails we try the same
- with :meth:`object.__unicode__`.
- :empty: Return an empty byte :class:`str`
- :strict: Raise a :exc:`TypeError`
- :passthru: Return the object unchanged
- :repr: Attempt to return a byte :class:`str` of the :func:`repr` of the
- object
-
- Default is ``simplerepr``.
-
- :raises TypeError: if :attr:`nonstring` is ``strict`` and
- a non-:class:`basestring` object is passed in or if :attr:`nonstring`
- is set to an unknown value.
- :raises UnicodeEncodeError: if :attr:`errors` is ``strict`` and all of the
- bytes of :attr:`obj` are unable to be encoded using :attr:`encoding`.
- :returns: byte :class:`str` or the original object depending on the value
- of :attr:`nonstring`.
-
- .. warning::
-
- If you pass a byte :class:`str` into this function the byte
- :class:`str` is returned unmodified. It is **not** re-encoded with
- the specified :attr:`encoding`. The easiest way to achieve that is::
-
- to_bytes(to_unicode(text), encoding='utf-8')
-
- The initial :func:`to_unicode` call will ensure text is
- a :class:`unicode` string. Then, :func:`to_bytes` will turn that into
- a byte :class:`str` with the specified encoding.
-
- Usually, this should be used on a :class:`unicode` string but it can take
- either a byte :class:`str` or a :class:`unicode` string intelligently.
- Nonstring objects are handled in different ways depending on the setting
- of the :attr:`nonstring` parameter.
-
- The default values of this function are set so as to always return a byte
- :class:`str` and never raise an error when converting from unicode to
- bytes. However, when you do not pass an encoding that can validly encode
- the object (or a non-string object), you may end up with output that you
- don't expect. Be sure you understand the requirements of your data, not
- just ignore errors by passing it through this function.
- '''
- # Could use isbasestring, isbytestring here but we want this to be as fast
- # as possible
- if isinstance(obj, basestring):
- if isinstance(obj, str):
- return obj
- return obj.encode(encoding, errors)
- if not nonstring:
- nonstring = 'simplerepr'
-
- if nonstring == 'empty':
- return ''
- elif nonstring == 'passthru':
- return obj
- elif nonstring == 'simplerepr':
- try:
- simple = str(obj)
- except UnicodeError:
- try:
- simple = obj.__str__()
- except (AttributeError, UnicodeError):
- simple = None
- if not simple:
- try:
- simple = obj.__unicode__()
- except (AttributeError, UnicodeError):
- simple = ''
- if isinstance(simple, unicode):
- simple = simple.encode(encoding, 'replace')
- return simple
- elif nonstring in ('repr', 'strict'):
- try:
- obj_repr = obj.__repr__()
- except (AttributeError, UnicodeError):
- obj_repr = ''
- if isinstance(obj_repr, unicode):
- obj_repr = obj_repr.encode(encoding, errors)
- else:
- obj_repr = str(obj_repr)
- if nonstring == 'repr':
- return obj_repr
- raise TypeError('to_bytes was given "%(obj)s" which is neither'
- ' a unicode string nor a byte string (str)' % {'obj': obj_repr})
-
- raise TypeError('nonstring value, %(param)s, is not set to a valid'
- ' action' % {'param': nonstring})
-
-
-# force the return value of a function to be unicode. Use with partial to
-# ensure that a filter will return unicode values.
-def unicode_wrap(func, *args, **kwargs):
- return to_unicode(func(*args, **kwargs), nonstring='passthru')
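
A round-trip sketch (Python 2 semantics, matching the module above):

    u = to_unicode('caf\xc3\xa9')   # byte str decoded as utf-8 -> u'caf\xe9'
    b = to_bytes(u'caf\xe9')        # unicode encoded as utf-8  -> 'caf\xc3\xa9'

    # Re-encoding an arbitrary byte str, per the warning in to_bytes:
    b2 = to_bytes(to_unicode(b), encoding='utf-8')

    # Nonstring handling is explicit:
    to_unicode(None, nonstring='passthru')   # -> None
    to_unicode(None, nonstring='empty')      # -> u''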
diff --git a/v1/ansible/utils/vault.py b/v1/ansible/utils/vault.py
deleted file mode 100644
index 842688a2c1..0000000000
--- a/v1/ansible/utils/vault.py
+++ /dev/null
@@ -1,585 +0,0 @@
-# (c) 2014, James Tanner <tanner.jc@gmail.com>
-#
-# Ansible is free software: you can redistribute it and/or modify
-# it under the terms of the GNU General Public License as published by
-# the Free Software Foundation, either version 3 of the License, or
-# (at your option) any later version.
-#
-# Ansible is distributed in the hope that it will be useful,
-# but WITHOUT ANY WARRANTY; without even the implied warranty of
-# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
-# GNU General Public License for more details.
-#
-# You should have received a copy of the GNU General Public License
-# along with Ansible. If not, see <http://www.gnu.org/licenses/>.
-#
-
-import os
-import shlex
-import shutil
-import tempfile
-from io import BytesIO
-from subprocess import call
-from ansible import errors
-from hashlib import sha256
-
-# Note: Only used for loading obsolete VaultAES files. All files are written
-# using the newer VaultAES256 which does not require md5
-try:
- from hashlib import md5
-except ImportError:
- try:
- from md5 import md5
- except ImportError:
- # MD5 unavailable. Possibly FIPS mode
- md5 = None
-
-from binascii import hexlify
-from binascii import unhexlify
-from ansible import constants as C
-
-try:
- from Crypto.Hash import SHA256, HMAC
- HAS_HASH = True
-except ImportError:
- HAS_HASH = False
-
-# Counter import fails for 2.0.1, requires >= 2.6.1 from pip
-try:
- from Crypto.Util import Counter
- HAS_COUNTER = True
-except ImportError:
- HAS_COUNTER = False
-
-# KDF import fails for 2.0.1, requires >= 2.6.1 from pip
-try:
- from Crypto.Protocol.KDF import PBKDF2
- HAS_PBKDF2 = True
-except ImportError:
- HAS_PBKDF2 = False
-
-# AES IMPORTS
-try:
- from Crypto.Cipher import AES as AES
- HAS_AES = True
-except ImportError:
- HAS_AES = False
-
-CRYPTO_UPGRADE = "ansible-vault requires a newer version of pycrypto than the one installed on your platform. You may fix this with OS-specific commands such as: yum install python-devel; rpm -e --nodeps python-crypto; pip install pycrypto"
-
-HEADER='$ANSIBLE_VAULT'
-CIPHER_WHITELIST=['AES', 'AES256']
-
-class VaultLib(object):
-
- def __init__(self, password):
- self.password = password
- self.cipher_name = None
- self.version = '1.1'
-
- def is_encrypted(self, data):
- return data.startswith(HEADER)
-
- def encrypt(self, data):
-
- if self.is_encrypted(data):
- raise errors.AnsibleError("data is already encrypted")
-
- if not self.cipher_name:
- self.cipher_name = "AES256"
- #raise errors.AnsibleError("the cipher must be set before encrypting data")
-
- if 'Vault' + self.cipher_name in globals() and self.cipher_name in CIPHER_WHITELIST:
- cipher = globals()['Vault' + self.cipher_name]
- this_cipher = cipher()
- else:
- raise errors.AnsibleError("%s cipher could not be found" % self.cipher_name)
-
- """
- # combine sha + data
- this_sha = sha256(data).hexdigest()
- tmp_data = this_sha + "\n" + data
- """
-
- # encrypt sha + data
- enc_data = this_cipher.encrypt(data, self.password)
-
- # add header
- tmp_data = self._add_header(enc_data)
- return tmp_data
-
- def decrypt(self, data):
- if self.password is None:
- raise errors.AnsibleError("A vault password must be specified to decrypt data")
-
- if not self.is_encrypted(data):
- raise errors.AnsibleError("data is not encrypted")
-
- # clean out header
- data = self._split_header(data)
-
- # create the cipher object
- if 'Vault' + self.cipher_name in globals() and self.cipher_name in CIPHER_WHITELIST:
- cipher = globals()['Vault' + self.cipher_name]
- this_cipher = cipher()
- else:
- raise errors.AnsibleError("%s cipher could not be found" % self.cipher_name)
-
- # try to decrypt the data
- data = this_cipher.decrypt(data, self.password)
- if data is None:
- raise errors.AnsibleError("Decryption failed")
-
- return data
-
- def _add_header(self, data):
- # combine header and encrypted data in 80 char columns
-
- #tmpdata = hexlify(data)
- tmpdata = [data[i:i+80] for i in range(0, len(data), 80)]
-
- if not self.cipher_name:
- raise errors.AnsibleError("the cipher must be set before adding a header")
-
- dirty_data = HEADER + ";" + str(self.version) + ";" + self.cipher_name + "\n"
-
- for l in tmpdata:
- dirty_data += l + '\n'
-
- return dirty_data
-
-
- def _split_header(self, data):
- # used by decrypt
-
- tmpdata = data.split('\n')
- tmpheader = tmpdata[0].strip().split(';')
-
- self.version = str(tmpheader[1].strip())
- self.cipher_name = str(tmpheader[2].strip())
- clean_data = '\n'.join(tmpdata[1:])
-
- """
- # strip out newline, join, unhex
- clean_data = [ x.strip() for x in clean_data ]
- clean_data = unhexlify(''.join(clean_data))
- """
-
- return clean_data
-
- def __enter__(self):
- return self
-
- def __exit__(self, *err):
- pass
-
-class VaultEditor(object):
- # uses helper methods write_data(self, data, filename) and
- # read_data(self, filename) so that simple file I/O isn't
- # duplicated across operations ("Don't Repeat Yourself")
-
- def __init__(self, cipher_name, password, filename):
- # instantiates a member variable for VaultLib
- self.cipher_name = cipher_name
- self.password = password
- self.filename = filename
-
- def _edit_file_helper(self, existing_data=None, cipher=None):
- # make sure the umask is set to a sane value
- old_umask = os.umask(0o077)
-
- # Create a tempfile
- _, tmp_path = tempfile.mkstemp()
-
- if existing_data:
- self.write_data(existing_data, tmp_path)
-
- # drop the user into an editor on the tmp file
- try:
- call(self._editor_shell_command(tmp_path))
- except OSError, e:
- raise Exception("Failed to open editor (%s): %s" % (self._editor_shell_command(tmp_path)[0],str(e)))
- tmpdata = self.read_data(tmp_path)
-
- # create new vault
- this_vault = VaultLib(self.password)
- if cipher:
- this_vault.cipher_name = cipher
-
- # encrypt new data and write out to tmp
- enc_data = this_vault.encrypt(tmpdata)
- self.write_data(enc_data, tmp_path)
-
- # shuffle tmp file into place
- self.shuffle_files(tmp_path, self.filename)
-
- # and restore umask
- os.umask(old_umask)
-
- def create_file(self):
- """ create a new encrypted file """
-
- if not HAS_AES or not HAS_COUNTER or not HAS_PBKDF2 or not HAS_HASH:
- raise errors.AnsibleError(CRYPTO_UPGRADE)
-
- if os.path.isfile(self.filename):
- raise errors.AnsibleError("%s exists, please use 'edit' instead" % self.filename)
-
- # Let the user specify contents and save file
- self._edit_file_helper(cipher=self.cipher_name)
-
- def decrypt_file(self):
-
- if not HAS_AES or not HAS_COUNTER or not HAS_PBKDF2 or not HAS_HASH:
- raise errors.AnsibleError(CRYPTO_UPGRADE)
-
- if not os.path.isfile(self.filename):
- raise errors.AnsibleError("%s does not exist" % self.filename)
-
- tmpdata = self.read_data(self.filename)
- this_vault = VaultLib(self.password)
- if this_vault.is_encrypted(tmpdata):
- dec_data = this_vault.decrypt(tmpdata)
- if dec_data is None:
- raise errors.AnsibleError("Decryption failed")
- else:
- self.write_data(dec_data, self.filename)
- else:
- raise errors.AnsibleError("%s is not encrypted" % self.filename)
-
- def edit_file(self):
-
- if not HAS_AES or not HAS_COUNTER or not HAS_PBKDF2 or not HAS_HASH:
- raise errors.AnsibleError(CRYPTO_UPGRADE)
-
- # decrypt to tmpfile
- tmpdata = self.read_data(self.filename)
- this_vault = VaultLib(self.password)
- dec_data = this_vault.decrypt(tmpdata)
-
- # let the user edit the data and save
- self._edit_file_helper(existing_data=dec_data)
- # we want the cipher to default to AES256, to get rid of files
- # encrypted with the legacy AES cipher:
- #self._edit_file_helper(existing_data=dec_data, cipher=this_vault.cipher_name)
-
-
- def view_file(self):
-
- if not HAS_AES or not HAS_COUNTER or not HAS_PBKDF2 or not HAS_HASH:
- raise errors.AnsibleError(CRYPTO_UPGRADE)
-
- # decrypt to tmpfile
- tmpdata = self.read_data(self.filename)
- this_vault = VaultLib(self.password)
- dec_data = this_vault.decrypt(tmpdata)
- old_umask = os.umask(0o077)
- _, tmp_path = tempfile.mkstemp()
- self.write_data(dec_data, tmp_path)
- os.umask(old_umask)
-
- # drop the user into pager on the tmp file
- call(self._pager_shell_command(tmp_path))
- os.remove(tmp_path)
-
- def encrypt_file(self):
-
- if not HAS_AES or not HAS_COUNTER or not HAS_PBKDF2 or not HAS_HASH:
- raise errors.AnsibleError(CRYPTO_UPGRADE)
-
- if not os.path.isfile(self.filename):
- raise errors.AnsibleError("%s does not exist" % self.filename)
-
- tmpdata = self.read_data(self.filename)
- this_vault = VaultLib(self.password)
- this_vault.cipher_name = self.cipher_name
- if not this_vault.is_encrypted(tmpdata):
- enc_data = this_vault.encrypt(tmpdata)
- self.write_data(enc_data, self.filename)
- else:
- raise errors.AnsibleError("%s is already encrypted" % self.filename)
-
- def rekey_file(self, new_password):
-
- if not HAS_AES or not HAS_COUNTER or not HAS_PBKDF2 or not HAS_HASH:
- raise errors.AnsibleError(CRYPTO_UPGRADE)
-
- # decrypt
- tmpdata = self.read_data(self.filename)
- this_vault = VaultLib(self.password)
- dec_data = this_vault.decrypt(tmpdata)
-
- # create new vault
- new_vault = VaultLib(new_password)
-
- # we want to force cipher to the default
- #new_vault.cipher_name = this_vault.cipher_name
-
- # re-encrypt data and re-write file
- enc_data = new_vault.encrypt(dec_data)
- self.write_data(enc_data, self.filename)
-
- def read_data(self, filename):
- f = open(filename, "rb")
- tmpdata = f.read()
- f.close()
- return tmpdata
-
- def write_data(self, data, filename):
- if os.path.isfile(filename):
- os.remove(filename)
- f = open(filename, "wb")
- f.write(data)
- f.close()
-
- def shuffle_files(self, src, dest):
- # overwrite dest with src
- if os.path.isfile(dest):
- os.remove(dest)
- shutil.move(src, dest)
-
- def _editor_shell_command(self, filename):
- EDITOR = os.environ.get('EDITOR','vim')
- editor = shlex.split(EDITOR)
- editor.append(filename)
-
- return editor
-
- def _pager_shell_command(self, filename):
- PAGER = os.environ.get('PAGER','less')
- pager = shlex.split(PAGER)
- pager.append(filename)
-
- return pager
-
-########################################
-# CIPHERS #
-########################################
-
-class VaultAES(object):
-
- # this version has been obsoleted by the VaultAES256 class,
- # which uses encrypt-then-MAC (fixing the order of operations) and an improved KDF;
- # the code remains for upgrade purposes only
- # http://stackoverflow.com/a/16761459
-
- def __init__(self):
- if not md5:
- raise errors.AnsibleError('md5 hash is unavailable (Could be due to FIPS mode). Legacy VaultAES format is unavailable.')
- if not HAS_AES:
- raise errors.AnsibleError(CRYPTO_UPGRADE)
-
- def aes_derive_key_and_iv(self, password, salt, key_length, iv_length):
-
- """ Create a key and an initialization vector """
-
- d = d_i = ''
- while len(d) < key_length + iv_length:
- d_i = md5(d_i + password + salt).digest()
- d += d_i
-
- key = d[:key_length]
- iv = d[key_length:key_length+iv_length]
-
- return key, iv
-
- def encrypt(self, data, password, key_length=32):
-
- """ Encrypt the given data with a key and IV derived from password; return hex-encoded ciphertext """
-
-
- # combine sha + data
- this_sha = sha256(data).hexdigest()
- tmp_data = this_sha + "\n" + data
-
- in_file = BytesIO(tmp_data)
- in_file.seek(0)
- out_file = BytesIO()
-
- bs = AES.block_size
-
- # Get a block of random data. EL does not have Crypto.Random.new()
- # so os.urandom is used for cross platform purposes
- salt = os.urandom(bs - len('Salted__'))
-
- key, iv = self.aes_derive_key_and_iv(password, salt, key_length, bs)
- cipher = AES.new(key, AES.MODE_CBC, iv)
- out_file.write('Salted__' + salt)
- finished = False
- while not finished:
- chunk = in_file.read(1024 * bs)
- if len(chunk) == 0 or len(chunk) % bs != 0:
- padding_length = (bs - len(chunk) % bs) or bs
- chunk += padding_length * chr(padding_length)
- finished = True
- out_file.write(cipher.encrypt(chunk))
-
- out_file.seek(0)
- enc_data = out_file.read()
- tmp_data = hexlify(enc_data)
-
- return tmp_data
-
-
- def decrypt(self, data, password, key_length=32):
-
- """ Decrypt hex-encoded ciphertext with a key and IV derived from password; return the plaintext """
-
- # http://stackoverflow.com/a/14989032
-
- data = ''.join(data.split('\n'))
- data = unhexlify(data)
-
- in_file = BytesIO(data)
- in_file.seek(0)
- out_file = BytesIO()
-
- bs = AES.block_size
- salt = in_file.read(bs)[len('Salted__'):]
- key, iv = self.aes_derive_key_and_iv(password, salt, key_length, bs)
- cipher = AES.new(key, AES.MODE_CBC, iv)
- next_chunk = ''
- finished = False
-
- while not finished:
- chunk, next_chunk = next_chunk, cipher.decrypt(in_file.read(1024 * bs))
- if len(next_chunk) == 0:
- padding_length = ord(chunk[-1])
- chunk = chunk[:-padding_length]
- finished = True
- out_file.write(chunk)
-
- # reset the stream pointer to the beginning
- out_file.seek(0)
- new_data = out_file.read()
-
- # split out sha and verify decryption
- split_data = new_data.split("\n")
- this_sha = split_data[0]
- this_data = '\n'.join(split_data[1:])
- test_sha = sha256(this_data).hexdigest()
-
- if this_sha != test_sha:
- raise errors.AnsibleError("Decryption failed")
-
- #return out_file.read()
- return this_data
-
-
-class VaultAES256(object):
-
- """
- Vault implementation using AES-CTR with an HMAC-SHA256 authentication code.
- Keys are derived using PBKDF2
- """
-
- # http://www.daemonology.net/blog/2009-06-11-cryptographic-right-answers.html
-
- def __init__(self):
-
- if not HAS_PBKDF2 or not HAS_COUNTER or not HAS_HASH:
- raise errors.AnsibleError(CRYPTO_UPGRADE)
-
- def gen_key_initctr(self, password, salt):
- # 16 for AES 128, 32 for AES256
- keylength = 32
-
- # match the size used for counter.new to avoid extra work
- ivlength = 16
-
- hash_function = SHA256
-
- # make two keys and one iv
- pbkdf2_prf = lambda p, s: HMAC.new(p, s, hash_function).digest()
-
-
- derivedkey = PBKDF2(password, salt, dkLen=(2 * keylength) + ivlength,
- count=10000, prf=pbkdf2_prf)
-
- key1 = derivedkey[:keylength]
- key2 = derivedkey[keylength:(keylength * 2)]
- iv = derivedkey[(keylength * 2):(keylength * 2) + ivlength]
-
- return key1, key2, hexlify(iv)
-
-
- def encrypt(self, data, password):
-
- salt = os.urandom(32)
- key1, key2, iv = self.gen_key_initctr(password, salt)
-
- # PKCS#7 PAD DATA http://tools.ietf.org/html/rfc5652#section-6.3
- bs = AES.block_size
- padding_length = (bs - len(data) % bs) or bs
- data += padding_length * chr(padding_length)
-
- # COUNTER.new PARAMETERS
- # 1) nbits (integer) - Length of the counter, in bits.
- # 2) initial_value (integer) - initial value of the counter. "iv" from gen_key_initctr
-
- ctr = Counter.new(128, initial_value=long(iv, 16))
-
- # AES.new PARAMETERS
- # 1) AES key, must be either 16, 24, or 32 bytes long -- "key" from gen_key_initctr
- # 2) MODE_CTR, is the recommended mode
- # 3) counter=<CounterObject>
-
- cipher = AES.new(key1, AES.MODE_CTR, counter=ctr)
-
- # ENCRYPT PADDED DATA
- cryptedData = cipher.encrypt(data)
-
- # COMBINE SALT, DIGEST AND DATA
- hmac = HMAC.new(key2, cryptedData, SHA256)
- message = "%s\n%s\n%s" % ( hexlify(salt), hmac.hexdigest(), hexlify(cryptedData) )
- message = hexlify(message)
- return message
-
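The padding applied in encrypt() above and stripped in decrypt() below is plain PKCS#7; a self-contained sketch:

    def pkcs7_pad(data, bs=16):
        # pad to a multiple of bs; a full block of padding is added when
        # the input is already aligned, so unpadding is never ambiguous
        n = bs - len(data) % bs
        return data + chr(n) * n

    def pkcs7_unpad(data):
        # the final byte encodes how many padding bytes to strip
        return data[:-ord(data[-1])]

    assert pkcs7_unpad(pkcs7_pad('vault')) == 'vault'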
- def decrypt(self, data, password):
-
- # SPLIT SALT, DIGEST, AND DATA
- data = ''.join(data.split("\n"))
- data = unhexlify(data)
- salt, cryptedHmac, cryptedData = data.split("\n", 2)
- salt = unhexlify(salt)
- cryptedData = unhexlify(cryptedData)
-
- key1, key2, iv = self.gen_key_initctr(password, salt)
-
- # EXIT EARLY IF DIGEST DOESN'T MATCH
- hmacDecrypt = HMAC.new(key2, cryptedData, SHA256)
- if not self.is_equal(cryptedHmac, hmacDecrypt.hexdigest()):
- return None
-
- # SET THE COUNTER AND THE CIPHER
- ctr = Counter.new(128, initial_value=long(iv, 16))
- cipher = AES.new(key1, AES.MODE_CTR, counter=ctr)
-
- # DECRYPT PADDED DATA
- decryptedData = cipher.decrypt(cryptedData)
-
- # UNPAD DATA
- padding_length = ord(decryptedData[-1])
- decryptedData = decryptedData[:-padding_length]
-
- return decryptedData
-
- def is_equal(self, a, b):
- # http://codahale.com/a-lesson-in-timing-attacks/
- if len(a) != len(b):
- return False
-
- result = 0
- for x, y in zip(a, b):
- result |= ord(x) ^ ord(y)
- return result == 0
-
-
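is_equal() above compares the stored and freshly computed HMAC digests in constant time, so a byte-by-byte early exit cannot leak how much of the digest matched. On interpreters that provide it (Python 2.7.7+/3.3+), the standard library offers the same primitive; a sketch of the drop-in equivalent:

    import hmac

    def is_equal(a, b):
        # constant-time comparison of two equal-purpose hex digests
        return hmac.compare_digest(a, b)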
diff --git a/v1/bin/ansible b/v1/bin/ansible
deleted file mode 100755
index 7fec34ec81..0000000000
--- a/v1/bin/ansible
+++ /dev/null
@@ -1,207 +0,0 @@
-#!/usr/bin/env python
-
-# (c) 2012, Michael DeHaan <michael.dehaan@gmail.com>
-#
-# This file is part of Ansible
-#
-# Ansible is free software: you can redistribute it and/or modify
-# it under the terms of the GNU General Public License as published by
-# the Free Software Foundation, either version 3 of the License, or
-# (at your option) any later version.
-#
-# Ansible is distributed in the hope that it will be useful,
-# but WITHOUT ANY WARRANTY; without even the implied warranty of
-# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
-# GNU General Public License for more details.
-#
-# You should have received a copy of the GNU General Public License
-# along with Ansible. If not, see <http://www.gnu.org/licenses/>.
-
-########################################################
-
-__requires__ = ['ansible']
-try:
- import pkg_resources
-except Exception:
- # Use pkg_resources to find the correct versions of libraries and set
- # sys.path appropriately when there are multiversion installs. But we
- # have code that better expresses the errors in the places where the code
- # is actually used (the deps are optional for many code paths) so we don't
- # want to fail here.
- pass
-
-import os
-import sys
-
-from ansible.runner import Runner
-import ansible.constants as C
-from ansible import utils
-from ansible import errors
-from ansible import callbacks
-from ansible import inventory
-########################################################
-
-class Cli(object):
- ''' code behind bin/ansible '''
-
- # ----------------------------------------------
-
- def __init__(self):
- self.stats = callbacks.AggregateStats()
- self.callbacks = callbacks.CliRunnerCallbacks()
- if C.DEFAULT_LOAD_CALLBACK_PLUGINS:
- callbacks.load_callback_plugins()
-
- # ----------------------------------------------
-
- def parse(self):
- ''' create an options parser for bin/ansible '''
-
- parser = utils.base_parser(
- constants=C,
- runas_opts=True,
- subset_opts=True,
- async_opts=True,
- output_opts=True,
- connect_opts=True,
- check_opts=True,
- diff_opts=False,
- usage='%prog <host-pattern> [options]'
- )
-
- parser.add_option('-a', '--args', dest='module_args',
- help="module arguments", default=C.DEFAULT_MODULE_ARGS)
- parser.add_option('-m', '--module-name', dest='module_name',
- help="module name to execute (default=%s)" % C.DEFAULT_MODULE_NAME,
- default=C.DEFAULT_MODULE_NAME)
-
- options, args = parser.parse_args()
- self.callbacks.options = options
-
- if len(args) == 0 or len(args) > 1:
- parser.print_help()
- sys.exit(1)
-
- # privilege escalation command line arguments need to be mutually exclusive
- utils.check_mutually_exclusive_privilege(options, parser)
-
- if (options.ask_vault_pass and options.vault_password_file):
- parser.error("--ask-vault-pass and --vault-password-file are mutually exclusive")
-
- return (options, args)
-
- # ----------------------------------------------
-
- def run(self, options, args):
- ''' use Runner lib to do SSH things '''
-
- pattern = args[0]
-
- sshpass = becomepass = vault_pass = become_method = None
-
- # Never ask for an SSH password when we run with local connection
- if options.connection == "local":
- options.ask_pass = False
- else:
- options.ask_pass = options.ask_pass or C.DEFAULT_ASK_PASS
-
- options.ask_vault_pass = options.ask_vault_pass or C.DEFAULT_ASK_VAULT_PASS
-
- # become
- utils.normalize_become_options(options)
- prompt_method = utils.choose_pass_prompt(options)
- (sshpass, becomepass, vault_pass) = utils.ask_passwords(ask_pass=options.ask_pass, become_ask_pass=options.become_ask_pass, ask_vault_pass=options.ask_vault_pass, become_method=prompt_method)
-
- # read vault_pass from a file
- if not options.ask_vault_pass and options.vault_password_file:
- vault_pass = utils.read_vault_file(options.vault_password_file)
-
- extra_vars = utils.parse_extra_vars(options.extra_vars, vault_pass)
-
- inventory_manager = inventory.Inventory(options.inventory, vault_password=vault_pass)
- if options.subset:
- inventory_manager.subset(options.subset)
- hosts = inventory_manager.list_hosts(pattern)
-
- if len(hosts) == 0:
- callbacks.display("No hosts matched", stderr=True)
- sys.exit(0)
-
- if options.listhosts:
- for host in hosts:
- callbacks.display(' %s' % host)
- sys.exit(0)
-
- if options.module_name in ['command','shell'] and not options.module_args:
- callbacks.display("No argument passed to %s module" % options.module_name, color='red', stderr=True)
- sys.exit(1)
-
- if options.tree:
- utils.prepare_writeable_dir(options.tree)
-
- runner = Runner(
- module_name=options.module_name,
- module_path=options.module_path,
- module_args=options.module_args,
- remote_user=options.remote_user,
- remote_pass=sshpass,
- inventory=inventory_manager,
- timeout=options.timeout,
- private_key_file=options.private_key_file,
- forks=options.forks,
- pattern=pattern,
- callbacks=self.callbacks,
- transport=options.connection,
- subset=options.subset,
- check=options.check,
- diff=options.check,
- vault_pass=vault_pass,
- become=options.become,
- become_method=options.become_method,
- become_pass=becomepass,
- become_user=options.become_user,
- extra_vars=extra_vars,
- )
-
- if options.seconds:
- callbacks.display("background launch...\n\n", color='cyan')
- results, poller = runner.run_async(options.seconds)
- results = self.poll_while_needed(poller, options)
- else:
- results = runner.run()
-
- return (runner, results)
-
- # ----------------------------------------------
-
- def poll_while_needed(self, poller, options):
- ''' summarize results from Runner '''
-
- # BACKGROUND POLL LOGIC when -B and -P are specified
- if options.seconds and options.poll_interval > 0:
- poller.wait(options.seconds, options.poll_interval)
-
- return poller.results
-
-
-########################################################
-
-if __name__ == '__main__':
- callbacks.display("", log_only=True)
- callbacks.display(" ".join(sys.argv), log_only=True)
- callbacks.display("", log_only=True)
-
- cli = Cli()
- (options, args) = cli.parse()
- try:
- (runner, results) = cli.run(options, args)
- for result in results['contacted'].values():
- if 'failed' in result or result.get('rc', 0) != 0:
- sys.exit(2)
- if results['dark']:
- sys.exit(3)
- except errors.AnsibleError, e:
- # Generic handler for ansible specific errors
- callbacks.display("ERROR: %s" % str(e), stderr=True, color='red')
- sys.exit(1)
-
diff --git a/v1/bin/ansible-doc b/v1/bin/ansible-doc
deleted file mode 100755
index dff7cecce7..0000000000
--- a/v1/bin/ansible-doc
+++ /dev/null
@@ -1,337 +0,0 @@
-#!/usr/bin/env python
-
-# (c) 2012, Jan-Piet Mens <jpmens () gmail.com>
-#
-# This file is part of Ansible
-#
-# Ansible is free software: you can redistribute it and/or modify
-# it under the terms of the GNU General Public License as published by
-# the Free Software Foundation, either version 3 of the License, or
-# (at your option) any later version.
-#
-# Ansible is distributed in the hope that it will be useful,
-# but WITHOUT ANY WARRANTY; without even the implied warranty of
-# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
-# GNU General Public License for more details.
-#
-# You should have received a copy of the GNU General Public License
-# along with Ansible. If not, see <http://www.gnu.org/licenses/>.
-#
-
-import os
-import sys
-import textwrap
-import re
-import optparse
-import datetime
-import subprocess
-import fcntl
-import termios
-import struct
-
-from ansible import utils
-from ansible.utils import module_docs
-import ansible.constants as C
-from ansible.utils import version
-import traceback
-
-MODULEDIR = C.DEFAULT_MODULE_PATH
-
-BLACKLIST_EXTS = ('.pyc', '.swp', '.bak', '~', '.rpm')
-IGNORE_FILES = [ "COPYING", "CONTRIBUTING", "LICENSE", "README", "VERSION"]
-
-_ITALIC = re.compile(r"I\(([^)]+)\)")
-_BOLD = re.compile(r"B\(([^)]+)\)")
-_MODULE = re.compile(r"M\(([^)]+)\)")
-_URL = re.compile(r"U\(([^)]+)\)")
-_CONST = re.compile(r"C\(([^)]+)\)")
-PAGER = 'less'
-LESS_OPTS = 'FRSX' # -F (quit-if-one-screen) -R (allow raw ansi control chars)
- # -S (chop long lines) -X (disable termcap init and de-init)
-
-def pager_print(text):
- ''' just print text '''
- print text
-
-def pager_pipe(text, cmd):
- ''' pipe text through a pager '''
- if 'LESS' not in os.environ:
- os.environ['LESS'] = LESS_OPTS
- try:
- cmd = subprocess.Popen(cmd, shell=True, stdin=subprocess.PIPE, stdout=sys.stdout)
- cmd.communicate(input=text)
- except IOError:
- pass
- except KeyboardInterrupt:
- pass
-
-def pager(text):
- ''' find reasonable way to display text '''
- # this is a much simpler form of what is in pydoc.py
- if not sys.stdout.isatty():
- pager_print(text)
- elif 'PAGER' in os.environ:
- if sys.platform == 'win32':
- pager_print(text)
- else:
- pager_pipe(text, os.environ['PAGER'])
- elif subprocess.call('(less --version) 2> /dev/null', shell=True) == 0:
- pager_pipe(text, 'less')
- else:
- pager_print(text)
-
-def tty_ify(text):
-
- t = _ITALIC.sub("`" + r"\1" + "'", text) # I(word) => `word'
- t = _BOLD.sub("*" + r"\1" + "*", t) # B(word) => *word*
- t = _MODULE.sub("[" + r"\1" + "]", t) # M(word) => [word]
- t = _URL.sub(r"\1", t) # U(word) => word
- t = _CONST.sub("`" + r"\1" + "'", t) # C(word) => `word'
-
- return t
-
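For example, the substitutions above rewrite the module-doc markup like so:

    >>> tty_ify("use M(ping) to test; see U(http://docs.ansible.com) and C(ansible_user)")
    "use [ping] to test; see http://docs.ansible.com and `ansible_user'"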
-def get_man_text(doc):
-
- opt_indent=" "
- text = []
- text.append("> %s\n" % doc['module'].upper())
-
- desc = " ".join(doc['description'])
-
- text.append("%s\n" % textwrap.fill(tty_ify(desc), initial_indent=" ", subsequent_indent=" "))
-
- if 'option_keys' in doc and len(doc['option_keys']) > 0:
- text.append("Options (= is mandatory):\n")
-
- for o in sorted(doc['option_keys']):
- opt = doc['options'][o]
-
- if opt.get('required', False):
- opt_leadin = "="
- else:
- opt_leadin = "-"
-
- text.append("%s %s" % (opt_leadin, o))
-
- desc = " ".join(opt['description'])
-
- if 'choices' in opt:
- choices = ", ".join(str(i) for i in opt['choices'])
- desc = desc + " (Choices: " + choices + ")"
- if 'default' in opt:
- default = str(opt['default'])
- desc = desc + " [Default: " + default + "]"
- text.append("%s\n" % textwrap.fill(tty_ify(desc), initial_indent=opt_indent,
- subsequent_indent=opt_indent))
-
- if 'notes' in doc and len(doc['notes']) > 0:
- notes = " ".join(doc['notes'])
- text.append("Notes:%s\n" % textwrap.fill(tty_ify(notes), initial_indent=" ",
- subsequent_indent=opt_indent))
-
-
- if 'requirements' in doc and doc['requirements'] is not None and len(doc['requirements']) > 0:
- req = ", ".join(doc['requirements'])
- text.append("Requirements:%s\n" % textwrap.fill(tty_ify(req), initial_indent=" ",
- subsequent_indent=opt_indent))
-
- if 'examples' in doc and len(doc['examples']) > 0:
- text.append("Example%s:\n" % ('' if len(doc['examples']) < 2 else 's'))
- for ex in doc['examples']:
- text.append("%s\n" % (ex['code']))
-
- if 'plainexamples' in doc and doc['plainexamples'] is not None:
- text.append("EXAMPLES:")
- text.append(doc['plainexamples'])
- if 'returndocs' in doc and doc['returndocs'] is not None:
- text.append("RETURN VALUES:")
- text.append(doc['returndocs'])
- text.append('')
-
- return "\n".join(text)
-
-
-def get_snippet_text(doc):
-
- text = []
- desc = tty_ify(" ".join(doc['short_description']))
- text.append("- name: %s" % (desc))
- text.append(" action: %s" % (doc['module']))
-
- for o in sorted(doc['options'].keys()):
- opt = doc['options'][o]
- desc = tty_ify(" ".join(opt['description']))
-
- if opt.get('required', False):
- s = o + "="
- else:
- s = o
-
- text.append(" %-20s # %s" % (s, desc))
- text.append('')
-
- return "\n".join(text)
-
-def get_module_list_text(module_list):
- tty_size = 0
- if os.isatty(0):
- tty_size = struct.unpack('HHHH',
- fcntl.ioctl(0, termios.TIOCGWINSZ, struct.pack('HHHH', 0, 0, 0, 0)))[1]
- columns = max(60, tty_size)
- displace = max(len(x) for x in module_list)
- linelimit = columns - displace - 5
- text = []
- deprecated = []
- for module in sorted(set(module_list)):
-
- if module in module_docs.BLACKLIST_MODULES:
- continue
-
- filename = utils.plugins.module_finder.find_plugin(module)
-
- if filename is None:
- continue
- if filename.endswith(".ps1"):
- continue
- if os.path.isdir(filename):
- continue
-
- try:
- doc, plainexamples, returndocs = module_docs.get_docstring(filename)
- desc = tty_ify(doc.get('short_description', '?')).strip()
- if len(desc) > linelimit:
- desc = desc[:linelimit] + '...'
-
- if module.startswith('_'): # Handle deprecated
- deprecated.append("%-*s %-*.*s" % (displace, module[1:], linelimit, len(desc), desc))
- else:
- text.append("%-*s %-*.*s" % (displace, module, linelimit, len(desc), desc))
- except:
- traceback.print_exc()
- sys.stderr.write("ERROR: module %s has a documentation error formatting or is missing documentation\n" % module)
-
- if len(deprecated) > 0:
- text.append("\nDEPRECATED:")
- text.extend(deprecated)
- return "\n".join(text)
-
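The ioctl call at the top of get_module_list_text() asks the kernel for the terminal geometry: TIOCGWINSZ fills a struct winsize of four unsigned shorts (rows, columns, xpixel, ypixel), and index 1 is the width. A sketch of the same probe with a fallback:

    import fcntl
    import struct
    import termios

    def tty_columns(fd=0, default=60):
        # unpack struct winsize: (rows, cols, xpixel, ypixel)
        try:
            winsize = fcntl.ioctl(fd, termios.TIOCGWINSZ, struct.pack('HHHH', 0, 0, 0, 0))
            return struct.unpack('HHHH', winsize)[1]
        except IOError:
            return default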
-def find_modules(path, module_list):
-
- if os.path.isdir(path):
- for module in os.listdir(path):
- if module.startswith('.'):
- continue
- elif os.path.isdir(module):
- find_modules(module, module_list)
- elif any(module.endswith(x) for x in BLACKLIST_EXTS):
- continue
- elif module.startswith('__'):
- continue
- elif module in IGNORE_FILES:
- continue
- elif module.startswith('_'):
- fullpath = '/'.join([path,module])
- if os.path.islink(fullpath): # avoids aliases
- continue
-
- module = os.path.splitext(module)[0] # removes the extension
- module_list.append(module)
-
-def main():
-
- p = optparse.OptionParser(
- version=version("%prog"),
- usage='usage: %prog [options] [module...]',
- description='Show Ansible module documentation',
- )
-
- p.add_option("-M", "--module-path",
- action="store",
- dest="module_path",
- default=MODULEDIR,
- help="Ansible modules/ directory")
- p.add_option("-l", "--list",
- action="store_true",
- default=False,
- dest='list_dir',
- help='List available modules')
- p.add_option("-s", "--snippet",
- action="store_true",
- default=False,
- dest='show_snippet',
- help='Show playbook snippet for specified module(s)')
- p.add_option('-v', action='version', help='Show version number and exit')
-
- (options, args) = p.parse_args()
-
- if options.module_path is not None:
- for i in options.module_path.split(os.pathsep):
- utils.plugins.module_finder.add_directory(i)
-
- if options.list_dir:
- # list modules
- paths = utils.plugins.module_finder._get_paths()
- module_list = []
- for path in paths:
- find_modules(path, module_list)
-
- pager(get_module_list_text(module_list))
- sys.exit()
-
- if len(args) == 0:
- p.print_help()
-
- def print_paths(finder):
- ''' Returns a string suitable for printing the search path '''
-
- # Uses a list to get the order right
- ret = []
- for i in finder._get_paths():
- if i not in ret:
- ret.append(i)
- return os.pathsep.join(ret)
-
- text = ''
- for module in args:
-
- filename = utils.plugins.module_finder.find_plugin(module)
- if filename is None:
- sys.stderr.write("module %s not found in %s\n" % (module, print_paths(utils.plugins.module_finder)))
- continue
-
- if any(filename.endswith(x) for x in BLACKLIST_EXTS):
- continue
-
- try:
- doc, plainexamples, returndocs = module_docs.get_docstring(filename)
- except:
- traceback.print_exc()
- sys.stderr.write("ERROR: module %s has a documentation error formatting or is missing documentation\n" % module)
- continue
-
- if doc is not None:
-
- all_keys = []
- for (k,v) in doc['options'].iteritems():
- all_keys.append(k)
- all_keys = sorted(all_keys)
- doc['option_keys'] = all_keys
-
- doc['filename'] = filename
- doc['docuri'] = doc['module'].replace('_', '-')
- doc['now_date'] = datetime.date.today().strftime('%Y-%m-%d')
- doc['plainexamples'] = plainexamples
- doc['returndocs'] = returndocs
-
- if options.show_snippet:
- text += get_snippet_text(doc)
- else:
- text += get_man_text(doc)
- else:
- # this typically means we couldn't even parse the docstring, not just that the YAML is busted,
- # probably a quoting issue.
- sys.stderr.write("ERROR: module %s missing documentation (or could not parse documentation)\n" % module)
- pager(text)
-
-if __name__ == '__main__':
- main()
diff --git a/v1/bin/ansible-galaxy b/v1/bin/ansible-galaxy
deleted file mode 100755
index a6d625671e..0000000000
--- a/v1/bin/ansible-galaxy
+++ /dev/null
@@ -1,957 +0,0 @@
-#!/usr/bin/env python
-
-########################################################################
-#
-# (C) 2013, James Cammarata <jcammarata@ansible.com>
-#
-# This file is part of Ansible
-#
-# Ansible is free software: you can redistribute it and/or modify
-# it under the terms of the GNU General Public License as published by
-# the Free Software Foundation, either version 3 of the License, or
-# (at your option) any later version.
-#
-# Ansible is distributed in the hope that it will be useful,
-# but WITHOUT ANY WARRANTY; without even the implied warranty of
-# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
-# GNU General Public License for more details.
-#
-# You should have received a copy of the GNU General Public License
-# along with Ansible. If not, see <http://www.gnu.org/licenses/>.
-#
-########################################################################
-
-import datetime
-import json
-import os
-import os.path
-import shutil
-import subprocess
-import sys
-import tarfile
-import tempfile
-import urllib
-import urllib2
-import yaml
-
-from collections import defaultdict
-from distutils.version import LooseVersion
-from jinja2 import Environment
-from optparse import OptionParser
-
-import ansible.constants as C
-import ansible.utils
-from ansible.errors import AnsibleError
-
-default_meta_template = """---
-galaxy_info:
- author: {{ author }}
- description: {{description}}
- company: {{ company }}
- # If the issue tracker for your role is not on github, uncomment the
- # next line and provide a value
- # issue_tracker_url: {{ issue_tracker_url }}
- # Some suggested licenses:
- # - BSD (default)
- # - MIT
- # - GPLv2
- # - GPLv3
- # - Apache
- # - CC-BY
- license: {{ license }}
- min_ansible_version: {{ min_ansible_version }}
- #
- # Below are all platforms currently available. Just uncomment
- # the ones that apply to your role. If you don't see your
- # platform on this list, let us know and we'll get it added!
- #
- #platforms:
- {%- for platform,versions in platforms.iteritems() %}
- #- name: {{ platform }}
- # versions:
- # - all
- {%- for version in versions %}
- # - {{ version }}
- {%- endfor %}
- {%- endfor %}
- #
- # Below are all categories currently available. Just as with
- # the platforms above, uncomment those that apply to your role.
- #
- #categories:
- {%- for category in categories %}
- #- {{ category.name }}
- {%- endfor %}
-dependencies: []
- # List your role dependencies here, one per line.
- # Be sure to remove the '[]' above if you add dependencies
- # to this list.
- {% for dependency in dependencies %}
- #- {{ dependency }}
- {% endfor %}
-
-"""
-
-default_readme_template = """Role Name
-=========
-
-A brief description of the role goes here.
-
-Requirements
-------------
-
-Any prerequisites that may not be covered by Ansible itself or the role should be mentioned here. For instance, if the role uses the EC2 module, it may be a good idea to mention in this section that the boto package is required.
-
-Role Variables
---------------
-
-A description of the settable variables for this role should go here, including any variables that are in defaults/main.yml, vars/main.yml, and any variables that can/should be set via parameters to the role. Any variables that are read from other roles and/or the global scope (i.e. hostvars, group vars, etc.) should be mentioned here as well.
-
-Dependencies
-------------
-
-A list of other roles hosted on Galaxy should go here, plus any details in regards to parameters that may need to be set for other roles, or variables that are used from other roles.
-
-Example Playbook
-----------------
-
-Including an example of how to use your role (for instance, with variables passed in as parameters) is always nice for users too:
-
- - hosts: servers
- roles:
- - { role: username.rolename, x: 42 }
-
-License
--------
-
-BSD
-
-Author Information
-------------------
-
-An optional section for the role authors to include contact information, or a website (HTML is not allowed).
-"""
-
-#-------------------------------------------------------------------------------------
-# Utility functions for parsing actions/options
-#-------------------------------------------------------------------------------------
-
-VALID_ACTIONS = ("init", "info", "install", "list", "remove")
-SKIP_INFO_KEYS = ("platforms","readme_html", "related", "summary_fields", "average_aw_composite", "average_aw_score", "url" )
-
-def get_action(args):
- """
- Get the action the user wants to execute from the
- sys argv list.
- """
- for i in range(0,len(args)):
- arg = args[i]
- if arg in VALID_ACTIONS:
- del args[i]
- return arg
- return None
-
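get_action() both returns the first recognized action and removes it from the argument list in place, e.g.:

    >>> argv = ['ansible-galaxy', 'install', 'username.rolename']
    >>> get_action(argv)
    'install'
    >>> argv
    ['ansible-galaxy', 'username.rolename']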
-def build_option_parser(action):
- """
- Builds an option parser object based on the action
- the user wants to execute.
- """
-
- usage = "usage: %%prog [%s] [--help] [options] ..." % "|".join(VALID_ACTIONS)
- epilog = "\nSee '%s <command> --help' for more information on a specific command.\n\n" % os.path.basename(sys.argv[0])
- OptionParser.format_epilog = lambda self, formatter: self.epilog
- parser = OptionParser(usage=usage, epilog=epilog)
-
- if not action:
- parser.print_help()
- sys.exit()
-
- # options for all actions
- # - none yet
-
- # options specific to actions
- if action == "info":
- parser.set_usage("usage: %prog info [options] role_name[,version]")
- elif action == "init":
- parser.set_usage("usage: %prog init [options] role_name")
- parser.add_option(
- '-p', '--init-path', dest='init_path', default="./",
- help='The path in which the skeleton role will be created. '
- 'The default is the current working directory.')
- parser.add_option(
- '--offline', dest='offline', default=False, action='store_true',
- help="Don't query the galaxy API when creating roles")
- elif action == "install":
- parser.set_usage("usage: %prog install [options] [-r FILE | role_name(s)[,version] | scm+role_repo_url[,version] | tar_file(s)]")
- parser.add_option(
- '-i', '--ignore-errors', dest='ignore_errors', action='store_true', default=False,
- help='Ignore errors and continue with the next specified role.')
- parser.add_option(
- '-n', '--no-deps', dest='no_deps', action='store_true', default=False,
- help='Don\'t download roles listed as dependencies')
- parser.add_option(
- '-r', '--role-file', dest='role_file',
- help='A file containing a list of roles to be imported')
- elif action == "remove":
- parser.set_usage("usage: %prog remove role1 role2 ...")
- elif action == "list":
- parser.set_usage("usage: %prog list [role_name]")
-
- # options that apply to more than one action
- if action != "init":
- parser.add_option(
- '-p', '--roles-path', dest='roles_path', default=C.DEFAULT_ROLES_PATH,
- help='The path to the directory containing your roles. '
- 'The default is the roles_path configured in your '
- 'ansible.cfg file (/etc/ansible/roles if not configured)')
-
- if action in ("info","init","install"):
- parser.add_option(
- '-s', '--server', dest='api_server', default="galaxy.ansible.com",
- help='The API server destination')
-
- if action in ("init","install"):
- parser.add_option(
- '-f', '--force', dest='force', action='store_true', default=False,
- help='Force overwriting an existing role')
- # done, return the parser
- return parser
-
-def get_opt(options, k, defval=""):
- """
- Returns an option from an Optparse values instance.
- """
- try:
- data = getattr(options, k)
- except:
- return defval
- if k == "roles_path":
- if os.pathsep in data:
- data = data.split(os.pathsep)[0]
- return data
-
-def exit_without_ignore(options, rc=1):
- """
- Exits with the specified return code unless the
- option --ignore-errors was specified
- """
-
- if not get_opt(options, "ignore_errors", False):
- print '- you can use --ignore-errors to skip failed roles.'
- sys.exit(rc)
-
-
-#-------------------------------------------------------------------------------------
-# Galaxy API functions
-#-------------------------------------------------------------------------------------
-
-def api_get_config(api_server):
- """
- Fetches the Galaxy API current version to ensure
- the API server is up and reachable.
- """
-
- try:
- url = 'https://%s/api/' % api_server
- data = json.load(urllib2.urlopen(url))
- if not data.get("current_version",None):
- return None
- else:
- return data
- except:
- return None
-
-def api_lookup_role_by_name(api_server, role_name, notify=True):
- """
- Uses the Galaxy API to do a lookup on the role owner/name.
- """
-
- role_name = urllib.quote(role_name)
-
- try:
- parts = role_name.split(".")
- user_name = ".".join(parts[0:-1])
- role_name = parts[-1]
- if notify:
- print "- downloading role '%s', owned by %s" % (role_name, user_name)
- except:
- parser.print_help()
- print "- invalid role name (%s). Specify role as format: username.rolename" % role_name
- sys.exit(1)
-
- url = 'https://%s/api/v1/roles/?owner__username=%s&name=%s' % (api_server,user_name,role_name)
- try:
- data = json.load(urllib2.urlopen(url))
- if len(data["results"]) == 0:
- return None
- else:
- return data["results"][0]
- except:
- return None
-
-def api_fetch_role_related(api_server, related, role_id):
- """
- Uses the Galaxy API to fetch the list of related items for
- the given role. The url comes from the 'related' field of
- the role.
- """
-
- try:
- url = 'https://%s/api/v1/roles/%d/%s/?page_size=50' % (api_server, int(role_id), related)
- data = json.load(urllib2.urlopen(url))
- results = data['results']
- done = (data.get('next', None) == None)
- while not done:
- url = 'https://%s%s' % (api_server, data['next'])
- print url
- data = json.load(urllib2.urlopen(url))
- results += data['results']
- done = (data.get('next', None) == None)
- return results
- except:
- return None
-
-def api_get_list(api_server, what):
- """
- Uses the Galaxy API to fetch the list of items specified.
- """
-
- try:
- url = 'https://%s/api/v1/%s/?page_size' % (api_server, what)
- data = json.load(urllib2.urlopen(url))
- if "results" in data:
- results = data['results']
- else:
- results = data
- done = True
- if "next" in data:
- done = (data.get('next', None) == None)
- while not done:
- url = 'https://%s%s' % (api_server, data['next'])
- print url
- data = json.load(urllib2.urlopen(url))
- results += data['results']
- done = (data.get('next', None) == None)
- return results
- except:
- print "- failed to download the %s list" % what
- return None
-
-#-------------------------------------------------------------------------------------
-# scm repo utility functions
-#-------------------------------------------------------------------------------------
-
-def scm_archive_role(scm, role_url, role_version, role_name):
- if scm not in ['hg', 'git']:
- print "- scm %s is not currently supported" % scm
- return False
- tempdir = tempfile.mkdtemp()
- clone_cmd = [scm, 'clone', role_url, role_name]
- with open('/dev/null', 'w') as devnull:
- try:
- print "- executing: %s" % " ".join(clone_cmd)
- popen = subprocess.Popen(clone_cmd, cwd=tempdir, stdout=devnull, stderr=devnull)
- except:
- raise AnsibleError("error executing: %s" % " ".join(clone_cmd))
- rc = popen.wait()
- if rc != 0:
- print "- command %s failed" % ' '.join(clone_cmd)
- print " in directory %s" % tempdir
- return False
-
- temp_file = tempfile.NamedTemporaryFile(delete=False, suffix='.tar')
- if scm == 'hg':
- archive_cmd = ['hg', 'archive', '--prefix', "%s/" % role_name]
- if role_version:
- archive_cmd.extend(['-r', role_version])
- archive_cmd.append(temp_file.name)
- if scm == 'git':
- archive_cmd = ['git', 'archive', '--prefix=%s/' % role_name, '--output=%s' % temp_file.name]
- if role_version:
- archive_cmd.append(role_version)
- else:
- archive_cmd.append('HEAD')
-
- with open('/dev/null', 'w') as devnull:
- print "- executing: %s" % " ".join(archive_cmd)
- popen = subprocess.Popen(archive_cmd, cwd=os.path.join(tempdir, role_name),
- stderr=devnull, stdout=devnull)
- rc = popen.wait()
- if rc != 0:
- print "- command %s failed" % ' '.join(archive_cmd)
- print " in directory %s" % tempdir
- return False
-
- shutil.rmtree(tempdir, ignore_errors=True)
-
- return temp_file.name
-
-
-#-------------------------------------------------------------------------------------
-# Role utility functions
-#-------------------------------------------------------------------------------------
-
-def get_role_path(role_name, options):
- """
- Returns the role path based on the roles_path option
- and the role name.
- """
- roles_path = get_opt(options,'roles_path')
- roles_path = os.path.join(roles_path, role_name)
- roles_path = os.path.expanduser(roles_path)
- return roles_path
-
-def get_role_metadata(role_name, options):
- """
- Returns the metadata as YAML, if the file 'meta/main.yml'
- exists in the specified role_path
- """
- role_path = os.path.join(get_role_path(role_name, options), 'meta/main.yml')
- try:
- if os.path.isfile(role_path):
- f = open(role_path, 'r')
- meta_data = yaml.safe_load(f)
- f.close()
- return meta_data
- else:
- return None
- except:
- return None
-
-def get_galaxy_install_info(role_name, options):
- """
- Returns the YAML data contained in 'meta/.galaxy_install_info',
- if it exists.
- """
-
- try:
- info_path = os.path.join(get_role_path(role_name, options), 'meta/.galaxy_install_info')
- if os.path.isfile(info_path):
- f = open(info_path, 'r')
- info_data = yaml.safe_load(f)
- f.close()
- return info_data
- else:
- return None
- except:
- return None
-
-def write_galaxy_install_info(role_name, role_version, options):
- """
- Writes a YAML-formatted file to the role's meta/ directory
- (named .galaxy_install_info) which contains some information
- we can use later for commands like 'list' and 'info'.
- """
-
- info = dict(
- version = role_version,
- install_date = datetime.datetime.utcnow().strftime("%c"),
- )
- try:
- info_path = os.path.join(get_role_path(role_name, options), 'meta/.galaxy_install_info')
- f = open(info_path, 'w+')
- info_data = yaml.safe_dump(info, f)
- f.close()
- except:
- return False
- return True
-
-
-def remove_role(role_name, options):
- """
- Removes the specified role from the roles path. There is a
- sanity check to make sure there's a meta/main.yml file at this
- path so the user doesn't blow away random directories
- """
- if get_role_metadata(role_name, options):
- role_path = get_role_path(role_name, options)
- shutil.rmtree(role_path)
- return True
- else:
- return False
-
-def fetch_role(role_name, target, role_data, options):
- """
- Downloads the archived role from github to a temp location, extracts
- it, and then copies the extracted role to the role library path.
- """
-
- # first grab the file and save it to a temp location
- if '://' in role_name:
- archive_url = role_name
- else:
- archive_url = 'https://github.com/%s/%s/archive/%s.tar.gz' % (role_data["github_user"], role_data["github_repo"], target)
- print "- downloading role from %s" % archive_url
-
- try:
- url_file = urllib2.urlopen(archive_url)
- temp_file = tempfile.NamedTemporaryFile(delete=False)
- data = url_file.read()
- while data:
- temp_file.write(data)
- data = url_file.read()
- temp_file.close()
- return temp_file.name
- except Exception, e:
- # TODO: better urllib2 error handling, with more
- # exact error messages
- print "- error: failed to download the file."
- return False
-
-def install_role(role_name, role_version, role_filename, options):
- # the file is a tar, so open it that way and extract it
- # to the specified (or default) roles directory
-
- if not tarfile.is_tarfile(role_filename):
- print "- error: the file downloaded was not a tar.gz"
- return False
- else:
- if role_filename.endswith('.gz'):
- role_tar_file = tarfile.open(role_filename, "r:gz")
- else:
- role_tar_file = tarfile.open(role_filename, "r")
- # verify the role's meta file
- meta_file = None
- members = role_tar_file.getmembers()
- # next find the metadata file
- for member in members:
- if "/meta/main.yml" in member.name:
- meta_file = member
- break
- if not meta_file:
- print "- error: this role does not appear to have a meta/main.yml file."
- return False
- else:
- try:
- meta_file_data = yaml.safe_load(role_tar_file.extractfile(meta_file))
- except:
- print "- error: this role does not appear to have a valid meta/main.yml file."
- return False
-
- # we strip off the top-level directory for all of the files contained within
- # the tar file here, since the default is 'github_repo-target', and change it
- # to the specified role's name
- role_path = os.path.join(get_opt(options, 'roles_path'), role_name)
- role_path = os.path.expanduser(role_path)
- print "- extracting %s to %s" % (role_name, role_path)
- try:
- if os.path.exists(role_path):
- if not os.path.isdir(role_path):
- print "- error: the specified roles path exists and is not a directory."
- return False
- elif not get_opt(options, "force", False):
- print "- error: the specified role %s appears to already exist. Use --force to replace it." % role_name
- return False
- else:
- # using --force, remove the old path
- if not remove_role(role_name, options):
- print "- error: %s doesn't appear to contain a role." % role_path
- print " please remove this directory manually if you really want to put the role here."
- return False
- else:
- os.makedirs(role_path)
-
- # now we do the actual extraction to the role_path
- for member in members:
- # we only extract files, and remove any relative path
- # bits that might be in the file for security purposes
- # and drop the leading directory, as mentioned above
- if member.isreg() or member.issym():
- parts = member.name.split("/")[1:]
- final_parts = []
- for part in parts:
- if part != '..' and '~' not in part and '$' not in part:
- final_parts.append(part)
- member.name = os.path.join(*final_parts)
- role_tar_file.extract(member, role_path)
-
- # write out the install info file for later use
- write_galaxy_install_info(role_name, role_version, options)
- except OSError, e:
- print "- error: you do not have permission to modify files in %s" % role_path
- return False
-
- # return the parsed yaml metadata
- print "- %s was installed successfully" % role_name
- return meta_file_data
-
-#-------------------------------------------------------------------------------------
-# Action functions
-#-------------------------------------------------------------------------------------
-
-def execute_init(args, options, parser):
- """
- Executes the init action, which creates the skeleton framework
- of a role that complies with the galaxy metadata format.
- """
-
- init_path = get_opt(options, 'init_path', './')
- api_server = get_opt(options, "api_server", "galaxy.ansible.com")
- force = get_opt(options, 'force', False)
- offline = get_opt(options, 'offline', False)
-
- if not offline:
- api_config = api_get_config(api_server)
- if not api_config:
- print "- the API server (%s) is not responding, please try again later." % api_server
- sys.exit(1)
-
- try:
- role_name = args.pop(0).strip()
- if role_name == "":
- raise Exception("")
- role_path = os.path.join(init_path, role_name)
- if os.path.exists(role_path):
- if os.path.isfile(role_path):
- print "- the path %s already exists, but is a file - aborting" % role_path
- sys.exit(1)
- elif not force:
- print "- the directory %s already exists." % role_path
- print " you can use --force to re-initialize this directory,\n" + \
- " however it will reset any main.yml files that may have\n" + \
- " been modified there already."
- sys.exit(1)
- except Exception, e:
- parser.print_help()
- print "- no role name specified for init"
- sys.exit(1)
-
- ROLE_DIRS = ('defaults','files','handlers','meta','tasks','templates','vars')
-
- # create the default README.md
- if not os.path.exists(role_path):
- os.makedirs(role_path)
- readme_path = os.path.join(role_path, "README.md")
- f = open(readme_path, "wb")
- f.write(default_readme_template)
- f.close()
-
- for dir in ROLE_DIRS:
- dir_path = os.path.join(init_path, role_name, dir)
- main_yml_path = os.path.join(dir_path, 'main.yml')
- # create the directory if it doesn't exist already
- if not os.path.exists(dir_path):
- os.makedirs(dir_path)
-
- # now create the main.yml file for that directory
- if dir == "meta":
- # create a skeleton meta/main.yml with a valid galaxy_info
- # datastructure in place, plus with all of the available
- # tags/platforms included (but commented out) and the
- # dependencies section
- platforms = []
- if not offline:
- platforms = api_get_list(api_server, "platforms") or []
- categories = []
- if not offline:
- categories = api_get_list(api_server, "categories") or []
-
- # group the list of platforms from the api based
- # on their names, with the release field being
- # appended to a list of versions
- platform_groups = defaultdict(list)
- for platform in platforms:
- platform_groups[platform['name']].append(platform['release'])
- platform_groups[platform['name']].sort()
-
- inject = dict(
- author = 'your name',
- company = 'your company (optional)',
- license = 'license (GPLv2, CC-BY, etc)',
- issue_tracker_url = 'http://example.com/issue/tracker',
- min_ansible_version = '1.2',
- platforms = platform_groups,
- categories = categories,
- )
- rendered_meta = Environment().from_string(default_meta_template).render(inject)
- f = open(main_yml_path, 'w')
- f.write(rendered_meta)
- f.close()
- elif dir not in ('files','templates'):
- # just write a (mostly) empty YAML file for main.yml
- f = open(main_yml_path, 'w')
- f.write('---\n# %s file for %s\n' % (dir,role_name))
- f.close()
- print "- %s was created successfully" % role_name
-
-def execute_info(args, options, parser):
- """
- Executes the info action. This action prints out detailed
- information about an installed role as well as info available
- from the galaxy API.
- """
-
- if len(args) == 0:
- # the user needs to specify a role
- parser.print_help()
- print "- you must specify a user/role name"
- sys.exit(1)
-
- api_server = get_opt(options, "api_server", "galaxy.ansible.com")
- api_config = api_get_config(api_server)
- roles_path = get_opt(options, "roles_path")
-
- for role in args:
-
- role_info = {}
-
- install_info = get_galaxy_install_info(role, options)
- if install_info:
- if 'version' in install_info:
- install_info['installed_version'] = install_info['version']
- del install_info['version']
- role_info.update(install_info)
-
- remote_data = api_lookup_role_by_name(api_server, role, False)
- if remote_data:
- role_info.update(remote_data)
-
- metadata = get_role_metadata(role, options)
- if metadata:
- role_info.update(metadata)
-
- role_spec = ansible.utils.role_spec_parse(role)
- if role_spec:
- role_info.update(role_spec)
-
- if role_info:
- print "- %s:" % (role)
- for k in sorted(role_info.keys()):
-
- if k in SKIP_INFO_KEYS:
- continue
-
- if isinstance(role_info[k], dict):
- print "\t%s: " % (k)
- for key in sorted(role_info[k].keys()):
- if key in SKIP_INFO_KEYS:
- continue
- print "\t\t%s: %s" % (key, role_info[k][key])
- else:
- print "\t%s: %s" % (k, role_info[k])
- else:
- print "- the role %s was not found" % role
-
-def execute_install(args, options, parser):
- """
- Executes the installation action. The args list contains the
- roles to be installed, unless -f was specified. The list of roles
- can be a name (which will be downloaded via the galaxy API and github),
- or it can be a local .tar.gz file.
- """
-
- role_file = get_opt(options, "role_file", None)
-
- if len(args) == 0 and role_file is None:
- # the user needs to specify one of either --role-file
- # or specify a single user/role name
- parser.print_help()
- print "- you must specify a user/role name or a roles file"
- sys.exit()
- elif len(args) == 1 and role_file is not None:
- # using a role file is mutually exclusive of specifying
- # the role name on the command line
- parser.print_help()
- print "- please specify a user/role name, or a roles file, but not both"
- sys.exit(1)
-
- api_server = get_opt(options, "api_server", "galaxy.ansible.com")
- no_deps = get_opt(options, "no_deps", False)
- roles_path = get_opt(options, "roles_path")
-
- roles_done = []
- if role_file:
- f = open(role_file, 'r')
- if role_file.endswith('.yaml') or role_file.endswith('.yml'):
- roles_left = map(ansible.utils.role_yaml_parse, yaml.safe_load(f))
- else:
- # roles listed in a file, one per line
- roles_left = map(ansible.utils.role_spec_parse, f.readlines())
- f.close()
- else:
- # roles were specified directly, so we'll just go out grab them
- # (and their dependencies, unless the user doesn't want us to).
- roles_left = map(ansible.utils.role_spec_parse, args)
-
- while len(roles_left) > 0:
- # query the galaxy API for the role data
- role_data = None
- role = roles_left.pop(0)
- role_src = role.get("src")
- role_scm = role.get("scm")
- role_path = role.get("path")
-
- if role_path:
- options.roles_path = role_path
- else:
- options.roles_path = roles_path
-
- if os.path.isfile(role_src):
- # installing a local tar.gz
- tmp_file = role_src
- else:
- if role_scm:
- # create tar file from scm url
- tmp_file = scm_archive_role(role_scm, role_src, role.get("version"), role.get("name"))
- elif '://' in role_src:
- # just download a URL - version will probably be in the URL
- tmp_file = fetch_role(role_src, None, None, options)
- else:
- # installing from galaxy
- api_config = api_get_config(api_server)
- if not api_config:
- print "- the API server (%s) is not responding, please try again later." % api_server
- sys.exit(1)
-
- role_data = api_lookup_role_by_name(api_server, role_src)
- if not role_data:
- print "- sorry, %s was not found on %s." % (role_src, api_server)
- exit_without_ignore(options)
- continue
-
- role_versions = api_fetch_role_related(api_server, 'versions', role_data['id'])
- if "version" not in role or role['version'] == '':
- # convert the version names to LooseVersion objects
- # and sort them to get the latest version. If there
- # are no versions in the list, we'll grab the head
- # of the master branch
- if len(role_versions) > 0:
- loose_versions = [LooseVersion(a.get('name',None)) for a in role_versions]
- loose_versions.sort()
- role["version"] = str(loose_versions[-1])
- else:
- role["version"] = 'master'
- elif role['version'] != 'master':
- if role_versions and role["version"] not in [a.get('name', None) for a in role_versions]:
- print 'role is %s' % role
- print "- the specified version (%s) was not found in the list of available versions (%s)." % (role['version'], role_versions)
- exit_without_ignore(options)
- continue
-
- # download the role. if --no-deps was specified, we stop here,
- # otherwise we recursively grab roles and all of their deps.
- tmp_file = fetch_role(role_src, role["version"], role_data, options)
- installed = False
- if tmp_file:
- installed = install_role(role.get("name"), role.get("version"), tmp_file, options)
- # we're done with the temp file, clean it up
- if tmp_file != role_src:
- os.unlink(tmp_file)
- # install dependencies, if we want them
- if not no_deps and installed:
- if not role_data:
- role_data = get_role_metadata(role.get("name"), options)
- role_dependencies = role_data['dependencies']
- else:
- role_dependencies = role_data['summary_fields']['dependencies'] # api_fetch_role_related(api_server, 'dependencies', role_data['id'])
- for dep in role_dependencies:
- if isinstance(dep, basestring):
- dep = ansible.utils.role_spec_parse(dep)
- else:
- dep = ansible.utils.role_yaml_parse(dep)
- if not get_role_metadata(dep["name"], options):
- if dep not in roles_left:
- print '- adding dependency: %s' % dep["name"]
- roles_left.append(dep)
- else:
- print '- dependency %s already pending installation.' % dep["name"]
- else:
- print '- dependency %s is already installed, skipping.' % dep["name"]
- if not tmp_file or not installed:
- print "- %s was NOT installed successfully." % role.get("name")
- exit_without_ignore(options)
- sys.exit(0)
-
-def execute_remove(args, options, parser):
- """
- Executes the remove action. The args list contains the list
- of roles to be removed. This list can contain more than one role.
- """
-
- if len(args) == 0:
- parser.print_help()
- print '- you must specify at least one role to remove.'
- sys.exit()
-
- for role in args:
- if get_role_metadata(role, options):
- if remove_role(role, options):
- print '- successfully removed %s' % role
- else:
- print "- failed to remove role: %s" % role
- else:
- print '- %s is not installed, skipping.' % role
- sys.exit(0)
-
-def execute_list(args, options, parser):
- """
- Executes the list action. The args list can contain zero
- or one role. If one is specified, only that role will be
- shown, otherwise all roles in the specified directory will
- be shown.
- """
-
- if len(args) > 1:
- print "- please specify only one role to list, or specify no roles to see a full list"
- sys.exit(1)
-
- if len(args) == 1:
- # show only the requested role, if it exists
- role_name = args[0]
- metadata = get_role_metadata(role_name, options)
- if metadata:
- install_info = get_galaxy_install_info(role_name, options)
- version = None
- if install_info:
- version = install_info.get("version", None)
- if not version:
- version = "(unknown version)"
- # show some more info about single roles here
- print "- %s, %s" % (role_name, version)
- else:
- print "- the role %s was not found" % role_name
- else:
- # show all valid roles in the roles_path directory
- roles_path = get_opt(options, 'roles_path')
- roles_path = os.path.expanduser(roles_path)
- if not os.path.exists(roles_path):
- parser.print_help()
- print "- the path %s does not exist. Please specify a valid path with --roles-path" % roles_path
- sys.exit(1)
- elif not os.path.isdir(roles_path):
- print "- %s exists, but it is not a directory. Please specify a valid path with --roles-path" % roles_path
- parser.print_help()
- sys.exit(1)
- path_files = os.listdir(roles_path)
- for path_file in path_files:
- if get_role_metadata(path_file, options):
- install_info = get_galaxy_install_info(path_file, options)
- version = None
- if install_info:
- version = install_info.get("version", None)
- if not version:
- version = "(unknown version)"
- print "- %s, %s" % (path_file, version)
- sys.exit(0)
-
-#-------------------------------------------------------------------------------------
-# The main entry point
-#-------------------------------------------------------------------------------------
-
-def main():
- # parse the CLI options
- action = get_action(sys.argv)
- parser = build_option_parser(action)
- (options, args) = parser.parse_args()
-
- # execute the desired action
- if 1: #try:
- fn = globals()["execute_%s" % action]
- fn(args, options, parser)
- #except KeyError, e:
- # print "- error: %s is not a valid action. Valid actions are: %s" % (action, ", ".join(VALID_ACTIONS))
- # sys.exit(1)
-
-if __name__ == "__main__":
- main()
diff --git a/v1/bin/ansible-playbook b/v1/bin/ansible-playbook
deleted file mode 100755
index 3d6e1f9f40..0000000000
--- a/v1/bin/ansible-playbook
+++ /dev/null
@@ -1,330 +0,0 @@
-#!/usr/bin/env python
-# (C) 2012, Michael DeHaan, <michael.dehaan@gmail.com>
-
-# This file is part of Ansible
-#
-# Ansible is free software: you can redistribute it and/or modify
-# it under the terms of the GNU General Public License as published by
-# the Free Software Foundation, either version 3 of the License, or
-# (at your option) any later version.
-#
-# Ansible is distributed in the hope that it will be useful,
-# but WITHOUT ANY WARRANTY; without even the implied warranty of
-# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
-# GNU General Public License for more details.
-#
-# You should have received a copy of the GNU General Public License
-# along with Ansible. If not, see <http://www.gnu.org/licenses/>.
-
-#######################################################
-
-__requires__ = ['ansible']
-try:
- import pkg_resources
-except Exception:
- # Use pkg_resources to find the correct versions of libraries and set
- # sys.path appropriately when there are multiversion installs. But we
- # have code that better expresses the errors in the places where the code
- # is actually used (the deps are optional for many code paths) so we don't
- # want to fail here.
- pass
-
-import sys
-import os
-import stat
-
-# Augment PYTHONPATH to find Python modules relative to this file path
-# This is so that we can find the modules when running from a local checkout
-# installed as editable with `pip install -e ...` or `python setup.py develop`
-local_module_path = os.path.abspath(
- os.path.join(os.path.dirname(__file__), '..', 'lib')
-)
-sys.path.append(local_module_path)
-
-import ansible.playbook
-import ansible.constants as C
-import ansible.utils.template
-from ansible import errors
-from ansible import callbacks
-from ansible import utils
-from ansible.color import ANSIBLE_COLOR, stringc
-from ansible.callbacks import display
-
-def colorize(lead, num, color):
- """ Print 'lead' = 'num' in 'color' """
- if num != 0 and ANSIBLE_COLOR and color is not None:
- return "%s%s%-15s" % (stringc(lead, color), stringc("=", color), stringc(str(num), color))
- else:
- return "%s=%-4s" % (lead, str(num))
-
-def hostcolor(host, stats, color=True):
- if ANSIBLE_COLOR and color:
- if stats['failures'] != 0 or stats['unreachable'] != 0:
- return "%-37s" % stringc(host, 'red')
- elif stats['changed'] != 0:
- return "%-37s" % stringc(host, 'yellow')
- else:
- return "%-37s" % stringc(host, 'green')
- return "%-26s" % host
-
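With color disabled (no tty, or ANSIBLE_COLOR unset) colorize() falls back to the plain fixed-width form, and zero counts are never colorized:

    >>> colorize('ok', 3, None)
    'ok=3   '
    >>> colorize('failed', 0, 'red')
    'failed=0   '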
-
-def main(args):
- ''' run ansible-playbook operations '''
-
- # create parser for CLI options
- parser = utils.base_parser(
- constants=C,
- usage = "%prog playbook.yml",
- connect_opts=True,
- runas_opts=True,
- subset_opts=True,
- check_opts=True,
- diff_opts=True
- )
- #parser.add_option('--vault-password', dest="vault_password",
- # help="password for vault encrypted files")
- parser.add_option('-t', '--tags', dest='tags', default='all',
- help="only run plays and tasks tagged with these values")
- parser.add_option('--skip-tags', dest='skip_tags',
- help="only run plays and tasks whose tags do not match these values")
- parser.add_option('--syntax-check', dest='syntax', action='store_true',
- help="perform a syntax check on the playbook, but do not execute it")
- parser.add_option('--list-tasks', dest='listtasks', action='store_true',
- help="list all tasks that would be executed")
- parser.add_option('--list-tags', dest='listtags', action='store_true',
- help="list all available tags")
- parser.add_option('--step', dest='step', action='store_true',
- help="one-step-at-a-time: confirm each task before running")
- parser.add_option('--start-at-task', dest='start_at',
- help="start the playbook at the task matching this name")
- parser.add_option('--force-handlers', dest='force_handlers',
- default=C.DEFAULT_FORCE_HANDLERS, action='store_true',
- help="run handlers even if a task fails")
- parser.add_option('--flush-cache', dest='flush_cache', action='store_true',
- help="clear the fact cache")
-
- options, args = parser.parse_args(args)
-
- if len(args) == 0:
- parser.print_help(file=sys.stderr)
- return 1
-
- # privilege escalation command line arguments need to be mutually exclusive
- utils.check_mutually_exclusive_privilege(options, parser)
-
- if (options.ask_vault_pass and options.vault_password_file):
- parser.error("--ask-vault-pass and --vault-password-file are mutually exclusive")
-
- sshpass = None
- becomepass = None
- vault_pass = None
-
- options.ask_vault_pass = options.ask_vault_pass or C.DEFAULT_ASK_VAULT_PASS
-
- if options.listhosts or options.syntax or options.listtasks or options.listtags:
- (_, _, vault_pass) = utils.ask_passwords(ask_vault_pass=options.ask_vault_pass)
- else:
- options.ask_pass = options.ask_pass or C.DEFAULT_ASK_PASS
- # Never ask for an SSH password when we run with local connection
- if options.connection == "local":
- options.ask_pass = False
-
- # set pe options
- utils.normalize_become_options(options)
- prompt_method = utils.choose_pass_prompt(options)
- (sshpass, becomepass, vault_pass) = utils.ask_passwords(ask_pass=options.ask_pass,
- become_ask_pass=options.become_ask_pass,
- ask_vault_pass=options.ask_vault_pass,
- become_method=prompt_method)
-
- # read vault_pass from a file
- if not options.ask_vault_pass and options.vault_password_file:
- vault_pass = utils.read_vault_file(options.vault_password_file)
-
- extra_vars = utils.parse_extra_vars(options.extra_vars, vault_pass)
-
- only_tags = options.tags.split(",")
- skip_tags = options.skip_tags
- if options.skip_tags is not None:
- skip_tags = options.skip_tags.split(",")
-
- for playbook in args:
- if not os.path.exists(playbook):
- raise errors.AnsibleError("the playbook: %s could not be found" % playbook)
- if not (os.path.isfile(playbook) or stat.S_ISFIFO(os.stat(playbook).st_mode)):
- raise errors.AnsibleError("the playbook: %s does not appear to be a file" % playbook)
-
- inventory = ansible.inventory.Inventory(options.inventory, vault_password=vault_pass)
-
- # Note: slightly wrong, this is written so that implicit localhost
- # (which is not returned in list_hosts()) is taken into account for
- # warning if inventory is empty. But it can't be taken into account for
- # checking if limit doesn't match any hosts. Instead we don't worry about
- # limit if only implicit localhost was in inventory to start with.
- #
- # Fix this in v2
- no_hosts = False
- if len(inventory.list_hosts()) == 0:
- # Empty inventory
- utils.warning("provided hosts list is empty, only localhost is available")
- no_hosts = True
- inventory.subset(options.subset)
- if len(inventory.list_hosts()) == 0 and no_hosts is False:
- # Invalid limit
- raise errors.AnsibleError("Specified --limit does not match any hosts")
-
- # run all playbooks specified on the command line
- for playbook in args:
-
- stats = callbacks.AggregateStats()
- playbook_cb = callbacks.PlaybookCallbacks(verbose=utils.VERBOSITY)
- if options.step:
- playbook_cb.step = options.step
- if options.start_at:
- playbook_cb.start_at = options.start_at
- runner_cb = callbacks.PlaybookRunnerCallbacks(stats, verbose=utils.VERBOSITY)
-
- pb = ansible.playbook.PlayBook(
- playbook=playbook,
- module_path=options.module_path,
- inventory=inventory,
- forks=options.forks,
- remote_user=options.remote_user,
- remote_pass=sshpass,
- callbacks=playbook_cb,
- runner_callbacks=runner_cb,
- stats=stats,
- timeout=options.timeout,
- transport=options.connection,
- become=options.become,
- become_method=options.become_method,
- become_user=options.become_user,
- become_pass=becomepass,
- extra_vars=extra_vars,
- private_key_file=options.private_key_file,
- only_tags=only_tags,
- skip_tags=skip_tags,
- check=options.check,
- diff=options.diff,
- vault_password=vault_pass,
- force_handlers=options.force_handlers,
- )
-
- if options.flush_cache:
- display(callbacks.banner("FLUSHING FACT CACHE"))
- pb.SETUP_CACHE.flush()
-
- if options.listhosts or options.listtasks or options.syntax or options.listtags:
- print ''
- print 'playbook: %s' % playbook
- print ''
- playnum = 0
- for (play_ds, play_basedir) in zip(pb.playbook, pb.play_basedirs):
- playnum += 1
- play = ansible.playbook.Play(pb, play_ds, play_basedir,
- vault_password=pb.vault_password)
- label = play.name
- hosts = pb.inventory.list_hosts(play.hosts)
-
- if options.listhosts:
- print ' play #%d (%s): host count=%d' % (playnum, label, len(hosts))
- for host in hosts:
- print ' %s' % host
-
- if options.listtags or options.listtasks:
- print ' play #%d (%s):\tTAGS: [%s]' % (playnum, label,','.join(sorted(set(play.tags))))
-
- if options.listtags:
- tags = []
- for task in pb.tasks_to_run_in_play(play):
- tags.extend(task.tags)
- print ' TASK TAGS: [%s]' % (', '.join(sorted(set(tags).difference(['untagged']))))
-
- if options.listtasks:
-
- for task in pb.tasks_to_run_in_play(play):
- if getattr(task, 'name', None) is not None:
- # meta tasks have no names
- print ' %s\tTAGS: [%s]' % (task.name, ', '.join(sorted(set(task.tags).difference(['untagged']))))
-
- if options.listhosts or options.listtasks or options.listtags:
- print ''
- continue
-
- if options.syntax:
- # if we've not exited by now then we are fine.
- print 'Playbook Syntax is fine'
- return 0
-
- failed_hosts = []
- unreachable_hosts = []
-
- try:
-
- pb.run()
-
- hosts = sorted(pb.stats.processed.keys())
- display(callbacks.banner("PLAY RECAP"))
- playbook_cb.on_stats(pb.stats)
-
- for h in hosts:
- t = pb.stats.summarize(h)
- if t['failures'] > 0:
- failed_hosts.append(h)
- if t['unreachable'] > 0:
- unreachable_hosts.append(h)
-
- retries = failed_hosts + unreachable_hosts
-
- if C.RETRY_FILES_ENABLED and len(retries) > 0:
- filename = pb.generate_retry_inventory(retries)
- if filename:
- display(" to retry, use: --limit @%s\n" % filename)
-
- for h in hosts:
- t = pb.stats.summarize(h)
-
- display("%s : %s %s %s %s" % (
- hostcolor(h, t),
- colorize('ok', t['ok'], 'green'),
- colorize('changed', t['changed'], 'yellow'),
- colorize('unreachable', t['unreachable'], 'red'),
- colorize('failed', t['failures'], 'red')),
- screen_only=True
- )
-
- display("%s : %s %s %s %s" % (
- hostcolor(h, t, False),
- colorize('ok', t['ok'], None),
- colorize('changed', t['changed'], None),
- colorize('unreachable', t['unreachable'], None),
- colorize('failed', t['failures'], None)),
- log_only=True
- )
-
-
- print ""
- if len(failed_hosts) > 0:
- return 2
- if len(unreachable_hosts) > 0:
- return 3
-
- except errors.AnsibleError, e:
- display("ERROR: %s" % e, color='red')
- return 1
-
- return 0
-
-
-if __name__ == "__main__":
- display(" ", log_only=True)
- display(" ".join(sys.argv), log_only=True)
- display(" ", log_only=True)
- try:
- sys.exit(main(sys.argv[1:]))
- except errors.AnsibleError, e:
- display("ERROR: %s" % e, color='red', stderr=True)
- sys.exit(1)
- except KeyboardInterrupt, ke:
- display("ERROR: interrupted", color='red', stderr=True)
- sys.exit(1)
diff --git a/v1/bin/ansible-pull b/v1/bin/ansible-pull
deleted file mode 100755
index d4887631e0..0000000000
--- a/v1/bin/ansible-pull
+++ /dev/null
@@ -1,257 +0,0 @@
-#!/usr/bin/env python
-
-# (c) 2012, Stephen Fromm <sfromm@gmail.com>
-#
-# Ansible is free software: you can redistribute it and/or modify
-# it under the terms of the GNU General Public License as published by
-# the Free Software Foundation, either version 3 of the License, or
-# (at your option) any later version.
-#
-# Ansible is distributed in the hope that it will be useful,
-# but WITHOUT ANY WARRANTY; without even the implied warranty of
-# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
-# GNU General Public License for more details.
-#
-# You should have received a copy of the GNU General Public License
-# along with Ansible. If not, see <http://www.gnu.org/licenses/>.
-#
-# ansible-pull is a script that runs ansible in local mode
-# after checking out a playbooks directory from source repo. There is an
-# example playbook to bootstrap this script in the examples/ dir which
-# installs ansible and sets it up to run on cron.
-
-# usage:
-# ansible-pull -d /var/lib/ansible \
-# -U http://example.net/content.git [-C production] \
-# [path/playbook.yml]
-#
-# the -d and -U arguments are required; the -C argument is optional.
-#
-# ansible-pull accepts an optional argument to specify a playbook
-# location underneath the workdir and then searches the source repo
-# for playbooks in the following order, stopping at the first match:
-#
-# 1. $workdir/path/playbook.yml, if specified
-# 2. $workdir/$fqdn.yml
-# 3. $workdir/$hostname.yml
-# 4. $workdir/local.yml
-#
-# the source repo must contain at least one of these playbooks.
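-#
-# for example (hypothetical host and paths), on a host with fqdn
-# web1.example.com and workdir /var/lib/ansible, the search order is:
-#
-# 1. /var/lib/ansible/site.yml (only if 'site.yml' was given as argument)
-# 2. /var/lib/ansible/web1.example.com.yml
-# 3. /var/lib/ansible/web1.yml
-# 4. /var/lib/ansible/local.yml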
-
-import os
-import shutil
-import sys
-import datetime
-import socket
-import random
-import time
-from ansible import utils
-from ansible.utils import cmd_functions
-from ansible import errors
-from ansible import inventory
-
-DEFAULT_REPO_TYPE = 'git'
-DEFAULT_PLAYBOOK = 'local.yml'
-PLAYBOOK_ERRORS = {1: 'File does not exist',
- 2: 'File is not readable'}
-
-VERBOSITY=0
-
-def increment_debug(option, opt, value, parser):
- global VERBOSITY
- VERBOSITY += 1
-
-def try_playbook(path):
- if not os.path.exists(path):
- return 1
- if not os.access(path, os.R_OK):
- return 2
- return 0
-
-
-def select_playbook(path, args):
- playbook = None
- if len(args) > 0 and args[0] is not None:
- playbook = "%s/%s" % (path, args[0])
- rc = try_playbook(playbook)
- if rc != 0:
- print >>sys.stderr, "%s: %s" % (playbook, PLAYBOOK_ERRORS[rc])
- return None
- return playbook
- else:
- fqdn = socket.getfqdn()
- hostpb = "%s/%s.yml" % (path, fqdn)
- shorthostpb = "%s/%s.yml" % (path, fqdn.split('.')[0])
- localpb = "%s/%s" % (path, DEFAULT_PLAYBOOK)
- errors = []
- for pb in [hostpb, shorthostpb, localpb]:
- rc = try_playbook(pb)
- if rc == 0:
- playbook = pb
- break
- else:
- errors.append("%s: %s" % (pb, PLAYBOOK_ERRORS[rc]))
- if playbook is None:
- print >>sys.stderr, "\n".join(errors)
- return playbook
-
-
-def main(args):
- """ Set up and run a local playbook """
- usage = "%prog [options] [playbook.yml]"
- parser = utils.SortedOptParser(usage=usage)
- parser.add_option('--purge', default=False, action='store_true',
- help='purge checkout after playbook run')
- parser.add_option('-o', '--only-if-changed', dest='ifchanged', default=False, action='store_true',
- help='only run the playbook if the repository has been updated')
- parser.add_option('-s', '--sleep', dest='sleep', default=None,
- help='sleep for a random interval (between 0 and n seconds) before starting. This is a useful way to disperse git requests')
- parser.add_option('-f', '--force', dest='force', default=False,
- action='store_true',
- help='run the playbook even if the repository could '
- 'not be updated')
- parser.add_option('-d', '--directory', dest='dest', default=None,
- help='directory to checkout repository to')
- #parser.add_option('-l', '--live', default=True, action='store_live',
- # help='Print the ansible-playbook output while running')
- parser.add_option('-U', '--url', dest='url', default=None,
- help='URL of the playbook repository')
- parser.add_option('-C', '--checkout', dest='checkout',
- help='branch/tag/commit to checkout. '
- 'Defaults to behavior of repository module.')
- parser.add_option('-i', '--inventory-file', dest='inventory',
- help="location of the inventory host file")
- parser.add_option('-e', '--extra-vars', dest="extra_vars", action="append",
- help="set additional variables as key=value or YAML/JSON", default=[])
- parser.add_option('-v', '--verbose', default=False, action="callback",
- callback=increment_debug,
- help='Pass -vvvv to ansible-playbook')
- parser.add_option('-m', '--module-name', dest='module_name',
- default=DEFAULT_REPO_TYPE,
- help='Module name used to check out repository. '
- 'Default is %s.' % DEFAULT_REPO_TYPE)
- parser.add_option('--vault-password-file', dest='vault_password_file',
- help="vault password file")
- parser.add_option('-K', '--ask-sudo-pass', default=False, dest='ask_sudo_pass', action='store_true',
- help='ask for sudo password')
- parser.add_option('-t', '--tags', dest='tags', default=False,
- help='only run plays and tasks tagged with these values')
- parser.add_option('--accept-host-key', default=False, dest='accept_host_key', action='store_true',
- help='adds the hostkey for the repo URL if not already added')
- parser.add_option('--key-file', dest='key_file',
- help="Pass '-i <key_file>' to the SSH arguments used by git.")
- options, args = parser.parse_args(args)
-
- hostname = socket.getfqdn()
- if not options.dest:
- # use a hostname-dependent directory, in case $HOME is on NFS
- options.dest = utils.prepare_writeable_dir('~/.ansible/pull/%s' % hostname)
-
- options.dest = os.path.abspath(options.dest)
-
- if not options.url:
- parser.error("URL for repository not specified, use -h for help")
- return 1
-
- now = datetime.datetime.now()
- print now.strftime("Starting ansible-pull at %F %T")
-
- # Attempt to use the inventory passed in as an argument
- # It might not yet have been downloaded, so use localhost if not
- if not options.inventory or not os.path.exists(options.inventory):
- inv_opts = 'localhost,'
- else:
- inv_opts = options.inventory
- limit_opts = 'localhost:%s:127.0.0.1' % hostname
- repo_opts = "name=%s dest=%s" % (options.url, options.dest)
-
- if VERBOSITY == 0:
- base_opts = '-c local --limit "%s"' % limit_opts
- elif VERBOSITY > 0:
- debug_level = ''.join([ "v" for x in range(0, VERBOSITY) ])
- base_opts = '-%s -c local --limit "%s"' % (debug_level, limit_opts)
-
- if options.checkout:
- repo_opts += ' version=%s' % options.checkout
-
- # Only git module is supported
- if options.module_name == DEFAULT_REPO_TYPE:
- if options.accept_host_key:
- repo_opts += ' accept_hostkey=yes'
-
- if options.key_file:
- repo_opts += ' key_file=%s' % options.key_file
-
- path = utils.plugins.module_finder.find_plugin(options.module_name)
- if path is None:
- sys.stderr.write("module '%s' not found.\n" % options.module_name)
- return 1
-
- bin_path = os.path.dirname(os.path.abspath(__file__))
- cmd = '%s/ansible localhost -i "%s" %s -m %s -a "%s"' % (
- bin_path, inv_opts, base_opts, options.module_name, repo_opts
- )
-
- for ev in options.extra_vars:
- cmd += ' -e "%s"' % ev
-
- if options.sleep:
- try:
- secs = random.randint(0, int(options.sleep))
- except ValueError:
- parser.error("%s is not a number." % options.sleep)
- return 1
-
- print >>sys.stderr, "Sleeping for %d seconds..." % secs
- time.sleep(secs)
-
-
- # RUN THE CHECKOUT COMMAND
- rc, out, err = cmd_functions.run_cmd(cmd, live=True)
-
- if rc != 0:
- if options.force:
- print >>sys.stderr, "Unable to update repository. Continuing with (forced) run of playbook."
- else:
- return rc
- elif options.ifchanged and '"changed": true' not in out:
- print "Repository has not changed, quitting."
- return 0
-
- playbook = select_playbook(options.dest, args)
-
- if playbook is None:
- print >>sys.stderr, "Could not find a playbook to run."
- return 1
-
- cmd = '%s/ansible-playbook %s %s' % (bin_path, base_opts, playbook)
- if options.vault_password_file:
- cmd += " --vault-password-file=%s" % options.vault_password_file
- if options.inventory:
- cmd += ' -i "%s"' % options.inventory
- for ev in options.extra_vars:
- cmd += ' -e "%s"' % ev
- if options.ask_sudo_pass:
- cmd += ' -K'
- if options.tags:
- cmd += ' -t "%s"' % options.tags
- os.chdir(options.dest)
-
- # RUN THE PLAYBOOK COMMAND
- rc, out, err = cmd_functions.run_cmd(cmd, live=True)
-
- if options.purge:
- os.chdir('/')
- try:
- shutil.rmtree(options.dest)
- except Exception, e:
- print >>sys.stderr, "Failed to remove %s: %s" % (options.dest, str(e))
-
- return rc
-
-if __name__ == '__main__':
- try:
- sys.exit(main(sys.argv[1:]))
- except KeyboardInterrupt, e:
- print >>sys.stderr, "Exit on user request.\n"
- sys.exit(1)
diff --git a/v1/bin/ansible-vault b/v1/bin/ansible-vault
deleted file mode 100755
index 22cfc0e148..0000000000
--- a/v1/bin/ansible-vault
+++ /dev/null
@@ -1,241 +0,0 @@
-#!/usr/bin/env python
-
-# (c) 2014, James Tanner <tanner.jc@gmail.com>
-#
-# Ansible is free software: you can redistribute it and/or modify
-# it under the terms of the GNU General Public License as published by
-# the Free Software Foundation, either version 3 of the License, or
-# (at your option) any later version.
-#
-# Ansible is distributed in the hope that it will be useful,
-# but WITHOUT ANY WARRANTY; without even the implied warranty of
-# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
-# GNU General Public License for more details.
-#
-# You should have received a copy of the GNU General Public License
-# along with Ansible. If not, see <http://www.gnu.org/licenses/>.
-#
-# ansible-vault is a script that encrypts/decrypts YAML files. See
-# http://docs.ansible.com/playbooks_vault.html for more details.
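-#
-# example usage (hypothetical file names):
-# ansible-vault encrypt group_vars/all/secrets.yml
-# ansible-vault view --vault-password-file ~/.vault_pass.txt group_vars/all/secrets.yml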
-
-__requires__ = ['ansible']
-try:
- import pkg_resources
-except Exception:
- # Use pkg_resources to find the correct versions of libraries and set
- # sys.path appropriately when there are multiversion installs. But we
- # have code that better expresses the errors in the places where the code
- # is actually used (the deps are optional for many code paths) so we don't
- # want to fail here.
- pass
-
-import os
-import sys
-import traceback
-
-import ansible.constants as C
-
-from ansible import utils
-from ansible import errors
-from ansible.utils.vault import VaultEditor
-
-from optparse import OptionParser
-
-#-------------------------------------------------------------------------------------
-# Utility functions for parsing actions/options
-#-------------------------------------------------------------------------------------
-
-VALID_ACTIONS = ("create", "decrypt", "edit", "encrypt", "rekey", "view")
-
-def build_option_parser(action):
- """
- Builds an option parser object based on the action
- the user wants to execute.
- """
-
- usage = "usage: %%prog [%s] [--help] [options] file_name" % "|".join(VALID_ACTIONS)
- epilog = "\nSee '%s <command> --help' for more information on a specific command.\n\n" % os.path.basename(sys.argv[0])
- OptionParser.format_epilog = lambda self, formatter: self.epilog
- parser = OptionParser(usage=usage, epilog=epilog)
-
- if not action:
- parser.print_help()
- sys.exit()
-
- # options for all actions
- #parser.add_option('-c', '--cipher', dest='cipher', default="AES256", help="cipher to use")
- parser.add_option('--debug', dest='debug', action="store_true", help="debug")
- parser.add_option('--vault-password-file', dest='password_file',
- help="vault password file", default=C.DEFAULT_VAULT_PASSWORD_FILE)
-
- # options specific to actions
- if action == "create":
- parser.set_usage("usage: %prog create [options] file_name")
- elif action == "decrypt":
- parser.set_usage("usage: %prog decrypt [options] file_name")
- elif action == "edit":
- parser.set_usage("usage: %prog edit [options] file_name")
- elif action == "view":
- parser.set_usage("usage: %prog view [options] file_name")
- elif action == "encrypt":
- parser.set_usage("usage: %prog encrypt [options] file_name")
- elif action == "rekey":
- parser.set_usage("usage: %prog rekey [options] file_name")
-
- # done, return the parser
- return parser
-
-def get_action(args):
- """
- Get the action the user wants to execute from the
- sys argv list.
- """
- for i in range(0,len(args)):
- arg = args[i]
- if arg in VALID_ACTIONS:
- del args[i]
- return arg
- return None
-
-def get_opt(options, k, defval=""):
- """
- Returns an option from an Optparse values instance.
- """
- try:
- data = getattr(options, k)
- except:
- return defval
- if k == "roles_path":
- if os.pathsep in data:
- data = data.split(os.pathsep)[0]
- return data
-
-#-------------------------------------------------------------------------------------
-# Command functions
-#-------------------------------------------------------------------------------------
-
-def execute_create(args, options, parser):
- if len(args) > 1:
- raise errors.AnsibleError("'create' does not accept more than one filename")
-
- if not options.password_file:
- password, new_password = utils.ask_vault_passwords(ask_vault_pass=True, confirm_vault=True)
- else:
- password = utils.read_vault_file(options.password_file)
-
- cipher = 'AES256'
- if hasattr(options, 'cipher'):
- cipher = options.cipher
-
- this_editor = VaultEditor(cipher, password, args[0])
- this_editor.create_file()
-
-def execute_decrypt(args, options, parser):
-
- if not options.password_file:
- password, new_password = utils.ask_vault_passwords(ask_vault_pass=True)
- else:
- password = utils.read_vault_file(options.password_file)
-
- cipher = 'AES256'
- if hasattr(options, 'cipher'):
- cipher = options.cipher
-
- for f in args:
- this_editor = VaultEditor(cipher, password, f)
- this_editor.decrypt_file()
-
- print "Decryption successful"
-
-def execute_edit(args, options, parser):
-
- if len(args) > 1:
- raise errors.AnsibleError("edit does not accept more than one filename")
-
- if not options.password_file:
- password, new_password = utils.ask_vault_passwords(ask_vault_pass=True)
- else:
- password = utils.read_vault_file(options.password_file)
-
- cipher = None
-
- for f in args:
- this_editor = VaultEditor(cipher, password, f)
- this_editor.edit_file()
-
-def execute_view(args, options, parser):
-
- if len(args) > 1:
- raise errors.AnsibleError("view does not accept more than one filename")
-
- if not options.password_file:
- password, new_password = utils.ask_vault_passwords(ask_vault_pass=True)
- else:
- password = utils.read_vault_file(options.password_file)
-
- cipher = None
-
- for f in args:
- this_editor = VaultEditor(cipher, password, f)
- this_editor.view_file()
-
-def execute_encrypt(args, options, parser):
-
- if not options.password_file:
- password, new_password = utils.ask_vault_passwords(ask_vault_pass=True, confirm_vault=True)
- else:
- password = utils.read_vault_file(options.password_file)
-
- cipher = 'AES256'
- if hasattr(options, 'cipher'):
- cipher = options.cipher
-
- for f in args:
- this_editor = VaultEditor(cipher, password, f)
- this_editor.encrypt_file()
-
- print "Encryption successful"
-
-def execute_rekey(args, options, parser):
-
- if not options.password_file:
- password, __ = utils.ask_vault_passwords(ask_vault_pass=True)
- else:
- password = utils.read_vault_file(options.password_file)
-
- __, new_password = utils.ask_vault_passwords(ask_vault_pass=False, ask_new_vault_pass=True, confirm_new=True)
-
- cipher = None
- for f in args:
- this_editor = VaultEditor(cipher, password, f)
- this_editor.rekey_file(new_password)
-
- print "Rekey successful"
-
-#-------------------------------------------------------------------------------------
-# MAIN
-#-------------------------------------------------------------------------------------
-
-def main():
-
- action = get_action(sys.argv)
- parser = build_option_parser(action)
- (options, args) = parser.parse_args()
-
- if not len(args):
- raise errors.AnsibleError(
- "The '%s' command requires a filename as the first argument" % action
- )
-
- # execute the desired action
- try:
- fn = globals()["execute_%s" % action]
- fn(args, options, parser)
- except Exception, err:
- if options.debug:
- print traceback.format_exc()
- print "ERROR:",err
- sys.exit(1)
-
-if __name__ == "__main__":
- main()
diff --git a/v1/hacking/README.md b/v1/hacking/README.md
deleted file mode 100644
index ae8db7e3a9..0000000000
--- a/v1/hacking/README.md
+++ /dev/null
@@ -1,48 +0,0 @@
-'Hacking' directory tools
-=========================
-
-Env-setup
----------
-
-The 'env-setup' script modifies your environment to allow you to run
-ansible from a git checkout using python 2.6+. (You may not use
-python 3 at this time).
-
-First, set up your environment to run from the checkout:
-
- $ source ./hacking/env-setup
-
-You will need some basic prerequisites installed. If you do not already have them
-and do not wish to install them from your operating system package manager, you
-can install them from pip:
-
- $ easy_install pip # if pip is not already available
- $ pip install pyyaml jinja2 nose passlib pycrypto
-
-From there, follow the Ansible instructions on docs.ansible.com as normal.
-
-Test-module
------------
-
-'test-module' is a simple program that allows module developers (or testers) to run
-a module outside of the ansible program, locally, on the current machine.
-
-Example:
-
- $ ./hacking/test-module -m lib/ansible/modules/core/commands/shell -a "echo hi"
-
-This is a good way to insert a breakpoint into a module, for instance.
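-
-To run a module under a console debugger instead, pass the debugger path
-with -D (the module path below is hypothetical):
-
- $ ./hacking/test-module -m lib/ansible/modules/core/files/file -a "path=/tmp/foo state=touch" -D /usr/bin/pdb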
-
-Module-formatter
-----------------
-
-The module formatter is a script used to generate manpages and online
-module documentation. This is used by the system makefiles and rarely
-needs to be run directly.
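-
-A typical invocation looks like the following (flags as defined by the
-script's option parser; the output path is illustrative):
-
- $ ./hacking/module_formatter.py -M lib/ansible/modules -o /tmp/module_docs -A 1.9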
-
-Authors
--------
-'authors' is a simple script that generates a list of everyone who has
-contributed code to the ansible repository.
-
-
diff --git a/v1/hacking/authors.sh b/v1/hacking/authors.sh
deleted file mode 100755
index 7c97840b2f..0000000000
--- a/v1/hacking/authors.sh
+++ /dev/null
@@ -1,14 +0,0 @@
-#!/bin/sh
-# script from http://stackoverflow.com/questions/12133583
-set -e
-
-# Get a list of authors ordered by number of commits
-# and remove the commit count column
-AUTHORS=$(git --no-pager shortlog -nse | cut -f 2- | sort -f)
-if [ -z "$AUTHORS" ] ; then
- echo "Authors list was empty"
- exit 1
-fi
-
-# Display the authors list and write it to the file
-echo "$AUTHORS" | tee "$(git rev-parse --show-toplevel)/AUTHORS.TXT"
diff --git a/v1/hacking/env-setup b/v1/hacking/env-setup
deleted file mode 100644
index 29f4828410..0000000000
--- a/v1/hacking/env-setup
+++ /dev/null
@@ -1,78 +0,0 @@
-# usage: source hacking/env-setup [-q]
-# modifies environment for running Ansible from checkout
-
-# Default values for shell variables we use
-PYTHONPATH=${PYTHONPATH-""}
-PATH=${PATH-""}
-MANPATH=${MANPATH-""}
-verbosity=${1-info} # Defaults to `info' if unspecified
-
-if [ "$verbosity" = -q ]; then
- verbosity=silent
-fi
-
-# When run using source as directed, $0 gets set to bash, so we must use $BASH_SOURCE
-if [ -n "$BASH_SOURCE" ] ; then
- HACKING_DIR=$(dirname "$BASH_SOURCE")
-elif [ $(basename -- "$0") = "env-setup" ]; then
- HACKING_DIR=$(dirname "$0")
-# Works with ksh93 but not pdksh
-elif [ -n "$KSH_VERSION" ] && echo $KSH_VERSION | grep -qv '^@(#)PD KSH'; then
- HACKING_DIR=$(dirname "${.sh.file}")
-else
- HACKING_DIR="$PWD/hacking"
-fi
-# The below is an alternative to readlink -fn which doesn't exist on OS X
-# Source: http://stackoverflow.com/a/1678636
-FULL_PATH=$(python -c "import os; print(os.path.realpath('$HACKING_DIR'))")
-ANSIBLE_HOME=$(dirname "$FULL_PATH")
-
-PREFIX_PYTHONPATH="$ANSIBLE_HOME"
-PREFIX_PATH="$ANSIBLE_HOME/bin"
-PREFIX_MANPATH="$ANSIBLE_HOME/docs/man"
-
-expr "$PYTHONPATH" : "${PREFIX_PYTHONPATH}.*" > /dev/null || export PYTHONPATH="$PREFIX_PYTHONPATH:$PYTHONPATH"
-expr "$PATH" : "${PREFIX_PATH}.*" > /dev/null || export PATH="$PREFIX_PATH:$PATH"
-expr "$MANPATH" : "${PREFIX_MANPATH}.*" > /dev/null || export MANPATH="$PREFIX_MANPATH:$MANPATH"
-
-#
-# Generate egg_info so that pkg_resources works
-#
-
-# Do the work in a function so we don't repeat ourselves later
-gen_egg_info()
-{
- if [ -e "$PREFIX_PYTHONPATH/ansible.egg-info" ] ; then
- rm -r "$PREFIX_PYTHONPATH/ansible.egg-info"
- fi
- python setup.py egg_info
-}
-
-if [ "$ANSIBLE_HOME" != "$PWD" ] ; then
- current_dir="$PWD"
-else
- current_dir="$ANSIBLE_HOME"
-fi
-cd "$ANSIBLE_HOME"
-if [ "$verbosity" = silent ] ; then
- gen_egg_info > /dev/null 2>&1
-else
- gen_egg_info
-fi
-cd "$current_dir"
-
-if [ "$verbosity" != silent ] ; then
- cat <<- EOF
-
- Setting up Ansible to run out of checkout...
-
- PATH=$PATH
- PYTHONPATH=$PYTHONPATH
- MANPATH=$MANPATH
-
- Remember, you may wish to specify your host file with -i
-
- Done!
-
- EOF
-fi
diff --git a/v1/hacking/env-setup.fish b/v1/hacking/env-setup.fish
deleted file mode 100644
index 9deffb4e3d..0000000000
--- a/v1/hacking/env-setup.fish
+++ /dev/null
@@ -1,67 +0,0 @@
-#!/usr/bin/env fish
-# usage: . ./hacking/env-setup [-q]
-# modifies environment for running Ansible from checkout
-set HACKING_DIR (dirname (status -f))
-set FULL_PATH (python -c "import os; print(os.path.realpath('$HACKING_DIR'))")
-set ANSIBLE_HOME (dirname $FULL_PATH)
-set PREFIX_PYTHONPATH $ANSIBLE_HOME/
-set PREFIX_PATH $ANSIBLE_HOME/bin
-set PREFIX_MANPATH $ANSIBLE_HOME/docs/man
-
-# Set PYTHONPATH
-if not set -q PYTHONPATH
- set -gx PYTHONPATH $PREFIX_PYTHONPATH
-else
- switch $PYTHONPATH
- case "$PREFIX_PYTHONPATH*"
- case "*"
- echo "Appending PYTHONPATH"
- set -gx PYTHONPATH "$PREFIX_PYTHONPATH:$PYTHONPATH"
- end
-end
-
-# Set PATH
-if not contains $PREFIX_PATH $PATH
- set -gx PATH $PREFIX_PATH $PATH
-end
-
-# Set MANPATH
-if not contains $PREFIX_MANPATH $MANPATH
- if not set -q MANPATH
- set -gx MANPATH $PREFIX_MANPATH
- else
- set -gx MANPATH $PREFIX_MANPATH $MANPATH
- end
-end
-
-set -gx ANSIBLE_LIBRARY $ANSIBLE_HOME/library
-
-# Generate egg_info so that pkg_resources works
-pushd $ANSIBLE_HOME
-python setup.py egg_info
-if test -e $PREFIX_PYTHONPATH/ansible*.egg-info
- rm -r $PREFIX_PYTHONPATH/ansible*.egg-info
-end
-mv ansible*egg-info $PREFIX_PYTHONPATH
-popd
-
-
-if set -q argv
- switch $argv
- case '-q' '--quiet'
- case '*'
- echo ""
- echo "Setting up Ansible to run out of checkout..."
- echo ""
- echo "PATH=$PATH"
- echo "PYTHONPATH=$PYTHONPATH"
- echo "ANSIBLE_LIBRARY=$ANSIBLE_LIBRARY"
- echo "MANPATH=$MANPATH"
- echo ""
-
- echo "Remember, you may wish to specify your host file with -i"
- echo ""
- echo "Done!"
- echo ""
- end
-end
diff --git a/v1/hacking/module_formatter.py b/v1/hacking/module_formatter.py
deleted file mode 100755
index acddd70093..0000000000
--- a/v1/hacking/module_formatter.py
+++ /dev/null
@@ -1,447 +0,0 @@
-#!/usr/bin/env python
-# (c) 2012, Jan-Piet Mens <jpmens () gmail.com>
-# (c) 2012-2014, Michael DeHaan <michael@ansible.com> and others
-#
-# This file is part of Ansible
-#
-# Ansible is free software: you can redistribute it and/or modify
-# it under the terms of the GNU General Public License as published by
-# the Free Software Foundation, either version 3 of the License, or
-# (at your option) any later version.
-#
-# Ansible is distributed in the hope that it will be useful,
-# but WITHOUT ANY WARRANTY; without even the implied warranty of
-# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
-# GNU General Public License for more details.
-#
-# You should have received a copy of the GNU General Public License
-# along with Ansible. If not, see <http://www.gnu.org/licenses/>.
-#
-
-import os
-import glob
-import sys
-import yaml
-import codecs
-import json
-import ast
-import re
-import optparse
-import time
-import datetime
-import subprocess
-import cgi
-from jinja2 import Environment, FileSystemLoader
-
-from ansible.utils import module_docs
-from ansible.utils.vars import merge_hash
-
-#####################################################################################
-# constants and paths
-
-# if a module is added in a version of Ansible older than this, don't print the version added information
-# in the module documentation because everyone is assumed to be running something newer than this already.
-TO_OLD_TO_BE_NOTABLE = 1.0
-
-# Get parent directory of the directory this script lives in
-MODULEDIR=os.path.abspath(os.path.join(
- os.path.dirname(os.path.realpath(__file__)), os.pardir, 'lib', 'ansible', 'modules'
-))
-
-# The name of the DOCUMENTATION template
-EXAMPLE_YAML=os.path.abspath(os.path.join(
- os.path.dirname(os.path.realpath(__file__)), os.pardir, 'examples', 'DOCUMENTATION.yml'
-))
-
-_ITALIC = re.compile(r"I\(([^)]+)\)")
-_BOLD = re.compile(r"B\(([^)]+)\)")
-_MODULE = re.compile(r"M\(([^)]+)\)")
-_URL = re.compile(r"U\(([^)]+)\)")
-_CONST = re.compile(r"C\(([^)]+)\)")
-
-DEPRECATED = " (D)"
-NOTCORE = " (E)"
-#####################################################################################
-
-def rst_ify(text):
- ''' convert symbols like I(this is in italics) to valid restructured text '''
-
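- # illustrative example (not from the original source):
- # rst_ify("B(bold) text with C(code)") -> "**bold** text with ``code``"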
- t = _ITALIC.sub(r'*' + r"\1" + r"*", text)
- t = _BOLD.sub(r'**' + r"\1" + r"**", t)
- t = _MODULE.sub(r':ref:`' + r"\1 <\1>" + r"`", t)
- t = _URL.sub(r"\1", t)
- t = _CONST.sub(r'``' + r"\1" + r"``", t)
-
- return t
-
-#####################################################################################
-
-def html_ify(text):
- ''' convert symbols like I(this is in italics) to valid HTML '''
-
- t = cgi.escape(text)
- t = _ITALIC.sub("<em>" + r"\1" + "</em>", t)
- t = _BOLD.sub("<b>" + r"\1" + "</b>", t)
- t = _MODULE.sub("<span class='module'>" + r"\1" + "</span>", t)
- t = _URL.sub("<a href='" + r"\1" + "'>" + r"\1" + "</a>", t)
- t = _CONST.sub("<code>" + r"\1" + "</code>", t)
-
- return t
-
-
-#####################################################################################
-
-def rst_fmt(text, fmt):
- ''' helper for Jinja2 to do format strings '''
-
- return fmt % (text)
-
-#####################################################################################
-
-def rst_xline(width, char="="):
- ''' return a restructured text line of a given length '''
-
- return char * width
-
-#####################################################################################
-
-def write_data(text, options, outputname, module):
- ''' dumps module output to a file or the screen, as requested '''
-
- if options.output_dir is not None:
- fname = os.path.join(options.output_dir, outputname % module)
- fname = fname.replace(".py","")
- f = open(fname, 'w')
- f.write(text.encode('utf-8'))
- f.close()
- else:
- print text
-
-#####################################################################################
-
-
-def list_modules(module_dir, depth=0):
- ''' returns a hash of categories, each category being a hash of module names to file paths '''
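- # illustrative return shape (hypothetical module names and paths):
- # {'all': {'copy': '.../files/copy.py'},
- # 'files': {'copy': '.../files/copy.py'},
- # '_aliases': {'newname': ['oldname']}}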
-
- categories = dict(all=dict(),_aliases=dict())
- if depth <= 3: # limit # of subdirs
-
- files = glob.glob("%s/*" % module_dir)
- for d in files:
-
- category = os.path.splitext(os.path.basename(d))[0]
- if os.path.isdir(d):
-
- res = list_modules(d, depth + 1)
- for key in res.keys():
- if key in categories:
- categories[key] = merge_hash(categories[key], res[key])
- res.pop(key, None)
-
- if depth < 2:
- categories.update(res)
- else:
- category = module_dir.split("/")[-1]
- if not category in categories:
- categories[category] = res
- else:
- categories[category].update(res)
- else:
- module = category
- category = os.path.basename(module_dir)
- if not d.endswith(".py") or d.endswith('__init__.py'):
- # windows powershell modules have documentation stubs in python docstring
- # format (they are not executed) so skip the ps1 format files
- continue
- elif module.startswith("_") and os.path.islink(d):
- source = os.path.splitext(os.path.basename(os.path.realpath(d)))[0]
- module = module.replace("_","",1)
- if source not in categories['_aliases']:
- categories['_aliases'][source] = [module]
- else:
- categories['_aliases'][source].append(module)
- continue
-
- if not category in categories:
- categories[category] = {}
- categories[category][module] = d
- categories['all'][module] = d
-
- return categories
-
-#####################################################################################
-
-def generate_parser():
- ''' generate an optparse parser '''
-
- p = optparse.OptionParser(
- version='%prog 1.0',
- usage='usage: %prog [options] arg1 arg2',
- description='Generate module documentation from metadata',
- )
-
- p.add_option("-A", "--ansible-version", action="store", dest="ansible_version", default="unknown", help="Ansible version number")
- p.add_option("-M", "--module-dir", action="store", dest="module_dir", default=MODULEDIR, help="Ansible library path")
- p.add_option("-T", "--template-dir", action="store", dest="template_dir", default="hacking/templates", help="directory containing Jinja2 templates")
- p.add_option("-t", "--type", action='store', dest='type', choices=['rst'], default='rst', help="Document type")
- p.add_option("-v", "--verbose", action='store_true', default=False, help="Verbose")
- p.add_option("-o", "--output-dir", action="store", dest="output_dir", default=None, help="Output directory for module files")
- p.add_option("-I", "--includes-file", action="store", dest="includes_file", default=None, help="Create a file containing list of processed modules")
- p.add_option('-V', action='version', help='Show version number and exit')
- return p
-
-#####################################################################################
-
-def jinja2_environment(template_dir, typ):
-
- env = Environment(loader=FileSystemLoader(template_dir),
- variable_start_string="@{",
- variable_end_string="}@",
- trim_blocks=True,
- )
- env.globals['xline'] = rst_xline
-
- if typ == 'rst':
- env.filters['convert_symbols_to_format'] = rst_ify
- env.filters['html_ify'] = html_ify
- env.filters['fmt'] = rst_fmt
- env.filters['xline'] = rst_xline
- template = env.get_template('rst.j2')
- outputname = "%s_module.rst"
- else:
- raise Exception("unknown module format type: %s" % typ)
-
- return env, template, outputname
-
-#####################################################################################
-
-def process_module(module, options, env, template, outputname, module_map, aliases):
-
- fname = module_map[module]
- if isinstance(fname, dict):
- return "SKIPPED"
-
- basename = os.path.basename(fname)
- deprecated = False
-
- # ignore files with extensions
- if not basename.endswith(".py"):
- return
- elif module.startswith("_"):
- if os.path.islink(fname):
- return # ignore, it's an alias
- deprecated = True
- module = module.replace("_","",1)
-
- print "rendering: %s" % module
-
- # use ansible core library to parse out doc metadata YAML and plaintext examples
- doc, examples, returndocs = module_docs.get_docstring(fname, verbose=options.verbose)
-
- # crash if module is missing documentation and not explicitly hidden from docs index
- if doc is None:
- if module in module_docs.BLACKLIST_MODULES:
- return "SKIPPED"
- else:
- sys.stderr.write("*** ERROR: MODULE MISSING DOCUMENTATION: %s, %s ***\n" % (fname, module))
- sys.exit(1)
-
- if deprecated and 'deprecated' not in doc:
- sys.stderr.write("*** ERROR: DEPRECATED MODULE MISSING 'deprecated' DOCUMENTATION: %s, %s ***\n" % (fname, module))
- sys.exit(1)
-
- if "/core/" in fname:
- doc['core'] = True
- else:
- doc['core'] = False
-
- if module in aliases:
- doc['aliases'] = aliases[module]
-
- all_keys = []
-
- if not 'version_added' in doc:
- sys.stderr.write("*** ERROR: missing version_added in: %s ***\n" % module)
- sys.exit(1)
-
- added = 0
- if doc['version_added'] == 'historical':
- del doc['version_added']
- else:
- added = doc['version_added']
-
- # don't show version added information if it's too old to be called out
- if added:
- added_tokens = str(added).split(".")
- added = added_tokens[0] + "." + added_tokens[1]
- added_float = float(added)
- if added and added_float < TO_OLD_TO_BE_NOTABLE:
- del doc['version_added']
-
- if 'options' in doc:
- for (k,v) in doc['options'].iteritems():
- all_keys.append(k)
-
- all_keys = sorted(all_keys)
-
- doc['option_keys'] = all_keys
- doc['filename'] = fname
- doc['docuri'] = doc['module'].replace('_', '-')
- doc['now_date'] = datetime.date.today().strftime('%Y-%m-%d')
- doc['ansible_version'] = options.ansible_version
- doc['plainexamples'] = examples #plain text
- if returndocs:
- doc['returndocs'] = yaml.safe_load(returndocs)
- else:
- doc['returndocs'] = None
-
- # here is where we build the table of contents...
-
- text = template.render(doc)
- write_data(text, options, outputname, module)
- return doc['short_description']
-
-#####################################################################################
-
-def print_modules(module, category_file, deprecated, core, options, env, template, outputname, module_map, aliases):
- modstring = module
- modname = module
- if module in deprecated:
- modstring = modstring + DEPRECATED
- modname = "_" + module
- elif module not in core:
- modstring = modstring + NOTCORE
-
- result = process_module(modname, options, env, template, outputname, module_map, aliases)
-
- if result != "SKIPPED":
- category_file.write(" %s - %s <%s_module>\n" % (modstring, result, module))
-
-def process_category(category, categories, options, env, template, outputname):
-
- module_map = categories[category]
-
- aliases = {}
- if '_aliases' in categories:
- aliases = categories['_aliases']
-
- category_file_path = os.path.join(options.output_dir, "list_of_%s_modules.rst" % category)
- category_file = open(category_file_path, "w")
- print "*** recording category %s in %s ***" % (category, category_file_path)
-
- # TODO: start a new category file
-
- category = category.replace("_"," ")
- category = category.title()
-
- modules = []
- deprecated = []
- core = []
- for module in module_map.keys():
-
- if isinstance(module_map[module], dict):
- for mod in module_map[module].keys():
- if mod.startswith("_"):
- mod = mod.replace("_","",1)
- deprecated.append(mod)
- elif '/core/' in module_map[module][mod]:
- core.append(mod)
- else:
- if module.startswith("_"):
- module = module.replace("_","",1)
- deprecated.append(module)
- elif '/core/' in module_map[module]:
- core.append(module)
-
- modules.append(module)
-
- modules.sort()
-
- category_header = "%s Modules" % (category.title())
- underscores = "`" * len(category_header)
-
- category_file.write("""\
-%s
-%s
-
-.. toctree:: :maxdepth: 1
-
-""" % (category_header, underscores))
- sections = []
- for module in modules:
- if module in module_map and isinstance(module_map[module], dict):
- sections.append(module)
- continue
- else:
- print_modules(module, category_file, deprecated, core, options, env, template, outputname, module_map, aliases)
-
- sections.sort()
- for section in sections:
- category_file.write("\n%s\n%s\n\n" % (section.replace("_"," ").title(),'-' * len(section)))
- category_file.write(".. toctree:: :maxdepth: 1\n\n")
-
- section_modules = module_map[section].keys()
- section_modules.sort()
- #for module in module_map[section]:
- for module in section_modules:
- print_modules(module, category_file, deprecated, core, options, env, template, outputname, module_map[section], aliases)
-
- category_file.write("""\n\n
-.. note::
- - %s: This marks a module as deprecated, which means the module is kept for backwards compatibility but its use is discouraged. The module's documentation page may explain more about the rationale.
- - %s: This marks a module as 'extras', which means it ships with ansible but may be a newer module and possibly (but not necessarily) less actively maintained than 'core' modules.
- - Tickets for modules are filed in different repos from the main open source project. Core module tickets should be filed at `ansible/ansible-modules-core on GitHub <http://github.com/ansible/ansible-modules-core>`_, and extras tickets at `ansible/ansible-modules-extras on GitHub <http://github.com/ansible/ansible-modules-extras>`_.
-""" % (DEPRECATED, NOTCORE))
- category_file.close()
-
- # TODO: end a new category file
-
-#####################################################################################
-
-def validate_options(options):
- ''' validate option parser options '''
-
- if not options.module_dir:
- print >>sys.stderr, "--module-dir is required"
- sys.exit(1)
- if not os.path.exists(options.module_dir):
- print >>sys.stderr, "--module-dir does not exist: %s" % options.module_dir
- sys.exit(1)
- if not options.template_dir:
- print "--template-dir must be specified"
- sys.exit(1)
-
-#####################################################################################
-
-def main():
-
- p = generate_parser()
-
- (options, args) = p.parse_args()
- validate_options(options)
-
- env, template, outputname = jinja2_environment(options.template_dir, options.type)
-
- categories = list_modules(options.module_dir)
- last_category = None
- category_names = categories.keys()
- category_names.sort()
-
- category_list_path = os.path.join(options.output_dir, "modules_by_category.rst")
- category_list_file = open(category_list_path, "w")
- category_list_file.write("Module Index\n")
- category_list_file.write("============\n")
- category_list_file.write("\n\n")
- category_list_file.write(".. toctree::\n")
- category_list_file.write(" :maxdepth: 1\n\n")
-
- for category in category_names:
- if category.startswith("_"):
- continue
- category_list_file.write(" list_of_%s_modules\n" % category)
- process_category(category, categories, options, env, template, outputname)
-
- category_list_file.close()
-
-if __name__ == '__main__':
- main()
diff --git a/v1/hacking/templates/rst.j2 b/v1/hacking/templates/rst.j2
deleted file mode 100644
index f6f38e5910..0000000000
--- a/v1/hacking/templates/rst.j2
+++ /dev/null
@@ -1,211 +0,0 @@
-.. _@{ module }@:
-
-{% if short_description %}
-{% set title = module + ' - ' + short_description|convert_symbols_to_format %}
-{% else %}
-{% set title = module %}
-{% endif %}
-{% set title_len = title|length %}
-
-@{ title }@
-@{ '+' * title_len }@
-
-.. contents::
- :local:
- :depth: 1
-
-{# ------------------------------------------
- #
- # Please note: this looks like a core dump
- # but it isn't one.
- #
- --------------------------------------------#}
-
-{% if aliases is defined -%}
-Aliases: @{ ','.join(aliases) }@
-{% endif %}
-
-{% if deprecated is defined -%}
-DEPRECATED
-----------
-
-@{ deprecated }@
-{% endif %}
-
-Synopsis
---------
-
-{% if version_added is defined -%}
-.. versionadded:: @{ version_added }@
-{% endif %}
-
-{% for desc in description -%}
-@{ desc | convert_symbols_to_format }@
-{% endfor %}
-
-{% if options -%}
-Options
--------
-
-.. raw:: html
-
- <table border=1 cellpadding=4>
- <tr>
- <th class="head">parameter</th>
- <th class="head">required</th>
- <th class="head">default</th>
- <th class="head">choices</th>
- <th class="head">comments</th>
- </tr>
- {% for k in option_keys %}
- {% set v = options[k] %}
- <tr>
- <td>@{ k }@</td>
- <td>{% if v.get('required', False) %}yes{% else %}no{% endif %}</td>
- <td>{% if v['default'] %}@{ v['default'] }@{% endif %}</td>
- {% if v.get('type', 'not_bool') == 'bool' %}
- <td><ul><li>yes</li><li>no</li></ul></td>
- {% else %}
- <td><ul>{% for choice in v.get('choices',[]) -%}<li>@{ choice }@</li>{% endfor -%}</ul></td>
- {% endif %}
- <td>{% for desc in v.description -%}@{ desc | html_ify }@{% endfor -%}{% if v['version_added'] %} (added in Ansible @{v['version_added']}@){% endif %}</td>
- </tr>
- {% endfor %}
- </table>
-{% endif %}
-
-{% if requirements %}
-{% for req in requirements %}
-
-.. note:: Requires @{ req | convert_symbols_to_format }@
-
-{% endfor %}
-{% endif %}
-
-{% if examples or plainexamples %}
-Examples
---------
-
-.. raw:: html
-
-{% for example in examples %}
- {% if example['description'] %}<p>@{ example['description'] | html_ify }@</p>{% endif %}
- <p>
- <pre>
-@{ example['code'] | escape | indent(4, True) }@
- </pre>
- </p>
-{% endfor %}
- <br/>
-
-{% if plainexamples %}
-
-::
-
-@{ plainexamples | indent(4, True) }@
-{% endif %}
-{% endif %}
-
-
-{% if returndocs %}
-Return Values
--------------
-
-Common return values are documented here :doc:`common_return_values`, the following are the fields unique to this module:
-
-.. raw:: html
-
- <table border=1 cellpadding=4>
- <tr>
- <th class="head">name</th>
- <th class="head">description</th>
- <th class="head">returned</th>
- <th class="head">type</th>
- <th class="head">sample</th>
- </tr>
-
- {% for entry in returndocs %}
- <tr>
- <td> @{ entry }@ </td>
- <td> @{ returndocs[entry].description }@ </td>
- <td align=center> @{ returndocs[entry].returned }@ </td>
- <td align=center> @{ returndocs[entry].type }@ </td>
- <td align=center> @{ returndocs[entry].sample}@ </td>
- </tr>
- {% if returndocs[entry].type == 'dictionary' %}
- <tr><td>contains: </td>
- <td colspan=4>
- <table border=1 cellpadding=2>
- <tr>
- <th class="head">name</th>
- <th class="head">description</th>
- <th class="head">returned</th>
- <th class="head">type</th>
- <th class="head">sample</th>
- </tr>
-
- {% for sub in returndocs[entry].contains %}
- <tr>
- <td> @{ sub }@ </td>
- <td> @{ returndocs[entry].contains[sub].description }@ </td>
- <td align=center> @{ returndocs[entry].contains[sub].returned }@ </td>
- <td align=center> @{ returndocs[entry].contains[sub].type }@ </td>
- <td align=center> @{ returndocs[entry].contains[sub].sample}@ </td>
- </tr>
- {% endfor %}
-
- </table>
- </td></tr>
-
- {% endif %}
- {% endfor %}
-
- </table>
- </br></br>
-{% endif %}
-
-{% if notes %}
-{% for note in notes %}
-.. note:: @{ note | convert_symbols_to_format }@
-{% endfor %}
-{% endif %}
-
-
-{% if not deprecated %}
- {% if core %}
-
-This is a Core Module
----------------------
-
-The source of this module is hosted on GitHub in the `ansible-modules-core <http://github.com/ansible/ansible-modules-core>`_ repo.
-
-If you believe you have found a bug in this module, and are already running the latest stable or development version of Ansible, first look in the `issue tracker at github.com/ansible/ansible-modules-core <http://github.com/ansible/ansible-modules-core>`_ to see if a bug has already been filed. If not, we would be grateful if you would file one.
-
-Should you have a question rather than a bug report, inquiries are welcome on the `ansible-project google group <https://groups.google.com/forum/#!forum/ansible-project>`_ or on Ansible's "#ansible" channel, located on irc.freenode.net. Development-oriented topics should instead use the similar `ansible-devel google group <https://groups.google.com/forum/#!forum/ansible-devel>`_.
-
-Documentation updates for this module can also be edited directly by submitting a pull request to the module source code, just look for the "DOCUMENTATION" block in the source tree.
-
-This is a "core" ansible module, which means it will receive slightly higher priority for all requests than those in the "extras" repos.
-
- {% else %}
-
-This is an Extras Module
-------------------------
-
-The source of this module is hosted on GitHub in the `ansible-modules-extras <http://github.com/ansible/ansible-modules-extras>`_ repo.
-
-If you believe you have found a bug in this module, and are already running the latest stable or development version of Ansible, first look in the `issue tracker at github.com/ansible/ansible-modules-extras <http://github.com/ansible/ansible-modules-extras>`_ to see if a bug has already been filed. If not, we would be grateful if you would file one.
-
-Should you have a question rather than a bug report, inquiries are welcome on the `ansible-project google group <https://groups.google.com/forum/#!forum/ansible-project>`_ or on Ansible's "#ansible" channel, located on irc.freenode.net. Development-oriented topics should instead use the similar `ansible-devel google group <https://groups.google.com/forum/#!forum/ansible-devel>`_.
-
-Documentation updates for this module can also be edited directly by submitting a pull request to the module source code, just look for the "DOCUMENTATION" block in the source tree.
-
-Note that this module is designated an "extras" module. Non-core modules are still fully usable, but may receive slightly lower response rates for issues and pull requests.
-Popular "extras" modules may be promoted to core modules over time.
-
- {% endif %}
-{% endif %}
-
-For help in developing modules, should you be so inclined, please read :doc:`community`, :doc:`developing_test_pr` and :doc:`developing_modules`.
-
-
diff --git a/v1/hacking/test-module b/v1/hacking/test-module
deleted file mode 100755
index c226f32e88..0000000000
--- a/v1/hacking/test-module
+++ /dev/null
@@ -1,193 +0,0 @@
-#!/usr/bin/env python
-
-# (c) 2012, Michael DeHaan <michael.dehaan@gmail.com>
-#
-# This file is part of Ansible
-#
-# Ansible is free software: you can redistribute it and/or modify
-# it under the terms of the GNU General Public License as published by
-# the Free Software Foundation, either version 3 of the License, or
-# (at your option) any later version.
-#
-# Ansible is distributed in the hope that it will be useful,
-# but WITHOUT ANY WARRANTY; without even the implied warranty of
-# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
-# GNU General Public License for more details.
-#
-# You should have received a copy of the GNU General Public License
-# along with Ansible. If not, see <http://www.gnu.org/licenses/>.
-#
-
-# this script is for testing modules without running through the
-# entire guts of ansible, and is very helpful for when developing
-# modules
-#
-# example:
-# test-module -m ../library/commands/command -a "/bin/sleep 3"
-# test-module -m ../library/system/service -a "name=httpd ensure=restarted"
-# test-module -m ../library/system/service -a "name=httpd ensure=restarted" --debugger /usr/bin/pdb
-# test-module -m ../library/file/lineinfile -a "dest=/etc/exports line='/srv/home hostname1(rw,sync)'" --check
-
-import sys
-import base64
-import os
-import subprocess
-import traceback
-import optparse
-import ansible.utils as utils
-import ansible.module_common as module_common
-import ansible.constants as C
-
-try:
- import json
-except ImportError:
- import simplejson as json
-
-def parse():
- """parse command line
-
- :return : (options, args)"""
- parser = optparse.OptionParser()
-
- parser.usage = "%prog -[options] (-h for help)"
-
- parser.add_option('-m', '--module-path', dest='module_path',
- help="REQUIRED: full path of module source to execute")
- parser.add_option('-a', '--args', dest='module_args', default="",
- help="module argument string")
- parser.add_option('-D', '--debugger', dest='debugger',
- help="path to python debugger (e.g. /usr/bin/pdb)")
- parser.add_option('-I', '--interpreter', dest='interpreter',
- help="path to interpreter to use for this module (e.g. ansible_python_interpreter=/usr/bin/python)",
- metavar='INTERPRETER_TYPE=INTERPRETER_PATH')
- parser.add_option('-c', '--check', dest='check', action='store_true',
- help="run the module in check mode")
- options, args = parser.parse_args()
- if not options.module_path:
- parser.print_help()
- sys.exit(1)
- else:
- return options, args
-
-def write_argsfile(argstring, json=False):
- """ Write args to a file for old-style module's use. """
- argspath = os.path.expanduser("~/.ansible_test_module_arguments")
- argsfile = open(argspath, 'w')
- if json:
- args = utils.parse_kv(argstring)
- argstring = utils.jsonify(args)
- argsfile.write(argstring)
- argsfile.close()
- return argspath
-
-def boilerplate_module(modfile, args, interpreter, check):
- """ simulate what ansible does with new style modules """
-
- #module_fh = open(modfile)
- #module_data = module_fh.read()
- #module_fh.close()
-
- replacer = module_common.ModuleReplacer()
-
- #included_boilerplate = module_data.find(module_common.REPLACER) != -1 or module_data.find("import ansible.module_utils") != -1
-
- complex_args = {}
- if args.startswith("@"):
- # Argument is a YAML file (JSON is a subset of YAML)
- complex_args = utils.combine_vars(complex_args, utils.parse_yaml_from_file(args[1:]))
- args=''
- elif args.startswith("{"):
- # Argument is a YAML document (not a file)
- complex_args = utils.combine_vars(complex_args, utils.parse_yaml(args))
- args=''
-
- inject = {}
- if interpreter:
- if '=' not in interpreter:
- print 'interpreter must be in the form of ansible_python_interpreter=/usr/bin/python'
- sys.exit(1)
- interpreter_type, interpreter_path = interpreter.split('=')
- if not interpreter_type.startswith('ansible_'):
- interpreter_type = 'ansible_%s' % interpreter_type
- if not interpreter_type.endswith('_interpreter'):
- interpreter_type = '%s_interpreter' % interpreter_type
- inject[interpreter_type] = interpreter_path
-
- if check:
- complex_args['CHECKMODE'] = True
-
- (module_data, module_style, shebang) = replacer.modify_module(
- modfile,
- complex_args,
- args,
- inject
- )
-
- modfile2_path = os.path.expanduser("~/.ansible_module_generated")
- print "* including generated source, if any, saving to: %s" % modfile2_path
- print "* this may offset any line numbers in tracebacks/debuggers!"
- modfile2 = open(modfile2_path, 'w')
- modfile2.write(module_data)
- modfile2.close()
- modfile = modfile2_path
-
- return (modfile2_path, module_style)
-
-def runtest(modfile, argspath):
- """Test-run a module, piping its output for reporting."""
-
- os.system("chmod +x %s" % modfile)
-
- invoke = "%s" % (modfile)
- if argspath is not None:
- invoke = "%s %s" % (modfile, argspath)
-
- cmd = subprocess.Popen(invoke, shell=True, stdout=subprocess.PIPE, stderr=subprocess.PIPE)
- (out, err) = cmd.communicate()
-
- try:
- print "***********************************"
- print "RAW OUTPUT"
- print out
- print err
- results = utils.parse_json(out)
- except:
- print "***********************************"
- print "INVALID OUTPUT FORMAT"
- print out
- traceback.print_exc()
- sys.exit(1)
-
- print "***********************************"
- print "PARSED OUTPUT"
- print utils.jsonify(results,format=True)
-
-def rundebug(debugger, modfile, argspath):
- """Run interactively with console debugger."""
-
- if argspath is not None:
- subprocess.call("%s %s %s" % (debugger, modfile, argspath), shell=True)
- else:
- subprocess.call("%s %s" % (debugger, modfile), shell=True)
-
-def main():
-
- options, args = parse()
- (modfile, module_style) = boilerplate_module(options.module_path, options.module_args, options.interpreter, options.check)
-
- argspath=None
- if module_style != 'new':
- if module_style == 'non_native_want_json':
- argspath = write_argsfile(options.module_args, json=True)
- elif module_style == 'old':
- argspath = write_argsfile(options.module_args, json=False)
- else:
- raise Exception("internal error, unexpected module style: %s" % module_style)
- if options.debugger:
- rundebug(options.debugger, modfile, argspath)
- else:
- runtest(modfile, argspath)
-
-if __name__ == "__main__":
- main()
-
diff --git a/v1/hacking/update.sh b/v1/hacking/update.sh
deleted file mode 100755
index 5979dd0ab2..0000000000
--- a/v1/hacking/update.sh
+++ /dev/null
@@ -1,3 +0,0 @@
-#!/bin/sh
-git pull --rebase
-git submodule update --init --recursive
diff --git a/v1/tests/README.md b/v1/tests/README.md
deleted file mode 100644
index d0b3dd5abd..0000000000
--- a/v1/tests/README.md
+++ /dev/null
@@ -1,5 +0,0 @@
-Unit tests
-==========
-
-Tests at code level. Should be concise and to the point, and organized by subject.
-
diff --git a/v1/tests/TestConstants.py b/v1/tests/TestConstants.py
deleted file mode 100644
index f3b96e8abc..0000000000
--- a/v1/tests/TestConstants.py
+++ /dev/null
@@ -1,64 +0,0 @@
-# -*- coding: utf-8 -*-
-
-import unittest
-
-from ansible.constants import get_config
-import ConfigParser
-import random
-import string
-import os
-
-
-def random_string(length):
- return ''.join(random.choice(string.ascii_uppercase) for x in range(length))
-
-p = ConfigParser.ConfigParser()
-p.read(os.path.join(os.path.dirname(__file__), 'ansible.cfg'))
-
-class TestConstants(unittest.TestCase):
-
- #####################################
- ### get_config unit tests
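- ### precedence exercised by the tests below: environment variable
- ### first, then the config file value, then the supplied default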
-
-
- def test_configfile_and_env_both_set(self):
- r = random_string(6)
- env_var = 'ANSIBLE_TEST_%s' % r
- os.environ[env_var] = r
-
- res = get_config(p, 'defaults', 'test_key', env_var, 'default')
- del os.environ[env_var]
-
- assert res == r
-
-
- def test_configfile_set_env_not_set(self):
- r = random_string(6)
- env_var = 'ANSIBLE_TEST_%s' % r
- assert env_var not in os.environ
-
- res = get_config(p, 'defaults', 'test_key', env_var, 'default')
-
- print res
- assert res == 'test_value'
-
-
- def test_configfile_not_set_env_set(self):
- r = random_string(6)
- env_var = 'ANSIBLE_TEST_%s' % r
- os.environ[env_var] = r
-
- res = get_config(p, 'defaults', 'doesnt_exist', env_var, 'default')
- del os.environ[env_var]
-
- assert res == r
-
-
- def test_configfile_not_set_env_not_set(self):
- r = random_string(6)
- env_var = 'ANSIBLE_TEST_%s' % r
- assert env_var not in os.environ
-
- res = get_config(p, 'defaults', 'doesnt_exist', env_var, 'default')
-
- assert res == 'default'
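Taken together, the four cases above pin down get_config's precedence: an environment variable that is set wins over the config file, which wins over the hard-coded default. A sketch of that lookup, written to the tested behavior rather than copied from the v1 source:

    import os

    def get_config(parser, section, key, env_var, default):
        # A set environment variable always wins.
        if env_var in os.environ:
            return os.environ[env_var]
        # Then the config file, then the supplied default.
        if parser.has_section(section) and parser.has_option(section, key):
            return parser.get(section, key)
        return default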
diff --git a/v1/tests/TestFilters.py b/v1/tests/TestFilters.py
deleted file mode 100644
index 3c7eb4506e..0000000000
--- a/v1/tests/TestFilters.py
+++ /dev/null
@@ -1,191 +0,0 @@
-'''
-Test bundled filters
-'''
-
-import os.path
-import unittest, tempfile, shutil
-from ansible import playbook, inventory, callbacks
-import ansible.runner.filter_plugins.core
-import ansible.runner.filter_plugins.mathstuff
-
-INVENTORY = inventory.Inventory(['localhost'])
-
-BOOK = '''
-- hosts: localhost
- vars:
- var: { a: [1,2,3] }
- tasks:
- - template: src=%s dest=%s
-'''
-
-SRC = '''
--
-{{ var|to_json }}
--
-{{ var|to_nice_json }}
--
-{{ var|to_yaml }}
--
-{{ var|to_nice_yaml }}
-'''
-
-DEST = '''
--
-{"a": [1, 2, 3]}
--
-{
- "a": [
- 1,
- 2,
- 3
- ]
-}
--
-a: [1, 2, 3]
-
--
-a:
-- 1
-- 2
-- 3
-'''
-
-class TestFilters(unittest.TestCase):
-
- def setUp(self):
- self.tmpdir = tempfile.mkdtemp(dir='/tmp')
-
- def tearDown(self):
- shutil.rmtree(self.tmpdir)
-
- def temp(self, name, data=''):
- '''write a temporary file and return the name'''
- name = self.tmpdir + '/' + name
- with open(name, 'w') as f:
- f.write(data)
- return name
-
- def test_bool_none(self):
- a = ansible.runner.filter_plugins.core.bool(None)
- assert a == None
-
- def test_bool_true(self):
- a = ansible.runner.filter_plugins.core.bool(True)
- assert a == True
-
- def test_bool_yes(self):
- a = ansible.runner.filter_plugins.core.bool('Yes')
- assert a == True
-
- def test_bool_unknown_string(self):
- a = ansible.runner.filter_plugins.core.bool('Foo')
- assert a == False
-
- def test_quotes(self):
- a = ansible.runner.filter_plugins.core.quote('ls | wc -l')
- assert a == "'ls | wc -l'"
-
- def test_fileglob(self):
- pathname = os.path.join(os.path.dirname(__file__), '*')
- a = ansible.runner.filter_plugins.core.fileglob(pathname)
- assert __file__ in a
-
- def test_regex(self):
- a = ansible.runner.filter_plugins.core.regex('ansible', 'ansible',
- match_type='findall')
- assert a == True
-
- def test_match_case_sensitive(self):
- a = ansible.runner.filter_plugins.core.match('ansible', 'ansible')
- assert a == True
-
- def test_match_case_insensitive(self):
- a = ansible.runner.filter_plugins.core.match('ANSIBLE', 'ansible',
- True)
- assert a == True
-
- def test_match_no_match(self):
- a = ansible.runner.filter_plugins.core.match(' ansible', 'ansible')
- assert a == False
-
- def test_search_case_sensitive(self):
- a = ansible.runner.filter_plugins.core.search(' ansible ', 'ansible')
- assert a == True
-
- def test_search_case_insensitive(self):
- a = ansible.runner.filter_plugins.core.search(' ANSIBLE ', 'ansible',
- True)
- assert a == True
-
- def test_regex_replace_case_sensitive(self):
- a = ansible.runner.filter_plugins.core.regex_replace('ansible', '^a.*i(.*)$',
- 'a\\1')
- assert a == 'able'
-
- def test_regex_replace_case_insensitive(self):
- a = ansible.runner.filter_plugins.core.regex_replace('ansible', '^A.*I(.*)$',
- 'a\\1', True)
- assert a == 'able'
-
- def test_regex_replace_no_match(self):
- a = ansible.runner.filter_plugins.core.regex_replace('ansible', '^b.*i(.*)$',
- 'a\\1')
- assert a == 'ansible'
-
- def test_to_uuid(self):
- a = ansible.runner.filter_plugins.core.to_uuid('example.com')
-
- assert a == 'ae780c3a-a3ab-53c2-bfb4-098da300b3fe'
-
- #def test_filters(self):
-
- # this test is pretty low level using a playbook, hence I am disabling it for now -- MPD.
- #return
-
- #src = self.temp('src.j2', SRC)
- #dest = self.temp('dest.txt')
- #book = self.temp('book', BOOK % (src, dest))
-
- #playbook.PlayBook(
- # playbook = book,
- # inventory = INVENTORY,
- # transport = 'local',
- # callbacks = callbacks.PlaybookCallbacks(),
- # runner_callbacks = callbacks.DefaultRunnerCallbacks(),
- # stats = callbacks.AggregateStats(),
- #).run()
-
- #out = open(dest).read()
- #self.assertEqual(DEST, out)
-
- def test_version_compare(self):
- self.assertTrue(ansible.runner.filter_plugins.core.version_compare(0, 1.1, 'lt', False))
- self.assertTrue(ansible.runner.filter_plugins.core.version_compare(1.1, 1.2, '<'))
-
- self.assertTrue(ansible.runner.filter_plugins.core.version_compare(1.2, 1.2, '=='))
- self.assertTrue(ansible.runner.filter_plugins.core.version_compare(1.2, 1.2, '='))
- self.assertTrue(ansible.runner.filter_plugins.core.version_compare(1.2, 1.2, 'eq'))
-
-
- self.assertTrue(ansible.runner.filter_plugins.core.version_compare(1.3, 1.2, 'gt'))
- self.assertTrue(ansible.runner.filter_plugins.core.version_compare(1.3, 1.2, '>'))
-
- self.assertTrue(ansible.runner.filter_plugins.core.version_compare(1.3, 1.2, 'ne'))
- self.assertTrue(ansible.runner.filter_plugins.core.version_compare(1.3, 1.2, '!='))
- self.assertTrue(ansible.runner.filter_plugins.core.version_compare(1.3, 1.2, '<>'))
-
- self.assertTrue(ansible.runner.filter_plugins.core.version_compare(1.1, 1.1, 'ge'))
- self.assertTrue(ansible.runner.filter_plugins.core.version_compare(1.2, 1.1, '>='))
-
- self.assertTrue(ansible.runner.filter_plugins.core.version_compare(1.1, 1.1, 'le'))
- self.assertTrue(ansible.runner.filter_plugins.core.version_compare(1.0, 1.1, '<='))
-
- self.assertTrue(ansible.runner.filter_plugins.core.version_compare('12.04', 12, 'ge'))
-
- def test_min(self):
- a = ansible.runner.filter_plugins.mathstuff.min([3, 2, 5, 4])
- assert a == 2
-
- def test_max(self):
- a = ansible.runner.filter_plugins.mathstuff.max([3, 2, 5, 4])
- assert a == 5
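The serialization filters exercised above are thin wrappers over the json and yaml libraries; the SRC/DEST fixtures show exactly which style each one is expected to produce. A sketch equivalent in spirit to the deleted plugins (not a verbatim copy):

    import json
    import yaml  # PyYAML

    def to_json(value):
        return json.dumps(value)

    def to_nice_json(value):
        return json.dumps(value, indent=4, sort_keys=True)

    def to_yaml(value):
        # block mappings with flow sequences, e.g. "a: [1, 2, 3]"
        return yaml.safe_dump(value, default_flow_style=None)

    def to_nice_yaml(value):
        # fully block style, one list item per line
        return yaml.safe_dump(value, default_flow_style=False)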
diff --git a/v1/tests/TestInventory.py b/v1/tests/TestInventory.py
deleted file mode 100644
index b4bee4300e..0000000000
--- a/v1/tests/TestInventory.py
+++ /dev/null
@@ -1,510 +0,0 @@
-import os
-import unittest
-from nose.tools import raises
-
-from ansible import errors
-from ansible.inventory import Inventory
-
-class TestInventory(unittest.TestCase):
-
- def setUp(self):
-
- self.cwd = os.getcwd()
- self.test_dir = os.path.join(self.cwd, 'inventory_test_data')
-
- self.inventory_file = os.path.join(self.test_dir, 'simple_hosts')
- self.large_range_inventory_file = os.path.join(self.test_dir, 'large_range')
- self.complex_inventory_file = os.path.join(self.test_dir, 'complex_hosts')
- self.inventory_script = os.path.join(self.test_dir, 'inventory_api.py')
- self.inventory_dir = os.path.join(self.test_dir, 'inventory_dir')
-
- os.chmod(self.inventory_script, 0755)
-
- def tearDown(self):
- os.chmod(self.inventory_script, 0644)
-
- def compare(self, left, right, sort=True):
- if sort:
- left = sorted(left)
- right = sorted(right)
- print left
- print right
- assert left == right
-
- def empty_inventory(self):
- return Inventory(None)
-
- def simple_inventory(self):
- return Inventory(self.inventory_file)
-
- def large_range_inventory(self):
- return Inventory(self.large_range_inventory_file)
-
- def script_inventory(self):
- return Inventory(self.inventory_script)
-
- def complex_inventory(self):
- return Inventory(self.complex_inventory_file)
-
- def dir_inventory(self):
- return Inventory(self.inventory_dir)
-
- all_simple_hosts=['jupiter', 'saturn', 'zeus', 'hera',
- 'cerberus001','cerberus002','cerberus003',
- 'cottus99', 'cottus100',
- 'poseidon', 'thor', 'odin', 'loki',
- 'thrudgelmir0', 'thrudgelmir1', 'thrudgelmir2',
- 'thrudgelmir3', 'thrudgelmir4', 'thrudgelmir5',
- 'Hotep-a', 'Hotep-b', 'Hotep-c',
- 'BastC', 'BastD', 'neptun', 'goldorak', ]
-
- #####################################
- ### Empty inventory format tests
-
- def test_empty(self):
- inventory = self.empty_inventory()
- hosts = inventory.list_hosts()
- self.assertEqual(hosts, [])
-
- #####################################
- ### Simple inventory format tests
-
- def test_simple(self):
- inventory = self.simple_inventory()
- hosts = inventory.list_hosts()
- self.assertEqual(sorted(hosts), sorted(self.all_simple_hosts))
-
- def test_simple_all(self):
- inventory = self.simple_inventory()
- hosts = inventory.list_hosts('all')
- self.assertEqual(sorted(hosts), sorted(self.all_simple_hosts))
-
- def test_get_hosts(self):
- inventory = Inventory('127.0.0.1,192.168.1.1')
- hosts = inventory.get_hosts('!10.0.0.1')
- hosts_all = inventory.get_hosts('all')
- self.assertEqual(sorted(hosts), sorted(hosts_all))
-
- def test_no_src(self):
- inventory = Inventory('127.0.0.1,')
- self.assertEqual(inventory.src(), None)
-
- def test_simple_norse(self):
- inventory = self.simple_inventory()
- hosts = inventory.list_hosts("norse")
-
- expected_hosts=['thor', 'odin', 'loki']
- assert sorted(hosts) == sorted(expected_hosts)
-
- def test_simple_ungrouped(self):
- inventory = self.simple_inventory()
- hosts = inventory.list_hosts("ungrouped")
-
- expected_hosts=['jupiter', 'saturn',
- 'thrudgelmir0', 'thrudgelmir1', 'thrudgelmir2',
- 'thrudgelmir3', 'thrudgelmir4', 'thrudgelmir5']
- assert sorted(hosts) == sorted(expected_hosts)
-
- def test_simple_combined(self):
- inventory = self.simple_inventory()
- hosts = inventory.list_hosts("norse:greek")
-
- expected_hosts=['zeus', 'hera', 'poseidon',
- 'cerberus001','cerberus002','cerberus003',
- 'cottus99','cottus100',
- 'thor', 'odin', 'loki']
- assert sorted(hosts) == sorted(expected_hosts)
-
- def test_simple_restrict(self):
- inventory = self.simple_inventory()
-
- restricted_hosts = ['hera', 'poseidon', 'thor']
- expected_hosts=['zeus', 'hera', 'poseidon',
- 'cerberus001','cerberus002','cerberus003',
- 'cottus99', 'cottus100',
- 'thor', 'odin', 'loki']
-
- inventory.restrict_to(restricted_hosts)
- hosts = inventory.list_hosts("norse:greek")
-
- assert sorted(hosts) == sorted(restricted_hosts)
-
- inventory.lift_restriction()
- hosts = inventory.list_hosts("norse:greek")
-
- assert sorted(hosts) == sorted(expected_hosts)
-
- def test_simple_string_ipv4(self):
- inventory = Inventory('127.0.0.1,192.168.1.1')
- hosts = inventory.list_hosts()
- self.assertEqual(sorted(hosts), sorted(['127.0.0.1','192.168.1.1']))
-
- def test_simple_string_ipv4_port(self):
- inventory = Inventory('127.0.0.1:2222,192.168.1.1')
- hosts = inventory.list_hosts()
- self.assertEqual(sorted(hosts), sorted(['127.0.0.1','192.168.1.1']))
-
- def test_simple_string_ipv4_vars(self):
- inventory = Inventory('127.0.0.1:2222,192.168.1.1')
- var = inventory.get_variables('127.0.0.1')
- self.assertEqual(var['ansible_ssh_port'], 2222)
-
- def test_simple_string_ipv6(self):
- inventory = Inventory('FE80:EF45::12:1,192.168.1.1')
- hosts = inventory.list_hosts()
- self.assertEqual(sorted(hosts), sorted(['FE80:EF45::12:1','192.168.1.1']))
-
- def test_simple_string_ipv6_port(self):
- inventory = Inventory('[FE80:EF45::12:1]:2222,192.168.1.1')
- hosts = inventory.list_hosts()
- self.assertEqual(sorted(hosts), sorted(['FE80:EF45::12:1','192.168.1.1']))
-
- def test_simple_string_ipv6_vars(self):
- inventory = Inventory('[FE80:EF45::12:1]:2222,192.168.1.1')
- var = inventory.get_variables('FE80:EF45::12:1')
- self.assertEqual(var['ansible_ssh_port'], 2222)
-
- def test_simple_string_fqdn(self):
- inventory = Inventory('foo.example.com,bar.example.com')
- hosts = inventory.list_hosts()
- self.assertEqual(sorted(hosts), sorted(['foo.example.com','bar.example.com']))
-
- def test_simple_string_fqdn_port(self):
- inventory = Inventory('foo.example.com:2222,bar.example.com')
- hosts = inventory.list_hosts()
- self.assertEqual(sorted(hosts), sorted(['foo.example.com','bar.example.com']))
-
- def test_simple_string_fqdn_vars(self):
- inventory = Inventory('foo.example.com:2222,bar.example.com')
- var = inventory.get_variables('foo.example.com')
- self.assertEqual(var['ansible_ssh_port'], 2222)
-
- def test_simple_vars(self):
- inventory = self.simple_inventory()
- vars = inventory.get_variables('thor')
-
- assert vars == {'group_names': ['norse'],
- 'inventory_hostname': 'thor',
- 'inventory_hostname_short': 'thor'}
-
- def test_simple_port(self):
- inventory = self.simple_inventory()
- vars = inventory.get_variables('hera')
-
- expected = { 'ansible_ssh_port': 3000,
- 'group_names': ['greek'],
- 'inventory_hostname': 'hera',
- 'inventory_hostname_short': 'hera' }
- assert vars == expected
-
- def test_large_range(self):
- inventory = self.large_range_inventory()
- hosts = inventory.list_hosts()
- self.assertEqual(sorted(hosts), sorted('bob%03i' % i for i in range(0, 143)))
-
- def test_subset(self):
- inventory = self.simple_inventory()
- inventory.subset('odin;thor,loki')
- self.assertEqual(sorted(inventory.list_hosts()), sorted(['thor','odin','loki']))
-
- def test_subset_range(self):
- inventory = self.simple_inventory()
- inventory.subset('greek[0-2];norse[0]')
- self.assertEqual(sorted(inventory.list_hosts()), sorted(['zeus','hera','thor']))
-
- def test_subset_range_empty_group(self):
- inventory = self.simple_inventory()
- inventory.subset('missing[0]')
- self.assertEqual(sorted(inventory.list_hosts()), sorted([]))
-
- def test_subset_filename(self):
- inventory = self.simple_inventory()
- inventory.subset('@' + os.path.join(self.test_dir, 'restrict_pattern'))
- self.assertEqual(sorted(inventory.list_hosts()), sorted(['thor','odin']))
-
- def test_vars_yaml_extension(self):
- inventory = self.simple_inventory()
- vars = inventory.get_variables('goldorak')
- assert vars['YAML_FILENAME_EXTENSIONS_TEST']
-
- @raises(errors.AnsibleError)
- def test_invalid_entry(self):
- Inventory('1234')
-
- ###################################################
- ### INI file advanced tests
-
- def test_complex_vars(self):
- inventory = self.complex_inventory()
-
- vars = inventory.get_variables('rtp_a')
- print vars
-
- expected = dict(
- a=1, b=2, c=3, d=10002, e=10003, f='10004 != 10005',
- g=' g ', h=' h ', i="' i \"", j='" j',
- k=[ 'k1', 'k2' ],
- rga=1, rgb=2, rgc=3,
- inventory_hostname='rtp_a', inventory_hostname_short='rtp_a',
- group_names=[ 'eastcoast', 'nc', 'redundantgroup', 'redundantgroup2', 'redundantgroup3', 'rtp', 'us' ]
- )
- print vars
- print expected
- assert vars == expected
-
- def test_complex_group_names(self):
- inventory = self.complex_inventory()
- tests = {
- 'host1': [ 'role1', 'role3' ],
- 'host2': [ 'role1', 'role2' ],
- 'host3': [ 'role2', 'role3' ]
- }
- for host, roles in tests.iteritems():
- group_names = inventory.get_variables(host)['group_names']
- assert sorted(group_names) == sorted(roles)
-
- def test_complex_exclude(self):
- inventory = self.complex_inventory()
- hosts = inventory.list_hosts("nc:florida:!triangle:!orlando")
- expected_hosts = ['miami', 'rtp_a', 'rtp_b', 'rtp_c']
- print "HOSTS=%s" % sorted(hosts)
- print "EXPECTED=%s" % sorted(expected_hosts)
- assert sorted(hosts) == sorted(expected_hosts)
-
- def test_regex_exclude(self):
- inventory = self.complex_inventory()
- hosts = inventory.list_hosts("~rtp_[ac]")
- expected_hosts = ['rtp_a', 'rtp_c']
- print "HOSTS=%s" % sorted(hosts)
- print "EXPECTED=%s" % sorted(expected_hosts)
- assert sorted(hosts) == sorted(expected_hosts)
-
- def test_regex_grouping(self):
- inventory = self.simple_inventory()
- hosts = inventory.list_hosts("~(cer[a-z]|berc)(erus00[13])")
- expected_hosts = ['cerberus001', 'cerberus003']
- print "HOSTS=%s" % sorted(hosts)
- print "EXPECTED=%s" % sorted(expected_hosts)
- assert sorted(hosts) == sorted(expected_hosts)
-
- def test_complex_enumeration(self):
-
-
- expected1 = ['rtp_b']
- expected2 = ['rtp_a', 'rtp_b']
- expected3 = ['rtp_a', 'rtp_b', 'rtp_c', 'tri_a', 'tri_b', 'tri_c']
- expected4 = ['rtp_b', 'orlando' ]
- expected5 = ['blade-a-1']
-
- inventory = self.complex_inventory()
- hosts = inventory.list_hosts("nc[1]")
- self.compare(hosts, expected1, sort=False)
- hosts = inventory.list_hosts("nc[0-2]")
- self.compare(hosts, expected2, sort=False)
- hosts = inventory.list_hosts("nc[0-99999]")
- self.compare(hosts, expected3, sort=False)
- hosts = inventory.list_hosts("nc[1-2]:florida[0-1]")
- self.compare(hosts, expected4, sort=False)
- hosts = inventory.list_hosts("blade-a-1")
- self.compare(hosts, expected5, sort=False)
-
- def test_complex_intersect(self):
- inventory = self.complex_inventory()
- hosts = inventory.list_hosts("nc:&redundantgroup:!rtp_c")
- self.compare(hosts, ['rtp_a'])
- hosts = inventory.list_hosts("nc:&triangle:!tri_c")
- self.compare(hosts, ['tri_a', 'tri_b'])
-
- @raises(errors.AnsibleError)
- def test_invalid_range(self):
- Inventory(os.path.join(self.test_dir, 'inventory','test_incorrect_range'))
-
- @raises(errors.AnsibleError)
- def test_missing_end(self):
- Inventory(os.path.join(self.test_dir, 'inventory','test_missing_end'))
-
- @raises(errors.AnsibleError)
- def test_incorrect_format(self):
- Inventory(os.path.join(self.test_dir, 'inventory','test_incorrect_format'))
-
- @raises(errors.AnsibleError)
- def test_alpha_end_before_beg(self):
- Inventory(os.path.join(self.test_dir, 'inventory','test_alpha_end_before_beg'))
-
- def test_combined_range(self):
- i = Inventory(os.path.join(self.test_dir, 'inventory','test_combined_range'))
- hosts = i.list_hosts('test')
- expected_hosts=['host1A','host2A','host1B','host2B']
- assert sorted(hosts) == sorted(expected_hosts)
-
- def test_leading_range(self):
- i = Inventory(os.path.join(self.test_dir, 'inventory','test_leading_range'))
- hosts = i.list_hosts('test')
- expected_hosts=['1.host','2.host','A.host','B.host']
- assert sorted(hosts) == sorted(expected_hosts)
-
- hosts2 = i.list_hosts('test2')
- expected_hosts2=['1.host','2.host','3.host']
- assert sorted(hosts2) == sorted(expected_hosts2)
-
- ###################################################
- ### Inventory API tests
-
- def test_script(self):
- inventory = self.script_inventory()
- hosts = inventory.list_hosts()
-
- expected_hosts=['jupiter', 'saturn', 'zeus', 'hera', 'poseidon', 'thor', 'odin', 'loki']
-
- print "Expected: %s" % expected_hosts
- print "Got : %s" % hosts
- assert sorted(hosts) == sorted(expected_hosts)
-
- def test_script_all(self):
- inventory = self.script_inventory()
- hosts = inventory.list_hosts('all')
-
- expected_hosts=['jupiter', 'saturn', 'zeus', 'hera', 'poseidon', 'thor', 'odin', 'loki']
- assert sorted(hosts) == sorted(expected_hosts)
-
- def test_script_norse(self):
- inventory = self.script_inventory()
- hosts = inventory.list_hosts("norse")
-
- expected_hosts=['thor', 'odin', 'loki']
- assert sorted(hosts) == sorted(expected_hosts)
-
- def test_script_combined(self):
- inventory = self.script_inventory()
- hosts = inventory.list_hosts("norse:greek")
-
- expected_hosts=['zeus', 'hera', 'poseidon', 'thor', 'odin', 'loki']
- assert sorted(hosts) == sorted(expected_hosts)
-
- def test_script_restrict(self):
- inventory = self.script_inventory()
-
- restricted_hosts = ['hera', 'poseidon', 'thor']
- expected_hosts=['zeus', 'hera', 'poseidon', 'thor', 'odin', 'loki']
-
- inventory.restrict_to(restricted_hosts)
- hosts = inventory.list_hosts("norse:greek")
-
- assert sorted(hosts) == sorted(restricted_hosts)
-
- inventory.lift_restriction()
- hosts = inventory.list_hosts("norse:greek")
-
- assert sorted(hosts) == sorted(expected_hosts)
-
- def test_script_vars(self):
- inventory = self.script_inventory()
- vars = inventory.get_variables('thor')
-
- print "VARS=%s" % vars
-
- assert vars == {'hammer':True,
- 'group_names': ['norse'],
- 'inventory_hostname': 'thor',
- 'inventory_hostname_short': 'thor'}
-
- def test_hosts_list(self):
- # Test the case when playbook 'hosts' var is a list.
- inventory = self.script_inventory()
- host_names = sorted(['thor', 'loki', 'odin']) # Not sure if sorting is in the contract or not
- actual_hosts = inventory.get_hosts(host_names)
- actual_host_names = [host.name for host in actual_hosts]
- assert host_names == actual_host_names
-
- def test_script_multiple_groups(self):
- inventory = self.script_inventory()
- vars = inventory.get_variables('zeus')
-
- print "VARS=%s" % vars
-
- assert vars == {'inventory_hostname': 'zeus',
- 'inventory_hostname_short': 'zeus',
- 'group_names': ['greek', 'major-god']}
-
- def test_allows_equals_sign_in_var(self):
- inventory = self.simple_inventory()
- auth = inventory.get_variables('neptun')['auth']
- assert auth == 'YWRtaW46YWRtaW4='
-
- def test_dir_inventory(self):
- inventory = self.dir_inventory()
-
- host_vars = inventory.get_variables('zeus')
-
- expected_vars = {'inventory_hostname': 'zeus',
- 'inventory_hostname_short': 'zeus',
- 'group_names': ['greek', 'major-god'],
- 'var_a': '3#4'}
-
- print "HOST VARS=%s" % host_vars
- print "EXPECTED VARS=%s" % expected_vars
-
- assert host_vars == expected_vars
-
- def test_dir_inventory_multiple_groups(self):
- inventory = self.dir_inventory()
- group_greek = inventory.get_hosts('greek')
- actual_host_names = [host.name for host in group_greek]
- print "greek : %s " % actual_host_names
- assert actual_host_names == ['zeus', 'morpheus']
-
- def test_dir_inventory_skip_extension(self):
- inventory = self.dir_inventory()
- assert 'skipme' not in [h.name for h in inventory.get_hosts()]
-
- def test_dir_inventory_group_hosts(self):
- inventory = self.dir_inventory()
- expected_groups = {'all': ['morpheus', 'thor', 'zeus'],
- 'major-god': ['thor', 'zeus'],
- 'minor-god': ['morpheus'],
- 'norse': ['thor'],
- 'greek': ['morpheus', 'zeus'],
- 'ungrouped': []}
-
- actual_groups = {}
- for group in inventory.get_groups():
- actual_groups[group.name] = sorted([h.name for h in group.get_hosts()])
- print "INVENTORY groups[%s].hosts=%s" % (group.name, actual_groups[group.name])
- print "EXPECTED groups[%s].hosts=%s" % (group.name, expected_groups[group.name])
-
- assert actual_groups == expected_groups
-
- def test_dir_inventory_groups_for_host(self):
- inventory = self.dir_inventory()
- expected_groups_for_host = {'morpheus': ['all', 'greek', 'minor-god'],
- 'thor': ['all', 'major-god', 'norse'],
- 'zeus': ['all', 'greek', 'major-god']}
-
- actual_groups_for_host = {}
- for (host, expected) in expected_groups_for_host.iteritems():
- groups = inventory.groups_for_host(host)
- names = sorted([g.name for g in groups])
- actual_groups_for_host[host] = names
- print "INVENTORY groups_for_host(%s)=%s" % (host, names)
- print "EXPECTED groups_for_host(%s)=%s" % (host, expected)
-
- assert actual_groups_for_host == expected_groups_for_host
-
- def test_dir_inventory_groups_list(self):
- inventory = self.dir_inventory()
- inventory_groups = inventory.groups_list()
-
- expected_groups = {'all': ['morpheus', 'thor', 'zeus'],
- 'major-god': ['thor', 'zeus'],
- 'minor-god': ['morpheus'],
- 'norse': ['thor'],
- 'greek': ['morpheus', 'zeus'],
- 'ungrouped': []}
-
- for (name, expected_hosts) in expected_groups.iteritems():
- inventory_groups[name] = sorted(inventory_groups.get(name, []))
- print "INVENTORY groups_list['%s']=%s" % (name, inventory_groups[name])
- print "EXPECTED groups_list['%s']=%s" % (name, expected_hosts)
-
- assert inventory_groups == expected_groups
-
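The pattern strings used throughout these tests encode the v1 selection grammar: ':' unions groups, '!' excludes, '&' intersects, a leading '~' switches the term to a regex over host names, and '[n-m]' slices an ordered group. A toy evaluator over plain sets illustrates the set algebra the assertions check (subscripts and glob matching are omitted; terms are group names or bare host names):

    import re

    def select(groups, pattern):
        # groups maps group name -> list of hosts,
        # pattern e.g. 'nc:&redundantgroup:!rtp_c'
        all_hosts = set(h for hosts in groups.values() for h in hosts)

        def expand(term):
            # a plain term may name a group or a single known host
            if term in groups:
                return set(groups[term])
            return {term} & all_hosts

        terms = pattern.split(':')
        # a pattern opening with '!' or '&' implicitly starts from all hosts
        selected = set(all_hosts) if terms[0][:1] in ('!', '&') else set()
        for term in terms:
            if term.startswith('~'):     # regex over every known host name
                selected |= set(h for h in all_hosts if re.match(term[1:], h))
            elif term.startswith('!'):   # exclusion
                selected -= expand(term[1:])
            elif term.startswith('&'):   # intersection
                selected &= expand(term[1:])
            else:                        # union
                selected |= expand(term)
        return selected

For example, select({'nc': ['rtp_a', 'rtp_c'], 'redundantgroup': ['rtp_a']}, 'nc:&redundantgroup:!rtp_c') yields {'rtp_a'}, the same algebra test_complex_intersect asserts.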
diff --git a/v1/tests/TestModuleUtilsBasic.py b/v1/tests/TestModuleUtilsBasic.py
deleted file mode 100644
index 5b8be28307..0000000000
--- a/v1/tests/TestModuleUtilsBasic.py
+++ /dev/null
@@ -1,334 +0,0 @@
-import os
-import tempfile
-
-import unittest
-from nose.tools import raises
-from nose.tools import timed
-
-from ansible import errors
-from ansible.module_common import ModuleReplacer
-from ansible.module_utils.basic import heuristic_log_sanitize
-from ansible.utils import checksum as utils_checksum
-
-TEST_MODULE_DATA = """
-from ansible.module_utils.basic import *
-
-def get_module():
- return AnsibleModule(
- argument_spec = dict(),
- supports_check_mode = True,
- no_log = True,
- )
-
-get_module()
-
-"""
-
-class TestModuleUtilsBasic(unittest.TestCase):
-
- def cleanup_temp_file(self, fd, path):
- try:
- os.close(fd)
- os.remove(path)
- except:
- pass
-
- def cleanup_temp_dir(self, path):
- try:
- os.rmdir(path)
- except:
- pass
-
- def setUp(self):
- # create a temporary file for the test module
- # we're about to generate
- self.tmp_fd, self.tmp_path = tempfile.mkstemp()
- os.write(self.tmp_fd, TEST_MODULE_DATA)
-
- # template the module code and eval it
- module_data, module_style, shebang = ModuleReplacer().modify_module(self.tmp_path, {}, "", {})
-
- d = {}
- exec(module_data, d, d)
- self.module = d['get_module']()
-
- # module_utils/basic.py screws with CWD, let's save it and reset
- self.cwd = os.getcwd()
-
- def tearDown(self):
- self.cleanup_temp_file(self.tmp_fd, self.tmp_path)
- # Reset CWD back to what it was before basic.py changed it
- os.chdir(self.cwd)
-
- #################################################################################
- # run_command() tests
-
- # test run_command with a string command
- def test_run_command_string(self):
- (rc, out, err) = self.module.run_command("/bin/echo -n 'foo bar'")
- self.assertEqual(rc, 0)
- self.assertEqual(out, 'foo bar')
- (rc, out, err) = self.module.run_command("/bin/echo -n 'foo bar'", use_unsafe_shell=True)
- self.assertEqual(rc, 0)
- self.assertEqual(out, 'foo bar')
-
- # test run_command with an array of args (with both use_unsafe_shell=True|False)
- def test_run_command_args(self):
- (rc, out, err) = self.module.run_command(['/bin/echo', '-n', "foo bar"])
- self.assertEqual(rc, 0)
- self.assertEqual(out, 'foo bar')
- (rc, out, err) = self.module.run_command(['/bin/echo', '-n', "foo bar"], use_unsafe_shell=True)
- self.assertEqual(rc, 0)
- self.assertEqual(out, 'foo bar')
-
- # test run_command with leading environment variables
- @raises(SystemExit)
- def test_run_command_string_with_env_variables(self):
- self.module.run_command('FOO=bar /bin/echo -n "foo bar"')
-
- @raises(SystemExit)
- def test_run_command_args_with_env_variables(self):
- self.module.run_command(['FOO=bar', '/bin/echo', '-n', 'foo bar'])
-
- def test_run_command_string_unsafe_with_env_variables(self):
- (rc, out, err) = self.module.run_command('FOO=bar /bin/echo -n "foo bar"', use_unsafe_shell=True)
- self.assertEqual(rc, 0)
- self.assertEqual(out, 'foo bar')
-
- # test run_command with a command pipe (with both use_unsafe_shell=True|False)
- def test_run_command_string_unsafe_with_pipe(self):
- (rc, out, err) = self.module.run_command('echo "foo bar" | cat', use_unsafe_shell=True)
- self.assertEqual(rc, 0)
- self.assertEqual(out, 'foo bar\n')
-
- # test run_command with a shell redirect in (with both use_unsafe_shell=True|False)
- def test_run_command_string_unsafe_with_redirect_in(self):
- (rc, out, err) = self.module.run_command('cat << EOF\nfoo bar\nEOF', use_unsafe_shell=True)
- self.assertEqual(rc, 0)
- self.assertEqual(out, 'foo bar\n')
-
- # test run_command with a shell redirect out (with both use_unsafe_shell=True|False)
- def test_run_command_string_unsafe_with_redirect_out(self):
- tmp_fd, tmp_path = tempfile.mkstemp()
- try:
- (rc, out, err) = self.module.run_command('echo "foo bar" > %s' % tmp_path, use_unsafe_shell=True)
- self.assertEqual(rc, 0)
- self.assertTrue(os.path.exists(tmp_path))
- checksum = utils_checksum(tmp_path)
- self.assertEqual(checksum, 'd53a205a336e07cf9eac45471b3870f9489288ec')
- except:
- raise
- finally:
- self.cleanup_temp_file(tmp_fd, tmp_path)
-
- # test run_command with a double shell redirect out (append) (with both use_unsafe_shell=True|False)
- def test_run_command_string_unsafe_with_double_redirect_out(self):
- tmp_fd, tmp_path = tempfile.mkstemp()
- try:
- (rc, out, err) = self.module.run_command('echo "foo bar" >> %s' % tmp_path, use_unsafe_shell=True)
- self.assertEqual(rc, 0)
- self.assertTrue(os.path.exists(tmp_path))
- checksum = utils_checksum(tmp_path)
- self.assertEqual(checksum, 'd53a205a336e07cf9eac45471b3870f9489288ec')
- except:
- raise
- finally:
- self.cleanup_temp_file(tmp_fd, tmp_path)
-
- # test run_command with data
- def test_run_command_string_with_data(self):
- (rc, out, err) = self.module.run_command('cat', data='foo bar')
- self.assertEqual(rc, 0)
- self.assertEqual(out, 'foo bar\n')
-
- # test run_command with binary data
- def test_run_command_string_with_binary_data(self):
- (rc, out, err) = self.module.run_command('cat', data='\x41\x42\x43\x44', binary_data=True)
- self.assertEqual(rc, 0)
- self.assertEqual(out, 'ABCD')
-
- # test run_command with a cwd set
- def test_run_command_string_with_cwd(self):
- tmp_path = tempfile.mkdtemp()
- try:
- (rc, out, err) = self.module.run_command('pwd', cwd=tmp_path)
- self.assertEqual(rc, 0)
- self.assertTrue(os.path.exists(tmp_path))
- self.assertEqual(out.strip(), os.path.realpath(tmp_path))
- except:
- raise
- finally:
- self.cleanup_temp_dir(tmp_path)
-
-
-class TestModuleUtilsBasicHelpers(unittest.TestCase):
- ''' Test some implementation details of AnsibleModule
-
- Some pieces of AnsibleModule are implementation details but they have
- potential cornercases that we need to check. Go ahead and test at
- this level that the functions are behaving even though their API may
- change and we'd have to rewrite these tests so that we know that we
- need to check for those problems in any rewrite.
-
- In the future we might want to restructure higher level code to be
- friendlier to unittests so that we can test at the level that the public
- is interacting with the APIs.
- '''
-
- MANY_RECORDS = 7000
- URL_SECRET = 'http://username:pas:word@foo.com/data'
- SSH_SECRET = 'username:pas:word@foo.com/data'
-
- def cleanup_temp_file(self, fd, path):
- try:
- os.close(fd)
- os.remove(path)
- except:
- pass
-
- def cleanup_temp_dir(self, path):
- try:
- os.rmdir(path)
- except:
- pass
-
- def _gen_data(self, records, per_rec, top_level, secret_text):
- hostvars = {'hostvars': {}}
- for i in range(1, records, 1):
- host_facts = {'host%s' % i:
- {'pstack':
- {'running': '875.1',
- 'symlinked': '880.0',
- 'tars': [],
- 'versions': ['885.0']},
- }}
-
- if per_rec:
- host_facts['host%s' % i]['secret'] = secret_text
- hostvars['hostvars'].update(host_facts)
- if top_level:
- hostvars['secret'] = secret_text
- return hostvars
-
- def setUp(self):
- self.many_url = repr(self._gen_data(self.MANY_RECORDS, True, True,
- self.URL_SECRET))
- self.many_ssh = repr(self._gen_data(self.MANY_RECORDS, True, True,
- self.SSH_SECRET))
- self.one_url = repr(self._gen_data(self.MANY_RECORDS, False, True,
- self.URL_SECRET))
- self.one_ssh = repr(self._gen_data(self.MANY_RECORDS, False, True,
- self.SSH_SECRET))
- self.zero_secrets = repr(self._gen_data(self.MANY_RECORDS, False,
- False, ''))
- self.few_url = repr(self._gen_data(2, True, True, self.URL_SECRET))
- self.few_ssh = repr(self._gen_data(2, True, True, self.SSH_SECRET))
-
- # create a temporary file for the test module
- # we're about to generate
- self.tmp_fd, self.tmp_path = tempfile.mkstemp()
- os.write(self.tmp_fd, TEST_MODULE_DATA)
-
- # template the module code and eval it
- module_data, module_style, shebang = ModuleReplacer().modify_module(self.tmp_path, {}, "", {})
-
- d = {}
- exec(module_data, d, d)
- self.module = d['get_module']()
-
- # module_utils/basic.py screws with CWD, let's save it and reset
- self.cwd = os.getcwd()
-
- def tearDown(self):
- self.cleanup_temp_file(self.tmp_fd, self.tmp_path)
- # Reset CWD back to what it was before basic.py changed it
- os.chdir(self.cwd)
-
-
- #################################################################################
-
- #
- # Speed tests
- #
-
- # Previously, we used regexes, which had some pathologically slow cases for
- # parameters carrying large amounts of data with many ':' but no '@'. The
- # present function gets slower as the number of replacements grows, so we
- # may want to revisit regexes in the future (for substitution speed or for
- # flexibility). These speed tests should tell us if we're introducing code
- # with cases that are simply too slow.
- #
- # Some regex notes:
- # * re.sub() is faster than re.match() + str.join().
- # * We may be able to detect a large number of '@' symbols and then use
- # a regex else use the present function.
-
- @timed(5)
- def test_log_sanitize_speed_many_url(self):
- heuristic_log_sanitize(self.many_url)
-
- @timed(5)
- def test_log_sanitize_speed_many_ssh(self):
- heuristic_log_sanitize(self.many_ssh)
-
- @timed(5)
- def test_log_sanitize_speed_one_url(self):
- heuristic_log_sanitize(self.one_url)
-
- @timed(5)
- def test_log_sanitize_speed_one_ssh(self):
- heuristic_log_sanitize(self.one_ssh)
-
- @timed(5)
- def test_log_sanitize_speed_zero_secrets(self):
- heuristic_log_sanitize(self.zero_secrets)
-
- #
- # Test that the password obfuscation sanitizes somewhat cleanly.
- #
-
- def test_log_sanitize_correctness(self):
- url_data = repr(self._gen_data(3, True, True, self.URL_SECRET))
- ssh_data = repr(self._gen_data(3, True, True, self.SSH_SECRET))
-
- url_output = heuristic_log_sanitize(url_data)
- ssh_output = heuristic_log_sanitize(ssh_data)
-
- # Basic functionality: Successfully hid the password
- try:
- self.assertNotIn('pas:word', url_output)
- self.assertNotIn('pas:word', ssh_output)
-
- # Slightly more advanced, we hid all of the password despite the ":"
- self.assertNotIn('pas', url_output)
- self.assertNotIn('pas', ssh_output)
- except AttributeError:
- # python2.6 or less's unittest
- self.assertFalse('pas:word' in url_output, '%s is present in %s' % ('"pas:word"', url_output))
- self.assertFalse('pas:word' in ssh_output, '%s is present in %s' % ('"pas:word"', ssh_output))
-
- self.assertFalse('pas' in url_output, '%s is present in %s' % ('"pas"', url_output))
- self.assertFalse('pas' in ssh_output, '%s is present in %s' % ('"pas"', ssh_output))
-
- # In this implementation we replace the password with 8 "*" which is
- # also the length of our password. The url fields should be able to
- # accurately detect where the password ends so the length should be
- # the same:
- self.assertEqual(len(url_output), len(url_data))
-
- # ssh checking is harder as the heuristic is overzealous in many
- # cases. Since the input will have at least one ":" present before
- # the password we can tell some things about the beginning and end of
- # the data, though:
- self.assertTrue(ssh_output.startswith("{'"))
- self.assertTrue(ssh_output.endswith("}"))
- try:
- self.assertIn(":********@foo.com/data'", ssh_output)
- except AttributeError:
- # python2.6 or less's unittest
- self.assertTrue(":********@foo.com/data'" in ssh_output, '%s is not present in %s' % (":********@foo.com/data'", ssh_output))
-
- # The overzealous-ness here may lead to us changing the algorithm in
- # the future. We could make it consume less of the data (with the
- # possibility of leaving partial passwords exposed) and encourage
- # people to use no_log instead of relying on this obfuscation.
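The assertions above are the only real specification of heuristic_log_sanitize: any 'user:password@host' shaped run must come out with the password replaced by asterisks, even when the password itself contains ':'. A naive regex rendition of that contract (the deleted comments explain why the real function avoids regexes; the names here are illustrative):

    import re

    # match 'user:password@', where the password may contain ':' but not
    # '@', '/', or whitespace; keep the user, mask the password
    _CRED = re.compile(r'(\w+):[^@/\s]+@')

    def sanitize(text):
        return _CRED.sub(r'\1:********@', text)

    assert sanitize("http://username:pas:word@foo.com/data") == \
        "http://username:********@foo.com/data"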
diff --git a/v1/tests/TestModuleUtilsDatabase.py b/v1/tests/TestModuleUtilsDatabase.py
deleted file mode 100644
index 67da0b60e0..0000000000
--- a/v1/tests/TestModuleUtilsDatabase.py
+++ /dev/null
@@ -1,118 +0,0 @@
-import collections
-import mock
-import os
-import re
-
-from nose.tools import eq_
-try:
- from nose.tools import assert_raises_regexp
-except ImportError:
- # Python < 2.7
- def assert_raises_regexp(expected, regexp, callable, *a, **kw):
- try:
- callable(*a, **kw)
- except expected as e:
- if isinstance(regexp, basestring):
- regexp = re.compile(regexp)
- if not regexp.search(str(e)):
- raise Exception('"%s" does not match "%s"' %
- (regexp.pattern, str(e)))
- else:
- if hasattr(expected,'__name__'): excName = expected.__name__
- else: excName = str(expected)
- raise AssertionError("%s not raised" % excName)
-
-from ansible.module_utils.database import (
- pg_quote_identifier,
- SQLParseError,
-)
-
-
-# Note: Using nose's generator test cases here so we can't inherit from
-# unittest.TestCase
-class TestQuotePgIdentifier(object):
-
- # These are all valid strings
- # The results are based on interpreting the identifier as a table name
- valid = {
- # User quoted
- '"public.table"': '"public.table"',
- '"public"."table"': '"public"."table"',
- '"schema test"."table test"': '"schema test"."table test"',
-
- # We quote part
- 'public.table': '"public"."table"',
- '"public".table': '"public"."table"',
- 'public."table"': '"public"."table"',
- 'schema test.table test': '"schema test"."table test"',
- '"schema test".table test': '"schema test"."table test"',
- 'schema test."table test"': '"schema test"."table test"',
-
- # Embedded double quotes
- 'table "test"': '"table ""test"""',
- 'public."table ""test"""': '"public"."table ""test"""',
- 'public.table "test"': '"public"."table ""test"""',
- 'schema "test".table': '"schema ""test"""."table"',
- '"schema ""test""".table': '"schema ""test"""."table"',
- '"""wat"""."""test"""': '"""wat"""."""test"""',
- # Sigh, handle these as well:
- '"no end quote': '"""no end quote"',
- 'schema."table': '"schema"."""table"',
- '"schema.table': '"""schema"."table"',
- 'schema."table.something': '"schema"."""table"."something"',
-
- # Embedded dots
- '"schema.test"."table.test"': '"schema.test"."table.test"',
- '"schema.".table': '"schema."."table"',
- '"schema."."table"': '"schema."."table"',
- 'schema.".table"': '"schema".".table"',
- '"schema".".table"': '"schema".".table"',
- '"schema.".".table"': '"schema.".".table"',
- # These are valid but maybe not what the user intended
- '."table"': '".""table"""',
- 'table.': '"table."',
- }
-
- invalid = {
- ('test.too.many.dots', 'table'): 'PostgreSQL does not support table with more than 3 dots',
- ('"test.too".many.dots', 'database'): 'PostgreSQL does not support database with more than 1 dots',
- ('test.too."many.dots"', 'database'): 'PostgreSQL does not support database with more than 1 dots',
- ('"test"."too"."many"."dots"', 'database'): "PostgreSQL does not support database with more than 1 dots",
- ('"test"."too"."many"."dots"', 'schema'): "PostgreSQL does not support schema with more than 2 dots",
- ('"test"."too"."many"."dots"', 'table'): "PostgreSQL does not support table with more than 3 dots",
- ('"test"."too"."many"."dots"."for"."column"', 'column'): "PostgreSQL does not support column with more than 4 dots",
- ('"table "invalid" double quote"', 'table'): 'User escaped identifiers must escape extra quotes',
- ('"schema "invalid"""."table "invalid"', 'table'): 'User escaped identifiers must escape extra quotes',
- ('"schema."table"','table'): 'User escaped identifiers must escape extra quotes',
- ('"schema".', 'table'): 'Identifier name unspecified or unquoted trailing dot',
- }
-
- def check_valid_quotes(self, identifier, quoted_identifier):
- eq_(pg_quote_identifier(identifier, 'table'), quoted_identifier)
-
- def test_valid_quotes(self):
- for identifier in self.valid:
- yield self.check_valid_quotes, identifier, self.valid[identifier]
-
- def check_invalid_quotes(self, identifier, id_type, msg):
- assert_raises_regexp(SQLParseError, msg, pg_quote_identifier, *(identifier, id_type))
-
- def test_invalid_quotes(self):
- for test in self.invalid:
- yield self.check_invalid_quotes, test[0], test[1], self.invalid[test]
-
- def test_how_many_dots(self):
- eq_(pg_quote_identifier('role', 'role'), '"role"')
- assert_raises_regexp(SQLParseError, "PostgreSQL does not support role with more than 1 dots", pg_quote_identifier, *('role.more', 'role'))
-
- eq_(pg_quote_identifier('db', 'database'), '"db"')
- assert_raises_regexp(SQLParseError, "PostgreSQL does not support database with more than 1 dots", pg_quote_identifier, *('db.more', 'database'))
-
- eq_(pg_quote_identifier('db.schema', 'schema'), '"db"."schema"')
- assert_raises_regexp(SQLParseError, "PostgreSQL does not support schema with more than 2 dots", pg_quote_identifier, *('db.schema.more', 'schema'))
-
- eq_(pg_quote_identifier('db.schema.table', 'table'), '"db"."schema"."table"')
- assert_raises_regexp(SQLParseError, "PostgreSQL does not support table with more than 3 dots", pg_quote_identifier, *('db.schema.table.more', 'table'))
-
- eq_(pg_quote_identifier('db.schema.table.column', 'column'), '"db"."schema"."table"."column"')
- assert_raises_regexp(SQLParseError, "PostgreSQL does not support column with more than 4 dots", pg_quote_identifier, *('db.schema.table.column.more', 'column'))
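Behind the large table of cases sits one PostgreSQL rule per dotted component: wrap the component in double quotes and double any double quote it already contains. Splitting on dots while respecting user-supplied quotes is the hard part the deleted tests really exercise; the per-component rule itself is short:

    def quote_pg_identifier(component):
        # standard PostgreSQL identifier quoting: embedded '"' becomes '""'
        return '"%s"' % component.replace('"', '""')

    assert quote_pg_identifier('table "test"') == '"table ""test"""'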
diff --git a/v1/tests/TestModules.py b/v1/tests/TestModules.py
deleted file mode 100644
index aef2e83ed6..0000000000
--- a/v1/tests/TestModules.py
+++ /dev/null
@@ -1,32 +0,0 @@
-# -*- coding: utf-8 -*-
-
-import os
-import ast
-import unittest
-from ansible import utils
-
-
-class TestModules(unittest.TestCase):
-
- def list_all_modules(self):
- paths = utils.plugins.module_finder._get_paths()
- paths = [x for x in paths if os.path.isdir(x)]
- module_list = []
- for path in paths:
- for (dirpath, dirnames, filenames) in os.walk(path):
- for filename in filenames:
- (path, ext) = os.path.splitext(filename)
- if ext == ".py":
- module_list.append(os.path.join(dirpath, filename))
- return module_list
-
- def test_ast_parse(self):
- module_list = self.list_all_modules()
- ERRORS = []
- # attempt to parse each module with ast
- for m in module_list:
- try:
- ast.parse(''.join(open(m)))
- except Exception, e:
- ERRORS.append((m, e))
- assert len(ERRORS) == 0, "ast.parse errors: %s" % ERRORS
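test_ast_parse is a cheap repository-wide syntax gate: walk every module file and require ast.parse to accept it. The same idea as a standalone helper (the root path is an assumption):

    import ast
    import os

    def syntax_errors(root):
        errors = []
        for dirpath, _dirnames, filenames in os.walk(root):
            for filename in filenames:
                if not filename.endswith('.py'):
                    continue
                path = os.path.join(dirpath, filename)
                try:
                    with open(path) as f:
                        ast.parse(f.read(), filename=path)
                except SyntaxError as e:
                    errors.append((path, e))
        return errors

    # e.g.: assert not syntax_errors('lib/ansible/modules')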
diff --git a/v1/tests/TestPlayVarsFiles.py b/v1/tests/TestPlayVarsFiles.py
deleted file mode 100644
index 9d42b73e8b..0000000000
--- a/v1/tests/TestPlayVarsFiles.py
+++ /dev/null
@@ -1,390 +0,0 @@
-#!/usr/bin/env python
-
-import os
-import shutil
-from tempfile import mkstemp
-from tempfile import mkdtemp
-from ansible.playbook.play import Play
-import ansible
-
-import unittest
-from nose.plugins.skip import SkipTest
-
-
-class FakeCallBacks(object):
- def __init__(self):
- pass
- def on_vars_prompt(self):
- pass
- def on_import_for_host(self, host, filename):
- pass
-
-class FakeInventory(object):
- def __init__(self):
- self.hosts = {}
- def basedir(self):
- return "."
- def src(self):
- return "fakeinventory"
- def get_variables(self, host, vault_password=None):
- if host in self.hosts:
- return self.hosts[host]
- else:
- return {}
-
-class FakePlayBook(object):
- def __init__(self):
- self.extra_vars = {}
- self.remote_user = None
- self.remote_port = None
- self.sudo = None
- self.sudo_user = None
- self.su = None
- self.su_user = None
- self.become = None
- self.become_method = None
- self.become_user = None
- self.transport = None
- self.only_tags = None
- self.skip_tags = None
- self.force_handlers = None
- self.VARS_CACHE = {}
- self.SETUP_CACHE = {}
- self.inventory = FakeInventory()
- self.callbacks = FakeCallBacks()
-
- self.VARS_CACHE['localhost'] = {}
-
-
-class TestMe(unittest.TestCase):
-
- ########################################
- # BASIC FILE LOADING BEHAVIOR TESTS
- ########################################
-
- def test_play_constructor(self):
- # __init__(self, playbook, ds, basedir, vault_password=None)
- playbook = FakePlayBook()
- ds = { "hosts": "localhost"}
- basedir = "."
- play = Play(playbook, ds, basedir)
-
- def test_vars_file(self):
-
- # make a vars file
- fd, temp_path = mkstemp()
- f = open(temp_path, "wb")
- f.write("foo: bar\n")
- f.close()
-
- # create a play with a vars_file
- playbook = FakePlayBook()
- ds = { "hosts": "localhost",
- "vars_files": [temp_path]}
- basedir = "."
- play = Play(playbook, ds, basedir)
- os.remove(temp_path)
-
- # make sure the variable was loaded
- assert 'foo' in play.vars_file_vars, "vars_file was not loaded into play.vars_file_vars"
- assert play.vars_file_vars['foo'] == 'bar', "foo was not set to bar in play.vars_file_vars"
-
- def test_vars_file_nonlist_error(self):
-
- # make a vars file
- fd, temp_path = mkstemp()
- f = open(temp_path, "wb")
- f.write("foo: bar\n")
- f.close()
-
- # create a play with a string for vars_files
- playbook = FakePlayBook()
- ds = { "hosts": "localhost",
- "vars_files": temp_path}
- basedir = "."
- error_hit = False
- try:
- play = Play(playbook, ds, basedir)
- except:
- error_hit = True
- os.remove(temp_path)
-
- assert error_hit == True, "no error was thrown when vars_files was not a list"
-
-
- def test_multiple_vars_files(self):
-
- # make a vars file
- fd, temp_path = mkstemp()
- f = open(temp_path, "wb")
- f.write("foo: bar\n")
- f.close()
-
- # make a second vars file
- fd, temp_path2 = mkstemp()
- f = open(temp_path2, "wb")
- f.write("baz: bang\n")
- f.close()
-
-
- # create a play with two vars_files
- playbook = FakePlayBook()
- ds = { "hosts": "localhost",
- "vars_files": [temp_path, temp_path2]}
- basedir = "."
- play = Play(playbook, ds, basedir)
- os.remove(temp_path)
- os.remove(temp_path2)
-
- # make sure the variables were loaded
- assert 'foo' in play.vars_file_vars, "vars_file was not loaded into play.vars_file_vars"
- assert play.vars_file_vars['foo'] == 'bar', "foo was not set to bar in play.vars_file_vars"
- assert 'baz' in play.vars_file_vars, "vars_file2 was not loaded into play.vars_file_vars"
- assert play.vars_file_vars['baz'] == 'bang', "baz was not set to bang in play.vars_file_vars"
-
- def test_vars_files_first_found(self):
-
- # make a vars file
- fd, temp_path = mkstemp()
- f = open(temp_path, "wb")
- f.write("foo: bar\n")
- f.close()
-
- # get a random file path
- fd, temp_path2 = mkstemp()
- # make sure this file doesn't exist
- os.remove(temp_path2)
-
- # create a play
- playbook = FakePlayBook()
- ds = { "hosts": "localhost",
- "vars_files": [[temp_path2, temp_path]]}
- basedir = "."
- play = Play(playbook, ds, basedir)
- os.remove(temp_path)
-
- # make sure the variable was loaded
- assert 'foo' in play.vars_file_vars, "vars_file was not loaded into play.vars_file_vars"
- assert play.vars_file_vars['foo'] == 'bar', "foo was not set to bar in play.vars_file_vars"
-
- def test_vars_files_multiple_found(self):
-
- # make a vars file
- fd, temp_path = mkstemp()
- f = open(temp_path, "wb")
- f.write("foo: bar\n")
- f.close()
-
- # make a second vars file
- fd, temp_path2 = mkstemp()
- f = open(temp_path2, "wb")
- f.write("baz: bang\n")
- f.close()
-
- # create a play
- playbook = FakePlayBook()
- ds = { "hosts": "localhost",
- "vars_files": [[temp_path, temp_path2]]}
- basedir = "."
- play = Play(playbook, ds, basedir)
- os.remove(temp_path)
- os.remove(temp_path2)
-
- # make sure the variables were loaded
- assert 'foo' in play.vars_file_vars, "vars_file was not loaded into play.vars_file_vars"
- assert play.vars_file_vars['foo'] == 'bar', "foo was not set to bar in play.vars_file_vars"
- assert 'baz' not in play.vars_file_vars, "vars_file2 was loaded after vars_file1 was loaded"
-
- def test_vars_files_assert_all_found(self):
-
- # make a vars file
- fd, temp_path = mkstemp()
- f = open(temp_path, "wb")
- f.write("foo: bar\n")
- f.close()
-
- # make a second vars file
- fd, temp_path2 = mkstemp()
- # make sure it doesn't exist
- os.remove(temp_path2)
-
- # create a play
- playbook = FakePlayBook()
- ds = { "hosts": "localhost",
- "vars_files": [temp_path, temp_path2]}
- basedir = "."
-
- error_hit = False
- error_msg = None
-
- try:
- play = Play(playbook, ds, basedir)
- except ansible.errors.AnsibleError, e:
- error_hit = True
- error_msg = e
-
- os.remove(temp_path)
- assert error_hit == True, "no error was thrown for missing vars_file"
-
-
- ########################################
- # VARIABLE PRECEDENCE TESTS
- ########################################
-
- # On the first run, vars_files are loaded into play.vars_file_vars with host == None
- # * only files whose vars resolve with host == None will work here
- # On the secondary run(s), a host is given and the vars_files are loaded into VARS_CACHE
- # * this only occurs if host is not None, filename2 has vars in its name, and filename3 does not
-
- # filename -- the original string
- # filename2 -- filename templated with play vars
- # filename3 -- filename2 templated with inject (hostvars + setup_cache + vars_cache)
- # filename4 -- path_dwim(filename3)
-
- def test_vars_files_for_host(self):
-
- # host != None
- # vars in filename2
- # no vars in filename3
-
- # make a vars file
- fd, temp_path = mkstemp()
- f = open(temp_path, "wb")
- f.write("foo: bar\n")
- f.close()
-
- # build play attributes
- playbook = FakePlayBook()
- ds = { "hosts": "localhost",
- "vars_files": ["{{ temp_path }}"]}
- basedir = "."
- playbook.VARS_CACHE['localhost']['temp_path'] = temp_path
-
- # create play and do first run
- play = Play(playbook, ds, basedir)
-
- # the second run is started by calling update_vars_files
- play.update_vars_files(['localhost'])
- os.remove(temp_path)
-
- assert 'foo' in play.playbook.VARS_CACHE['localhost'], "vars_file vars were not loaded into vars_cache"
- assert play.playbook.VARS_CACHE['localhost']['foo'] == 'bar', "foo does not equal bar"
-
-
- ########################################
- # COMPLEX FILENAME TEMPLATING TESTS
- ########################################
-
- def test_vars_files_two_vars_in_name(self):
-
- # self.vars_file_vars = ds['vars']
- # self.vars_file_vars += _get_vars() ... aka extra_vars
-
- # make a temp dir
- temp_dir = mkdtemp()
-
- # make a temp file
- fd, temp_file = mkstemp(dir=temp_dir)
- f = open(temp_file, "wb")
- f.write("foo: bar\n")
- f.close()
-
- # build play attributes
- playbook = FakePlayBook()
- ds = { "hosts": "localhost",
- "vars": { "temp_dir": os.path.dirname(temp_file),
- "temp_file": os.path.basename(temp_file) },
- "vars_files": ["{{ temp_dir + '/' + temp_file }}"]}
- basedir = "."
-
- # create play and do first run
- play = Play(playbook, ds, basedir)
-
- # cleanup
- shutil.rmtree(temp_dir)
-
- assert 'foo' in play.vars_file_vars, "double var templated vars_files filename not loaded"
-
- def test_vars_files_two_vars_different_scope(self):
-
- #
- # Use a play var and an inventory var to create the filename
- #
-
- # self.playbook.inventory.get_variables(host)
- # {'group_names': ['ungrouped'], 'inventory_hostname': 'localhost',
- # 'ansible_ssh_user': 'root', 'inventory_hostname_short': 'localhost'}
-
- # make a temp dir
- temp_dir = mkdtemp()
-
- # make a temp file
- fd, temp_file = mkstemp(dir=temp_dir)
- f = open(temp_file, "wb")
- f.write("foo: bar\n")
- f.close()
-
- # build play attributes
- playbook = FakePlayBook()
- playbook.inventory.hosts['localhost'] = {'inventory_hostname': os.path.basename(temp_file)}
- ds = { "hosts": "localhost",
- "vars": { "temp_dir": os.path.dirname(temp_file)},
- "vars_files": ["{{ temp_dir + '/' + inventory_hostname }}"]}
- basedir = "."
-
- # create play and do first run
- play = Play(playbook, ds, basedir)
-
- # do the host run
- play.update_vars_files(['localhost'])
-
- # cleanup
- shutil.rmtree(temp_dir)
-
- assert 'foo' not in play.vars_file_vars, \
- "mixed scope vars_file loaded into play vars"
- assert 'foo' in play.playbook.VARS_CACHE['localhost'], \
- "differently scoped templated vars_files filename not loaded"
- assert play.playbook.VARS_CACHE['localhost']['foo'] == 'bar', \
- "foo is not bar"
-
- def test_vars_files_two_vars_different_scope_first_found(self):
-
- #
- # Use a play var and an inventory var to create the filename
- #
-
- # make a temp dir
- temp_dir = mkdtemp()
-
- # make a temp file
- fd, temp_file = mkstemp(dir=temp_dir)
- f = open(temp_file, "wb")
- f.write("foo: bar\n")
- f.close()
-
- # build play attributes
- playbook = FakePlayBook()
- playbook.inventory.hosts['localhost'] = {'inventory_hostname': os.path.basename(temp_file)}
- ds = { "hosts": "localhost",
- "vars": { "temp_dir": os.path.dirname(temp_file)},
- "vars_files": [["{{ temp_dir + '/' + inventory_hostname }}"]]}
- basedir = "."
-
- # create play and do first run
- play = Play(playbook, ds, basedir)
-
- # do the host run
- play.update_vars_files(['localhost'])
-
- # cleanup
- shutil.rmtree(temp_dir)
-
- assert 'foo' not in play.vars_file_vars, \
- "mixed scope vars_file loaded into play vars"
- assert 'foo' in play.playbook.VARS_CACHE['localhost'], \
- "differently scoped templated vars_files filename not loaded"
- assert play.playbook.VARS_CACHE['localhost']['foo'] == 'bar', \
- "foo is not bar"
-
-
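The first-found tests above encode the one subtle rule in vars_files handling: a plain entry is mandatory and must exist, while a nested list is scanned and only the first existing candidate is loaded. A hedged sketch of that resolution step (the real loader also templates the filenames first, as the precedence comments describe):

    import os
    import yaml

    def _load(path):
        with open(path) as f:
            return yaml.safe_load(f) or {}

    def resolve_vars_files(entries):
        loaded = {}
        for entry in entries:
            if isinstance(entry, list):
                # first-found semantics: stop at the first existing candidate
                for candidate in entry:
                    if os.path.exists(candidate):
                        loaded.update(_load(candidate))
                        break
            else:
                # a bare path is mandatory; a missing file raises IOError
                loaded.update(_load(entry))
        return loaded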
diff --git a/v1/tests/TestSynchronize.py b/v1/tests/TestSynchronize.py
deleted file mode 100644
index cf28ea5d80..0000000000
--- a/v1/tests/TestSynchronize.py
+++ /dev/null
@@ -1,176 +0,0 @@
-
-import unittest
-import getpass
-import os
-import shutil
-import time
-import tempfile
-from nose.plugins.skip import SkipTest
-
-from ansible.runner.action_plugins.synchronize import ActionModule as Synchronize
-
-class FakeRunner(object):
- def __init__(self):
- self.connection = None
- self.transport = None
- self.basedir = None
- self.sudo = None
- self.remote_user = None
- self.private_key_file = None
- self.check = False
- self.become = False
- self.become_method = 'sudo'
- self.become_user = False
-
- def _execute_module(self, conn, tmp, module_name, args,
- async_jid=None, async_module=None, async_limit=None, inject=None,
- persist_files=False, complex_args=None, delete_remote_tmp=True):
- self.executed_conn = conn
- self.executed_tmp = tmp
- self.executed_module_name = module_name
- self.executed_args = args
- self.executed_async_jid = async_jid
- self.executed_async_module = async_module
- self.executed_async_limit = async_limit
- self.executed_inject = inject
- self.executed_persist_files = persist_files
- self.executed_complex_args = complex_args
- self.executed_delete_remote_tmp = delete_remote_tmp
-
- def noop_on_check(self, inject):
- return self.check
-
-class FakeConn(object):
- def __init__(self):
- self.host = None
- self.delegate = None
-
-class TestSynchronize(unittest.TestCase):
-
-
- def test_synchronize_action_basic(self):
-
- """ verify the synchronize action plugin sets
- the delegate to 127.0.0.1 and remote path to user@host:/path """
-
- runner = FakeRunner()
- runner.remote_user = "root"
- runner.transport = "ssh"
- conn = FakeConn()
- inject = {
- 'inventory_hostname': "el6.lab.net",
- 'inventory_hostname_short': "el6",
- 'ansible_connection': None,
- 'ansible_ssh_user': 'root',
- 'delegate_to': None,
- 'playbook_dir': '.',
- }
-
- x = Synchronize(runner)
- x.setup("synchronize", inject)
- x.run(conn, "/tmp", "synchronize", "src=/tmp/foo dest=/tmp/bar", inject)
-
- assert runner.executed_inject['delegate_to'] == "127.0.0.1", "was not delegated to 127.0.0.1"
- assert runner.executed_complex_args == {"dest":"root@el6.lab.net:/tmp/bar", "src":"/tmp/foo"}, "wrong args used"
- assert runner.sudo == None, "sudo was not reset to None"
-
- def test_synchronize_action_sudo(self):
-
- """ verify the synchronize action plugin temporarily unsets and then restores become (sudo) """
-
- runner = FakeRunner()
- runner.become = True
- runner.remote_user = "root"
- runner.transport = "ssh"
- conn = FakeConn()
- inject = {
- 'inventory_hostname': "el6.lab.net",
- 'inventory_hostname_short': "el6",
- 'ansible_connection': None,
- 'ansible_ssh_user': 'root',
- 'delegate_to': None,
- 'playbook_dir': '.',
- }
-
- x = Synchronize(runner)
- x.setup("synchronize", inject)
- x.run(conn, "/tmp", "synchronize", "src=/tmp/foo dest=/tmp/bar", inject)
-
- assert runner.executed_inject['delegate_to'] == "127.0.0.1", "was not delegated to 127.0.0.1"
- assert runner.executed_complex_args == {'dest':'root@el6.lab.net:/tmp/bar',
- 'src':'/tmp/foo',
- 'rsync_path':'"sudo rsync"'}, "wrong args used"
- assert runner.become == True, "become was not reset to True"
-
-
- def test_synchronize_action_local(self):
-
- """ verify the synchronize action plugin sets
- the delegate to 127.0.0.1 and does not alter the dest """
-
- runner = FakeRunner()
- runner.remote_user = "jtanner"
- runner.transport = "paramiko"
- conn = FakeConn()
- conn.host = "127.0.0.1"
- conn.delegate = "thishost"
- inject = {
- 'inventory_hostname': "thishost",
- 'ansible_ssh_host': '127.0.0.1',
- 'ansible_connection': 'local',
- 'delegate_to': None,
- 'playbook_dir': '.',
- }
-
- x = Synchronize(runner)
- x.setup("synchronize", inject)
- x.run(conn, "/tmp", "synchronize", "src=/tmp/foo dest=/tmp/bar", inject)
-
- assert runner.transport == "paramiko", "runner transport was changed"
- assert runner.remote_user == "jtanner", "runner remote_user was changed"
- assert runner.executed_inject['delegate_to'] == "127.0.0.1", "was not delegated to 127.0.0.1"
- assert "dest_port" not in runner.executed_complex_args, "dest_port should not have been set"
- assert runner.executed_complex_args.get("src") == "/tmp/foo", "source was set incorrectly"
- assert runner.executed_complex_args.get("dest") == "/tmp/bar", "dest was set incorrectly"
-
-
- def test_synchronize_action_vagrant(self):
-
- """ Verify the action plugin accommodates the common
- scenarios for vagrant boxes. """
-
- runner = FakeRunner()
- runner.remote_user = "jtanner"
- runner.transport = "ssh"
- conn = FakeConn()
- conn.host = "127.0.0.1"
- conn.delegate = "thishost"
- inject = {
- 'inventory_hostname': "thishost",
- 'ansible_ssh_user': 'vagrant',
- 'ansible_ssh_host': '127.0.0.1',
- 'ansible_ssh_port': '2222',
- 'delegate_to': None,
- 'playbook_dir': '.',
- 'hostvars': {
- 'thishost': {
- 'inventory_hostname': 'thishost',
- 'ansible_ssh_port': '2222',
- 'ansible_ssh_host': '127.0.0.1',
- 'ansible_ssh_user': 'vagrant'
- }
- }
- }
-
- x = Synchronize(runner)
- x.setup("synchronize", inject)
- x.run(conn, "/tmp", "synchronize", "src=/tmp/foo dest=/tmp/bar", inject)
-
- assert runner.transport == "ssh", "runner transport was changed"
- assert runner.remote_user == "jtanner", "runner remote_user was changed"
- assert runner.executed_inject['delegate_to'] == "127.0.0.1", "was not delegated to 127.0.0.1"
- assert runner.executed_inject['ansible_ssh_user'] == "vagrant", "runner user was changed"
- assert runner.executed_complex_args.get("dest_port") == "2222", "remote port was not set to 2222"
- assert runner.executed_complex_args.get("src") == "/tmp/foo", "source was set incorrectly"
- assert runner.executed_complex_args.get("dest") == "vagrant@127.0.0.1:/tmp/bar", "dest was set incorrectly"
-
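The three tests above pin down the destination-rewriting rule of the synchronize action plugin: when the task is delegated to localhost, the remote end of the transfer is addressed as user@host:path, while a local connection leaves the path untouched. A minimal sketch of that rule (a hypothetical helper, not the plugin's actual code):

def build_rsync_dest(path, host=None, user=None):
    """Return an rsync-style destination spec for the given target."""
    if host is None or host in ("127.0.0.1", "localhost"):
        return path  # local push: plain path, no host prefix
    prefix = "%s@%s" % (user, host) if user else host
    return "%s:%s" % (prefix, path)

assert build_rsync_dest("/tmp/bar") == "/tmp/bar"
assert build_rsync_dest("/tmp/bar", host="el6.lab.net", user="root") == "root@el6.lab.net:/tmp/bar"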
diff --git a/v1/tests/TestUtils.py b/v1/tests/TestUtils.py
deleted file mode 100644
index c0ca9ba538..0000000000
--- a/v1/tests/TestUtils.py
+++ /dev/null
@@ -1,945 +0,0 @@
-# -*- coding: utf-8 -*-
-
-import traceback
-import unittest
-import os
-import os.path
-import re
-import tempfile
-import yaml
-import passlib.hash
-import string
-import StringIO
-import copy
-import shutil
-
-from nose.plugins.skip import SkipTest
-from mock import patch
-
-import ansible.utils
-import ansible.errors
-import ansible.constants as C
-import ansible.utils.template as template2
-from ansible.module_utils.splitter import split_args
-
-from ansible import __version__
-
-import sys
-reload(sys)
-sys.setdefaultencoding("utf8")
-
-class TestUtils(unittest.TestCase):
-
- def _is_fips(self):
- try:
- data = open('/proc/sys/crypto/fips_enabled').read().strip()
- except:
- return False
- if data != '1':
- return False
- return True
-
- def test_before_comment(self):
- ''' see if we can detect the part of a string before a comment. Used by INI parser in inventory '''
-
- input = "before # comment"
- expected = "before "
- actual = ansible.utils.before_comment(input)
- self.assertEqual(expected, actual)
-
- input = "before \# not a comment"
- expected = "before # not a comment"
- actual = ansible.utils.before_comment(input)
- self.assertEqual(expected, actual)
-
- input = ""
- expected = ""
- actual = ansible.utils.before_comment(input)
- self.assertEqual(expected, actual)
-
- input = "#"
- expected = ""
- actual = ansible.utils.before_comment(input)
- self.assertEqual(expected, actual)
-
- #####################################
- ### check_conditional tests
-
- def test_check_conditional_jinja2_literals(self):
- # see http://jinja.pocoo.org/docs/templates/#literals
-
- # none
- self.assertEqual(ansible.utils.check_conditional(
- None, '/', {}), True)
- self.assertEqual(ansible.utils.check_conditional(
- '', '/', {}), True)
-
- # list
- self.assertEqual(ansible.utils.check_conditional(
- ['true'], '/', {}), True)
- self.assertEqual(ansible.utils.check_conditional(
- ['false'], '/', {}), False)
-
- # neither a basestring nor a list
- self.assertEqual(ansible.utils.check_conditional(
- {}, '/', {}), {})
-
- # boolean
- self.assertEqual(ansible.utils.check_conditional(
- 'true', '/', {}), True)
- self.assertEqual(ansible.utils.check_conditional(
- 'false', '/', {}), False)
- self.assertEqual(ansible.utils.check_conditional(
- 'True', '/', {}), True)
- self.assertEqual(ansible.utils.check_conditional(
- 'False', '/', {}), False)
-
- # integer
- self.assertEqual(ansible.utils.check_conditional(
- '1', '/', {}), True)
- self.assertEqual(ansible.utils.check_conditional(
- '0', '/', {}), False)
-
- # string, beware, a string is truthy unless empty
- self.assertEqual(ansible.utils.check_conditional(
- '"yes"', '/', {}), True)
- self.assertEqual(ansible.utils.check_conditional(
- '"no"', '/', {}), True)
- self.assertEqual(ansible.utils.check_conditional(
- '""', '/', {}), False)
-
-
- def test_check_conditional_jinja2_variable_literals(self):
- # see http://jinja.pocoo.org/docs/templates/#literals
-
- # boolean
- self.assertEqual(ansible.utils.check_conditional(
- 'var', '/', {'var': 'True'}), True)
- self.assertEqual(ansible.utils.check_conditional(
- 'var', '/', {'var': 'true'}), True)
- self.assertEqual(ansible.utils.check_conditional(
- 'var', '/', {'var': 'False'}), False)
- self.assertEqual(ansible.utils.check_conditional(
- 'var', '/', {'var': 'false'}), False)
-
- # integer
- self.assertEqual(ansible.utils.check_conditional(
- 'var', '/', {'var': '1'}), True)
- self.assertEqual(ansible.utils.check_conditional(
- 'var', '/', {'var': 1}), True)
- self.assertEqual(ansible.utils.check_conditional(
- 'var', '/', {'var': '0'}), False)
- self.assertEqual(ansible.utils.check_conditional(
- 'var', '/', {'var': 0}), False)
-
- # string, beware, a string is truthy unless empty
- self.assertEqual(ansible.utils.check_conditional(
- 'var', '/', {'var': '"yes"'}), True)
- self.assertEqual(ansible.utils.check_conditional(
- 'var', '/', {'var': '"no"'}), True)
- self.assertEqual(ansible.utils.check_conditional(
- 'var', '/', {'var': '""'}), False)
-
- # Python boolean in Jinja2 expression
- self.assertEqual(ansible.utils.check_conditional(
- 'var', '/', {'var': True}), True)
- self.assertEqual(ansible.utils.check_conditional(
- 'var', '/', {'var': False}), False)
-
-
- def test_check_conditional_jinja2_expression(self):
- self.assertEqual(ansible.utils.check_conditional(
- '1 == 1', '/', {}), True)
- self.assertEqual(ansible.utils.check_conditional(
- 'bar == 42', '/', {'bar': 42}), True)
- self.assertEqual(ansible.utils.check_conditional(
- 'bar != 42', '/', {'bar': 42}), False)
-
-
- def test_check_conditional_jinja2_expression_in_variable(self):
- self.assertEqual(ansible.utils.check_conditional(
- 'var', '/', {'var': '1 == 1'}), True)
- self.assertEqual(ansible.utils.check_conditional(
- 'var', '/', {'var': 'bar == 42', 'bar': 42}), True)
- self.assertEqual(ansible.utils.check_conditional(
- 'var', '/', {'var': 'bar != 42', 'bar': 42}), False)
-
- def test_check_conditional_jinja2_unicode(self):
- self.assertEqual(ansible.utils.check_conditional(
- u'"\u00df"', '/', {}), True)
- self.assertEqual(ansible.utils.check_conditional(
- u'var == "\u00df"', '/', {'var': u'\u00df'}), True)
-
-
- #####################################
- ### key-value parsing
-
- def test_parse_kv_basic(self):
- self.assertEqual(ansible.utils.parse_kv('a=simple b="with space" c="this=that"'),
- {'a': 'simple', 'b': 'with space', 'c': 'this=that'})
- self.assertEqual(ansible.utils.parse_kv('msg=АБВГД'),
- {'msg': 'АБВГД'})
-
-
- def test_jsonify(self):
- self.assertEqual(ansible.utils.jsonify(None), '{}')
- self.assertEqual(ansible.utils.jsonify(dict(foo='bar', baz=['qux'])), '{"baz": ["qux"], "foo": "bar"}')
- expected = u'{"baz":["qux"],"foo":"bar"}'
- self.assertEqual("".join(ansible.utils.jsonify(dict(foo='bar', baz=['qux']), format=True).split()), expected)
-
- def test_is_failed(self):
- self.assertEqual(ansible.utils.is_failed(dict(rc=0)), False)
- self.assertEqual(ansible.utils.is_failed(dict(rc=1)), True)
- self.assertEqual(ansible.utils.is_failed(dict()), False)
- self.assertEqual(ansible.utils.is_failed(dict(failed=False)), False)
- self.assertEqual(ansible.utils.is_failed(dict(failed=True)), True)
- self.assertEqual(ansible.utils.is_failed(dict(failed='True')), True)
- self.assertEqual(ansible.utils.is_failed(dict(failed='true')), True)
-
- def test_is_changed(self):
- self.assertEqual(ansible.utils.is_changed(dict()), False)
- self.assertEqual(ansible.utils.is_changed(dict(changed=False)), False)
- self.assertEqual(ansible.utils.is_changed(dict(changed=True)), True)
- self.assertEqual(ansible.utils.is_changed(dict(changed='True')), True)
- self.assertEqual(ansible.utils.is_changed(dict(changed='true')), True)
-
- def test_path_dwim(self):
- self.assertEqual(ansible.utils.path_dwim(None, __file__),
- __file__)
- self.assertEqual(ansible.utils.path_dwim(None, '~'),
- os.path.expanduser('~'))
- self.assertEqual(ansible.utils.path_dwim(None, 'TestUtils.py'),
- __file__.rstrip('c'))
-
- def test_path_dwim_relative(self):
- self.assertEqual(ansible.utils.path_dwim_relative(__file__, 'units', 'TestUtils.py',
- os.path.dirname(os.path.dirname(__file__))),
- __file__.rstrip('c'))
-
- def test_json_loads(self):
- self.assertEqual(ansible.utils.json_loads('{"foo": "bar"}'), dict(foo='bar'))
-
- def test_parse_json(self):
- # leading junk
- self.assertEqual(ansible.utils.parse_json('ansible\n{"foo": "bar"}'), dict(foo="bar"))
-
- # No closing quotation
- try:
- rc = ansible.utils.parse_json('foo=bar "')
- print rc
- except ValueError:
- pass
- else:
- traceback.print_exc()
- raise AssertionError('Incorrect exception, expected ValueError')
-
- # Failed to parse
- try:
- ansible.utils.parse_json('{')
- except ValueError:
- pass
- else:
- raise AssertionError('Incorrect exception, expected ValueError')
-
- def test_parse_yaml(self):
- #json
- self.assertEqual(ansible.utils.parse_yaml('{"foo": "bar"}'), dict(foo='bar'))
-
- # broken json
- try:
- ansible.utils.parse_yaml('{')
- except ansible.errors.AnsibleError:
- pass
- else:
- raise AssertionError
-
- # broken json with path_hint
- try:
- ansible.utils.parse_yaml('{', path_hint='foo')
- except ansible.errors.AnsibleError:
- pass
- else:
- raise AssertionError
-
- # yaml with front-matter
- self.assertEqual(ansible.utils.parse_yaml("---\nfoo: bar"), dict(foo='bar'))
- # yaml no front-matter
- self.assertEqual(ansible.utils.parse_yaml('foo: bar'), dict(foo='bar'))
- # yaml indented first line (See #6348)
- self.assertEqual(ansible.utils.parse_yaml(' - foo: bar\n baz: qux'), [dict(foo='bar', baz='qux')])
-
- def test_process_common_errors(self):
- # no quote
- self.assertTrue('YAML thought it' in ansible.utils.process_common_errors('', 'foo: {{bar}}', 6))
-
- # extra colon
- self.assertTrue('an extra unquoted colon' in ansible.utils.process_common_errors('', 'foo: bar:', 8))
-
- # match
- self.assertTrue('same kind of quote' in ansible.utils.process_common_errors('', 'foo: "{{bar}}"baz', 6))
- self.assertTrue('same kind of quote' in ansible.utils.process_common_errors('', "foo: '{{bar}}'baz", 6))
-
- # unbalanced
- self.assertTrue('We could be wrong' in ansible.utils.process_common_errors('', 'foo: "bad" "wolf"', 6))
- self.assertTrue('We could be wrong' in ansible.utils.process_common_errors('', "foo: 'bad' 'wolf'", 6))
-
-
- def test_process_yaml_error(self):
- data = 'foo: bar\n baz: qux'
- try:
- ansible.utils.parse_yaml(data)
- except yaml.YAMLError, exc:
- try:
- ansible.utils.process_yaml_error(exc, data, __file__)
- except ansible.errors.AnsibleYAMLValidationFailed, e:
- self.assertTrue('Syntax Error while loading' in str(e))
- else:
- raise AssertionError('Incorrect exception, expected AnsibleYAMLValidationFailed')
-
- data = 'foo: bar\n baz: {{qux}}'
- try:
- ansible.utils.parse_yaml(data)
- except yaml.YAMLError, exc:
- try:
- ansible.utils.process_yaml_error(exc, data, __file__)
- except ansible.errors.AnsibleYAMLValidationFailed, e:
- self.assertTrue('Syntax Error while loading' in str(e))
- else:
- raise AssertionError('Incorrect exception, expected AnsibleYAMLValidationFailed')
-
- data = '\xFF'
- try:
- ansible.utils.parse_yaml(data)
- except yaml.YAMLError, exc:
- try:
- ansible.utils.process_yaml_error(exc, data, __file__)
- except ansible.errors.AnsibleYAMLValidationFailed, e:
- self.assertTrue('Check over' in str(e))
- else:
- raise AssertionError('Incorrect exception, expected AnsibleYAMLValidationFailed')
-
- data = '\xFF'
- try:
- ansible.utils.parse_yaml(data)
- except yaml.YAMLError, exc:
- try:
- ansible.utils.process_yaml_error(exc, data, None)
- except ansible.errors.AnsibleYAMLValidationFailed, e:
- self.assertTrue('Could not parse YAML.' in str(e))
- else:
- raise AssertionError('Incorrect exception, expected AnsibleYAMLValidationFailed')
-
- def test_parse_yaml_from_file(self):
- test = os.path.join(os.path.dirname(__file__), 'inventory_test_data',
- 'common_vars.yml')
- encrypted = os.path.join(os.path.dirname(__file__), 'inventory_test_data',
- 'encrypted.yml')
- broken = os.path.join(os.path.dirname(__file__), 'inventory_test_data',
- 'broken.yml')
-
- try:
- ansible.utils.parse_yaml_from_file(os.path.dirname(__file__))
- except ansible.errors.AnsibleError:
- pass
- else:
- raise AssertionError('Incorrect exception, expected AnsibleError')
-
- self.assertEqual(ansible.utils.parse_yaml_from_file(test), yaml.safe_load(open(test)))
-
- self.assertEqual(ansible.utils.parse_yaml_from_file(encrypted, 'ansible'), dict(foo='bar'))
-
- try:
- ansible.utils.parse_yaml_from_file(broken)
- except ansible.errors.AnsibleYAMLValidationFailed, e:
- self.assertTrue('Syntax Error while loading' in str(e))
- else:
- raise AssertionError('Incorrect exception, expected AnsibleYAMLValidationFailed')
-
- def test_merge_hash(self):
- self.assertEqual(ansible.utils.merge_hash(dict(foo='bar', baz='qux'), dict(foo='baz')),
- dict(foo='baz', baz='qux'))
- self.assertEqual(ansible.utils.merge_hash(dict(foo=dict(bar='baz')), dict(foo=dict(bar='qux'))),
- dict(foo=dict(bar='qux')))
-
- def test_md5s(self):
- if self._is_fips():
- raise SkipTest('MD5 unavailable on FIPS enabled systems')
- self.assertEqual(ansible.utils.md5s('ansible'), '640c8a5376aa12fa15cf02130ce239a6')
- # Need a test that causes UnicodeEncodeError See 4221
-
- def test_md5(self):
- if self._is_fips():
- raise SkipTest('MD5 unavailable on FIPS enabled systems')
- self.assertEqual(ansible.utils.md5(os.path.join(os.path.dirname(__file__), 'ansible.cfg')),
- 'fb7b5b90ea63f04bde33e804b6fad42c')
- self.assertEqual(ansible.utils.md5(os.path.join(os.path.dirname(__file__), 'ansible.cf')),
- None)
-
- def test_checksum_s(self):
- self.assertEqual(ansible.utils.checksum_s('ansible'), 'bef45157a43c9e5f469d188810814a4a8ab9f2ed')
- # Need a test that causes UnicodeEncodeError See 4221
-
- def test_checksum(self):
- self.assertEqual(ansible.utils.checksum(os.path.join(os.path.dirname(__file__), 'ansible.cfg')),
- '658b67c8ac7595adde7048425ff1f9aba270721a')
- self.assertEqual(ansible.utils.checksum(os.path.join(os.path.dirname(__file__), 'ansible.cf')),
- None)
-
- def test_default(self):
- self.assertEqual(ansible.utils.default(None, lambda: {}), {})
- self.assertEqual(ansible.utils.default(dict(foo='bar'), lambda: {}), dict(foo='bar'))
-
- def test__gitinfo(self):
- # this fails if not run from git clone
- # self.assertEqual('last updated' in ansible.utils._gitinfo())
- # missing test for git submodule
- # missing test outside of git clone
- pass
-
- def test_version(self):
- version = ansible.utils.version('ansible')
- self.assertTrue(version.startswith('ansible %s' % __version__))
- # this fails if not run from git clone
- # self.assertEqual('last updated' in version)
-
- def test_getch(self):
- # figure out how to test this
- pass
-
- def test_sanitize_output(self):
- self.assertEqual(ansible.utils.sanitize_output('password=foo'), 'password=VALUE_HIDDEN')
- self.assertEqual(ansible.utils.sanitize_output('foo=user:pass@foo/whatever'),
- 'foo=user:********@foo/whatever')
- self.assertEqual(ansible.utils.sanitize_output('foo=http://username:pass@wherever/foo'),
- 'foo=http://username:********@wherever/foo')
- self.assertEqual(ansible.utils.sanitize_output('foo=http://wherever/foo'),
- 'foo=http://wherever/foo')
-
- def test_increment_debug(self):
- ansible.utils.VERBOSITY = 0
- ansible.utils.increment_debug(None, None, None, None)
- self.assertEqual(ansible.utils.VERBOSITY, 1)
-
- def test_base_parser(self):
- output = ansible.utils.base_parser(output_opts=True)
- self.assertTrue(output.has_option('--one-line') and output.has_option('--tree'))
-
- runas = ansible.utils.base_parser(runas_opts=True)
- for opt in ['--sudo', '--sudo-user', '--user', '--su', '--su-user']:
- self.assertTrue(runas.has_option(opt))
-
- async = ansible.utils.base_parser(async_opts=True)
- self.assertTrue(async.has_option('--poll') and async.has_option('--background'))
-
- connect = ansible.utils.base_parser(connect_opts=True)
- self.assertTrue(connect.has_option('--connection'))
-
- subset = ansible.utils.base_parser(subset_opts=True)
- self.assertTrue(subset.has_option('--limit'))
-
- check = ansible.utils.base_parser(check_opts=True)
- self.assertTrue(check.has_option('--check'))
-
- diff = ansible.utils.base_parser(diff_opts=True)
- self.assertTrue(diff.has_option('--diff'))
-
- def test_do_encrypt(self):
- salt_chars = string.ascii_letters + string.digits + './'
- salt = ansible.utils.random_password(length=8, chars=salt_chars)
- hash = ansible.utils.do_encrypt('ansible', 'sha256_crypt', salt=salt)
- self.assertTrue(passlib.hash.sha256_crypt.verify('ansible', hash))
-
- hash = ansible.utils.do_encrypt('ansible', 'sha256_crypt')
- self.assertTrue(passlib.hash.sha256_crypt.verify('ansible', hash))
-
- try:
- ansible.utils.do_encrypt('ansible', 'ansible')
- except ansible.errors.AnsibleError:
- pass
- else:
- raise AssertionError('Incorrect exception, expected AnsibleError')
-
- def test_do_encrypt_md5(self):
- if self._is_fips():
- raise SkipTest('MD5 unavailable on FIPS systems')
- hash = ansible.utils.do_encrypt('ansible', 'md5_crypt', salt_size=4)
- self.assertTrue(passlib.hash.md5_crypt.verify('ansible', hash))
-
- def test_last_non_blank_line(self):
- self.assertEqual(ansible.utils.last_non_blank_line('a\n\nb\n\nc'), 'c')
- self.assertEqual(ansible.utils.last_non_blank_line(''), '')
-
- def test_filter_leading_non_json_lines(self):
- self.assertEqual(ansible.utils.filter_leading_non_json_lines('a\nb\nansible!\n{"foo": "bar"}'),
- '{"foo": "bar"}\n')
- self.assertEqual(ansible.utils.filter_leading_non_json_lines('a\nb\nansible!\n["foo", "bar"]'),
- '["foo", "bar"]\n')
-
- def test_boolean(self):
- self.assertEqual(ansible.utils.boolean("true"), True)
- self.assertEqual(ansible.utils.boolean("True"), True)
- self.assertEqual(ansible.utils.boolean("TRUE"), True)
- self.assertEqual(ansible.utils.boolean("t"), True)
- self.assertEqual(ansible.utils.boolean("T"), True)
- self.assertEqual(ansible.utils.boolean("Y"), True)
- self.assertEqual(ansible.utils.boolean("y"), True)
- self.assertEqual(ansible.utils.boolean("1"), True)
- self.assertEqual(ansible.utils.boolean(1), True)
- self.assertEqual(ansible.utils.boolean("false"), False)
- self.assertEqual(ansible.utils.boolean("False"), False)
- self.assertEqual(ansible.utils.boolean("0"), False)
- self.assertEqual(ansible.utils.boolean(0), False)
- self.assertEqual(ansible.utils.boolean("foo"), False)
-
- def test_make_sudo_cmd(self):
- cmd = ansible.utils.make_sudo_cmd(C.DEFAULT_SUDO_EXE, 'root', '/bin/sh', '/bin/ls')
- self.assertTrue(isinstance(cmd, tuple))
- self.assertEqual(len(cmd), 3)
- self.assertTrue('-u root' in cmd[0])
- self.assertTrue('-p "[sudo via ansible, key=' in cmd[0] and cmd[1].startswith('[sudo via ansible, key'))
- self.assertTrue('echo BECOME-SUCCESS-' in cmd[0] and cmd[2].startswith('BECOME-SUCCESS-'))
- self.assertTrue('sudo -k' in cmd[0])
-
- def test_make_su_cmd(self):
- cmd = ansible.utils.make_su_cmd('root', '/bin/sh', '/bin/ls')
- self.assertTrue(isinstance(cmd, tuple))
- self.assertEqual(len(cmd), 3)
- self.assertTrue('root -c "/bin/sh' in cmd[0] or ' root -c /bin/sh' in cmd[0])
- self.assertTrue('echo BECOME-SUCCESS-' in cmd[0] and cmd[2].startswith('BECOME-SUCCESS-'))
-
- def test_to_unicode(self):
- uni = ansible.utils.unicode.to_unicode(u'ansible')
- self.assertTrue(isinstance(uni, unicode))
- self.assertEqual(uni, u'ansible')
-
- none = ansible.utils.unicode.to_unicode(None, nonstring='passthru')
- self.assertTrue(isinstance(none, type(None)))
- self.assertTrue(none is None)
-
- utf8 = ansible.utils.unicode.to_unicode('ansible')
- self.assertTrue(isinstance(utf8, unicode))
- self.assertEqual(utf8, u'ansible')
-
- def test_is_list_of_strings(self):
- self.assertEqual(ansible.utils.is_list_of_strings(['foo', 'bar', u'baz']), True)
- self.assertEqual(ansible.utils.is_list_of_strings(['foo', 'bar', True]), False)
- self.assertEqual(ansible.utils.is_list_of_strings(['one', 2, 'three']), False)
-
- def test_contains_vars(self):
- self.assertTrue(ansible.utils.contains_vars('{{foo}}'))
- self.assertTrue(ansible.utils.contains_vars('$foo'))
- self.assertFalse(ansible.utils.contains_vars('foo'))
-
- def test_safe_eval(self):
- # Not basestring
- self.assertEqual(ansible.utils.safe_eval(len), len)
- self.assertEqual(ansible.utils.safe_eval(1), 1)
- self.assertEqual(ansible.utils.safe_eval(len, include_exceptions=True), (len, None))
- self.assertEqual(ansible.utils.safe_eval(1, include_exceptions=True), (1, None))
-
- # module
- self.assertEqual(ansible.utils.safe_eval('foo.bar('), 'foo.bar(')
- self.assertEqual(ansible.utils.safe_eval('foo.bar(', include_exceptions=True), ('foo.bar(', None))
-
- # import
- self.assertEqual(ansible.utils.safe_eval('import foo'), 'import foo')
- self.assertEqual(ansible.utils.safe_eval('import foo', include_exceptions=True), ('import foo', None))
-
- # valid simple eval
- self.assertEqual(ansible.utils.safe_eval('True'), True)
- self.assertEqual(ansible.utils.safe_eval('True', include_exceptions=True), (True, None))
-
- # valid eval with lookup
- self.assertEqual(ansible.utils.safe_eval('foo + bar', dict(foo=1, bar=2)), 3)
- self.assertEqual(ansible.utils.safe_eval('foo + bar', dict(foo=1, bar=2), include_exceptions=True), (3, None))
-
- # invalid eval
- self.assertEqual(ansible.utils.safe_eval('foo'), 'foo')
- nameerror = ansible.utils.safe_eval('foo', include_exceptions=True)
- self.assertTrue(isinstance(nameerror, tuple))
- self.assertEqual(nameerror[0], 'foo')
- self.assertTrue(isinstance(nameerror[1], NameError))
-
- def test_listify_lookup_plugin_terms(self):
- basedir = os.path.dirname(__file__)
- # Straight lookups
- #self.assertEqual(ansible.utils.listify_lookup_plugin_terms('things', basedir, dict(things=[])), [])
- #self.assertEqual(ansible.utils.listify_lookup_plugin_terms('things', basedir, dict(things=['one', 'two'])), ['one', 'two'])
-
- def test_deprecated(self):
- sys_stderr = sys.stderr
- sys.stderr = StringIO.StringIO()
- ansible.utils.deprecated('Ack!', '0.0')
- out = sys.stderr.getvalue()
- self.assertTrue('0.0' in out)
- self.assertTrue('[DEPRECATION WARNING]' in out)
-
- sys.stderr = StringIO.StringIO()
- ansible.utils.deprecated('Ack!', None)
- out = sys.stderr.getvalue()
- self.assertTrue('0.0' not in out)
- self.assertTrue('[DEPRECATION WARNING]' in out)
-
- sys.stderr = StringIO.StringIO()
- warnings = C.DEPRECATION_WARNINGS
- C.DEPRECATION_WARNINGS = False
- ansible.utils.deprecated('Ack!', None)
- out = sys.stderr.getvalue()
- self.assertTrue(not out)
- C.DEPRECATION_WARNINGS = warnings
-
- sys.stderr = sys_stderr
-
- try:
- ansible.utils.deprecated('Ack!', '0.0', True)
- except ansible.errors.AnsibleError, e:
- self.assertTrue('0.0' not in str(e))
- self.assertTrue('[DEPRECATED]' in str(e))
- else:
- raise AssertionError("Incorrect exception, expected AnsibleError")
-
- def test_warning(self):
- sys_stderr = sys.stderr
- sys.stderr = StringIO.StringIO()
- ansible.utils.warning('ANSIBLE')
- out = sys.stderr.getvalue()
- sys.stderr = sys_stderr
- self.assertTrue('[WARNING]: ANSIBLE' in out)
-
- def test_combine_vars(self):
- one = {'foo': {'bar': True}, 'baz': {'one': 'qux'}}
- two = {'baz': {'two': 'qux'}}
- replace = {'baz': {'two': 'qux'}, 'foo': {'bar': True}}
- merge = {'baz': {'two': 'qux', 'one': 'qux'}, 'foo': {'bar': True}}
-
- C.DEFAULT_HASH_BEHAVIOUR = 'replace'
- self.assertEqual(ansible.utils.combine_vars(one, two), replace)
-
- C.DEFAULT_HASH_BEHAVIOUR = 'merge'
- self.assertEqual(ansible.utils.combine_vars(one, two), merge)
-
- def test_err(self):
- sys_stderr = sys.stderr
- sys.stderr = StringIO.StringIO()
- ansible.utils.err('ANSIBLE')
- out = sys.stderr.getvalue()
- sys.stderr = sys_stderr
- self.assertEqual(out, 'ANSIBLE\n')
-
- def test_exit(self):
- sys_stderr = sys.stderr
- sys.stderr = StringIO.StringIO()
- try:
- ansible.utils.exit('ansible')
- except SystemExit, e:
- self.assertEqual(e.code, 1)
- self.assertEqual(sys.stderr.getvalue(), 'ansible\n')
- else:
- raise AssertionError('Incorrect exception, expected SystemExit')
- finally:
- sys.stderr = sys_stderr
-
- def test_unfrackpath(self):
- os.environ['TEST_ROOT'] = os.path.dirname(os.path.dirname(__file__))
- self.assertEqual(ansible.utils.unfrackpath('$TEST_ROOT/units/../units/TestUtils.py'), __file__.rstrip('c'))
-
- def test_is_executable(self):
- self.assertEqual(ansible.utils.is_executable(__file__), 0)
-
- bin_ansible = os.path.join(os.path.dirname(os.path.dirname(os.path.dirname(__file__))),
- 'bin', 'ansible')
- self.assertNotEqual(ansible.utils.is_executable(bin_ansible), 0)
-
- def test_get_diff(self):
- standard = dict(
- before_header='foo',
- after_header='bar',
- before='fooo',
- after='foo'
- )
-
- standard_expected = """--- before: foo
-+++ after: bar
-@@ -1 +1 @@
--fooo+foo"""
-
- # workaround py26 and py27 difflib differences
- standard_expected = """-fooo+foo"""
- diff = ansible.utils.get_diff(standard)
- diff = diff.split('\n')
- del diff[0]
- del diff[0]
- del diff[0]
- diff = '\n'.join(diff)
- self.assertEqual(diff, unicode(standard_expected))
-
- def test_split_args(self):
- # split_args is a smarter shlex.split for the needs of the way ansible uses it
-
- def _split_info(input, desired, actual):
- print "SENT: ", input
- print "WANT: ", desired
- print "GOT: ", actual
-
- def _test_combo(input, desired):
- actual = split_args(input)
- _split_info(input, desired, actual)
- assert actual == desired
-
- # trivial splitting
- _test_combo('a b=c d=f', ['a', 'b=c', 'd=f' ])
-
- # mixed quotes
- _test_combo('a b=\'c\' d="e" f=\'g\'', ['a', "b='c'", 'd="e"', "f='g'" ])
-
- # with spaces
- # FIXME: this fails, commenting out only for now
- # _test_combo('a "\'one two three\'"', ['a', "'one two three'" ])
-
- # TODO: ...
- # jinja2 preservation
- _test_combo('a {{ y }} z', ['a', '{{ y }}', 'z' ])
-
- # jinja2 preservation with spaces and filters and other hard things
- _test_combo(
- 'a {{ x | filter(\'moo\', \'param\') }} z {{ chicken }} "waffles"',
- ['a', "{{ x | filter('moo', 'param') }}", 'z', '{{ chicken }}', '"waffles"']
- )
-
- # invalid quote detection
- self.assertRaises(Exception, split_args, 'hey I started a quote"')
- self.assertRaises(Exception, split_args, 'hey I started a\' quote')
-
- # jinja2 loop blocks with lots of complexity
- _test_combo(
- # in memory of neighbor's cat
- # we preserve line breaks unless a line continuation character precedes them
- 'a {% if x %} y {%else %} {{meow}} {% endif %} "cookie\nchip" \\\ndone\nand done',
- ['a', '{% if x %}', 'y', '{%else %}', '{{meow}}', '{% endif %}', '"cookie\nchip"', 'done\n', 'and', 'done']
- )
-
- # test space preservation within quotes
- _test_combo(
- 'content="1 2 3 4 " foo=bar',
- ['content="1 2 3 4 "', 'foo=bar']
- )
-
- # invalid jinja2 nesting detection
- # invalid quote nesting detection
-
- def test_clean_data(self):
- # clean data removes jinja2 tags from data
- self.assertEqual(
- ansible.utils._clean_data('this is a normal string', from_remote=True),
- 'this is a normal string'
- )
- self.assertEqual(
- ansible.utils._clean_data('this string has a {{variable}}', from_remote=True),
- 'this string has a {#variable#}'
- )
- self.assertEqual(
- ansible.utils._clean_data('this string {{has}} two {{variables}} in it', from_remote=True),
- 'this string {#has#} two {#variables#} in it'
- )
- self.assertEqual(
- ansible.utils._clean_data('this string has a {{variable with a\nnewline}}', from_remote=True),
- 'this string has a {#variable with a\nnewline#}'
- )
- self.assertEqual(
- ansible.utils._clean_data('this string is from inventory {{variable}}', from_inventory=True),
- 'this string is from inventory {{variable}}'
- )
- self.assertEqual(
- ansible.utils._clean_data('this string is from inventory too but uses lookup {{lookup("foo","bar")}}', from_inventory=True),
- 'this string is from inventory too but uses lookup {#lookup("foo","bar")#}'
- )
- self.assertEqual(
- ansible.utils._clean_data('this string has JSON in it: {"foo":{"bar":{"baz":"oops"}}}', from_remote=True),
- 'this string has JSON in it: {"foo":{"bar":{"baz":"oops"}}}'
- )
- self.assertEqual(
- ansible.utils._clean_data('this string contains unicode: ¢ £ ¤ ¥', from_remote=True),
- 'this string contains unicode: ¢ £ ¤ ¥'
- )
-
-
- def test_censor_unlogged_data(self):
- ''' used by the no_log attribute '''
- input = dict(
- password='sekrit',
- rc=12,
- failed=True,
- changed=False,
- skipped=True,
- msg='moo',
- )
- data = ansible.utils.censor_unlogged_data(input)
- assert 'password' not in data
- assert 'rc' in data
- assert 'failed' in data
- assert 'changed' in data
- assert 'skipped' in data
- assert 'msg' not in data
- assert data['censored'] == 'results hidden due to no_log parameter'
-
- def test_repo_url_to_role_name(self):
- tests = [("http://git.example.com/repos/repo.git", "repo"),
- ("ssh://git@git.example.com:repos/role-name", "role-name"),
- ("ssh://git@git.example.com:repos/role-name,v0.1", "role-name"),
- ("directory/role/is/installed/in", "directory/role/is/installed/in")]
- for (url, result) in tests:
- self.assertEqual(ansible.utils.repo_url_to_role_name(url), result)
-
- def test_role_spec_parse(self):
- tests = [
- (
- "git+http://git.example.com/repos/repo.git,v1.0",
- {
- 'scm': 'git',
- 'src': 'http://git.example.com/repos/repo.git',
- 'version': 'v1.0',
- 'name': 'repo'
- }
- ),
- (
- "http://repo.example.com/download/tarfile.tar.gz",
- {
- 'scm': None,
- 'src': 'http://repo.example.com/download/tarfile.tar.gz',
- 'version': '',
- 'name': 'tarfile'
- }
- ),
- (
- "http://repo.example.com/download/tarfile.tar.gz,,nicename",
- {
- 'scm': None,
- 'src': 'http://repo.example.com/download/tarfile.tar.gz',
- 'version': '',
- 'name': 'nicename'
- }
- ),
- (
- "git+http://git.example.com/repos/repo.git,v1.0,awesome",
- {
- 'scm': 'git',
- 'src': 'http://git.example.com/repos/repo.git',
- 'version': 'v1.0',
- 'name': 'awesome'
- }
- ),
- (
- # test that http://github URLs are assumed git+http:// unless they end in .tar.gz
- "http://github.com/ansible/fakerole/fake",
- {
- 'scm' : 'git',
- 'src' : 'http://github.com/ansible/fakerole/fake',
- 'version' : 'master',
- 'name' : 'fake'
- }
- ),
- (
- # test that http://github URLs are assumed git+http:// unless they end in .tar.gz
- "http://github.com/ansible/fakerole/fake/archive/master.tar.gz",
- {
- 'scm' : None,
- 'src' : 'http://github.com/ansible/fakerole/fake/archive/master.tar.gz',
- 'version' : '',
- 'name' : 'master'
- }
- )
- ]
- for (spec, result) in tests:
- self.assertEqual(ansible.utils.role_spec_parse(spec), result)
-
- def test_role_yaml_parse(self):
- tests = (
- (
- # Old style
- {
- 'role': 'debops.elasticsearch',
- 'name': 'elks'
- },
- {
- 'role': 'debops.elasticsearch',
- 'name': 'elks',
- 'scm': None,
- 'src': 'debops.elasticsearch',
- 'version': '',
- }
- ),
- (
- {
- 'role': 'debops.elasticsearch,1.0,elks',
- 'my_param': 'foo'
- },
- {
- 'role': 'debops.elasticsearch,1.0,elks',
- 'name': 'elks',
- 'scm': None,
- 'src': 'debops.elasticsearch',
- 'version': '1.0',
- 'my_param': 'foo',
- }
- ),
- (
- {
- 'role': 'debops.elasticsearch,1.0',
- 'my_param': 'foo'
- },
- {
- 'role': 'debops.elasticsearch,1.0',
- 'name': 'debops.elasticsearch',
- 'scm': None,
- 'src': 'debops.elasticsearch',
- 'version': '1.0',
- 'my_param': 'foo',
- }
- ),
- # New style
- (
- {
- 'src': 'debops.elasticsearch',
- 'name': 'elks',
- 'my_param': 'foo'
- },
- {
- 'name': 'elks',
- 'scm': None,
- 'src': 'debops.elasticsearch',
- 'version': '',
- 'my_param': 'foo'
- }
- ),
- )
-
- for (role, result) in tests:
- self.assertEqual(ansible.utils.role_yaml_parse(role), result)
-
- @patch('ansible.utils.plugins.module_finder._get_paths')
- def test_find_plugin(self, mock_get_paths):
-
- tmp_path = tempfile.mkdtemp()
- mock_get_paths.return_value = [tmp_path,]
- right_module_1 = 'module.py'
- right_module_2 = 'module_without_extension'
- wrong_module_1 = 'folder'
- wrong_module_2 = 'inexistent'
- path_right_module_1 = os.path.join(tmp_path, right_module_1)
- path_right_module_2 = os.path.join(tmp_path, right_module_2)
- path_wrong_module_1 = os.path.join(tmp_path, wrong_module_1)
- open(path_right_module_1, 'w').close()
- open(path_right_module_2, 'w').close()
- os.mkdir(path_wrong_module_1)
-
- self.assertEqual(ansible.utils.plugins.module_finder.find_plugin(right_module_1),
- path_right_module_1)
- self.assertEqual(ansible.utils.plugins.module_finder.find_plugin(right_module_2),
- path_right_module_2)
- self.assertEqual(ansible.utils.plugins.module_finder.find_plugin(wrong_module_1),
- None)
- self.assertEqual(ansible.utils.plugins.module_finder.find_plugin(wrong_module_2),
- None)
-
- shutil.rmtree(tmp_path)
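test_boolean above fixes the truth table for Ansible's string-to-boolean coercion. A minimal re-implementation sketch of that table (illustrative only; ansible.utils.boolean had its own implementation):

def boolean(value):
    # bools pass through; of the ints, only 1 is true
    if isinstance(value, bool):
        return value
    if isinstance(value, int):
        return value == 1
    # everything else is matched as a case-insensitive truthy string
    return str(value).lower() in ("true", "t", "y", "yes", "1")

assert boolean("TRUE") and boolean("t") and boolean("y") and boolean(1)
assert not boolean("False") and not boolean("0") and not boolean("foo")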
diff --git a/v1/tests/TestUtilsStringFunctions.py b/v1/tests/TestUtilsStringFunctions.py
deleted file mode 100644
index cccedf280d..0000000000
--- a/v1/tests/TestUtilsStringFunctions.py
+++ /dev/null
@@ -1,33 +0,0 @@
-# -*- coding: utf-8 -*-
-
-import unittest
-import os
-import os.path
-import tempfile
-import yaml
-import passlib.hash
-import string
-import StringIO
-import copy
-
-from nose.plugins.skip import SkipTest
-
-from ansible.utils import string_functions
-import ansible.errors
-import ansible.constants as C
-import ansible.utils.template as template2
-
-from ansible import __version__
-
-import sys
-reload(sys)
-sys.setdefaultencoding("utf8")
-
-class TestUtilsStringFunctions(unittest.TestCase):
- def test_isprintable(self):
- self.assertFalse(string_functions.isprintable(chr(7)))
- self.assertTrue(string_functions.isprintable('hello'))
-
- def test_count_newlines_from_end(self):
- self.assertEqual(string_functions.count_newlines_from_end('foo\n\n\n\n'), 4)
- self.assertEqual(string_functions.count_newlines_from_end('\nfoo'), 0)
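The two helpers covered above are small enough to sketch in full. These are illustrative Python 3 re-implementations, not the originals from ansible.utils.string_functions; the isprintable variant assumes tabs and newlines count as printable:

def isprintable(text):
    return all(c.isprintable() or c in "\t\n" for c in text)

def count_newlines_from_end(text):
    count = 0
    while count < len(text) and text[-(count + 1)] == "\n":
        count += 1
    return count

assert not isprintable(chr(7)) and isprintable("hello")
assert count_newlines_from_end("foo\n\n\n\n") == 4
assert count_newlines_from_end("\nfoo") == 0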
diff --git a/v1/tests/TestVault.py b/v1/tests/TestVault.py
deleted file mode 100644
index b720d72e84..0000000000
--- a/v1/tests/TestVault.py
+++ /dev/null
@@ -1,147 +0,0 @@
-#!/usr/bin/env python
-
-from unittest import TestCase
-import getpass
-import os
-import shutil
-import time
-import tempfile
-from binascii import unhexlify
-from binascii import hexlify
-from nose.plugins.skip import SkipTest
-
-from ansible import errors
-from ansible.utils.vault import VaultLib
-
-# Counter import fails for 2.0.1, requires >= 2.6.1 from pip
-try:
- from Crypto.Util import Counter
- HAS_COUNTER = True
-except ImportError:
- HAS_COUNTER = False
-
-# KDF import fails for 2.0.1, requires >= 2.6.1 from pip
-try:
- from Crypto.Protocol.KDF import PBKDF2
- HAS_PBKDF2 = True
-except ImportError:
- HAS_PBKDF2 = False
-
-# AES IMPORTS
-try:
- from Crypto.Cipher import AES as AES
- HAS_AES = True
-except ImportError:
- HAS_AES = False
-
-class TestVaultLib(TestCase):
-
- def _is_fips(self):
- try:
- data = open('/proc/sys/crypto/fips_enabled').read().strip()
- except:
- return False
- if data != '1':
- return False
- return True
-
- def test_methods_exist(self):
- v = VaultLib('ansible')
- slots = ['is_encrypted',
- 'encrypt',
- 'decrypt',
- '_add_header',
- '_split_header',]
- for slot in slots:
- assert hasattr(v, slot), "VaultLib is missing the %s method" % slot
-
- def test_is_encrypted(self):
- v = VaultLib(None)
- assert not v.is_encrypted("foobar"), "encryption check on plaintext failed"
- data = "$ANSIBLE_VAULT;9.9;TEST\n%s" % hexlify("ansible")
- assert v.is_encrypted(data), "encryption check on headered text failed"
-
- def test_add_header(self):
- v = VaultLib('ansible')
- v.cipher_name = "TEST"
- sensitive_data = "ansible"
- data = v._add_header(sensitive_data)
- lines = data.split('\n')
- assert len(lines) > 1, "failed to properly add header"
- header = lines[0]
- assert header.endswith(';TEST'), "header does not end with cipher name"
- header_parts = header.split(';')
- assert len(header_parts) == 3, "header has the wrong number of parts"
- assert header_parts[0] == '$ANSIBLE_VAULT', "header does not start with $ANSIBLE_VAULT"
- assert header_parts[1] == v.version, "header version is incorrect"
- assert header_parts[2] == 'TEST', "header cipher name is incorrect"
-
- def test_split_header(self):
- v = VaultLib('ansible')
- data = "$ANSIBLE_VAULT;9.9;TEST\nansible"
- rdata = v._split_header(data)
- lines = rdata.split('\n')
- assert lines[0] == "ansible"
- assert v.cipher_name == 'TEST', "cipher name was not set"
- assert v.version == "9.9"
-
- def test_encrypt_decrypt_aes(self):
- if self._is_fips():
- raise SkipTest('MD5 not available on FIPS enabled systems')
- if not HAS_AES or not HAS_COUNTER or not HAS_PBKDF2:
- raise SkipTest
- v = VaultLib('ansible')
- v.cipher_name = 'AES'
- enc_data = v.encrypt("foobar")
- dec_data = v.decrypt(enc_data)
- assert enc_data != "foobar", "encryption failed"
- assert dec_data == "foobar", "decryption failed"
-
- def test_encrypt_decrypt_aes256(self):
- if not HAS_AES or not HAS_COUNTER or not HAS_PBKDF2:
- raise SkipTest
- v = VaultLib('ansible')
- v.cipher_name = 'AES256'
- enc_data = v.encrypt("foobar")
- dec_data = v.decrypt(enc_data)
- assert enc_data != "foobar", "encryption failed"
- assert dec_data == "foobar", "decryption failed"
-
- def test_encrypt_encrypted(self):
- if not HAS_AES or not HAS_COUNTER or not HAS_PBKDF2:
- raise SkipTest
- v = VaultLib('ansible')
- v.cipher_name = 'AES'
- data = "$ANSIBLE_VAULT;9.9;TEST\n%s" % hexlify("ansible")
- error_hit = False
- try:
- enc_data = v.encrypt(data)
- except errors.AnsibleError, e:
- error_hit = True
- assert error_hit, "No error was thrown when trying to encrypt data with a header"
-
- def test_decrypt_decrypted(self):
- if not HAS_AES or not HAS_COUNTER or not HAS_PBKDF2:
- raise SkipTest
- v = VaultLib('ansible')
- data = "ansible"
- error_hit = False
- try:
- dec_data = v.decrypt(data)
- except errors.AnsibleError, e:
- error_hit = True
- assert error_hit, "No error was thrown when trying to decrypt data without a header"
-
- def test_cipher_not_set(self):
- # not setting the cipher should default to AES256
- if not HAS_AES or not HAS_COUNTER or not HAS_PBKDF2:
- raise SkipTest
- v = VaultLib('ansible')
- data = "ansible"
- error_hit = False
- try:
- enc_data = v.encrypt(data)
- except errors.AnsibleError, e:
- error_hit = True
- assert not error_hit, "An error was thrown when trying to encrypt data without the cipher set"
- assert v.cipher_name == "AES256", "cipher name is not set to AES256: %s" % v.cipher_name
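test_add_header and test_split_header above pin down the 1.x vault envelope: a single header line of the form $ANSIBLE_VAULT;<version>;<cipher>, followed by the hex-encoded payload. A minimal parsing sketch (illustrative, not VaultLib's code):

def split_vault_header(text):
    header, _, body = text.partition("\n")
    tag, version, cipher = header.split(";")
    if tag != "$ANSIBLE_VAULT":
        raise ValueError("data is not vault-formatted")
    return version, cipher, body

assert split_vault_header("$ANSIBLE_VAULT;9.9;TEST\nansible") == ("9.9", "TEST", "ansible")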
diff --git a/v1/tests/TestVaultEditor.py b/v1/tests/TestVaultEditor.py
deleted file mode 100644
index cfa5bc13e6..0000000000
--- a/v1/tests/TestVaultEditor.py
+++ /dev/null
@@ -1,180 +0,0 @@
-#!/usr/bin/env python
-
-from unittest import TestCase
-import getpass
-import os
-import shutil
-import time
-import tempfile
-from binascii import unhexlify
-from binascii import hexlify
-from nose.plugins.skip import SkipTest
-
-from ansible import errors
-from ansible.utils.vault import VaultLib
-from ansible.utils.vault import VaultEditor
-
-# Counter import fails for 2.0.1, requires >= 2.6.1 from pip
-try:
- from Crypto.Util import Counter
- HAS_COUNTER = True
-except ImportError:
- HAS_COUNTER = False
-
-# KDF import fails for 2.0.1, requires >= 2.6.1 from pip
-try:
- from Crypto.Protocol.KDF import PBKDF2
- HAS_PBKDF2 = True
-except ImportError:
- HAS_PBKDF2 = False
-
-# AES IMPORTS
-try:
- from Crypto.Cipher import AES as AES
- HAS_AES = True
-except ImportError:
- HAS_AES = False
-
-class TestVaultEditor(TestCase):
-
- def _is_fips(self):
- try:
- data = open('/proc/sys/crypto/fips_enabled').read().strip()
- except:
- return False
- if data != '1':
- return False
- return True
-
- def test_methods_exist(self):
- v = VaultEditor(None, None, None)
- slots = ['create_file',
- 'decrypt_file',
- 'edit_file',
- 'encrypt_file',
- 'rekey_file',
- 'read_data',
- 'write_data',
- 'shuffle_files']
- for slot in slots:
- assert hasattr(v, slot), "VaultEditor is missing the %s method" % slot
-
- def test_decrypt_1_0(self):
- if self._is_fips():
- raise SkipTest('Vault-1.0 will not function on FIPS enabled systems')
- if not HAS_AES or not HAS_COUNTER or not HAS_PBKDF2:
- raise SkipTest
- dirpath = tempfile.mkdtemp()
- filename = os.path.join(dirpath, "foo-ansible-1.0.yml")
- shutil.rmtree(dirpath)
- shutil.copytree("vault_test_data", dirpath)
- ve = VaultEditor(None, "ansible", filename)
-
- # make sure the password functions for the cipher
- error_hit = False
- try:
- ve.decrypt_file()
- except errors.AnsibleError, e:
- error_hit = True
-
- # verify decrypted content
- f = open(filename, "rb")
- fdata = f.read()
- f.close()
-
- shutil.rmtree(dirpath)
- assert error_hit == False, "error decrypting 1.0 file"
- assert fdata.strip() == "foo", "incorrect decryption of 1.0 file: %s" % fdata.strip()
-
- def test_decrypt_1_1_newline(self):
- if not HAS_AES or not HAS_COUNTER or not HAS_PBKDF2:
- raise SkipTest
- dirpath = tempfile.mkdtemp()
- filename = os.path.join(dirpath, "foo-ansible-1.1-ansible-newline-ansible.yml")
- shutil.rmtree(dirpath)
- shutil.copytree("vault_test_data", dirpath)
- ve = VaultEditor(None, "ansible\nansible\n", filename)
-
- # make sure the password functions for the cipher
- error_hit = False
- try:
- ve.decrypt_file()
- except errors.AnsibleError, e:
- error_hit = True
-
- # verify decrypted content
- f = open(filename, "rb")
- fdata = f.read()
- f.close()
-
- shutil.rmtree(dirpath)
- assert error_hit == False, "error decrypting 1.1 file with newline in password"
- #assert fdata.strip() == "foo", "incorrect decryption of 1.1 file: %s" % fdata.strip()
-
-
- def test_decrypt_1_1(self):
- if not HAS_AES or not HAS_COUNTER or not HAS_PBKDF2:
- raise SkipTest
- dirpath = tempfile.mkdtemp()
- filename = os.path.join(dirpath, "foo-ansible-1.1.yml")
- shutil.rmtree(dirpath)
- shutil.copytree("vault_test_data", dirpath)
- ve = VaultEditor(None, "ansible", filename)
-
- # make sure the password functions for the cipher
- error_hit = False
- try:
- ve.decrypt_file()
- except errors.AnsibleError, e:
- error_hit = True
-
- # verify decrypted content
- f = open(filename, "rb")
- fdata = f.read()
- f.close()
-
- shutil.rmtree(dirpath)
- assert error_hit == False, "error decrypting 1.1 file"
- assert fdata.strip() == "foo", "incorrect decryption of 1.1 file: %s" % fdata.strip()
-
-
- def test_rekey_migration(self):
- if self._is_fips():
- raise SkipTest('Vault-1.0 will not function on FIPS enabled systems')
- if not HAS_AES or not HAS_COUNTER or not HAS_PBKDF2:
- raise SkipTest
- dirpath = tempfile.mkdtemp()
- filename = os.path.join(dirpath, "foo-ansible-1.0.yml")
- shutil.rmtree(dirpath)
- shutil.copytree("vault_test_data", dirpath)
- ve = VaultEditor(None, "ansible", filename)
-
- # make sure the password functions for the cipher
- error_hit = False
- try:
- ve.rekey_file('ansible2')
- except errors.AnsibleError, e:
- error_hit = True
-
- # verify decrypted content
- f = open(filename, "rb")
- fdata = f.read()
- f.close()
-
- shutil.rmtree(dirpath)
- assert error_hit == False, "error rekeying 1.0 file to 1.1"
-
- # ensure filedata can be decrypted, is 1.1 and is AES256
- vl = VaultLib("ansible2")
- dec_data = None
- error_hit = False
- try:
- dec_data = vl.decrypt(fdata)
- except errors.AnsibleError, e:
- error_hit = True
-
- assert vl.cipher_name == "AES256", "wrong cipher name set after rekey: %s" % vl.cipher_name
- assert error_hit == False, "error decrypting migrated 1.0 file"
- assert dec_data.strip() == "foo", "incorrect decryption of rekeyed/migrated file: %s" % dec_data
-
-
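Each editor test above repeats the same fixture dance: shutil.copytree() (before Python 3.8) refuses to copy into an existing directory, so the test creates a temp directory to reserve a unique name, removes it, then copies the vault_test_data tree into place. A sketch of that pattern:

import shutil
import tempfile

def copy_fixture_tree(src):
    dirpath = tempfile.mkdtemp()   # reserve a unique directory name
    shutil.rmtree(dirpath)         # free it again; copytree must create the target
    shutil.copytree(src, dirpath)  # copy the fixture tree into the fresh path
    return dirpath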
diff --git a/v1/tests/ansible.cfg b/v1/tests/ansible.cfg
deleted file mode 100644
index dd99b8102d..0000000000
--- a/v1/tests/ansible.cfg
+++ /dev/null
@@ -1,3 +0,0 @@
-[defaults]
-
-test_key = test_value
diff --git a/v1/tests/inventory_test_data/ansible_hosts b/v1/tests/inventory_test_data/ansible_hosts
deleted file mode 100644
index 94074edc3c..0000000000
--- a/v1/tests/inventory_test_data/ansible_hosts
+++ /dev/null
@@ -1,2 +0,0 @@
-[somegroup]
-localhost
diff --git a/v1/tests/inventory_test_data/broken.yml b/v1/tests/inventory_test_data/broken.yml
deleted file mode 100644
index 0eccc1ba78..0000000000
--- a/v1/tests/inventory_test_data/broken.yml
+++ /dev/null
@@ -1,2 +0,0 @@
-foo: bar
- baz: qux
diff --git a/v1/tests/inventory_test_data/common_vars.yml b/v1/tests/inventory_test_data/common_vars.yml
deleted file mode 100644
index c4c09b67f2..0000000000
--- a/v1/tests/inventory_test_data/common_vars.yml
+++ /dev/null
@@ -1,4 +0,0 @@
----
-duck: quack
-cow: moo
-extguard: " '$favcolor' == 'blue' "
diff --git a/v1/tests/inventory_test_data/complex_hosts b/v1/tests/inventory_test_data/complex_hosts
deleted file mode 100644
index 34935c6330..0000000000
--- a/v1/tests/inventory_test_data/complex_hosts
+++ /dev/null
@@ -1,96 +0,0 @@
-# order of groups, children, and vars is not significant
-# so this example mixes them up for maximum testing
-
-[nc:children]
-rtp
-triangle
-
-[eastcoast:children]
-nc
-florida
-
-[us:children]
-eastcoast
-
-[redundantgroup]
-rtp_a
-
-[redundantgroup2]
-rtp_a
-
-[redundantgroup3:children]
-rtp
-
-[redundantgroup:vars]
-rga=1
-
-[redundantgroup2:vars]
-rgb=2
-
-[redundantgroup3:vars]
-rgc=3
-
-[nc:vars]
-b=10000
-c=10001
-d=10002
-e = 10003
- f = 10004 != 10005
- g = " g "
- h = ' h '
- i = ' i "
- j = " j
- k = ['k1', 'k2']
-
-[rtp]
-rtp_a
-rtp_b
-rtp_c
-
-[rtp:vars]
-a=1
-b=2
-c=3
-
-[triangle]
-tri_a
-tri_b
-tri_c
-
-[triangle:vars]
-a=11
-b=12
-c=13
-
-[florida]
-orlando
-miami
-
-[florida:vars]
-a=100
-b=101
-c=102
-
-
-[eastcoast:vars]
-b=100000
-c=100001
-d=100002
-
-[us:vars]
-c=1000000
-
-[role1]
-host[1:2]
-
-[role2]
-host[2:3]
-
-[role3]
-host[1:3:2]
-
-[role4]
-blade-[a:c]-[1:16]
-blade-[d:z]-[01:16].example.com
-blade-[1:10]-[1:16]
-host-e-[10:16].example.net:1234
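The [role1] through [role4] groups above exercise INI-inventory hostname ranges: a [start:end] or [start:end:step] bracket, numeric or alphabetic, is multiplied out into concrete hostnames, and a numeric start with leading zeros fixes the padding width. A simplified expansion sketch (illustrative, not Ansible's parser; it skips the error cases the test_incorrect_* fixtures below cover):

import itertools
import re
import string

def expand_hostname_range(pattern):
    # literals land at even indexes, bracket contents at odd indexes
    parts = re.split(r"\[([^\]]+)\]", pattern)
    choices = []
    for i, part in enumerate(parts):
        if i % 2 == 0:
            choices.append([part])
            continue
        fields = part.split(":")
        beg, end = fields[0], fields[1]
        step = int(fields[2]) if len(fields) > 2 else 1
        if beg.isdigit():
            width = len(beg) if beg.startswith("0") else 0
            nums = range(int(beg), int(end) + 1, step)
            choices.append([str(n).zfill(width) for n in nums])
        else:
            letters = string.ascii_lowercase if beg.islower() else string.ascii_uppercase
            choices.append(list(letters[letters.index(beg):letters.index(end) + 1:step]))
    return ["".join(combo) for combo in itertools.product(*choices)]

assert expand_hostname_range("host[1:3:2]") == ["host1", "host3"]
assert expand_hostname_range("blade-[a:b]-[1:2]") == [
    "blade-a-1", "blade-a-2", "blade-b-1", "blade-b-2"]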
diff --git a/v1/tests/inventory_test_data/encrypted.yml b/v1/tests/inventory_test_data/encrypted.yml
deleted file mode 100644
index ca33ab25cb..0000000000
--- a/v1/tests/inventory_test_data/encrypted.yml
+++ /dev/null
@@ -1,6 +0,0 @@
-$ANSIBLE_VAULT;1.1;AES256
-33343734386261666161626433386662623039356366656637303939306563376130623138626165
-6436333766346533353463636566313332623130383662340a393835656134633665333861393331
-37666233346464636263636530626332623035633135363732623332313534306438393366323966
-3135306561356164310a343937653834643433343734653137383339323330626437313562306630
-3035
diff --git a/v1/tests/inventory_test_data/hosts_list.yml b/v1/tests/inventory_test_data/hosts_list.yml
deleted file mode 100644
index 09c5ca7c17..0000000000
--- a/v1/tests/inventory_test_data/hosts_list.yml
+++ /dev/null
@@ -1,6 +0,0 @@
-# Test that playbooks support YAML lists of hosts.
----
-- hosts: [host1, host2, host3]
- connection: local
- tasks:
- - action: command true
diff --git a/v1/tests/inventory_test_data/inventory/test_alpha_end_before_beg b/v1/tests/inventory_test_data/inventory/test_alpha_end_before_beg
deleted file mode 100644
index 1b7a478d87..0000000000
--- a/v1/tests/inventory_test_data/inventory/test_alpha_end_before_beg
+++ /dev/null
@@ -1,2 +0,0 @@
-[test]
-host[Z:T]
diff --git a/v1/tests/inventory_test_data/inventory/test_combined_range b/v1/tests/inventory_test_data/inventory/test_combined_range
deleted file mode 100644
index cbcb41753e..0000000000
--- a/v1/tests/inventory_test_data/inventory/test_combined_range
+++ /dev/null
@@ -1,2 +0,0 @@
-[test]
-host[1:2][A:B]
diff --git a/v1/tests/inventory_test_data/inventory/test_incorrect_format b/v1/tests/inventory_test_data/inventory/test_incorrect_format
deleted file mode 100644
index 339bd59edf..0000000000
--- a/v1/tests/inventory_test_data/inventory/test_incorrect_format
+++ /dev/null
@@ -1,2 +0,0 @@
-[test]
-host[001:10]
diff --git a/v1/tests/inventory_test_data/inventory/test_incorrect_range b/v1/tests/inventory_test_data/inventory/test_incorrect_range
deleted file mode 100644
index 272ca7be71..0000000000
--- a/v1/tests/inventory_test_data/inventory/test_incorrect_range
+++ /dev/null
@@ -1,2 +0,0 @@
-[test]
-host[1:2:3:4]
diff --git a/v1/tests/inventory_test_data/inventory/test_leading_range b/v1/tests/inventory_test_data/inventory/test_leading_range
deleted file mode 100644
index bf390de42a..0000000000
--- a/v1/tests/inventory_test_data/inventory/test_leading_range
+++ /dev/null
@@ -1,6 +0,0 @@
-[test]
-[1:2].host
-[A:B].host
-
-[test2] # comment
-[1:3].host
diff --git a/v1/tests/inventory_test_data/inventory/test_missing_end b/v1/tests/inventory_test_data/inventory/test_missing_end
deleted file mode 100644
index ff32042402..0000000000
--- a/v1/tests/inventory_test_data/inventory/test_missing_end
+++ /dev/null
@@ -1,2 +0,0 @@
-[test]
-host[1:]
diff --git a/v1/tests/inventory_test_data/inventory_api.py b/v1/tests/inventory_test_data/inventory_api.py
deleted file mode 100644
index 9bdca22ed3..0000000000
--- a/v1/tests/inventory_test_data/inventory_api.py
+++ /dev/null
@@ -1,44 +0,0 @@
-#!/usr/bin/env python
-
-import json
-import sys
-
-from optparse import OptionParser
-
-parser = OptionParser()
-parser.add_option('-l', '--list', default=False, dest="list_hosts", action="store_true")
-parser.add_option('-H', '--host', default=None, dest="host")
-parser.add_option('-e', '--extra-vars', default=None, dest="extra")
-
-options, args = parser.parse_args()
-
-systems = {
- "ungrouped": [ "jupiter", "saturn" ],
- "greek": [ "zeus", "hera", "poseidon" ],
- "norse": [ "thor", "odin", "loki" ],
- "major-god": [ "zeus", "odin" ],
-}
-
-variables = {
- "thor": {
- "hammer": True
- },
- "zeus": {},
-}
-
-if options.list_hosts:
- print json.dumps(systems)
- sys.exit(0)
-
-if options.host is not None:
- if options.extra:
- k,v = options.extra.split("=")
- variables[options.host][k] = v
- if options.host in variables:
- print json.dumps(variables[options.host])
- else:
- print "{}"
- sys.exit(0)
-
-parser.print_help()
-sys.exit(1)
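The script above implements the external-inventory contract: called with --list it must print a JSON mapping of group names to host lists, and with --host <name> a JSON dict of that host's variables. A minimal caller sketch (hypothetical paths; assumes the script is saved as inventory_api.py in the working directory):

import json
import subprocess

groups = json.loads(subprocess.check_output(
    ["python", "inventory_api.py", "--list"]))
assert "zeus" in groups["greek"]

hostvars = json.loads(subprocess.check_output(
    ["python", "inventory_api.py", "--host", "thor"]))
assert hostvars == {"hammer": True}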
diff --git a/v1/tests/inventory_test_data/inventory_dir/0hosts b/v1/tests/inventory_test_data/inventory_dir/0hosts
deleted file mode 100644
index 6f78a33a22..0000000000
--- a/v1/tests/inventory_test_data/inventory_dir/0hosts
+++ /dev/null
@@ -1,3 +0,0 @@
-zeus var_a=0
-morpheus
-thor
diff --git a/v1/tests/inventory_test_data/inventory_dir/1mythology b/v1/tests/inventory_test_data/inventory_dir/1mythology
deleted file mode 100644
index 43fa181bd5..0000000000
--- a/v1/tests/inventory_test_data/inventory_dir/1mythology
+++ /dev/null
@@ -1,6 +0,0 @@
-[greek]
-zeus
-morpheus
-
-[norse]
-thor
diff --git a/v1/tests/inventory_test_data/inventory_dir/2levels b/v1/tests/inventory_test_data/inventory_dir/2levels
deleted file mode 100644
index 363294923e..0000000000
--- a/v1/tests/inventory_test_data/inventory_dir/2levels
+++ /dev/null
@@ -1,6 +0,0 @@
-[major-god]
-zeus var_a=2
-thor
-
-[minor-god]
-morpheus
diff --git a/v1/tests/inventory_test_data/inventory_dir/3comments b/v1/tests/inventory_test_data/inventory_dir/3comments
deleted file mode 100644
index e11b5e416b..0000000000
--- a/v1/tests/inventory_test_data/inventory_dir/3comments
+++ /dev/null
@@ -1,8 +0,0 @@
-[major-god] # group with inline comments
-zeus var_a="3\#4" # host with inline comments and "#" in the var string
-# A comment
-thor
-
-[minor-god] # group with inline comment and unbalanced quotes: ' "
-morpheus # host with inline comments and unbalanced quotes: ' "
-# A comment with unbalanced quotes: ' "
diff --git a/v1/tests/inventory_test_data/inventory_dir/4skip_extensions.ini b/v1/tests/inventory_test_data/inventory_dir/4skip_extensions.ini
deleted file mode 100644
index a30afe5fcc..0000000000
--- a/v1/tests/inventory_test_data/inventory_dir/4skip_extensions.ini
+++ /dev/null
@@ -1,2 +0,0 @@
-[skip]
-skipme
\ No newline at end of file
diff --git a/v1/tests/inventory_test_data/large_range b/v1/tests/inventory_test_data/large_range
deleted file mode 100644
index 18cfc22078..0000000000
--- a/v1/tests/inventory_test_data/large_range
+++ /dev/null
@@ -1 +0,0 @@
-bob[000:142]
diff --git a/v1/tests/inventory_test_data/restrict_pattern b/v1/tests/inventory_test_data/restrict_pattern
deleted file mode 100644
index fb16b4dda5..0000000000
--- a/v1/tests/inventory_test_data/restrict_pattern
+++ /dev/null
@@ -1,2 +0,0 @@
-odin
-thor
diff --git a/v1/tests/inventory_test_data/simple_hosts b/v1/tests/inventory_test_data/simple_hosts
deleted file mode 100644
index 08c62b4537..0000000000
--- a/v1/tests/inventory_test_data/simple_hosts
+++ /dev/null
@@ -1,28 +0,0 @@
-jupiter
-saturn
-thrudgelmir[:5]
-
-[greek]
-zeus
-hera:3000
-poseidon
-cerberus[001:003]
-cottus[99:100]
-
-[norse]
-thor
-odin
-loki
-
-[egyptian]
-Hotep-[a:c]
-Bast[C:D]
-
-[auth]
-neptun auth="YWRtaW46YWRtaW4="
-
-[parse:children]
-noparse
-
-[noparse]
-goldorak
diff --git a/v1/tests/module_tests/TestApt.py b/v1/tests/module_tests/TestApt.py
deleted file mode 100644
index e7f2dafc95..0000000000
--- a/v1/tests/module_tests/TestApt.py
+++ /dev/null
@@ -1,42 +0,0 @@
-import collections
-import mock
-import os
-import unittest
-
-from ansible.modules.core.packaging.os.apt import (
- expand_pkgspec_from_fnmatches,
-)
-
-
-class AptExpandPkgspecTestCase(unittest.TestCase):
-
- def setUp(self):
- FakePackage = collections.namedtuple("Package", ("name",))
- self.fake_cache = [ FakePackage("apt"),
- FakePackage("apt-utils"),
- FakePackage("not-selected"),
- ]
-
- def test_trivial(self):
- foo = ["apt"]
- self.assertEqual(
- expand_pkgspec_from_fnmatches(None, foo, self.fake_cache), foo)
-
- def test_version_wildcard(self):
- foo = ["apt=1.0*"]
- self.assertEqual(
- expand_pkgspec_from_fnmatches(None, foo, self.fake_cache), foo)
-
- def test_pkgname_wildcard_version_wildcard(self):
- foo = ["apt*=1.0*"]
- m_mock = mock.Mock()
- self.assertEqual(
- expand_pkgspec_from_fnmatches(m_mock, foo, self.fake_cache),
- ['apt', 'apt-utils'])
-
- def test_pkgname_expands(self):
- foo = ["apt*"]
- m_mock = mock.Mock()
- self.assertEqual(
- expand_pkgspec_from_fnmatches(m_mock, foo, self.fake_cache),
- ["apt", "apt-utils"])
diff --git a/v1/tests/module_tests/TestDocker.py b/v1/tests/module_tests/TestDocker.py
deleted file mode 100644
index b8c8cf1e23..0000000000
--- a/v1/tests/module_tests/TestDocker.py
+++ /dev/null
@@ -1,19 +0,0 @@
-import collections
-import os
-import unittest
-
-from ansible.modules.core.cloud.docker.docker import get_split_image_tag
-
-class DockerSplitImageTagTestCase(unittest.TestCase):
-
- def test_trivial(self):
- self.assertEqual(get_split_image_tag('test'), ('test', 'latest'))
-
- def test_with_org_name(self):
- self.assertEqual(get_split_image_tag('ansible/centos7-ansible'), ('ansible/centos7-ansible', 'latest'))
-
- def test_with_tag(self):
- self.assertEqual(get_split_image_tag('test:devel'), ('test', 'devel'))
-
- def test_with_tag_and_org_name(self):
- self.assertEqual(get_split_image_tag('ansible/centos7-ansible:devel'), ('ansible/centos7-ansible', 'devel'))
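The four cases above fix the splitting rule: everything after the last colon is the tag, defaulting to latest when no tag is given. An illustrative re-implementation (the slash check guards registry host:port prefixes, an assumption beyond what these tests cover):

def split_image_tag(image):
    repo, sep, tag = image.rpartition(":")
    if not sep or "/" in tag:  # no colon, or the colon belonged to a registry host:port
        return image, "latest"
    return repo, tag

assert split_image_tag("test") == ("test", "latest")
assert split_image_tag("test:devel") == ("test", "devel")
assert split_image_tag("ansible/centos7-ansible") == ("ansible/centos7-ansible", "latest")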
diff --git a/v1/tests/vault_test_data/foo-ansible-1.0.yml b/v1/tests/vault_test_data/foo-ansible-1.0.yml
deleted file mode 100644
index f71ddf10ce..0000000000
--- a/v1/tests/vault_test_data/foo-ansible-1.0.yml
+++ /dev/null
@@ -1,4 +0,0 @@
-$ANSIBLE_VAULT;1.0;AES
-53616c7465645f5fd0026926a2d415a28a2622116273fbc90e377225c12a347e1daf4456d36a77f9
-9ad98d59f61d06a4b66718d855f16fb7bdfe54d1ec8aeaa4d06c2dc1fa630ae1846a029877f0eeb1
-83c62ffb04c2512995e815de4b4d29ed
diff --git a/v1/tests/vault_test_data/foo-ansible-1.1-ansible-newline-ansible.yml b/v1/tests/vault_test_data/foo-ansible-1.1-ansible-newline-ansible.yml
deleted file mode 100644
index 6e025a1c40..0000000000
--- a/v1/tests/vault_test_data/foo-ansible-1.1-ansible-newline-ansible.yml
+++ /dev/null
@@ -1,6 +0,0 @@
-$ANSIBLE_VAULT;1.1;AES256
-61333063333663376535373431643063613232393438623732643966613962363563383132363631
-3235363730623635323039623439343561313566313361630a313632643338613636303637623765
-64356531643630303636323064336439393335313836366235336464633635376339663830333232
-6338353337663139320a646632386131646431656165656338633535386535623236393265373634
-37656134633661333935346434363237613435323865356234323264663838643931
diff --git a/v1/tests/vault_test_data/foo-ansible-1.1.yml b/v1/tests/vault_test_data/foo-ansible-1.1.yml
deleted file mode 100644
index d9a4a448a6..0000000000
--- a/v1/tests/vault_test_data/foo-ansible-1.1.yml
+++ /dev/null
@@ -1,6 +0,0 @@
-$ANSIBLE_VAULT;1.1;AES256
-62303130653266653331306264616235333735323636616539316433666463323964623162386137
-3961616263373033353631316333623566303532663065310a393036623466376263393961326530
-64336561613965383835646464623865663966323464653236343638373165343863623638316664
-3631633031323837340a396530313963373030343933616133393566366137363761373930663833
-3739